Example usage for java.util.concurrent ArrayBlockingQueue ArrayBlockingQueue

Introduction

On this page you can find example usage for the java.util.concurrent ArrayBlockingQueue constructor ArrayBlockingQueue(int capacity).

Prototype

public ArrayBlockingQueue(int capacity) 

Document

Creates an ArrayBlockingQueue with the given (fixed) capacity and default access policy.
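
Before the full examples, here is a minimal, self-contained sketch of the constructor in action (class and variable names are illustrative, not taken from the examples below). It shows what the fixed capacity implies: blocking inserts via put(), and non-blocking inserts via offer(), which simply returns false on a full queue.

import java.util.concurrent.ArrayBlockingQueue;

public class ArrayBlockingQueueSketch {
    public static void main(String[] args) throws InterruptedException {
        // Capacity is fixed at construction time and can never grow.
        ArrayBlockingQueue<String> queue = new ArrayBlockingQueue<String>(2);

        queue.put("a");                       // blocking insert
        queue.offer("b");                     // non-blocking insert; queue is now full
        System.out.println(queue.offer("c")); // false -- a full queue refuses instead of blocking

        System.out.println(queue.take());     // "a" -- blocking removal, FIFO order
        System.out.println(queue.poll());     // "b" -- non-blocking removal
        System.out.println(queue.poll());     // null -- queue is empty
    }
}

A second constructor, ArrayBlockingQueue(int capacity, boolean fair), selects a fair access policy instead of the default unfair one.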

Usage

From source file:com.ibm.crail.tools.CrailBenchmark.java

void collectionTest(int size, int loop) throws Exception {
    System.out.println("collectionTest, size " + size + ", loop " + loop);

    RingBuffer<Object> ringBuffer = new RingBuffer<Object>(10);
    ArrayBlockingQueue<Object> arrayQueue = new ArrayBlockingQueue<Object>(10);
    LinkedBlockingQueue<Object> listQueue = new LinkedBlockingQueue<Object>();

    Object obj = new Object();
    long start = System.currentTimeMillis();
    for (int i = 0; i < loop; i++) {
        for (int j = 0; j < size; j++) {
            ringBuffer.add(obj);
            Object tmp = ringBuffer.peek();
            tmp = ringBuffer.poll();
        }
    }
    long end = System.currentTimeMillis();
    double executionTime = ((double) (end - start));
    System.out.println("ringbuffer, execution time [ms] " + executionTime);

    start = System.currentTimeMillis();
    for (int i = 0; i < loop; i++) {
        for (int j = 0; j < size; j++) {
            arrayQueue.add(obj);
            Object tmp = arrayQueue.peek();
            tmp = arrayQueue.poll();
        }
    }
    end = System.currentTimeMillis();
    executionTime = ((double) (end - start));
    System.out.println("arrayQueue, execution time [ms] " + executionTime);

    start = System.currentTimeMillis();
    for (int i = 0; i < loop; i++) {
        for (int j = 0; j < size; j++) {
            listQueue.add(obj);
            Object tmp = listQueue.peek();
            tmp = listQueue.poll();
        }
    }
    end = System.currentTimeMillis();
    executionTime = ((double) (end - start));
    System.out.println("listQueue, execution time [ms] " + executionTime);
}
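
Because the benchmark above drains the queue on every iteration, add() never encounters a full queue. A minimal sketch of the failure mode it avoids (an assumed illustration, not part of the Crail source): add() throws IllegalStateException once the fixed capacity is reached, whereas offer() returns false instead.

import java.util.concurrent.ArrayBlockingQueue;

public class CapacityOverflowSketch {
    public static void main(String[] args) {
        ArrayBlockingQueue<Object> q = new ArrayBlockingQueue<Object>(10);
        for (int i = 0; i < 10; i++) {
            q.add(new Object()); // fills the queue to its fixed capacity
        }
        try {
            q.add(new Object()); // 11th element: add() throws on a full queue
        } catch (IllegalStateException e) {
            System.out.println("queue full: " + e.getMessage());
        }
        System.out.println(q.offer(new Object())); // false -- offer() fails quietly instead
    }
}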

From source file:org.apache.geode.internal.cache.GemFireCacheImpl.java

/**
 * Creates a new instance of GemFireCache and populates it according to the
 * <code>cache.xml</code>, if appropriate.
 *
 * @param typeRegistry currently only unit tests set this parameter to a non-null value
 */
private GemFireCacheImpl(boolean isClient, PoolFactory pf, DistributedSystem system, CacheConfig cacheConfig,
        boolean asyncEventListeners, TypeRegistry typeRegistry) {
    this.isClient = isClient;
    this.clientpf = pf;
    this.cacheConfig = cacheConfig; // do early for bug 43213
    this.pdxRegistry = typeRegistry;

    // Synchronized to prevent a new cache from being created
    // before an old one has finished closing
    synchronized (GemFireCacheImpl.class) {

        // start JTA transaction manager within this synchronized block
        // to prevent race with cache close. fixes bug 43987
        JNDIInvoker.mapTransactions(system);
        this.system = (InternalDistributedSystem) system;
        this.dm = this.system.getDistributionManager();
        if (!this.isClient && PoolManager.getAll().isEmpty()) {
            // We only support management on members of a distributed system
            // Should do this: if (!getSystem().isLoner()) {
            // but it causes quickstart.CqClientTest to hang
            this.listener = new ManagementListener();
            this.system.addResourceListener(listener);
            if (this.system.isLoner()) {
                this.system.getInternalLogWriter()
                        .info(LocalizedStrings.GemFireCacheImpl_RUNNING_IN_LOCAL_MODE);
            }
        } else {
            getLogger().info("Running in client mode");
            this.listener = null;
        }

        // Don't let admin-only VMs create Caches just yet.
        DM dm = this.system.getDistributionManager();
        if (dm instanceof DistributionManager) {
            if (((DistributionManager) dm).getDMType() == DistributionManager.ADMIN_ONLY_DM_TYPE) {
                throw new IllegalStateException(
                        LocalizedStrings.GemFireCache_CANNOT_CREATE_A_CACHE_IN_AN_ADMINONLY_VM
                                .toLocalizedString());
            }
        }

        this.rootRegions = new HashMap();

        this.cqService = CqServiceProvider.create(this);

        this.rmqFactory = new ReliableMessageQueueFactoryImpl();

        // Create the CacheStatistics
        this.cachePerfStats = new CachePerfStats(system);
        CachePerfStats.enableClockStats = this.system.getConfig().getEnableTimeStatistics();

        this.txMgr = new TXManagerImpl(this.cachePerfStats, this);
        dm.addMembershipListener(this.txMgr);

        this.creationDate = new Date();

        this.persistentMemberManager = new PersistentMemberManager();

        if (asyncEventListeners) {
            final ThreadGroup group = LoggingThreadGroup.createThreadGroup("Message Event Threads", logger);
            ThreadFactory tf = new ThreadFactory() {
                public Thread newThread(final Runnable command) {
                    final Runnable r = new Runnable() {
                        public void run() {
                            ConnectionTable.threadWantsSharedResources();
                            command.run();
                        }
                    };
                    Thread thread = new Thread(group, r, "Message Event Thread");
                    thread.setDaemon(true);
                    return thread;
                }
            };
            ArrayBlockingQueue<Runnable> q = new ArrayBlockingQueue<Runnable>(EVENT_QUEUE_LIMIT);
            this.eventThreadPool = new PooledExecutorWithDMStats(q, EVENT_THREAD_LIMIT,
                    this.cachePerfStats.getEventPoolHelper(), tf, 1000);
        } else {
            this.eventThreadPool = null;
        }

        // Initialize the advisor here, but wait to exchange profiles until cache is fully built
        this.resourceAdvisor = ResourceAdvisor.createResourceAdvisor(this);
        // Initialize the advisor here, but wait to exchange profiles until cache is fully built
        this.jmxAdvisor = JmxManagerAdvisor.createJmxManagerAdvisor(new JmxManagerAdvisee(this));

        resourceManager = InternalResourceManager.createResourceManager(this);
        this.serialNumber = DistributionAdvisor.createSerialNumber();

        getResourceManager().addResourceListener(ResourceType.HEAP_MEMORY, getHeapEvictor());

        /*
         * Only bother creating an off-heap evictor if we have off-heap memory enabled.
         */
        if (null != getOffHeapStore()) {
            getResourceManager().addResourceListener(ResourceType.OFFHEAP_MEMORY, getOffHeapEvictor());
        }

        recordedEventSweeper = EventTracker.startTrackerServices(this);
        tombstoneService = TombstoneService.initialize(this);

        TypeRegistry.init();
        basicSetPdxSerializer(this.cacheConfig.getPdxSerializer());
        TypeRegistry.open();

        if (!isClient()) {
            // Initialize the QRM thread frequency to the default (1 second) to prevent spill
            // over from a previous Cache, as the interval is stored in a static
            // volatile field.
            HARegionQueue.setMessageSyncInterval(HARegionQueue.DEFAULT_MESSAGE_SYNC_INTERVAL);
        }
        FunctionService.registerFunction(new PRContainsValueFunction());
        this.expirationScheduler = new ExpirationScheduler(this.system);

        // set DEBUG_CREATION_STACK when debugging CacheExistsException
        if (DEBUG_CREATION_STACK) {
            this.creationStack = new Exception(
                    LocalizedStrings.GemFireCache_CREATED_GEMFIRECACHE_0.toLocalizedString(toString()));
        }

        this.txEntryStateFactory = TXEntryState.getFactory();
        if (xmlParameterizationEnabled) {
            // If a product properties file is available, replace properties from there.
            Properties userProps = this.system.getConfig().getUserDefinedProps();
            if (userProps != null && !userProps.isEmpty()) {
                resolver = new CacheXmlPropertyResolver(false, PropertyResolver.NO_SYSTEM_PROPERTIES_OVERRIDE,
                        userProps);
            } else {
                resolver = new CacheXmlPropertyResolver(false, PropertyResolver.NO_SYSTEM_PROPERTIES_OVERRIDE,
                        null);
            }
        }

        SystemFailure.signalCacheCreate();

        diskMonitor = new DiskStoreMonitor();
    } // synchronized
}
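
The constructor above uses an ArrayBlockingQueue as the bounded work queue of a custom executor, with a ThreadFactory that marks each event thread as a daemon. A stripped-down sketch of the same wiring using only java.util.concurrent follows; PooledExecutorWithDMStats is replaced by a plain ThreadPoolExecutor, and the two limit constants are illustrative values, not GemFire's.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BoundedEventPoolSketch {
    private static final int EVENT_QUEUE_LIMIT = 4096; // illustrative value
    private static final int EVENT_THREAD_LIMIT = 16;  // illustrative value

    public static void main(String[] args) throws InterruptedException {
        ThreadFactory tf = new ThreadFactory() {
            public Thread newThread(final Runnable command) {
                Thread thread = new Thread(command, "Message Event Thread");
                thread.setDaemon(true); // daemon event threads never block JVM shutdown
                return thread;
            }
        };
        // The queue bounds how many pending events may pile up before the
        // executor starts spawning extra threads (and, past the maximum,
        // rejecting submissions).
        ArrayBlockingQueue<Runnable> q = new ArrayBlockingQueue<Runnable>(EVENT_QUEUE_LIMIT);
        ThreadPoolExecutor pool = new ThreadPoolExecutor(1, EVENT_THREAD_LIMIT,
                1000, TimeUnit.MILLISECONDS, q, tf);
        pool.execute(new Runnable() {
            public void run() {
                System.out.println("event handled on " + Thread.currentThread().getName());
            }
        });
        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS);
    }
}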

From source file:la2launcher.MainFrame.java

private void processValidation(boolean full) {
    final long initTime = new Date().getTime();
    final String patcherUrl = "http://" + updateHost + "/hf/updater.lst.la2";
    final ThreadPoolExecutor tpe = new ThreadPoolExecutor(5, 5, 1, TimeUnit.HOURS,
            new ArrayBlockingQueue<Runnable>(10000));
    tpe.execute(new Runnable() {
        @Override
        public void run() {
            jTextArea2.setText("");
            try {
                if (full) {
                    jTextArea2.setText(jTextArea2.getText() + "\r\nChecking all files");
                } else {
                    jTextArea2.setText(jTextArea2.getText() + "\r\nChecking system files only");
                }
                File patcher = File.createTempFile("la2", "la2");
                patcher.deleteOnExit();
                File patcherExt = File.createTempFile("la2", "la2");
                patcherExt.deleteOnExit();
                FileOutputStream fos = new FileOutputStream(patcher);
                CloseableHttpClient httpclient = HttpClients.createDefault();
                HttpGet httpGet = new HttpGet(patcherUrl);
                CloseableHttpResponse response1 = httpclient.execute(httpGet);

                HttpEntity entity1 = response1.getEntity();
                copyStream(entity1.getContent(), fos, null);
                response1.close();
                fos.close();
                jTextArea2.setText(jTextArea2.getText()
                        + "\r\nDownloaded update list from: " + patcherUrl);

                fixBzip2File(patcher);
                jTextArea2.setText(jTextArea2.getText() + "\r\nArchive repaired");

                BZip2CompressorInputStream bz = new BZip2CompressorInputStream(new FileInputStream(patcher));
                OutputStream pout = new FileOutputStream(patcherExt);
                copyStream(bz, pout, new CopyListener() {
                    @Override
                    public void transfered(int n) {
                        bytesRecieved += n;
                        bytesRecievedTotal += n;
                    }
                });
                pout.close();
                bz.close();
                jTextArea2.setText(jTextArea2.getText() + "\r\nArchive extracted");

                if (full) {
                    jTextArea2.setText(jTextArea2.getText() + "\r\nValidating all files");
                } else {
                    jTextArea2.setText(jTextArea2.getText()
                            + "\r\nValidating system files only");
                }

                DefaultTableModel model = (DefaultTableModel) jTable2.getModel();
                model.setRowCount(0);

                int filesCount = scanSumFilesCount(patcherExt, full);
                jProgressBar1.setMinimum(0);
                jProgressBar1.setMaximum(filesCount);
                jProgressBar1.setValue(0);
                jLabel4.setText("0/" + filesCount);
                scanSumFile(patcherExt, new SumHandler() {

                    private ReentrantLock lock = new ReentrantLock();

                    @Override
                    public void handle(MDNamePair pair) {
                        try {
                            jProgressBar1.setIndeterminate(false);
                            //lock.unlock();
                            tpe.execute(new Runnable() {
                                @Override
                                public void run() {
                                    try {
                                        lock.lock();
                                        //printMsg(pair.filename);
                                        String crc = digest(new File(gamePath + pair.filename));
                                        //printMsg("    : " + pair.crc);
                                        //printMsg("   ? ? : " + crc);
                                        if (!pair.crc.equals(crc)) {
                                            DefaultTableModel dtm = (DefaultTableModel) jTable2.getModel();
                                            dtm.addRow(new Object[] { pair.filename, false });
                                        }
                                        jProgressBar1.setValue(jProgressBar1.getValue() + 1);
                                        jLabel4.setText(jProgressBar1.getValue() + "/" + filesCount);
                                        lock.unlock();
                                    } catch (NoSuchAlgorithmException ex) {
                                        Logger.getLogger(MainFrame.class.getName()).log(Level.SEVERE, null, ex);
                                    } catch (IOException ex) {
                                        Logger.getLogger(MainFrame.class.getName()).log(Level.SEVERE, null, ex);
                                    } finally {
                                        //if (lock.isLocked()) lock.unlock();
                                    }
                                }
                            });
                        } finally {
                            //if (lock.isLocked()) lock.unlock();
                        }
                    }
                }, full);
            } catch (IOException ex) {
                Logger.getLogger(MainFrame.class.getName()).log(Level.SEVERE, null, ex);
            }
        }
    });

    jButton5.setEnabled(false);
    jButton6.setEnabled(false);
    jButton7.setEnabled(false);
    jButton8.setEnabled(false);
    jButton10.setEnabled(false);
    jProgressBar1.setIndeterminate(true);
    new Thread() {
        @Override
        public void run() {
            do {
                long millis = new Date().getTime();
                try {
                    sleep(300);
                } catch (InterruptedException ex) {
                    Logger.getLogger(MainFrame.class.getName()).log(Level.SEVERE, null, ex);
                }
                millis = new Date().getTime() - millis;
                BigDecimal totBig = new BigDecimal(bytesRecievedTotal / (1024 * 1024.0));
                totBig = totBig.setScale(2, BigDecimal.ROUND_CEILING);
                jLabel5.setText("Speed: " + (bytesRecieved / millis) + " KB/s. Total: "
                        + totBig + " MB");
                bytesRecieved = 0;
            } while (tpe.getActiveCount() > 0);
            tpe.shutdown();
            jButton5.setEnabled(true);
            jButton6.setEnabled(true);
            jButton7.setEnabled(true);
            jButton8.setEnabled(true);
            jButton10.setEnabled(true);
            jProgressBar1.setIndeterminate(false);
            printMsg("Validation finished in " + (new Date().getTime() - initTime)
                    + " ms.");
        }
    }.start();
}
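
The validator above gives its pool a queue of 10000 slots; with the executor's default AbortPolicy, execute() throws RejectedExecutionException once both the queue and the pool are saturated. A minimal sketch of that behavior, and of the CallerRunsPolicy alternative, under deliberately tiny limits (all names and sizes here are illustrative):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class SaturationSketch {
    public static void main(String[] args) throws InterruptedException {
        // One worker thread and a single queue slot, to force saturation quickly.
        ThreadPoolExecutor tpe = new ThreadPoolExecutor(1, 1, 1, TimeUnit.HOURS,
                new ArrayBlockingQueue<Runnable>(1));
        Runnable slow = new Runnable() {
            public void run() {
                try {
                    Thread.sleep(100);
                } catch (InterruptedException ignored) {
                }
            }
        };
        tpe.execute(slow); // occupies the single worker
        tpe.execute(slow); // fills the one queue slot
        try {
            tpe.execute(slow); // no room left: the default AbortPolicy throws
        } catch (RejectedExecutionException e) {
            System.out.println("rejected: " + e);
        }
        // CallerRunsPolicy makes the submitting thread run the overflow task instead:
        tpe.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy());
        tpe.execute(slow); // runs on the main thread rather than being rejected
        tpe.shutdown();
    }
}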

From source file:org.apache.giraph.worker.BspServiceSource.java

/**
 * Save the vertices using the user-defined VertexOutputFormat from our
 * vertexArray based on the split.
 *
 * @param numLocalVertices Number of local vertices
 * @throws InterruptedException
 */
private void saveVertices(long numLocalVertices) throws IOException, InterruptedException {
    ImmutableClassesGiraphConfiguration<I, V, E> conf = getConfiguration();

    if (conf.getVertexOutputFormatClass() == null) {
        LOG.warn("saveVertices: " + GiraphConstants.VERTEX_OUTPUT_FORMAT_CLASS
                + " not specified -- there will be no saved output");
        return;
    }
    if (conf.doOutputDuringComputation()) {
        if (LOG.isInfoEnabled()) {
            LOG.info("saveVertices: The option for doing output during "
                    + "computation is selected, so there will be no saving of the "
                    + "output in the end of application");
        }
        return;
    }

    final int numPartitions = getPartitionStore().getNumPartitions();
    int numThreads = Math.min(getConfiguration().getNumOutputThreads(), numPartitions);
    LoggerUtils.setStatusAndLog(getContext(), LOG, Level.INFO, "saveVertices: Starting to save "
            + numLocalVertices + " vertices " + "using " + numThreads + " threads");
    final VertexOutputFormat<I, V, E> vertexOutputFormat = getConfiguration().createWrappedVertexOutputFormat();

    final Queue<Integer> partitionIdQueue = (numPartitions == 0) ? new LinkedList<Integer>()
            : new ArrayBlockingQueue<Integer>(numPartitions);
    Iterables.addAll(partitionIdQueue, getPartitionStore().getPartitionIds());

    long verticesToStore = 0;
    PartitionStore<I, V, E> partitionStore = getPartitionStore();
    for (int partitionId : partitionStore.getPartitionIds()) {
        Partition<I, V, E> partition = partitionStore.getOrCreatePartition(partitionId);
        verticesToStore += partition.getVertexCount();
        partitionStore.putPartition(partition);
    }
    WorkerProgress.get().startStoring(verticesToStore, getPartitionStore().getNumPartitions());

    CallableFactory<Void> callableFactory = new CallableFactory<Void>() {
        @Override
        public Callable<Void> newCallable(int callableId) {
            return new Callable<Void>() {
                /** How often to update WorkerProgress */
                private static final long VERTICES_TO_UPDATE_PROGRESS = 100000;

                @Override
                public Void call() throws Exception {
                    VertexWriter<I, V, E> vertexWriter = vertexOutputFormat.createVertexWriter(getContext());
                    vertexWriter.setConf(getConfiguration());
                    vertexWriter.initialize(getContext());
                    long nextPrintVertices = 0;
                    long nextUpdateProgressVertices = VERTICES_TO_UPDATE_PROGRESS;
                    long nextPrintMsecs = System.currentTimeMillis() + 15000;
                    int partitionIndex = 0;
                    int numPartitions = getPartitionStore().getNumPartitions();
                    while (!partitionIdQueue.isEmpty()) {
                        Integer partitionId = partitionIdQueue.poll();
                        if (partitionId == null) {
                            break;
                        }

                        Partition<I, V, E> partition = getPartitionStore().getOrCreatePartition(partitionId);
                        long verticesWritten = 0;
                        for (Vertex<I, V, E> vertex : partition) {
                            vertexWriter.writeVertex(vertex);
                            ++verticesWritten;

                            // Update status at most every 250k vertices or 15 seconds
                            if (verticesWritten > nextPrintVertices
                                    && System.currentTimeMillis() > nextPrintMsecs) {
                                LoggerUtils.setStatusAndLog(getContext(), LOG, Level.INFO,
                                        "saveVertices: Saved " + verticesWritten + " out of "
                                                + partition.getVertexCount() + " partition vertices, "
                                                + "on partition " + partitionIndex + " out of "
                                                + numPartitions);
                                nextPrintMsecs = System.currentTimeMillis() + 15000;
                                nextPrintVertices = verticesWritten + 250000;
                            }

                            if (verticesWritten >= nextUpdateProgressVertices) {
                                WorkerProgress.get().addVerticesStored(VERTICES_TO_UPDATE_PROGRESS);
                                nextUpdateProgressVertices += VERTICES_TO_UPDATE_PROGRESS;
                            }
                        }
                        getPartitionStore().putPartition(partition);
                        ++partitionIndex;
                        WorkerProgress.get().addVerticesStored(verticesWritten % VERTICES_TO_UPDATE_PROGRESS);
                        WorkerProgress.get().incrementPartitionsStored();
                    }
                    vertexWriter.close(getContext()); // the temp results are saved now
                    return null;
                }
            };
        }
    };
    ProgressableUtils.getResultsWithNCallables(callableFactory, numThreads, "save-vertices-%d", getContext());

    LoggerUtils.setStatusAndLog(getContext(), LOG, Level.INFO, "saveVertices: Done saving vertices.");
    // YARN: we must commit the "task" output ourselves, since Hadoop isn't there to do it.
    if (getConfiguration().isPureYarnJob() && getConfiguration().getVertexOutputFormatClass() != null) {
        try {
            OutputCommitter outputCommitter = vertexOutputFormat.getOutputCommitter(getContext());
            if (outputCommitter.needsTaskCommit(getContext())) {
                LoggerUtils.setStatusAndLog(getContext(), LOG, Level.INFO,
                        "OutputCommitter: committing task output.");
                // transfer from temp dirs to "task commit" dirs to prep for
                // the master's OutputCommitter#commitJob(context) call to finish.
                outputCommitter.commitTask(getContext());
            }
        } catch (InterruptedException ie) {
            LOG.error("Interrupted while attempting to obtain " + "OutputCommitter.", ie);
        } catch (IOException ioe) {
            LOG.error("Master task's attempt to commit output has " + "FAILED.", ioe);
        }
    }
}
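
This method, and the ones below that share its structure, pre-fills an ArrayBlockingQueue with every partition id and lets the worker callables drain it with poll(), which returns null rather than blocking once the work runs out. The LinkedList fallback is needed because the constructor rejects a capacity of zero. A minimal sketch of both points (an assumed illustration, not Giraph code):

import java.util.concurrent.ArrayBlockingQueue;

public class WorkQueueSketch {
    public static void main(String[] args) throws InterruptedException {
        try {
            new ArrayBlockingQueue<Integer>(0); // capacity must be >= 1
        } catch (IllegalArgumentException e) {
            System.out.println("zero capacity rejected");
        }

        // A work queue pre-filled with ids and drained concurrently;
        // poll() returning null signals "no work left".
        final ArrayBlockingQueue<Integer> ids = new ArrayBlockingQueue<Integer>(4);
        for (int i = 0; i < 4; i++) {
            ids.add(i);
        }
        Runnable worker = new Runnable() {
            public void run() {
                Integer id;
                while ((id = ids.poll()) != null) {
                    System.out.println(Thread.currentThread().getName() + " handled partition " + id);
                }
            }
        };
        Thread t1 = new Thread(worker);
        Thread t2 = new Thread(worker);
        t1.start();
        t2.start();
        t1.join();
        t2.join();
    }
}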

From source file:org.apache.giraph.worker.BspServiceSource.java

/**
 * Save the edges using the user-defined EdgeOutputFormat from our
 * vertexArray based on the split.
 *
 * @throws InterruptedException
 */
private void saveEdges() throws IOException, InterruptedException {
    final ImmutableClassesGiraphConfiguration<I, V, E> conf = getConfiguration();

    if (conf.getEdgeOutputFormatClass() == null) {
        LOG.warn("saveEdges: " + GiraphConstants.EDGE_OUTPUT_FORMAT_CLASS
                + " not specified -- make sure that the EdgeOutputFormat is not required.");
        return;
    }

    final int numPartitions = getPartitionStore().getNumPartitions();
    int numThreads = Math.min(conf.getNumOutputThreads(), numPartitions);
    LoggerUtils.setStatusAndLog(getContext(), LOG, Level.INFO,
            "saveEdges: Starting to save the edges using " + numThreads + " threads");
    final EdgeOutputFormat<I, V, E> edgeOutputFormat = conf.createWrappedEdgeOutputFormat();

    final Queue<Integer> partitionIdQueue = (numPartitions == 0) ? new LinkedList<Integer>()
            : new ArrayBlockingQueue<Integer>(numPartitions);
    Iterables.addAll(partitionIdQueue, getPartitionStore().getPartitionIds());

    CallableFactory<Void> callableFactory = new CallableFactory<Void>() {
        @Override
        public Callable<Void> newCallable(int callableId) {
            return new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    EdgeWriter<I, V, E> edgeWriter = edgeOutputFormat.createEdgeWriter(getContext());
                    edgeWriter.setConf(conf);
                    edgeWriter.initialize(getContext());

                    long nextPrintVertices = 0;
                    long nextPrintMsecs = System.currentTimeMillis() + 15000;
                    int partitionIndex = 0;
                    int numPartitions = getPartitionStore().getNumPartitions();
                    while (!partitionIdQueue.isEmpty()) {
                        Integer partitionId = partitionIdQueue.poll();
                        if (partitionId == null) {
                            break;
                        }

                        Partition<I, V, E> partition = getPartitionStore().getOrCreatePartition(partitionId);
                        long vertices = 0;
                        long edges = 0;
                        long partitionEdgeCount = partition.getEdgeCount();
                        for (Vertex<I, V, E> vertex : partition) {
                            for (Edge<I, E> edge : vertex.getEdges()) {
                                edgeWriter.writeEdge(vertex.getId(), vertex.getValue(), edge);
                                ++edges;
                            }
                            ++vertices;

                            // Update status at most every 250k vertices or 15 seconds
                            if (vertices > nextPrintVertices && System.currentTimeMillis() > nextPrintMsecs) {
                                LoggerUtils.setStatusAndLog(getContext(), LOG, Level.INFO,
                                        "saveEdges: Saved " + edges + " edges out of " + partitionEdgeCount
                                                + " partition edges, on partition " + partitionIndex
                                                + " out of " + numPartitions);
                                nextPrintMsecs = System.currentTimeMillis() + 15000;
                                nextPrintVertices = vertices + 250000;
                            }
                        }
                        getPartitionStore().putPartition(partition);
                        ++partitionIndex;
                    }
                    edgeWriter.close(getContext()); // the temp results are saved now
                    return null;
                }
            };
        }
    };
    ProgressableUtils.getResultsWithNCallables(callableFactory, numThreads, "save-edges-%d", getContext());

    LoggerUtils.setStatusAndLog(getContext(), LOG, Level.INFO, "saveEdges: Done saving edges.");
    // YARN: we must commit the "task" output ourselves, since Hadoop isn't there to do it.
    if (conf.isPureYarnJob() && conf.getEdgeOutputFormatClass() != null) {
        try {
            OutputCommitter outputCommitter = edgeOutputFormat.getOutputCommitter(getContext());
            if (outputCommitter.needsTaskCommit(getContext())) {
                LoggerUtils.setStatusAndLog(getContext(), LOG, Level.INFO,
                        "OutputCommitter: committing task output.");
                // transfer from temp dirs to "task commit" dirs to prep for
                // the master's OutputCommitter#commitJob(context) call to finish.
                outputCommitter.commitTask(getContext());
            }
        } catch (InterruptedException ie) {
            LOG.error("Interrupted while attempting to obtain " + "OutputCommitter.", ie);
        } catch (IOException ioe) {
            LOG.error("Master task's attempt to commit output has " + "FAILED.", ioe);
        }
    }
}

From source file:org.pentaho.di.trans.Trans.java

/**
 * Starts the threads prepared by prepareThreads(). Before you start the threads, you can add RowListeners to them.
 *
 * @throws KettleException
 *           if there is a communication error with a remote output socket.
 */
public void startThreads() throws KettleException {
    // Now prepare to start all the threads...
    //
    nrOfFinishedSteps = 0;
    nrOfActiveSteps = 0;

    ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.TransformationStartThreads.id, this);

    fireTransStartedListeners();

    for (int i = 0; i < steps.size(); i++) {
        final StepMetaDataCombi sid = steps.get(i);
        sid.step.markStart();
        sid.step.initBeforeStart();

        // also attach a Step Listener to detect when we're done...
        //
        StepListener stepListener = new StepListener() {
            public void stepActive(Trans trans, StepMeta stepMeta, StepInterface step) {
                nrOfActiveSteps++;
                if (nrOfActiveSteps == 1) {
                    // Transformation goes from inactive to active...
                    // PDI-5229 sync added
                    synchronized (transListeners) {
                        for (TransListener listener : transListeners) {
                            listener.transActive(Trans.this);
                        }
                    }
                }
            }

            public void stepFinished(Trans trans, StepMeta stepMeta, StepInterface step) {
                synchronized (Trans.this) {
                    nrOfFinishedSteps++;

                    if (nrOfFinishedSteps >= steps.size()) {
                        // Set the finished flag
                        //
                        setFinished(true);

                        // Grab the performance statistics one last time (if enabled)
                        //
                        addStepPerformanceSnapShot();

                        try {
                            fireTransFinishedListeners();
                        } catch (Exception e) {
                            step.setErrors(step.getErrors() + 1L);
                            log.logError(getName() + " : " + BaseMessages.getString(PKG,
                                    "Trans.Log.UnexpectedErrorAtTransformationEnd"), e);
                        }
                    }

                    // If a step fails with an error, we want to kill/stop the others
                    // too...
                    //
                    if (step.getErrors() > 0) {

                        log.logMinimal(BaseMessages.getString(PKG, "Trans.Log.TransformationDetectedErrors"));
                        log.logMinimal(
                                BaseMessages.getString(PKG, "Trans.Log.TransformationIsKillingTheOtherSteps"));

                        killAllNoWait();
                    }
                }
            }
        };
        // Make sure this is called first!
        //
        if (sid.step instanceof BaseStep) {
            ((BaseStep) sid.step).getStepListeners().add(0, stepListener);
        } else {
            sid.step.addStepListener(stepListener);
        }
    }

    if (transMeta.isCapturingStepPerformanceSnapShots()) {
        stepPerformanceSnapshotSeqNr = new AtomicInteger(0);
        stepPerformanceSnapShots = new ConcurrentHashMap<String, List<StepPerformanceSnapShot>>();

        // Calculate the maximum number of snapshots to be kept in memory
        //
        String limitString = environmentSubstitute(transMeta.getStepPerformanceCapturingSizeLimit());
        if (Const.isEmpty(limitString)) {
            limitString = EnvUtil.getSystemProperty(Const.KETTLE_STEP_PERFORMANCE_SNAPSHOT_LIMIT);
        }
        stepPerformanceSnapshotSizeLimit = Const.toInt(limitString, 0);

        // Set a timer to collect the performance data from the running threads...
        //
        stepPerformanceSnapShotTimer = new Timer("stepPerformanceSnapShot Timer: " + transMeta.getName());
        TimerTask timerTask = new TimerTask() {
            public void run() {
                if (!isFinished()) {
                    addStepPerformanceSnapShot();
                }
            }
        };
        stepPerformanceSnapShotTimer.schedule(timerTask, 100, transMeta.getStepPerformanceCapturingDelay());
    }

    // Now start a thread to monitor the running transformation...
    //
    setFinished(false);
    paused.set(false);
    stopped.set(false);

    transFinishedBlockingQueue = new ArrayBlockingQueue<Object>(10);

    TransListener transListener = new TransAdapter() {
        public void transFinished(Trans trans) {

            try {
                ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.TransformationFinish.id,
                        trans);
            } catch (KettleException e) {
                throw new RuntimeException("Error calling extension point at end of transformation", e);
            }

            // First of all, stop the performance snapshot timer if there is
            // one...
            //
            if (transMeta.isCapturingStepPerformanceSnapShots() && stepPerformanceSnapShotTimer != null) {
                stepPerformanceSnapShotTimer.cancel();
            }

            setFinished(true);
            running = false; // no longer running

            log.snap(Metrics.METRIC_TRANSFORMATION_EXECUTION_STOP);

            // If the user ran with metrics gathering enabled and a metrics logging table is configured, add another
            // listener...
            //
            MetricsLogTable metricsLogTable = transMeta.getMetricsLogTable();
            if (metricsLogTable.isDefined()) {
                try {
                    writeMetricsInformation();
                } catch (Exception e) {
                    log.logError("Error writing metrics information", e);
                    errors.incrementAndGet();
                }
            }

            // Close the unique connections when running database transactionally.
            // This will commit or roll back the transaction based on the result of this transformation.
            //
            if (transMeta.isUsingUniqueConnections()) {
                trans.closeUniqueDatabaseConnections(getResult());
            }
        }
    };
    // This should always be done first so that the other listeners achieve a clean state to start from (setFinished and
    // so on)
    //
    transListeners.add(0, transListener);

    running = true;

    switch (transMeta.getTransformationType()) {
    case Normal:

        // Now start all the threads...
        //
        for (int i = 0; i < steps.size(); i++) {
            final StepMetaDataCombi combi = steps.get(i);
            RunThread runThread = new RunThread(combi);
            Thread thread = new Thread(runThread);
            thread.setName(getName() + " - " + combi.stepname);
            ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.StepBeforeStart.id, combi);
            // Call an extension point at the end of the step
            //
            combi.step.addStepListener(new StepAdapter() {

                @Override
                public void stepFinished(Trans trans, StepMeta stepMeta, StepInterface step) {
                    try {
                        ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.StepFinished.id,
                                combi);
                    } catch (KettleException e) {
                        throw new RuntimeException(
                                "Unexpected error in calling extension point upon step finish", e);
                    }
                }

            });

            thread.start();
        }
        break;

    case SerialSingleThreaded:
        new Thread(new Runnable() {
            public void run() {
                try {
                    // Always disable thread priority management, it will always slow us
                    // down...
                    //
                    for (StepMetaDataCombi combi : steps) {
                        combi.step.setUsingThreadPriorityManagment(false);
                    }

                    //
                    // This is a single threaded version...
                    //

                    // Sort the steps from start to finish...
                    //
                    Collections.sort(steps, new Comparator<StepMetaDataCombi>() {
                        public int compare(StepMetaDataCombi c1, StepMetaDataCombi c2) {

                            boolean c1BeforeC2 = transMeta.findPrevious(c2.stepMeta, c1.stepMeta);
                            if (c1BeforeC2) {
                                return -1;
                            } else {
                                return 1;
                            }
                        }
                    });

                    boolean[] stepDone = new boolean[steps.size()];
                    int nrDone = 0;
                    while (nrDone < steps.size() && !isStopped()) {
                        for (int i = 0; i < steps.size() && !isStopped(); i++) {
                            StepMetaDataCombi combi = steps.get(i);
                            if (!stepDone[i]) {
                                // if (combi.step.canProcessOneRow() ||
                                // !combi.step.isRunning()) {
                                boolean cont = combi.step.processRow(combi.meta, combi.data);
                                if (!cont) {
                                    stepDone[i] = true;
                                    nrDone++;
                                }
                                // }
                            }
                        }
                    }
                } catch (Exception e) {
                    errors.addAndGet(1);
                    log.logError("Error executing single threaded", e);
                } finally {
                    for (int i = 0; i < steps.size(); i++) {
                        StepMetaDataCombi combi = steps.get(i);
                        combi.step.dispose(combi.meta, combi.data);
                        combi.step.markStop();
                    }
                }
            }
        }).start();
        break;

    case SingleThreaded:
        // Don't do anything, this needs to be handled by the transformation
        // executor!
        //
        break;
    default:
        break;

    }

    ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.TransformationStarted.id, this);

    if (log.isDetailed()) {
        log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.TransformationHasAllocated",
                String.valueOf(steps.size()), String.valueOf(rowsets.size())));
    }
}
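
The transFinishedBlockingQueue created in startThreads() acts as a completion signal: a finished transformation deposits a marker object into it, and a waiter can block on the queue instead of polling a flag. A minimal sketch of that signaling idiom (the marker and method names are illustrative, not Kettle's):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;

public class FinishedSignalSketch {
    private static final Object DONE_MARKER = new Object(); // illustrative marker

    public static void main(String[] args) throws InterruptedException {
        final BlockingQueue<Object> finishedQueue = new ArrayBlockingQueue<Object>(10);

        Thread worker = new Thread(new Runnable() {
            public void run() {
                // ... do the actual work, then signal completion:
                finishedQueue.offer(DONE_MARKER);
            }
        });
        worker.start();

        // The waiter blocks with a timeout instead of busy-waiting on a flag.
        Object signal = finishedQueue.poll(5, TimeUnit.SECONDS);
        System.out.println(signal != null ? "transformation finished" : "timed out waiting");
    }
}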

From source file:org.apache.giraph.worker.BspServiceSource.java

/**
 * Save partitions. To speed this operation up, it
 * runs in multiple threads.
 */
private void storeCheckpointVertices() {
    final int numPartitions = getPartitionStore().getNumPartitions();
    int numThreads = Math.min(GiraphConstants.NUM_CHECKPOINT_IO_THREADS.get(getConfiguration()), numPartitions);

    final Queue<Integer> partitionIdQueue = (numPartitions == 0) ? new LinkedList<Integer>()
            : new ArrayBlockingQueue<Integer>(numPartitions);
    Iterables.addAll(partitionIdQueue, getPartitionStore().getPartitionIds());

    final CompressionCodec codec = new CompressionCodecFactory(getConfiguration())
            .getCodec(new Path(GiraphConstants.CHECKPOINT_COMPRESSION_CODEC.get(getConfiguration())));

    long t0 = System.currentTimeMillis();

    CallableFactory<Void> callableFactory = new CallableFactory<Void>() {
        @Override
        public Callable<Void> newCallable(int callableId) {
            return new Callable<Void>() {

                @Override
                public Void call() throws Exception {
                    while (!partitionIdQueue.isEmpty()) {
                        Integer partitionId = partitionIdQueue.poll();
                        if (partitionId == null) {
                            break;
                        }
                        Path path = createCheckpointFilePathSafe(
                                "_" + partitionId + CheckpointingUtils.CHECKPOINT_VERTICES_POSTFIX);

                        FSDataOutputStream uncompressedStream = getFs().create(path);

                        DataOutputStream stream = codec == null ? uncompressedStream
                                : new DataOutputStream(codec.createOutputStream(uncompressedStream));

                        Partition<I, V, E> partition = getPartitionStore().getOrCreatePartition(partitionId);

                        partition.write(stream);

                        getPartitionStore().putPartition(partition);

                        stream.close();
                        uncompressedStream.close();
                    }
                    return null;
                }

            };
        }
    };

    ProgressableUtils.getResultsWithNCallables(callableFactory, numThreads, "checkpoint-vertices-%d",
            getContext());

    LOG.info("Save checkpoint in " + (System.currentTimeMillis() - t0) + " ms, using " + numThreads
            + " threads");
}

From source file:org.apache.hadoop.hbase.util.FSUtils.java

/**
 * This function is to scan the root path of the file system to get either the
 * mapping between the region name and its best locality region server or the
 * degree of locality of each region on each of the servers having at least
 * one block of that region. The output map parameters are both optional.
 *
 * @param conf
 *          the configuration to use
 * @param desiredTable
 *          the table you wish to scan locality for
 * @param threadPoolSize
 *          the thread pool size to use
 * @param regionToBestLocalityRSMapping
 *          the map into which to put the best locality mapping or null
 * @param regionDegreeLocalityMapping
 *          the map into which to put the locality degree mapping or null,
 *          must be a thread-safe implementation
 * @throws IOException
 *           in case of file system errors or interrupts
 */
private static void getRegionLocalityMappingFromFS(final Configuration conf, final String desiredTable,
        int threadPoolSize, Map<String, String> regionToBestLocalityRSMapping,
        Map<String, Map<String, Float>> regionDegreeLocalityMapping) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    Path rootPath = FSUtils.getRootDir(conf);
    long startTime = EnvironmentEdgeManager.currentTimeMillis();
    Path queryPath;
    // The table files are in ${hbase.rootdir}/data/<namespace>/<table>/*
    if (null == desiredTable) {
        queryPath = new Path(new Path(rootPath, HConstants.BASE_NAMESPACE_DIR).toString() + "/*/*/*/");
    } else {
        queryPath = new Path(FSUtils.getTableDir(rootPath, TableName.valueOf(desiredTable)).toString() + "/*/");
    }

    // reject all paths that are not appropriate
    PathFilter pathFilter = new PathFilter() {
        @Override
        public boolean accept(Path path) {
            // this is the region name; it may get some noise data
            if (null == path) {
                return false;
            }

            // no parent?
            Path parent = path.getParent();
            if (null == parent) {
                return false;
            }

            String regionName = path.getName();
            if (null == regionName) {
                return false;
            }

            if (!regionName.toLowerCase().matches("[0-9a-f]+")) {
                return false;
            }
            return true;
        }
    };

    FileStatus[] statusList = fs.globStatus(queryPath, pathFilter);

    if (null == statusList) {
        return;
    } else {
        LOG.debug("Query Path: " + queryPath + " ; # list of files: " + statusList.length);
    }

    // lower the number of threads in case we have very few expected regions
    threadPoolSize = Math.min(threadPoolSize, statusList.length);

    // run in multiple threads
    ThreadPoolExecutor tpe = new ThreadPoolExecutor(threadPoolSize, threadPoolSize, 60, TimeUnit.SECONDS,
            new ArrayBlockingQueue<Runnable>(statusList.length));
    try {
        // ignore all file status items that are not of interest
        for (FileStatus regionStatus : statusList) {
            if (null == regionStatus) {
                continue;
            }

            if (!regionStatus.isDirectory()) {
                continue;
            }

            Path regionPath = regionStatus.getPath();
            if (null == regionPath) {
                continue;
            }

            tpe.execute(new FSRegionScanner(fs, regionPath, regionToBestLocalityRSMapping,
                    regionDegreeLocalityMapping));
        }
    } finally {
        tpe.shutdown();
        int threadWakeFrequency = conf.getInt(HConstants.THREAD_WAKE_FREQUENCY, 60 * 1000);
        try {
            // here we wait until the TPE terminates, either naturally or because
            // an exception was thrown in one of the worker threads
            while (!tpe.awaitTermination(threadWakeFrequency, TimeUnit.MILLISECONDS)) {
                // printing out rough estimate, so as to not introduce
                // AtomicInteger
                LOG.info("Locality checking is underway: { Scanned Regions : " + tpe.getCompletedTaskCount()
                        + "/" + tpe.getTaskCount() + " }");
            }
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        }
    }

    long overhead = EnvironmentEdgeManager.currentTimeMillis() - startTime;
    String overheadMsg = "Scan DFS for locality info takes " + overhead + " ms";

    LOG.info(overheadMsg);
}
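
Sizing the work queue to statusList.length guarantees that every region task fits in the queue, so execute() never hits the executor's rejection policy: at most one task per FileStatus is submitted. A minimal sketch of this sizing-plus-awaitTermination idiom (the task body is an illustrative stand-in for FSRegionScanner):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ScanPoolSketch {
    public static void main(String[] args) throws InterruptedException {
        final int taskCount = 8; // stands in for statusList.length
        // Queue capacity equals the number of submissions, so nothing is ever rejected.
        ThreadPoolExecutor tpe = new ThreadPoolExecutor(2, 2, 60, TimeUnit.SECONDS,
                new ArrayBlockingQueue<Runnable>(taskCount));
        for (int i = 0; i < taskCount; i++) {
            final int region = i;
            tpe.execute(new Runnable() {
                public void run() {
                    System.out.println("scanned region " + region);
                }
            });
        }
        tpe.shutdown();
        // Poll for termination, logging a rough progress estimate, as the HBase code does.
        while (!tpe.awaitTermination(100, TimeUnit.MILLISECONDS)) {
            System.out.println("progress: " + tpe.getCompletedTaskCount() + "/" + tpe.getTaskCount());
        }
    }
}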