Example usage for java.util.concurrent ExecutorService isTerminated

Introduction

On this page you can find examples of how java.util.concurrent ExecutorService.isTerminated() is used in real-world projects.

Prototype

boolean isTerminated();

Document

Returns true if all tasks have completed following shut down. Note that isTerminated is never true unless either shutdown or shutdownNow was called first.
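
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the shutdown-then-poll pattern that the examples in the Usage section rely on:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class IsTerminatedDemo {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newFixedThreadPool(2);
        executor.submit(() -> System.out.println("task running"));

        System.out.println(executor.isTerminated()); // false: shutdown() has not been called yet

        executor.shutdown(); // stop accepting new tasks; already-submitted tasks still run
        while (!executor.isTerminated()) {
            Thread.sleep(10); // poll until all tasks have completed
        }
        System.out.println(executor.isTerminated()); // true
    }
}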

Usage

From source file:com.bittorrent.mpetazzoni.common.Torrent.java

private static String hashFiles(List<File> files) throws InterruptedException, IOException {
    int threads = getHashingThreadsCount();
    ExecutorService executor = Executors.newFixedThreadPool(threads);
    ByteBuffer buffer = ByteBuffer.allocate(Torrent.PIECE_LENGTH);
    List<Future<String>> results = new LinkedList<Future<String>>();
    StringBuilder hashes = new StringBuilder();

    long length = 0L;
    int pieces = 0;

    long start = System.nanoTime();
    for (File file : files) {
        logger.info("Hashing data from {} with {} threads ({} pieces)...", new Object[] { file.getName(),
                threads, (int) (Math.ceil((double) file.length() / Torrent.PIECE_LENGTH)) });

        length += file.length();

        FileInputStream fis = new FileInputStream(file);
        FileChannel channel = fis.getChannel();
        int step = 10;

        try {
            while (channel.read(buffer) > 0) {
                if (buffer.remaining() == 0) {
                    buffer.clear();
                    results.add(executor.submit(new CallableChunkHasher(buffer)));
                }

                if (results.size() >= threads) {
                    pieces += accumulateHashes(hashes, results);
                }

                if (channel.position() / (double) channel.size() * 100f > step) {
                    logger.info("  ... {}% complete", step);
                    step += 10;
                }
            }
        } finally {
            channel.close();
            fis.close();
        }
    }

    // Hash the last bit, if any
    if (buffer.position() > 0) {
        buffer.limit(buffer.position());
        buffer.position(0);
        results.add(executor.submit(new CallableChunkHasher(buffer)));
    }

    pieces += accumulateHashes(hashes, results);

    // Request orderly executor shutdown and wait for hashing tasks to
    // complete.
    executor.shutdown();
    while (!executor.isTerminated()) {
        Thread.sleep(10);
    }
    long elapsed = System.nanoTime() - start;

    int expectedPieces = (int) (Math.ceil((double) length / Torrent.PIECE_LENGTH));
    logger.info("Hashed {} file(s) ({} bytes) in {} pieces ({} expected) in {}ms.", new Object[] { files.size(),
            length, pieces, expectedPieces, String.format("%.1f", elapsed / 1e6), });

    return hashes.toString();
}
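
Note that the poll-and-sleep loop above (repeated in several examples below) can usually be replaced by ExecutorService.awaitTermination(long, TimeUnit), which blocks until the executor terminates, the timeout elapses, or the thread is interrupted. A minimal sketch of that variant, assuming java.util.concurrent.TimeUnit is imported and executor is the pool from the example:

executor.shutdown();
// Block for up to one hour instead of polling isTerminated() every 10 ms
if (!executor.awaitTermination(1, TimeUnit.HOURS)) {
    // Timed out: some hashing tasks are still running
}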

From source file:ga.rugal.jpt.common.tracker.common.Torrent.java

private static String hashFiles(List<File> files, int pieceLenght) throws InterruptedException, IOException {
    int threads = getHashingThreadsCount();
    ExecutorService executor = Executors.newFixedThreadPool(threads);
    ByteBuffer buffer = ByteBuffer.allocate(pieceLenght);
    List<Future<String>> results = new LinkedList<>();
    StringBuilder hashes = new StringBuilder();

    long length = 0L;
    int pieces = 0;

    long start = System.nanoTime();
    for (File file : files) {
        LOG.info("Hashing data from {} with {} threads ({} pieces)...", new Object[] { file.getName(), threads,
                (int) (Math.ceil((double) file.length() / pieceLenght)) });

        length += file.length();

        FileInputStream fis = new FileInputStream(file);
        FileChannel channel = fis.getChannel();
        int step = 10;

        try {
            while (channel.read(buffer) > 0) {
                if (buffer.remaining() == 0) {
                    buffer.clear();
                    results.add(executor.submit(new CallableChunkHasher(buffer)));
                }

                if (results.size() >= threads) {
                    pieces += accumulateHashes(hashes, results);
                }

                if (channel.position() / (double) channel.size() * 100f > step) {
                    LOG.info("  ... {}% complete", step);
                    step += 10;
                }
            }
        } finally {
            channel.close();
            fis.close();
        }
    }

    // Hash the last bit, if any
    if (buffer.position() > 0) {
        buffer.limit(buffer.position());
        buffer.position(0);
        results.add(executor.submit(new CallableChunkHasher(buffer)));
    }

    pieces += accumulateHashes(hashes, results);

    // Request orderly executor shutdown and wait for hashing tasks to
    // complete.
    executor.shutdown();
    while (!executor.isTerminated()) {
        Thread.sleep(10);
    }
    long elapsed = System.nanoTime() - start;

    int expectedPieces = (int) (Math.ceil((double) length / pieceLenght));
    LOG.info("Hashed {} file(s) ({} bytes) in {} pieces ({} expected) in {}ms.", new Object[] { files.size(),
            length, pieces, expectedPieces, String.format("%.1f", elapsed / 1e6), });

    return hashes.toString();
}

From source file:com.p2p.peercds.common.Torrent.java

private static String hashFiles(List<File> files) throws InterruptedException, IOException {
    int threads = getHashingThreadsCount();
    ExecutorService executor = Executors.newFixedThreadPool(threads);
    ByteBuffer buffer = ByteBuffer.allocate(PIECE_LENGTH);
    List<Future<String>> results = new LinkedList<Future<String>>();
    StringBuilder hashes = new StringBuilder();

    long length = 0L;
    int pieces = 0;

    long start = System.nanoTime();
    for (File file : files) {
        logger.info("Hashing data from {} with {} threads ({} pieces)...", new Object[] { file.getName(),
                threads, (int) (Math.ceil((double) file.length() / PIECE_LENGTH)) });

        length += file.length();

        FileInputStream fis = new FileInputStream(file);
        FileChannel channel = fis.getChannel();
        int step = 10;

        try {
            while (channel.read(buffer) > 0) {
                if (buffer.remaining() == 0) {
                    buffer.clear();
                    results.add(executor.submit(new CallableChunkHasher(buffer)));
                }

                if (results.size() >= threads) {
                    pieces += accumulateHashes(hashes, results);
                }

                if (channel.position() / (double) channel.size() * 100f > step) {
                    logger.info("  ... {}% complete", step);
                    step += 10;
                }
            }
        } finally {
            channel.close();
            fis.close();
        }
    }

    // Hash the last bit, if any
    if (buffer.position() > 0) {
        buffer.limit(buffer.position());
        buffer.position(0);
        results.add(executor.submit(new CallableChunkHasher(buffer)));
    }

    pieces += accumulateHashes(hashes, results);

    // Request orderly executor shutdown and wait for hashing tasks to
    // complete.
    executor.shutdown();
    while (!executor.isTerminated()) {
        Thread.sleep(10);
    }
    long elapsed = System.nanoTime() - start;

    int expectedPieces = (int) (Math.ceil((double) length / PIECE_LENGTH));
    logger.info("Hashed {} file(s) ({} bytes) in {} pieces ({} expected) in {}ms.", new Object[] { files.size(),
            length, pieces, expectedPieces, String.format("%.1f", elapsed / 1e6), });

    return hashes.toString();
}

From source file:com.oneops.boo.workflow.BuildAllPlatforms.java

/**
 * Right now this supports components with a two-layer config.
 *
 * @param platformName Platform name.
 * @param componentName Component name.
 * @param attributes Component variables.
 * @throws OneOpsClientAPIException the one ops client API exception
 */
@SuppressWarnings({ "unchecked", "rawtypes" })
private void updateComponentVariables(String platformName, String componentName, Map<String, Object> attributes)
        throws OneOpsClientAPIException {
    // Create thread pool to add users parallel
    ExecutorService executor = Executors.newFixedThreadPool(numOfThreads);

    for (Map.Entry<String, Object> entry : attributes.entrySet()) {
        String key = entry.getKey();
        Object value = entry.getValue();
        // Another Map, so key is ciName
        if (value instanceof Map) {
            Map<String, String> attris = (Map<String, String>) value;
            if (attris.containsKey(Constants.AUTHO_KEYS)) {
                Runnable worker = new UpdateComponentTask(this, platformName, componentName, key, attris);
                executor.execute(worker);
            } else {
                this.updateComponentVariablesInternal(platformName, componentName, key, attris);
            }
        } else if (value instanceof String) {
            Map<String, String> att = (Map) attributes;
            if (att.containsKey(Constants.AUTHO_KEYS)) {
                Runnable worker = new UpdateComponentTask(this, platformName, componentName, key, att);
                executor.execute(worker);
            } else {
                this.updateComponentVariablesInternal(platformName, componentName, componentName, att);
            }
            break;
        }
    }
    executor.shutdown();
    while (!executor.isTerminated()) {
        Uninterruptibles.sleepUninterruptibly(10, TimeUnit.MILLISECONDS);
    }
}
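
This example polls isTerminated() with Guava's Uninterruptibles.sleepUninterruptibly, which keeps sleeping across interrupts (restoring the interrupt flag afterwards), so the wait loop needs no InterruptedException handling.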

From source file:org.wso2.carbon.event.input.adapter.file.FileEventAdapter.java

public void processFiles() {
    try {
        // collect file in the source directory
        File folder = new File(this.sourcePath);
        File[] listOfFiles = folder.listFiles();
        //String patternString = ".*\\.csv$";

        for (int i = 0; i < listOfFiles.length; i++) {

            boolean isMatch = Pattern.matches(filenameRegex, listOfFiles[i].getName());
            if (isMatch) {
                BufferedReader in = null;
                ExecutorService executor = null;
                try {
                    // initialize thread pool
                    executor = Executors.newFixedThreadPool(this.threads);

                    // loading file
                    in = new BufferedReader(new FileReader(listOfFiles[i].toPath().toString()));
                    String line = null;

                    // skip lines

                    int lineSkipped = 0;
                    while (lineSkipped < this.skipLine && (line = in.readLine()) != null) {
                        lineSkipped = lineSkipped + 1;
                    }

                    // process line by line
                    int lineCount = 0;
                    String jsonArray = "";
                    line = null;
                    while ((line = in.readLine()) != null) {

                        lineCount = lineCount + 1;
                        jsonArray = jsonArray + formatLineToWSO2JSONEvent(line) + ",";

                        if (lineCount % this.batchSize == 0) {
                            executor.execute(new eventProcessorThread(this.eventAdapterListener, this.tenantId,
                                    "[" + jsonArray + "]"));
                            jsonArray = "";
                        }
                    }
                    executor.execute(new eventProcessorThread(this.eventAdapterListener, this.tenantId,
                            "[" + jsonArray + "]"));

                    executor.shutdown();
                    // wait until all submitted tasks complete
                    while (!executor.isTerminated()) {
                    }

                } catch (Exception e) {
                    e.printStackTrace();
                } finally {
                    // release resources
                    executor = null;
                    if (in != null) {
                        in.close();
                    }
                    in = null;
                    //System.gc();
                    // move current file to archive location
                    Files.move(listOfFiles[i].toPath(),
                            new File(this.arcPath + "/" + listOfFiles[i].getName()).toPath(), REPLACE_EXISTING);
                }
            }
        }
    } catch (Exception ex) {
        ex.printStackTrace();
    }
}
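
Note the empty while (!executor.isTerminated()) loop in this example: it spins a CPU core at full speed until the pool terminates. The sleep-based polls in the other examples, or ExecutorService.awaitTermination, avoid that cost. The same empty-loop pattern appears in the next example.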

From source file:com.jayway.maven.plugins.android.AbstractAndroidMojo.java

/**
 * Performs the callback action on the devices determined by
 * {@link #shouldDoWithThisDevice(com.android.ddmlib.IDevice)}
 *
 * @param deviceCallback the action to perform on each device
 * @throws org.apache.maven.plugin.MojoExecutionException
 *          in case there is a problem
 * @throws org.apache.maven.plugin.MojoFailureException
 *          in case there is a problem
 */
protected void doWithDevices(final DeviceCallback deviceCallback)
        throws MojoExecutionException, MojoFailureException {
    final AndroidDebugBridge androidDebugBridge = initAndroidDebugBridge();

    if (!androidDebugBridge.isConnected()) {
        throw new MojoExecutionException("Android Debug Bridge is not connected.");
    }

    waitForInitialDeviceList(androidDebugBridge);
    List<IDevice> devices = Arrays.asList(androidDebugBridge.getDevices());
    int numberOfDevices = devices.size();
    getLog().debug("Found " + numberOfDevices + " devices connected with the Android Debug Bridge");
    if (devices.size() == 0) {
        throw new MojoExecutionException("No online devices attached.");
    }

    int threadCount = getDeviceThreads();
    if (getDeviceThreads() == 0) {
        getLog().info("android.devicesThreads parameter not set, using a thread for each attached device");
        threadCount = numberOfDevices;
    } else {
        getLog().info("android.devicesThreads parameter set to " + getDeviceThreads());
    }

    boolean shouldRunOnAllDevices = getDevices().size() == 0;
    if (shouldRunOnAllDevices) {
        getLog().info("android.devices parameter not set, using all attached devices");
    } else {
        getLog().info("android.devices parameter set to " + getDevices().toString());
    }

    ArrayList<DoThread> doThreads = new ArrayList<DoThread>();
    ExecutorService executor = Executors.newFixedThreadPool(threadCount);
    for (final IDevice idevice : devices) {
        if (shouldRunOnAllDevices) {
            String deviceType = idevice.isEmulator() ? "Emulator " : "Device ";
            getLog().info(deviceType + DeviceHelper.getDescriptiveName(idevice) + " found.");
        }
        if (shouldRunOnAllDevices || shouldDoWithThisDevice(idevice)) {
            DoThread deviceDoThread = new DoThread() {
                public void runDo() throws MojoFailureException, MojoExecutionException {
                    deviceCallback.doWithDevice(idevice);
                }
            };
            doThreads.add(deviceDoThread);
            executor.execute(deviceDoThread);
        }
    }
    executor.shutdown();
    while (!executor.isTerminated()) {
        // busy-wait until all device threads finish
    }
    throwAnyDoThreadErrors(doThreads);

    if (!shouldRunOnAllDevices && doThreads.isEmpty()) {
        throw new MojoExecutionException("No device found for android.device=" + getDevices().toString());
    }
}

From source file:org.gbif.portal.harvest.taxonomy.TaxonomyUtils.java

/**
 * A utility that will effectively ensure that the taxonomy from one data resource is represented fully in another.
 * For all concepts that exist in the source, the target is checked to see if there exists a concept representing the same
 * classification (note that the target may be a more complete classification than the source). If the concept does not exist,
 * then the concept is created.
 * 
 * Typically this method would be used to build a NUB taxonomy. Taxonomic data resources would be imported with allowCreateUnknownKingdoms first,
 * and then inferred taxonomies would be imported with unknown kingdoms collated.
 * 
 * This will import accepted concepts and then non accepted concepts in order of rank
 * 
 * @param sourceDataResourceId The resource holding the concepts that are to be imported into the target  
 * @param targetDataResourceId The target resource to ensure encapsulates all concepts in the source
 * @param targetDataProviderId The data provider for the resource owning the taxonomy being built - this MUST own the targetDataResourceId
 * @param allowCreateUnknownKingdoms If this is set to false then the TaxonomyUtils.nameOfUnknownKingdom is used for any kingdom that
 * is not represented in the target taxonomy. If set to true, then the kingdoms are imported from the source.
 * @param majorRanksOnly If this is set to true, then only major ranks will be imported
 * @param unpartneredOnly If this is set to true, then only concepts with no partner concept id will be imported
 * @throws InterruptedException 
 */
public void importTaxonomyFromDataResource(long sourceDataResourceId, long targetDataResourceId,
        long targetDataProviderId, boolean allowCreateUnknownKingdoms, boolean majorRanksOnly,
        boolean unpartneredOnly) throws InterruptedException {

    List<Integer> ranksToImport = null;
    if (unpartneredOnly) {
        ranksToImport = taxonConceptDAO.getUnpartneredRanksWithinResource(sourceDataResourceId);
    } else {
        ranksToImport = taxonConceptDAO.getRanksWithinResource(sourceDataResourceId);
    }
    logger.debug("There are " + ranksToImport.size() + " ranks to import from data resource["
            + sourceDataResourceId + "]: " + ranksToImport);

    ExecutorService es = Executors.newCachedThreadPool();
    for (int i = 0; i < ranksToImport.size(); i++) {
        int rank = (ranksToImport.get(i));
        // TaxonomyThread is a Runnable, so it can be submitted to the executor directly
        es.execute(new TaxonomyThread(relationshipAssertionDAO, taxonConceptDAO, targetDataResourceId,
                targetDataProviderId, allowCreateUnknownKingdoms, majorRanksOnly, unpartneredOnly,
                sourceDataResourceId, rank));
    }
    es.shutdown();
    while (!es.isTerminated()) {
        try {
            Thread.sleep(100);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }
    System.out.println("Finalizados todos los hilos del recurso : " + sourceDataResourceId);
}
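
Unlike the fixed-size pools in the earlier examples, this one uses Executors.newCachedThreadPool(), so submitted rank imports are not queued behind a fixed number of workers and can all run concurrently; the poll interval is also longer (100 ms rather than 10 ms).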

From source file:com.xn.interfacetest.service.impl.TestCaseServiceImpl.java

@Transactional(propagation = Propagation.NOT_SUPPORTED)
private void excute(final List<TestCaseDto> testCaseDtoList, final TestEnvironmentDto testEnvironmentDto,
        final Long planId, final TestReportDto testReportDto, final TestSuitDto suitDto) {
    logger.info("==================");
    ExecutorService threadPool = Executors.newFixedThreadPool(10);
    // Submit each test case to the pool for asynchronous execution
    for (int i = 0; i < testCaseDtoList.size(); i++) {
        final int finalI = i;
        threadPool.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    logger.info("========" + finalI);
                    excuteCase(testCaseDtoList.get(finalI), testEnvironmentDto, planId, testReportDto, suitDto);
                } catch (Exception e) {
                    logger.error("", e);
                }

            }
        });
    }

    try {
        logger.info("sleep-----" + 1000);
        Thread.sleep(1000);
    } catch (InterruptedException e) {
        logger.info("InterruptedException-----" + e.getMessage());
    }

    threadPool.shutdown();
    // Busy-wait until every submitted test case has completed
    while (!threadPool.isTerminated()) {
    }
}

From source file:org.apache.sentry.tests.e2e.dbprovider.TestConcurrentClients.java

/**
 * Test that, when concurrent sentry clients are talking to the sentry server, the threads' data stay synchronized
 * @throws Exception
 */
@Test
public void testConcurrentSentryClient() throws Exception {
    final String HIVE_KEYTAB_PATH = System.getProperty("sentry.e2etest.hive.policyOwnerKeytab");
    final SentryPolicyServiceClient client = getSentryClient("hive", HIVE_KEYTAB_PATH);
    ExecutorService executor = Executors.newFixedThreadPool(NUM_OF_THREADS);

    final TestRuntimeState state = new TestRuntimeState();
    for (int i = 0; i < NUM_OF_TASKS; i++) {
        LOGGER.info("Start to test sentry client with task id [" + i + "]");
        executor.execute(new Runnable() {
            @Override
            public void run() {
                if (state.failed) {
                    LOGGER.error("found one failed state, abort test from here.");
                    return;
                }
                try {
                    String randStr = randomString(5);
                    String test_role = "test_role_" + randStr;
                    LOGGER.info("Start to test role: " + test_role);
                    Long startTime = System.currentTimeMillis();
                    Long elapsedTime = 0L;
                    while (Long.compare(elapsedTime, SENTRY_CLIENT_TEST_DURATION_MS) <= 0) {
                        LOGGER.info("Test role " + test_role + " runs " + elapsedTime + " ms.");
                        client.createRole(ADMIN1, test_role);
                        client.listRoles(ADMIN1);
                        client.grantServerPrivilege(ADMIN1, test_role, "server1", false);
                        client.listAllPrivilegesByRoleName(ADMIN1, test_role);
                        client.dropRole(ADMIN1, test_role);
                        elapsedTime = System.currentTimeMillis() - startTime;
                    }
                    state.setNumSuccess();
                } catch (Exception e) {
                    LOGGER.error("Sentry Client Testing Exception: ", e);
                    state.setFirstException(e);
                }
            }
        });
    }
    executor.shutdown();
    while (!executor.isTerminated()) {
        Thread.sleep(1000); // poll once per second
    }
    Throwable ex = state.getFirstException();
    assertFalse(ex == null ? "Test failed" : ex.toString(), state.failed);
    assertEquals(NUM_OF_TASKS, state.getNumSuccess());
}

From source file:org.apache.cassandra.service.StorageService.java

/** shuts node off to writes, empties memtables and the commit log. */
public synchronized void drain() throws IOException, InterruptedException, ExecutionException {
    ExecutorService mutationStage = StageManager.getStage(Stage.MUTATION);
    if (mutationStage.isTerminated()) {
        logger_.warn("Cannot drain node (did it already happen?)");
        return;
    }
    setMode("Starting drain process", true);
    Gossiper.instance.stop();
    setMode("Draining: shutting down MessageService", false);
    MessagingService.instance().shutdown();
    setMode("Draining: emptying MessageService pools", false);
    MessagingService.instance().waitFor();

    setMode("Draining: clearing mutation stage", false);
    mutationStage.shutdown();
    mutationStage.awaitTermination(3600, TimeUnit.SECONDS);

    // let's flush.
    setMode("Draining: flushing column families", false);
    List<ColumnFamilyStore> cfses = new ArrayList<ColumnFamilyStore>();
    for (String tableName : DatabaseDescriptor.getNonSystemTables()) {
        Table table = Table.open(tableName);
        cfses.addAll(table.getColumnFamilyStores());
    }
    totalCFs = remainingCFs = cfses.size();
    for (ColumnFamilyStore cfs : cfses) {
        cfs.forceBlockingFlush();
        remainingCFs--;
    }

    ColumnFamilyStore.postFlushExecutor.shutdown();
    ColumnFamilyStore.postFlushExecutor.awaitTermination(60, TimeUnit.SECONDS);

    CommitLog.instance.shutdownBlocking();

    // want to make sure that any segments deleted as a result of flushing are gone.
    DeletionService.waitFor();

    setMode("Node is drained", true);
}
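
In this final example isTerminated() serves a different purpose: drain() checks it up front as a guard to detect whether the mutation stage was already shut down by an earlier drain, and the actual wait is delegated to awaitTermination rather than a polling loop.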