Example usage for java.util.concurrent ExecutorService shutdownNow

Introduction

This page shows example usage of java.util.concurrent ExecutorService.shutdownNow().

Prototype

List<Runnable> shutdownNow();

Document

Attempts to stop all actively executing tasks, halts the processing of waiting tasks, and returns a list of the tasks that were awaiting execution.
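In practice, shutdownNow() is usually the second phase of a two-phase shutdown, as most of the examples below illustrate: call shutdown() so no new tasks are accepted, wait with awaitTermination(), and only force-cancel what remains if the pool has not drained in time. The following is a minimal, self-contained sketch of that pattern (not taken from any of the projects below); note that shutdownNow() only interrupts running tasks, so they stop promptly only if they respond to interruption.

import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ShutdownNowExample {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        for (int i = 0; i < 5; i++) {
            pool.submit(new Runnable() {
                @Override
                public void run() {
                    try {
                        Thread.sleep(60_000); // simulate a long-running task
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt(); // honor the interrupt from shutdownNow()
                    }
                }
            });
        }

        pool.shutdown(); // phase 1: stop accepting new tasks
        if (!pool.awaitTermination(2, TimeUnit.SECONDS)) {
            // phase 2: interrupt running tasks and drain the work queue
            List<Runnable> neverStarted = pool.shutdownNow();
            System.out.println(neverStarted.size() + " queued task(s) never started");
        }
    }
}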

Usage

From source file:com.thruzero.common.web.model.container.builder.xml.XmlPanelSetBuilder.java

protected PanelSet buildConcurrently() throws Exception {
    PanelSet result = new PanelSet(panelSetId);

    if (!panelNodes.isEmpty()) {
        // Build the panels in parallel (e.g., RSS Feed panels should be created in parallel).
        ExecutorService executorService = Executors.newFixedThreadPool(panelNodes.size());
        logHelper.logExecutorServiceCreated(panelSetId);

        final Map<String, AbstractPanel> panels = new HashMap<String, AbstractPanel>();
        for (final InfoNodeElement panelNode : panelNodes) {
            final AbstractXmlPanelBuilder panelBuilder = panelBuilderTypeRegistry
                    .createBuilder(panelNode.getName(), panelNode);
            final String panelKey = Integer.toHexString(panelNode.hashCode());

            if (panelBuilder == null) {
                panels.put(panelKey, new ErrorHtmlPanel("error", "Panel ERROR",
                        "PanelBuilder not found for panel type " + panelNode.getName()));
            } else {
                //logger.debug("  - prepare to build: " + panelNode.getName());
                executorService.execute(new Runnable() {
                    @Override
                    public void run() {
                        try {
                            AbstractPanel panel = panelBuilder.build();
                            panels.put(panelKey, panel);
                        } catch (Exception e) {
                            panels.put(panelKey, panelBuilder.buildErrorPanel(panelBuilder.getPanelId(),
                                    "Panel ERROR",
                                    "PanelBuilder encountered an Exception: " + e.getClass().getSimpleName()));
                        }
                    }

                    @Override
                    public String toString() {
                        return panelBuilder.getPanelInfoForError();
                    }
                });
            }
        }

        // Wait for all panels to be built
        executorService.shutdown();
        logHelper.logExecutorServiceShutdown(panelSetId);
        try {
            executorService.awaitTermination(timeoutInSeconds, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            // ignore (handled below)
            logHelper.logExecutorServiceInterrupted(panelSetId);
        }

        if (executorService.isTerminated()) {
            logHelper.logExecutorServiceIsTerminated(panelSetId);
        } else {
            logHelper.logExecutorServiceIsNotTerminated(executorService, executorService.shutdownNow(),
                    panelSetId);
        }

        // add panels in the same order as defined
        for (InfoNodeElement panelNode : panelNodes) {
            String panelKey = Integer.toHexString(panelNode.hashCode());
            AbstractPanel panel = panels.get(panelKey);
            if (panel == null) {
                // if it wasn't added to the panels map, then there must have been a timeout error
                AbstractXmlPanelBuilder panelBuilder = panelBuilderTypeRegistry
                        .createBuilder(panelNode.getName(), panelNode);

                result.addPanel(panelBuilder.buildErrorPanel(panelKey, "Panel ERROR",
                        "PanelBuilder encountered a timeout error: " + panelNode.getName()));
            } else {
                result.addPanel(panel);
            }
        }
    }
    logHelper.logPanelSetCompleted(panelSetId);

    return result;
}

From source file:com.ifesdjeen.cascading.cassandra.hadoop.ColumnFamilyInputFormat.java

public List<InputSplit> getSplits(JobContext context) throws IOException {
    Configuration conf = context.getConfiguration();

    validateConfiguration(conf);

    // canonical ranges and nodes holding replicas
    List<TokenRange> masterRangeNodes = getRangeMap(conf);

    keyspace = ConfigHelper.getInputKeyspace(context.getConfiguration());
    cfName = ConfigHelper.getInputColumnFamily(context.getConfiguration());
    partitioner = ConfigHelper.getInputPartitioner(context.getConfiguration());
    logger.debug("partitioner is " + partitioner);

    // canonical ranges, split into pieces, fetching the splits in parallel
    ExecutorService executor = Executors.newCachedThreadPool();
    List<InputSplit> splits = new ArrayList<InputSplit>();

    try {
        List<Future<List<InputSplit>>> splitfutures = new ArrayList<Future<List<InputSplit>>>();
        KeyRange jobKeyRange = ConfigHelper.getInputKeyRange(conf);
        Range<Token> jobRange = null;
        if (jobKeyRange != null && jobKeyRange.start_token != null) {
            assert partitioner
                .preservesOrder() : "ConfigHelper.setInputKeyRange(..) can only be used with an order-preserving partitioner";
            assert jobKeyRange.start_key == null : "only start_token supported";
            assert jobKeyRange.end_key == null : "only end_token supported";
            jobRange = new Range<Token>(partitioner.getTokenFactory().fromString(jobKeyRange.start_token),
                    partitioner.getTokenFactory().fromString(jobKeyRange.end_token), partitioner);
        }

        for (TokenRange range : masterRangeNodes) {
            if (jobRange == null) {
                // for each range, pick a live owner and ask it to compute bite-sized splits
                splitfutures.add(executor.submit(new SplitCallable(range, conf)));
            } else {
                Range<Token> dhtRange = new Range<Token>(
                        partitioner.getTokenFactory().fromString(range.start_token),
                        partitioner.getTokenFactory().fromString(range.end_token), partitioner);

                if (dhtRange.intersects(jobRange)) {
                    for (Range<Token> intersection : dhtRange.intersectionWith(jobRange)) {
                        range.start_token = partitioner.getTokenFactory().toString(intersection.left);
                        range.end_token = partitioner.getTokenFactory().toString(intersection.right);
                        // for each range, pick a live owner and ask it to compute bite-sized splits
                        splitfutures.add(executor.submit(new SplitCallable(range, conf)));
                    }
                }
            }
        }

        // wait until we have all the results back
        for (Future<List<InputSplit>> futureInputSplits : splitfutures) {
            try {
                splits.addAll(futureInputSplits.get());
            } catch (Exception e) {
                throw new IOException("Could not get input splits", e);
            }
        }
    } finally {
        executor.shutdownNow();
    }

    assert splits.size() > 0;
    Collections.shuffle(splits, new Random(System.nanoTime()));
    return splits;
}

From source file:org.apache.sysml.runtime.instructions.cp.ParamservBuiltinCPInstruction.java

@Override
public void processInstruction(ExecutionContext ec) {
    Timing tSetup = DMLScript.STATISTICS ? new Timing(true) : null;

    PSModeType mode = getPSMode();
    int workerNum = getWorkerNum(mode);
    BasicThreadFactory factory = new BasicThreadFactory.Builder().namingPattern("workers-pool-thread-%d")
            .build();
    ExecutorService es = Executors.newFixedThreadPool(workerNum, factory);
    String updFunc = getParam(PS_UPDATE_FUN);
    String aggFunc = getParam(PS_AGGREGATION_FUN);

    int k = getParLevel(workerNum);

    // Create the execution contexts: one per worker plus one for the aggregation service
    LocalVariableMap newVarsMap = createVarsMap(ec);
    List<ExecutionContext> newECs = ParamservUtils.createExecutionContexts(ec, newVarsMap, updFunc, aggFunc,
            workerNum, k);

    // Create workers' execution context
    List<ExecutionContext> workerECs = newECs.subList(0, newECs.size() - 1);

    // Create the agg service's execution context
    ExecutionContext aggServiceEC = newECs.get(newECs.size() - 1);

    PSFrequency freq = getFrequency();
    PSUpdateType updateType = getUpdateType();
    int epochs = getEpochs();

    // Create the parameter server
    ListObject model = ec.getListObject(getParam(PS_MODEL));
    ParamServer ps = createPS(mode, aggFunc, updateType, workerNum, model, aggServiceEC);

    // Create the local workers
    MatrixObject valFeatures = ec.getMatrixObject(getParam(PS_VAL_FEATURES));
    MatrixObject valLabels = ec.getMatrixObject(getParam(PS_VAL_LABELS));
    List<LocalPSWorker> workers = IntStream.range(0, workerNum).mapToObj(i -> new LocalPSWorker(i, updFunc,
            freq, epochs, getBatchSize(), valFeatures, valLabels, workerECs.get(i), ps))
            .collect(Collectors.toList());

    // Do data partition
    PSScheme scheme = getScheme();
    doDataPartitioning(scheme, ec, workers);

    if (DMLScript.STATISTICS)
        Statistics.accPSSetupTime((long) tSetup.stop());

    if (LOG.isDebugEnabled()) {
        LOG.debug(String.format(
                "\nConfiguration of paramserv func: " + "\nmode: %s \nworkerNum: %d \nupdate frequency: %s "
                        + "\nstrategy: %s \ndata partitioner: %s",
                mode, workerNum, freq, updateType, scheme));
    }

    try {
        // Launch the worker threads and wait for completion
        for (Future<Void> ret : es.invokeAll(workers))
            ret.get(); //error handling
        // Fetch the final model from ps
        ListObject result = ps.getResult();
        ec.setVariable(output.getName(), result);
    } catch (InterruptedException | ExecutionException e) {
        throw new DMLRuntimeException("ParamservBuiltinCPInstruction: some error occurred: ", e);
    } finally {
        es.shutdownNow();
        // Also shut down the thread pool in the parameter server
        ps.shutdown();
    }
}

From source file:io.undertow.server.handlers.proxy.LoadBalancingProxyHTTP2TestCase.java

@Test
public void testHttp2ClientMultipleStreamsThreadSafety()
        throws IOException, URISyntaxException, ExecutionException, InterruptedException, TimeoutException {
    //not actually a proxy test
    //but convenient to put it here
    UndertowXnioSsl ssl = new UndertowXnioSsl(DefaultServer.getWorker().getXnio(), OptionMap.EMPTY,
            DefaultServer.SSL_BUFFER_POOL, DefaultServer.createClientSslContext());
    final UndertowClient client = UndertowClient.getInstance();
    final ClientConnection connection = client.connect(
            new URI("https", null, DefaultServer.getHostAddress(), DefaultServer.getHostPort() + 1, "/", null,
                    null),
            DefaultServer.getWorker(), ssl, DefaultServer.getBufferPool(),
            OptionMap.create(UndertowOptions.ENABLE_HTTP2, true)).get();
    final ExecutorService service = Executors.newFixedThreadPool(10);
    try {
        Deque<FutureResult<String>> futures = new ArrayDeque<>();
        for (int i = 0; i < 100; ++i) {
            final FutureResult<String> future = new FutureResult<>();
            futures.add(future);
            service.submit(new Callable<String>() {

                @Override
                public String call() throws Exception {
                    ClientRequest cr = new ClientRequest().setMethod(Methods.GET).setPath("/path")
                            .setProtocol(Protocols.HTTP_1_1);
                    connection.sendRequest(cr, new ClientCallback<ClientExchange>() {
                        @Override
                        public void completed(ClientExchange result) {
                            result.setResponseListener(new ClientCallback<ClientExchange>() {
                                @Override
                                public void completed(ClientExchange result) {
                                    new StringReadChannelListener(DefaultServer.getBufferPool()) {
                                        @Override
                                        protected void stringDone(String string) {
                                            future.setResult(string);
                                        }

                                        @Override
                                        protected void error(IOException e) {
                                            future.setException(e);
                                        }
                                    }.setup(result.getResponseChannel());
                                }

                                @Override
                                public void failed(IOException e) {
                                    future.setException(e);
                                }
                            });
                        }

                        @Override
                        public void failed(IOException e) {
                            future.setException(e);
                        }
                    });
                    return null;
                }
            });
        }
        while (!futures.isEmpty()) {
            FutureResult<String> future = futures.poll();
            Assert.assertNotEquals(IoFuture.Status.WAITING,
                    future.getIoFuture().awaitInterruptibly(10, TimeUnit.SECONDS));
            Assert.assertEquals("/path", future.getIoFuture().get());
        }
    } finally {
        service.shutdownNow();
    }
}

From source file:com.topsec.tsm.sim.report.model.ReportModel.java

/**
 * Builds the export data for a master report (e.g., for mail export).
 *
 * @param rptMasterTbImp
 *            RptMasterTbService DAO
 * @param exp
 *            ExpStruct export descriptor
 * @param request
 *            HttpServletRequest
 * @return LinkedHashMap<String, List> export data, keyed by report subject
 * @throws Exception
 *             if the export fails
 */
public static LinkedHashMap<String, List> expMstReport(RptMasterTbService rptMasterTbImp, ExpStruct exp,
        HttpServletRequest request) throws Exception {
    String mstRptId = exp.getMstrptid(); // master report ID
    Integer mstRptIdInt = 0;
    if (!GlobalUtil.isNullOrEmpty(mstRptId)) {
        mstRptIdInt = Integer.valueOf(mstRptId);
    }
    String mstSql = ReportUiConfig.MstSubSql;
    Object[] subParam = { mstRptIdInt };
    //      List<Map<String,Object>> subResult = rptMasterTbImp.queryTmpList(mstSql, subParam);

    List<Map<String, Object>> subResult = new ArrayList<Map<String, Object>>();
    Map<Integer, Integer> rowColumns = new HashMap<Integer, Integer>();

    List<Map<String, Object>> subResultTemp = rptMasterTbImp.queryTmpList(mstSql, subParam);
    if (subResultTemp.size() > 0) {
        Map subMap = subResultTemp.get(0);
        String viewItem = StringUtil.toString(subMap.get("viewItem"), "");
        if (viewItem.indexOf("2") < 0) {
            exp.setRptType(ReportUiConfig.rptDirection);
            String[] time = ReportUiUtil.getExpTime("month");
            exp.setRptTimeS(time[0]);
            exp.setRptTimeE(time[1]);
        }
    }
    int evtRptsize = subResultTemp.size();
    if (!GlobalUtil.isNullOrEmpty(subResultTemp)) {
        subResult.addAll(subResultTemp);
    }
    ReportBean bean = new ReportBean();
    if (!GlobalUtil.isNullOrEmpty(request)) {
        bean = ReportUiUtil.tidyFormBean(bean, request);
    }
    String nodeType = bean.getNodeType();
    String dvcaddress = bean.getDvcaddress();
    DataSourceService dataSourceService = (DataSourceService) SpringContextServlet.springCtx
            .getBean("dataSourceService");
    if (!GlobalUtil.isNullOrEmpty(bean.getDvctype()) && bean.getDvctype().startsWith("Profession/Group")
            && !GlobalUtil.isNullOrEmpty(nodeType) && !GlobalUtil.isNullOrEmpty(dvcaddress)) {
        Map map = TopoUtil.getAssetEvtMstMap();
        String mstIds = null;
        List<SimDatasource> simDatasources = dataSourceService.getByIp(dvcaddress);
        if (!GlobalUtil.isNullOrEmpty(simDatasources)) {
            mstIds = "";
            for (SimDatasource simDatasource : simDatasources) {
                if (map.containsKey(simDatasource.getSecurityObjectType())) {
                    mstIds += map.get(simDatasource.getSecurityObjectType()).toString() + ":::";
                } else {
                    String keyString = getStartStringKey(map, simDatasource.getSecurityObjectType());
                    if (!GlobalUtil.isNullOrEmpty(keyString)) {
                        mstIds += map.get(keyString).toString() + ":::";
                    }
                }
            }
            if (mstIds.length() > 3) {
                mstIds = mstIds.substring(0, mstIds.length() - 3);
            }
        } else {
            if (map.containsKey(nodeType)) {
                mstIds = map.get(nodeType).toString();
            } else {
                String keyString = getStartStringKey(map, nodeType);
                if (!GlobalUtil.isNullOrEmpty(keyString)) {
                    mstIds = map.get(keyString).toString();
                }
            }
        }
        if (!GlobalUtil.isNullOrEmpty(mstIds)) {
            String[] mstIdArr = mstIds.split(":::");
            for (String string : mstIdArr) {
                List<Map<String, Object>> subTemp = rptMasterTbImp.queryTmpList(mstSql,
                        new Object[] { StringUtil.toInt(string, 5) });
                if (!GlobalUtil.isNullOrEmpty(subTemp)) {
                    int maxCol = 0;
                    if (!GlobalUtil.isNullOrEmpty(rowColumns)) {
                        maxCol = getMaxOrMinKey(rowColumns, 1);
                    }
                    for (Map map2 : subTemp) {
                        Integer row = (Integer) map2.get("subRow") + maxCol;
                        map2.put("subRow", row);
                    }
                    subResult.addAll(subTemp);
                }
            }
        }
    }

    if (!GlobalUtil.isNullOrEmpty(bean.getDvctype()) && bean.getDvctype().startsWith("Comprehensive")) {
        List<String> dvcTypes = new ArrayList<String>();
        dvcTypes.add(bean.getDvctype().replace("Comprehensive", ""));

        List<String> mstrptidAndNodeTypeList = new ArrayList<String>();
        setMstIdAndScanNodeType(dvcTypes, mstrptidAndNodeTypeList);
        subResultTemp = null;
        if (!GlobalUtil.isNullOrEmpty(mstrptidAndNodeTypeList)) {
            subResultTemp = rptMasterTbImp.queryTmpList(ReportUiConfig.MstSubSql,
                    new Object[] { StringUtil.toInt((mstrptidAndNodeTypeList.get(0).split("IDandNODEtype"))[0],
                            StringUtil.toInt(bean.getTalTop(), 5)) });
            Map<Integer, Integer> rowColumnsTeMap = ReportModel.getRowColumns(subResultTemp);
            evtRptsize = subResultTemp.size();
            if (!GlobalUtil.isNullOrEmpty(subResultTemp)) {
                for (Map map2 : subResultTemp) {
                    map2.put("subject", (mstrptidAndNodeTypeList.get(0).split("IDandNODEtype"))[1]);
                }
                subResult.addAll(subResultTemp);
                rowColumns.putAll(rowColumnsTeMap);
            }

            int len = mstrptidAndNodeTypeList.size();
            for (int i = 1; i < len; i++) {
                String mstrptidAndNodeType = mstrptidAndNodeTypeList.get(i);
                String string = mstrptidAndNodeType.split("IDandNODEtype")[0];
                List<Map<String, Object>> subTemp = rptMasterTbImp.queryTmpList(ReportUiConfig.MstSubSql,
                        new Object[] { StringUtil.toInt(string, StringUtil.toInt(bean.getTalTop(), 5)) });
                if (!GlobalUtil.isNullOrEmpty(subTemp)) {
                    int maxCol = 0;
                    if (!GlobalUtil.isNullOrEmpty(rowColumns)) {
                        maxCol = getMaxOrMinKey(rowColumns, 1);
                    }
                    for (Map map2 : subTemp) {
                        Integer row = (Integer) map2.get("subRow") + maxCol;
                        map2.put("subRow", row);
                        map2.put("subject", mstrptidAndNodeType.split("IDandNODEtype")[1]);
                    }
                    subResult.addAll(subTemp);
                    Map<Integer, Integer> rowColTemp = ReportModel.getRowColumns(subTemp);
                    rowColumns.putAll(rowColTemp);
                }
            }
        }
    }

    List<ExpDateStruct> expList = new ArrayList<ExpDateStruct>();
    Map<ReportExecutor.SubjectKey, Map<Integer, ExpDateStruct>> exportMap = Collections
            .synchronizedMap(new LinkedHashMap());
    ExecutorService threadPool = Executors.newFixedThreadPool(subResult.size(),
            new TsmThreadFactory("ReportSubjectExport"));
    LinkedHashMap<String, List> expMap = null;
    try {
        List<ReportExecutor> tasks = new ArrayList<ReportExecutor>(subResult.size());
        int order = 0;
        for (Map sub : subResult) {
            order += 100;
            tasks.add(new ReportExecutor(order, rptMasterTbImp, exp, exportMap, expList, sub, request,
                    SID.currentUser()));
        }
        threadPool.invokeAll(tasks);
        expMap = new LinkedHashMap<String, List>(exportMap.size());
        for (Map.Entry<ReportExecutor.SubjectKey, Map<Integer, ExpDateStruct>> entry : exportMap.entrySet()) {
            expMap.put(entry.getKey().subject, new ArrayList(entry.getValue().values()));
        }
    } finally {
        threadPool.shutdownNow();
    }
    return expMap;
}

From source file:com.splout.db.integration.TestMultiThreadedQueryAndDeploy.java

@Test
@Ignore // Causes some non-deterministic problems, to be analyzed
public void test() throws Throwable {
    FileUtils.deleteDirectory(new File(TMP_FOLDER));
    new File(TMP_FOLDER).mkdirs();

    createSploutEnsemble(N_QNODES, N_DNODES);
    String[] qNodeAddresses = new String[N_QNODES];
    for (int i = 0; i < N_QNODES; i++) {
        qNodeAddresses[i] = getqNodes().get(i).getAddress();
    }

    final SploutClient client = new SploutClient(qNodeAddresses);
    final Tablespace testTablespace = createTestTablespace(N_DNODES);
    final Random random = new Random(SEED);
    final AtomicBoolean failed = new AtomicBoolean(false);
    final AtomicInteger iteration = new AtomicInteger(0);
    final Set<Integer> iterationsSeen = new HashSet<Integer>();

    deployIteration(0, random, client, testTablespace);

    for (QNode qnode : getqNodes()) {
        // Make sure all QNodes are aware of the first deploy
        // There might be some delay as they have to receive notifications via Hazelcast etc
        long waitedSoFar = 0;
        QueryStatus status = null;
        SploutClient perQNodeClient = new SploutClient(qnode.getAddress());
        do {
            status = perQNodeClient.query(TABLESPACE, "0", "SELECT * FROM " + TABLE + ";", null);
            Thread.sleep(100);
            waitedSoFar += 100;
            if (waitedSoFar > 5000) {
                throw new AssertionError("Waiting too much on a test condition");
            }
        } while (status == null || status.getError() != null);
        log.info("QNode [" + qnode.getAddress() + "] is ready to serve deploy 0.");
    }

    try {
        // Business logic here
        ExecutorService service = Executors.newFixedThreadPool(N_THREADS);

        // These threads will continuously perform queries and check that the results are consistent.
        // They will also count how many deploys have happened since the beginning.
        for (int i = 0; i < N_THREADS; i++) {
            service.submit(new Runnable() {
                @Override
                public void run() {
                    try {
                        while (true) {
                            int randomDNode = Math.abs(random.nextInt()) % N_DNODES;
                            QueryStatus status = client.query(TABLESPACE, (randomDNode * 10) + "",
                                    "SELECT * FROM " + TABLE + ";", null);
                            log.info("Query status -> " + status);
                            assertEquals(1, status.getResult().size());
                            Map<String, Object> jsonResult = (Map<String, Object>) status.getResult().get(0);
                            Integer seenIteration = (Integer) jsonResult.get("iteration");
                            synchronized (iterationsSeen) {
                                iterationsSeen.add(seenIteration);
                            }
                            assertTrue(seenIteration <= iteration.get());
                            assertEquals(randomDNode, jsonResult.get("dnode"));
                            Thread.sleep(100);
                        }
                    } catch (InterruptedException ie) {
                        // Bye bye
                        log.info("Bye bye!");
                    } catch (Throwable e) {
                        e.printStackTrace();
                        failed.set(true);
                    }
                }
            });
        }

        final SploutConfiguration config = SploutConfiguration.getTestConfig();
        final int iterationsToPerform = config.getInt(QNodeProperties.VERSIONS_PER_TABLESPACE) + 5;
        for (int i = 0; i < iterationsToPerform; i++) {
            iteration.incrementAndGet();
            log.info("Deploy iteration: " + iteration.get());
            deployIteration(iteration.get(), random, client, testTablespace);

            new TestUtils.NotWaitingForeverCondition() {
                @Override
                public boolean endCondition() {
                    synchronized (iterationsSeen) {
                        return iterationsSeen.size() == (iteration.get() + 1);
                    }
                }
            }.waitAtMost(5000);
        }

        assertEquals(false, failed.get());

        service.shutdownNow(); // will interrupt all threads
        while (!service.isTerminated()) {
            Thread.sleep(100);
        }

        CoordinationStructures coord = TestUtils.getCoordinationStructures(config);
        assertNotNull(coord.getCopyVersionsBeingServed().get(TABLESPACE));

        // Assert that there is only MAX_VERSIONS versions of the tablespace (due to old version cleanup)
        new TestUtils.NotWaitingForeverCondition() {

            @Override
            public boolean endCondition() {
                QNodeHandler handler = (QNodeHandler) qNodes.get(0).getHandler();
                int seenVersions = 0;
                for (Map.Entry<TablespaceVersion, Tablespace> tablespaceVersion : handler.getContext()
                        .getTablespaceVersionsMap().entrySet()) {
                    if (tablespaceVersion.getKey().getTablespace().equals(TABLESPACE)) {
                        seenVersions++;
                    }
                }
                return seenVersions <= config.getInt(QNodeProperties.VERSIONS_PER_TABLESPACE);
            }
        }.waitAtMost(5000);
    } finally {
        closeSploutEnsemble();
        FileUtils.deleteDirectory(new File(TMP_FOLDER));
    }
}

From source file:org.cloudifysource.esc.driver.provisioning.BaseProvisioningDriver.java

/*********
 * Starts the management machines in parallel.
 * @param endTime deadline (absolute time in milliseconds) for the operation.
 * @param numberOfManagementMachines the number of management machines to start.
 * @return the details of the machines that were created.
 * @throws TimeoutException if the operation exceeds the deadline.
 * @throws CloudProvisioningException if provisioning fails.
 */
protected MachineDetails[] doStartManagementMachines(final long endTime, final int numberOfManagementMachines)
        throws TimeoutException, CloudProvisioningException {
    final ExecutorService executors = Executors.newFixedThreadPool(numberOfManagementMachines);

    @SuppressWarnings("unchecked")
    final Future<MachineDetails>[] futures = (Future<MachineDetails>[]) new Future<?>[numberOfManagementMachines];

    final ComputeTemplate managementTemplate = this.cloud.getCloudCompute().getTemplates()
            .get(this.cloud.getConfiguration().getManagementMachineTemplate());
    try {
        // Call startMachine asynchronously once for each management machine
        for (int i = 0; i < numberOfManagementMachines; i++) {
            final int index = i + 1;
            futures[i] = executors.submit(new Callable<MachineDetails>() {

                @Override
                public MachineDetails call() throws Exception {
                    return createServer(serverNamePrefix + index, endTime, managementTemplate);
                }
            });

        }

        // Wait for each of the async calls to terminate.
        int numberOfErrors = 0;
        Exception firstCreationException = null;
        final MachineDetails[] createdManagementMachines = new MachineDetails[numberOfManagementMachines];
        for (int i = 0; i < createdManagementMachines.length; i++) {
            try {
                createdManagementMachines[i] = futures[i].get(endTime - System.currentTimeMillis(),
                        TimeUnit.MILLISECONDS);
            } catch (final InterruptedException e) {
                ++numberOfErrors;
                publishEvent("failed_to_create_management_vm", e.getMessage());
                logger.log(Level.SEVERE, "Failed to start a management machine", e);
                if (firstCreationException == null) {
                    firstCreationException = e;
                }

            } catch (final ExecutionException e) {
                ++numberOfErrors;
                publishEvent("failed_to_create_management_vm", e.getMessage());
                logger.log(Level.SEVERE, "Failed to start a management machine", e);
                if (firstCreationException == null) {
                    firstCreationException = e;
                }
            }
        }

        // In case of a partial error, shutdown all servers that did start up
        if (numberOfErrors > 0) {
            handleProvisioningFailure(numberOfManagementMachines, numberOfErrors, firstCreationException,
                    createdManagementMachines);
        }

        return createdManagementMachines;
    } finally {
        if (executors != null) {
            executors.shutdownNow();
        }
    }
}

From source file:org.springframework.amqp.rabbit.connection.CachingConnectionFactoryTests.java

public void testConsumerChannelPhysicallyClosedWhenNotIsOpenGuts(boolean confirms) throws Exception {
    ExecutorService executor = Executors.newSingleThreadExecutor();
    try {
        com.rabbitmq.client.ConnectionFactory mockConnectionFactory = mock(
                com.rabbitmq.client.ConnectionFactory.class);
        com.rabbitmq.client.Connection mockConnection = mock(com.rabbitmq.client.Connection.class);
        Channel mockChannel = mock(Channel.class);

        when(mockConnectionFactory.newConnection(any(ExecutorService.class), anyString()))
                .thenReturn(mockConnection);
        when(mockConnection.createChannel()).thenReturn(mockChannel);
        when(mockChannel.isOpen()).thenReturn(true);
        when(mockConnection.isOpen()).thenReturn(true);

        CachingConnectionFactory ccf = new CachingConnectionFactory(mockConnectionFactory);
        ccf.setExecutor(executor);
        ccf.setPublisherConfirms(confirms);
        Connection con = ccf.createConnection();

        Channel channel = con.createChannel(false);
        RabbitUtils.setPhysicalCloseRequired(channel, true);
        when(mockChannel.isOpen()).thenReturn(false);
        final CountDownLatch physicalCloseLatch = new CountDownLatch(1);
        doAnswer(i -> {
            physicalCloseLatch.countDown();
            return null;
        }).when(mockChannel).close();
        channel.close();
        con.close(); // should be ignored

        assertTrue(physicalCloseLatch.await(10, TimeUnit.SECONDS));
    } finally {
        executor.shutdownNow();
    }
}

From source file:org.apache.cassandra.hadoop2.AbstractColumnFamilyInputFormat.java

public List<InputSplit> getSplits(JobContext context) throws IOException {
    Configuration conf = context.getConfiguration();

    validateConfiguration(conf);

    // canonical ranges and nodes holding replicas
    List<TokenRange> masterRangeNodes = getRangeMap(conf);

    keyspace = ConfigHelper.getInputKeyspace(context.getConfiguration());
    cfName = ConfigHelper.getInputColumnFamily(context.getConfiguration());
    partitioner = ConfigHelper.getInputPartitioner(context.getConfiguration());
    logger.debug("partitioner is " + partitioner);

    // canonical ranges, split into pieces, fetching the splits in parallel
    ExecutorService executor = Executors.newCachedThreadPool();
    List<InputSplit> splits = new ArrayList<InputSplit>();

    try {
        List<Future<List<InputSplit>>> splitfutures = new ArrayList<Future<List<InputSplit>>>();
        KeyRange jobKeyRange = ConfigHelper.getInputKeyRange(conf);
        Range<Token> jobRange = null;
        if (jobKeyRange != null) {
            if (jobKeyRange.start_key == null) {
                logger.warn("ignoring jobKeyRange specified without start_key");
            } else {
                if (!partitioner.preservesOrder())
                    throw new UnsupportedOperationException(
                            "KeyRange based on keys can only be used with a order preserving paritioner");
                if (jobKeyRange.start_token != null)
                    throw new IllegalArgumentException("only start_key supported");
                if (jobKeyRange.end_token != null)
                    throw new IllegalArgumentException("only start_key supported");
                jobRange = new Range<Token>(partitioner.getToken(jobKeyRange.start_key),
                        partitioner.getToken(jobKeyRange.end_key), partitioner);
            }
        }

        for (TokenRange range : masterRangeNodes) {
            if (jobRange == null) {
                // for each range, pick a live owner and ask it to compute bite-sized splits
                splitfutures.add(executor.submit(new SplitCallable(range, conf)));
            } else {
                Range<Token> dhtRange = new Range<Token>(
                        partitioner.getTokenFactory().fromString(range.start_token),
                        partitioner.getTokenFactory().fromString(range.end_token), partitioner);

                if (dhtRange.intersects(jobRange)) {
                    for (Range<Token> intersection : dhtRange.intersectionWith(jobRange)) {
                        range.start_token = partitioner.getTokenFactory().toString(intersection.left);
                        range.end_token = partitioner.getTokenFactory().toString(intersection.right);
                        // for each range, pick a live owner and ask it to compute bite-sized splits
                        splitfutures.add(executor.submit(new SplitCallable(range, conf)));
                    }
                }
            }
        }

        // wait until we have all the results back
        for (Future<List<InputSplit>> futureInputSplits : splitfutures) {
            try {
                splits.addAll(futureInputSplits.get());
            } catch (Exception e) {
                throw new IOException("Could not get input splits", e);
            }
        }
    } finally {
        executor.shutdownNow();
    }

    assert splits.size() > 0;
    Collections.shuffle(splits, new Random(System.nanoTime()));
    return splits;
}

From source file:gobblin.ingestion.google.webmaster.GoogleWebmasterDataFetcherImpl.java

private int getPagesSize(final String startDate, final String endDate, final String country,
        final List<Dimension> requestedDimensions, final List<ApiDimensionFilter> apiDimensionFilters)
        throws IOException {
    final ExecutorService es = Executors.newCachedThreadPool(ExecutorsUtils
            .newDaemonThreadFactory(Optional.of(log), Optional.of(this.getClass().getSimpleName())));

    int startRow = 0;
    long groupSize = Math.max(1, Math.round(API_REQUESTS_PER_SECOND));
    List<Future<Integer>> results = new ArrayList<>((int) groupSize);

    while (true) {
        for (int i = 0; i < groupSize; ++i) {
            startRow += GoogleWebmasterClient.API_ROW_LIMIT;
            final int start = startRow;
            final String interruptedMsg = String.format(
                    "Interrupted while trying to get the size of all pages for %s. Current start row is %d.",
                    country, start);

            Future<Integer> submit = es.submit(new Callable<Integer>() {
                @Override
                public Integer call() {
                    log.info(String.format("Getting page size from %s...", start));
                    while (true) {
                        try {
                            LIMITER.acquirePermits(1);
                        } catch (InterruptedException e) {
                            log.error("RateBasedLimiter: " + interruptedMsg, e);
                            return -1;
                        }

                        if (Thread.interrupted()) {
                            log.error(interruptedMsg);
                            return -1;
                        }

                        try {
                            List<String> pages = _client.getPages(_siteProperty, startDate, endDate, country,
                                    GoogleWebmasterClient.API_ROW_LIMIT, requestedDimensions,
                                    apiDimensionFilters, start);
                            if (pages.size() < GoogleWebmasterClient.API_ROW_LIMIT) {
                                return pages.size() + start; //Figured out the size
                            } else {
                                return -1;
                            }
                        } catch (IOException e) {
                            log.info(String.format("Getting page size from %s failed. Retrying...", start));
                        }
                    }
                }
            });
            results.add(submit);
        }
        //Check the results group in order. The first non-negative count indicates the size of total pages.
        for (Future<Integer> result : results) {
            try {
                Integer integer = result.get(GET_PAGE_SIZE_TIME_OUT, TimeUnit.MINUTES);
                if (integer >= 0) {
                    es.shutdownNow();
                    return integer;
                }
            } catch (InterruptedException | ExecutionException e) {
                throw new RuntimeException(e);
            } catch (TimeoutException e) {
                throw new RuntimeException(String.format(
                        "Exceeding the timeout of %d minutes while getting the total size of all pages.",
                        GET_PAGE_SIZE_TIME_OUT), e);
            }
        }
        results.clear();
    }
}