Example usage for java.util.concurrent Executors newSingleThreadExecutor

Introduction

On this page you can find example usage for java.util.concurrent Executors.newSingleThreadExecutor.

Prototype

public static ExecutorService newSingleThreadExecutor() 

Document

Creates an Executor that uses a single worker thread operating off an unbounded queue.
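
Before the project examples below, here is a minimal, self-contained sketch of the basic pattern: submit a task to the single worker thread, read its result from the returned Future, and shut the executor down so the worker thread can exit. It uses only JDK types and is not taken from any of the projects quoted below.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class SingleThreadExecutorExample {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        try {
            // Tasks submitted here run one after another on the same worker thread.
            Future<String> greeting = executor.submit(() -> "hello from " + Thread.currentThread().getName());
            System.out.println(greeting.get());
        } finally {
            // Always release the worker thread, otherwise a non-daemon thread keeps the JVM alive.
            executor.shutdown();
        }
    }
}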

Usage

From source file:info.archinnov.achilles.embedded.ServerStarter.java

private void start(final TypedMap parameters) {
    if (isAlreadyRunning()) {
        log.debug("Cassandra is already running, not starting new one");
        return;
    }

    final String triggersDir = createTriggersFolder();

    log.info(" Random embedded Cassandra RPC port/Thrift port = {}",
            parameters.<Integer>getTyped(CASSANDRA_THRIFT_PORT));
    log.info(" Random embedded Cassandra Native port/CQL port = {}",
            parameters.<Integer>getTyped(CASSANDRA_CQL_PORT));
    log.info(" Random embedded Cassandra Storage port = {}",
            parameters.<Integer>getTyped(CASSANDRA_STORAGE_PORT));
    log.info(" Random embedded Cassandra Storage SSL port = {}",
            parameters.<Integer>getTyped(CASSANDRA_STORAGE_SSL_PORT));
    log.info(" Random embedded Cassandra Remote JMX port = {}",
            System.getProperty("com.sun.management.jmxremote.port", "null"));
    log.info(" Embedded Cassandra triggers directory = {}", triggersDir);

    log.info("Starting Cassandra...");

    System.setProperty("cassandra.triggers_dir", triggersDir);
    System.setProperty("cassandra-foreground", "true");
    System.setProperty("cassandra.embedded.concurrent.reads",
            parameters.getTypedOr(CASSANDRA_CONCURRENT_READS, 32).toString());
    System.setProperty("cassandra.embedded.concurrent.writes",
            parameters.getTypedOr(CASSANDRA_CONCURRENT_WRITES, 32).toString());
    System.setProperty("cassandra-foreground", "true");

    final boolean useUnsafeCassandra = parameters.getTyped(USE_UNSAFE_CASSANDRA_DAEMON);

    if (useUnsafeCassandra) {
        System.setProperty("cassandra-num-tokens", "1");
    }

    System.setProperty("cassandra.config.loader", "info.archinnov.achilles.embedded.AchillesCassandraConfig");

    final CountDownLatch startupLatch = new CountDownLatch(1);
    final ExecutorService executor = Executors.newSingleThreadExecutor();
    final AtomicReference<CassandraDaemon> daemonRef = new AtomicReference<>();
    executor.execute(() -> {
        if (useUnsafeCassandra) {
            LOGGER.warn(
                    "******* WARNING, starting unsafe embedded Cassandra deamon. This should be only used for unit testing or development and not for production !");
        }

        CassandraDaemon cassandraDaemon = useUnsafeCassandra ? new AchillesCassandraDaemon()
                : new CassandraDaemon();

        cassandraDaemon.completeSetup();
        cassandraDaemon.activate();
        daemonRef.getAndSet(cassandraDaemon);
        startupLatch.countDown();
    });

    try {
        startupLatch.await(30, SECONDS);
    } catch (InterruptedException e) {
        log.error("Timeout starting Cassandra embedded", e);
        throw new IllegalStateException("Timeout starting Cassandra embedded", e);
    }

    // Generate an OrderedShutdownHook to shutdown all connections from java clients before closing the server
    Runtime.getRuntime().addShutdownHook(new Thread() {
        public void run() {
            log.info("Calling stop on Embedded Cassandra server");
            daemonRef.get().stop();

            log.info("Calling shutdown on all Cluster instances");
            // First call shutdown on all registered Java driver Cluster instances
            orderedShutdownHook.callShutDown();

            log.info("Shutting down embedded Cassandra server");
            // Then shutdown the server
            executor.shutdownNow();
        }
    });
}
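
The example above dedicates the single worker thread to the long-running Cassandra daemon and uses a CountDownLatch plus a JVM shutdown hook to coordinate startup and teardown. One detail worth noting: CountDownLatch.await(30, SECONDS) returns false on timeout rather than throwing, so the catch block above only covers interruption. A stripped-down sketch of the same pattern, with a hypothetical Service class standing in for the daemon:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

public class DaemonStarterSketch {

    /** Hypothetical stand-in for a long-running daemon such as CassandraDaemon. */
    static class Service {
        void start() { /* bring the service up */ }
        void stop() { /* bring the service down */ }
    }

    public static void main(String[] args) throws InterruptedException {
        final ExecutorService executor = Executors.newSingleThreadExecutor();
        final CountDownLatch started = new CountDownLatch(1);
        final AtomicReference<Service> serviceRef = new AtomicReference<>();

        executor.execute(() -> {
            Service service = new Service();
            service.start();                 // assumed to return once the daemon is up
            serviceRef.set(service);
            started.countDown();
        });

        // await(..) returns false on timeout instead of throwing, so check the result explicitly.
        if (!started.await(30, TimeUnit.SECONDS)) {
            throw new IllegalStateException("Timeout starting the embedded service");
        }

        // The worker thread is non-daemon, so the JVM stays alive until terminated externally;
        // the shutdown hook then stops the service and releases the executor.
        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            serviceRef.get().stop();
            executor.shutdownNow();
        }));
    }
}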

From source file:com.sdl.dxa.modules.degrees51.api.Degrees51DataProvider.java

@SneakyThrows(IOException.class)
private boolean updateFile(final String licenseKey, final String fileName, RequestPending requestPending) {
    if (!isUpdateNeeded(fileName)) {
        log.info("51degrees data file {} is up-to-date, update is not needed", fileName);
        return true;
    }

    boolean fileExists = new File(fileName).exists();

    if (isPaused(fileName)) {
        return fileExists;
    }

    if (!fileExists && requestPending == RequestPending.PENDING_REQUEST) {
        log.info("File {} needs an update but we have a pending request. "
                + "So we fallback to the next option (lite or default), set this file on pause, and update file in background",
                fileName);
        memorize(fileDelaysByNames, fileName, now().plusMinutes(fileUpdateReattemptDelayMinutes));
        Executors.newSingleThreadExecutor().execute(new Runnable() {
            @Override
            public void run() {
                if (licenseKey == null) {
                    updateLiteFileInternal();
                } else {
                    updateDataFileInternal(licenseKey, fileName);
                }
            }
        });
        return false;
    }

    log.info("File {} needs an update", fileName);

    return licenseKey == null ? updateLiteFileInternal() : updateDataFileInternal(licenseKey, fileName);
}
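
Here Executors.newSingleThreadExecutor().execute(...) is used fire-and-forget: the executor reference is discarded, so its worker thread stays alive until the JVM exits. When this pattern is used repeatedly, calling shutdown() right after execute(...) lets the thread terminate once the task finishes. A minimal sketch, with an illustrative updateInBackground task:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class FireAndForgetSketch {
    public static void main(String[] args) {
        ExecutorService oneShot = Executors.newSingleThreadExecutor();
        oneShot.execute(FireAndForgetSketch::updateInBackground);
        // shutdown() does not interrupt the running task; it only rejects new
        // submissions and lets the worker thread die once the task completes.
        oneShot.shutdown();
    }

    private static void updateInBackground() {
        System.out.println("updating data file in the background");
    }
}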

From source file:com.spotify.reaper.unit.service.SegmentRunnerTest.java

@Test
public void successTest() throws InterruptedException, ReaperException, ExecutionException {
    final IStorage storage = new MemoryStorage();
    RepairUnit cf = storage
            .addRepairUnit(new RepairUnit.Builder("reaper", "reaper", Sets.newHashSet("reaper")));
    RepairRun run = storage.addRepairRun(
            new RepairRun.Builder("reaper", cf.getId(), DateTime.now(), 0.5, 1, RepairParallelism.PARALLEL));
    storage.addRepairSegments(Collections.singleton(
            new RepairSegment.Builder(run.getId(), new RingRange(BigInteger.ONE, BigInteger.ZERO), cf.getId())),
            run.getId());
    final long segmentId = storage.getNextFreeSegment(run.getId()).get().getId();

    final ExecutorService executor = Executors.newSingleThreadExecutor();
    final MutableObject<Future<?>> future = new MutableObject<>();

    AppContext context = new AppContext();
    context.storage = storage;
    context.jmxConnectionFactory = new JmxConnectionFactory() {
        @Override
        public JmxProxy connect(final Optional<RepairStatusHandler> handler, String host) {
            JmxProxy jmx = mock(JmxProxy.class);
            when(jmx.getClusterName()).thenReturn("reaper");
            when(jmx.isConnectionAlive()).thenReturn(true);
            when(jmx.tokenRangeToEndpoint(anyString(), any(RingRange.class)))
                    .thenReturn(Lists.newArrayList(""));
            when(jmx.triggerRepair(any(BigInteger.class), any(BigInteger.class), anyString(),
                    Matchers.<RepairParallelism>any(), Sets.newHashSet(anyString())))
                            .then(new Answer<Integer>() {
                                @Override
                                public Integer answer(InvocationOnMock invocation) {
                                    assertEquals(RepairSegment.State.NOT_STARTED,
                                            storage.getRepairSegment(segmentId).get().getState());
                                    future.setValue(executor.submit(new Runnable() {
                                        @Override
                                        public void run() {
                                            handler.get().handle(1, ActiveRepairService.Status.STARTED,
                                                    "Repair command 1 has started");
                                            assertEquals(RepairSegment.State.RUNNING,
                                                    storage.getRepairSegment(segmentId).get().getState());
                                            // report about an unrelated repair. Shouldn't affect anything.
                                            handler.get().handle(2, ActiveRepairService.Status.SESSION_FAILED,
                                                    "Repair command 2 has failed");
                                            handler.get().handle(1, ActiveRepairService.Status.SESSION_SUCCESS,
                                                    "Repair session succeeded in command 1");
                                            assertEquals(RepairSegment.State.DONE,
                                                    storage.getRepairSegment(segmentId).get().getState());
                                            handler.get().handle(1, ActiveRepairService.Status.FINISHED,
                                                    "Repair command 1 has finished");
                                            assertEquals(RepairSegment.State.DONE,
                                                    storage.getRepairSegment(segmentId).get().getState());
                                        }
                                    }));
                                    return 1;
                                }
                            });

            return jmx;
        }
    };
    RepairRunner rr = mock(RepairRunner.class);
    RepairUnit ru = mock(RepairUnit.class);
    SegmentRunner sr = new SegmentRunner(context, segmentId, Collections.singleton(""), 1000, 0.5,
            RepairParallelism.PARALLEL, "reaper", ru, rr);
    sr.run();

    future.getValue().get();
    executor.shutdown();

    assertEquals(RepairSegment.State.DONE, storage.getRepairSegment(segmentId).get().getState());
    assertEquals(0, storage.getRepairSegment(segmentId).get().getFailCount());
}
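
The test above uses the single-thread executor to simulate asynchronous repair-status callbacks: the mocked JMX proxy submits a Runnable and stashes the resulting Future in a MutableObject so the test can wait for the background work before its final assertions. A reduced sketch of that hand-off, using a JDK AtomicReference instead of Commons Lang's MutableObject:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicReference;

public class CaptureFutureSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        AtomicReference<Future<?>> pending = new AtomicReference<>();

        // Code triggered asynchronously (e.g. from a mock's Answer) schedules background
        // work and publishes the Future so the caller can join on it later.
        pending.set(executor.submit(() -> System.out.println("simulated callback work")));

        pending.get().get();   // wait for the background work before asserting
        executor.shutdown();
    }
}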

From source file:com.isoftstone.crawl.template.crawlstate.CrawlState.java

/**
 * Runs an incremental crawl for the given folder.
 *
 * @param folderName
 * @param isDeploy
 */
public String crawlIncrement(String folderName, boolean isDeploy) {
    String rootFolder = Config.getValue(WebtoolConstants.FOLDER_NAME_ROOT);
    String shDir;
    String crawlDir = Config.getValue(WebtoolConstants.KEY_NUTCH_CRAWLDIR);
    String solrURL = Config.getValue(WebtoolConstants.KEY_NUTCH_SOLR_URL);
    String depth = "2";
    String dispatchName = folderName + WebtoolConstants.DISPATCH_REIDIS_POSTFIX_INCREMENT;
    DispatchVo dispatchVo = RedisOperator.getDispatchResult(dispatchName, Constants.DISPATCH_REDIS_DBINDEX);
    boolean userProxy = dispatchVo.isUserProxy();

    //--shDir.
    if (isDeploy) {
        shDir = Config.getValue(WebtoolConstants.KEY_NUTCH_DEPLOY_INCREMENT_SHDIR);
        if (userProxy) {
            shDir = Config.getValue(WebtoolConstants.KEY_NUTCH_DEPLOY_INCREMENT_PROXY_SHDIR);
        }
    } else {
        shDir = Config.getValue(WebtoolConstants.KEY_NUTCH_LOCAL_INCREMENT_SHDIR);
        if (userProxy) {
            shDir = Config.getValue(WebtoolConstants.KEY_NUTCH_LOCAL_INCREMENT_PROXY_SHDIR);
        }
    }

    String folderNameSeed = dispatchName.substring(0, dispatchName.lastIndexOf("_"));
    String folderNameData = folderNameSeed.substring(0, folderNameSeed.lastIndexOf("_"));
    String[] folderNameStrs = folderNameSeed.split("_");
    folderNameSeed = folderNameStrs[0] + "_" + folderNameStrs[1] + "_"
            + WebtoolConstants.INCREMENT_FILENAME_SIGN + "_" + folderNameStrs[2];
    folderNameData = folderNameData.substring(0, folderNameData.lastIndexOf("_")) + "_"
            + WebtoolConstants.INCREMENT_FILENAME_SIGN;
    String seedFolder = rootFolder + File.separator + folderNameSeed;
    String command = shDir + " " + seedFolder + " " + crawlDir + folderNameData + "_data" + " " + solrURL + " "
            + depth;
    final RunManager runManager = getRunmanager(command);
    LOG.info("??:" + command);
    CrawlToolResource.putSeedsFolder(folderNameSeed, "local");

    String resultMsg = "";
    ExecutorService es = Executors.newSingleThreadExecutor();
    Future<String> result = es.submit(new Callable<String>() {
        public String call() throws Exception {
            // the other thread
            return ShellUtils.execCmd(runManager);
        }
    });
    try {
        resultMsg = result.get();
    } catch (Exception e) {
        LOG.info("", e);
        // failed
    }
    //        new Thread(new Runnable() {
    //
    //            @Override
    //            public void run() {
    //                ShellUtils.execCmd(runManager);
    //            }
    //        }).start();
    return resultMsg;
}
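
The method above pushes the shell command onto the single worker thread and then immediately blocks on Future.get(), which serialises command execution and gives one place to handle failures. A compact sketch of the same submit-and-wait idea, with a bounded wait so a runaway command cannot block the caller forever (the timeout value is illustrative):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class BlockingSubmitSketch {
    public static void main(String[] args) {
        ExecutorService es = Executors.newSingleThreadExecutor();
        Future<String> result = es.submit(() -> "output of the command");  // Callable<String>
        try {
            System.out.println(result.get(10, TimeUnit.MINUTES));          // bounded wait
        } catch (TimeoutException e) {
            result.cancel(true);                                           // interrupt the worker
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            es.shutdown();
        }
    }
}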

From source file:com.yahoo.gondola.container.ZookeeperRegistryClientTest.java

@Test
public void testWaitForClusterComplete() throws Exception {
    // 0. A three-node shard, two servers join
    ExecutorService executorService = Executors.newSingleThreadExecutor();
    Future<Boolean> result;
    registryClient.register(SITE_1_HOST_3_CLUSTERS, new InetSocketAddress(1234), URI.create("http://foo.com"));
    registryClient.register(SITE_1_HOST_2_CLUSTERS, new InetSocketAddress(1235), URI.create("http://foo.com"));

    // 1. The waitForClusterComplete call should block for 1 second
    Callable<Boolean> awaitCall = () -> registryClient.waitForClusterComplete(1000);

    result = executorService.submit(awaitCall);
    assertEquals(result.get(), Boolean.FALSE);

    // 2. The blocked request should complete after the next node joins
    result = executorService.submit(awaitCall);
    registryClient.register(SITE_1_HOST_1_CLUSTER, new InetSocketAddress(1236), URI.create("http://foo.com"));
    assertEquals(result.get(), Boolean.TRUE);

    // 3. The request should succeed immediately, since all nodes are already in the shard.
    result = executorService.submit(awaitCall);
    assertEquals(result.get(), Boolean.TRUE);
}
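
The test runs the blocking waitForClusterComplete call on the single worker thread, leaving the main test thread free to register the missing node and then assert on the Future's result. The same shape in isolation, where blockUntilReady is a hypothetical stand-in for the blocking call under test:

import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class BlockingCallTestSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        CountDownLatch ready = new CountDownLatch(1);

        // Hypothetical blocking call: returns true once the system becomes "ready" within the timeout.
        Callable<Boolean> blockUntilReady = () -> ready.await(1, TimeUnit.SECONDS);

        Future<Boolean> result = executor.submit(blockUntilReady);
        ready.countDown();                  // flip the state from the test thread
        System.out.println(result.get());   // true: the blocked call observed the state change

        executor.shutdown();
    }
}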

From source file:hivemall.mix.server.MixServerTest.java

@Test
public void test2ClientsZeroOneSparseModel() throws InterruptedException {
    final int port = NetUtils.getAvailablePort();
    CommandLine cl = CommandLineUtils.parseOptions(
            new String[] { "-port", Integer.toString(port), "-sync_threshold", "30" }, MixServer.getOptions());
    MixServer server = new MixServer(cl);
    ExecutorService serverExec = Executors.newSingleThreadExecutor();
    serverExec.submit(server);

    waitForState(server, ServerState.RUNNING);

    final ExecutorService clientsExec = Executors.newCachedThreadPool();
    for (int i = 0; i < 2; i++) {
        clientsExec.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    invokeClient01("test2ClientsZeroOne", port, false, false);
                } catch (InterruptedException e) {
                    Assert.fail(e.getMessage());
                }
            }
        });
    }
    clientsExec.awaitTermination(30, TimeUnit.SECONDS);
    clientsExec.shutdown();
    serverExec.shutdown();
}
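
One ordering detail in the example above: awaitTermination only reports termination of an executor that has already been shut down, so calling it before shutdown() simply blocks until the 30-second timeout elapses. If the intent is to wait for the client tasks to finish, the usual sequence is shutdown first, then await (a sketch against the clientsExec pool from the test):

clientsExec.shutdown();                               // stop accepting new tasks
if (!clientsExec.awaitTermination(30, TimeUnit.SECONDS)) {
    clientsExec.shutdownNow();                        // give up and interrupt stragglers
}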

From source file:com.echopf.members.ECHOMemberQuery.java

/**
 * Does login.
 * @param sync if TRUE, the main (UI) thread waits for the login to complete in a background thread
 *              (a synchronous communication)
 * @param callback invoked after the login has completed
 * @param instanceId the reference ID of the instance to which the logged-in member belongs
 * @param login_id
 * @param password
 * @throws ECHOException
 */
protected static ECHOMemberObject doLogin(final boolean sync, final LoginCallback callback,
        final String instanceId, final String login_id, final String password) throws ECHOException {
    final Handler handler = new Handler();

    // Get ready a background thread
    ExecutorService executor = Executors.newSingleThreadExecutor();
    Callable<ECHOMemberObject> communicator = new Callable<ECHOMemberObject>() {

        @Override
        public ECHOMemberObject call() throws ECHOException {

            ECHOException exception = null;
            ECHOMemberObject memberObj = null;

            try {
                JSONObject params = new JSONObject();
                params.put("login_id", login_id);
                params.put("password", password);
                JSONObject response = ECHOQuery.postRequest(instanceId + "/login", params);
                memberObj = new ECHOMemberObject(instanceId, response.optString("refid"), response);

                //
                ECHO.accessToken = response.optString("access_token");

            } catch (ECHOException e) {
                exception = e;
            } catch (Exception e) {
                exception = new ECHOException(e);
            }

            if (!sync) {

                // Execute a callback method in the main (UI) thread.
                if (callback != null) {
                    final ECHOException fException = exception;
                    final ECHOMemberObject fMemberObj = memberObj;

                    handler.post(new Runnable() {
                        @Override
                        public void run() {
                            callback.done(fMemberObj, fException);
                        }
                    });
                }

                return null;

            } else {

                if (exception == null)
                    return memberObj;
                throw exception;
            }
        }
    };

    Future<ECHOMemberObject> future = executor.submit(communicator);

    if (sync) {
        try {
            return future.get();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // ignore/reset
        } catch (ExecutionException e) {
            Throwable e2 = e.getCause();

            if (e2 instanceof ECHOException) {
                throw (ECHOException) e2;
            }

            throw new RuntimeException(e2);
        }
    }

    return null;
}
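
This example drives both calling styles from one background executor: when sync is true, the caller blocks on Future.get() and unwraps the ExecutionException back to the task's own exception type; when false, the result is delivered to the callback on the UI thread via a Handler and the Future is never read. The unwrapping step in isolation, with a hypothetical MyCheckedException standing in for ECHOException:

import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class UnwrapExecutionExceptionSketch {

    /** Hypothetical checked exception thrown by the background task. */
    static class MyCheckedException extends Exception {
        MyCheckedException(String message) { super(message); }
    }

    static String login() throws MyCheckedException {
        throw new MyCheckedException("login failed");
    }

    public static void main(String[] args) throws MyCheckedException {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        Future<String> future = executor.submit(UnwrapExecutionExceptionSketch::login);
        try {
            System.out.println(future.get());
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();          // restore the interrupt flag
        } catch (ExecutionException e) {
            Throwable cause = e.getCause();
            if (cause instanceof MyCheckedException) {
                throw (MyCheckedException) cause;        // rethrow the task's own exception type
            }
            throw new RuntimeException(cause);
        } finally {
            executor.shutdown();
        }
    }
}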

From source file:com.netflix.aegisthus.io.sstable.SSTableColumnScanner.java

public rx.Observable<AtomWritable> observable() {
    final ExecutorService service = Executors.newSingleThreadExecutor();
    rx.Observable<AtomWritable> ret = rx.Observable.create(new OnSubscribe<AtomWritable>() {
        @Override
        public void call(final Subscriber<? super AtomWritable> subscriber) {
            service.execute(new Runnable() {
                @Override
                public void run() {
                    deserialize(subscriber);
                    subscriber.onCompleted();
                }
            });
        }
    });
    LOG.info("created observable");
    return ret;
}

From source file:de.dfki.iui.mmds.scxml.engine.SCXMLEngineActivator.java

/**
 * Informs all interested listeners/handlers about the current configuration
 * history of the engine and provides a list of all leaves in the
 * configuration from the last steps, so that the complete configuration can
 * be restored (if the client has the corresponding model). The length of
 * the history can vary.
 * 
 * @param id
 *            - The id of the engine that changed its configuration.
 * @param history
 *            - The history of the engine's configuration. The first set
 *            represents the last step, the 2nd represents the 2nd last step
 *            etc.
 */
public static void sendScxmlConfigHistory(final String id, final List<Set<String>> history) {
    if (getEventAdmin() == null)
        return;
    Executors.newSingleThreadExecutor().execute(new Runnable() {
        @Override
        public void run() {
            getEventAdmin().postEvent(new SCXMLConfigHistoryEvent(id, history));
        }
    });
}
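
Note that this helper creates a fresh single-thread executor on every call and never shuts it down, so each invocation leaves an idle worker thread behind. One way to keep the "post events off the caller's thread" behaviour without leaking threads is a single shared executor per component; a sketch, with the illustrative EVENT_EXECUTOR field name:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class EventPosterSketch {
    // One shared worker thread for all fire-and-forget event deliveries.
    private static final ExecutorService EVENT_EXECUTOR = Executors.newSingleThreadExecutor();

    public static void postEventAsync(Runnable delivery) {
        EVENT_EXECUTOR.execute(delivery);
    }

    /** Call once when the component stops, e.g. from the bundle's stop() method. */
    public static void shutdownEventExecutor() {
        EVENT_EXECUTOR.shutdown();
    }

    public static void main(String[] args) {
        postEventAsync(() -> System.out.println("event posted off the caller's thread"));
        shutdownEventExecutor();
    }
}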