Example usage for java.util.concurrent.atomic.AtomicBoolean#AtomicBoolean(boolean)

Introduction

On this page you can find example usages of the java.util.concurrent.atomic.AtomicBoolean(boolean) constructor.

Prototype

public AtomicBoolean(boolean initialValue) 

Document

Creates a new AtomicBoolean with the given initial value.
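
As a quick orientation before the project examples below, here is a minimal, self-contained sketch of the constructor in use; the flag name and printed messages are purely illustrative:

import java.util.concurrent.atomic.AtomicBoolean;

public class AtomicBooleanExample {
    public static void main(String[] args) {
        // Create the flag with an explicit initial value of false.
        AtomicBoolean started = new AtomicBoolean(false);

        // compareAndSet atomically flips the flag only if it still holds the expected
        // value, so exactly one caller can win the transition from false to true.
        if (started.compareAndSet(false, true)) {
            System.out.println("initialized by this thread");
        }

        System.out.println("current value: " + started.get());
    }
}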

Usage

From source file:io.pravega.controller.task.Stream.StreamMetadataTasks.java

private CompletionStage<Void> checkDone(Supplier<CompletableFuture<Boolean>> condition) {
    AtomicBoolean isDone = new AtomicBoolean(false);
    return Futures.loop(() -> !isDone.get(),
            () -> Futures.delayedFuture(condition, 100, executor).thenAccept(isDone::set), executor);
}

From source file:io.nats.client.ITClusterTest.java

@Test
public void testHotSpotReconnect() throws InterruptedException {
    int numClients = 100;
    ExecutorService executor = Executors.newFixedThreadPool(numClients,
            new NatsThreadFactory("testhotspotreconnect"));

    final BlockingQueue<String> rch = new LinkedBlockingQueue<String>();
    final BlockingQueue<Integer> dch = new LinkedBlockingQueue<Integer>();
    final AtomicBoolean shutdown = new AtomicBoolean(false);
    try (NatsServer s1 = runServerOnPort(1222)) {
        try (NatsServer s2 = runServerOnPort(1224)) {
            try (NatsServer s3 = runServerOnPort(1226)) {

                final class NATSClient implements Runnable {
                    Connection nc = null;
                    final AtomicInteger numReconnects = new AtomicInteger(0);
                    final AtomicInteger numDisconnects = new AtomicInteger(0);
                    String currentUrl = null;
                    final AtomicInteger instance = new AtomicInteger(-1);

                    final Options opts;

                    NATSClient(int inst) {
                        this.instance.set(inst);
                        opts = defaultOptions();
                        opts.servers = Nats.processUrlArray(testServers);

                        opts.disconnectedCb = new DisconnectedCallback() {
                            public void onDisconnect(ConnectionEvent event) {
                                numDisconnects.incrementAndGet();
                                try {
                                    dch.put(instance.get());
                                } catch (InterruptedException e) {
                                    e.printStackTrace();
                                }
                                nc.setDisconnectedCallback(null);
                            }
                        };
                        opts.reconnectedCb = new ReconnectedCallback() {
                            public void onReconnect(ConnectionEvent event) {
                                numReconnects.incrementAndGet();
                                currentUrl = nc.getConnectedUrl();
                                try {
                                    rch.put(currentUrl);
                                } catch (InterruptedException e) {
                                    e.printStackTrace();
                                }
                            }
                        };
                    }

                    @Override
                    public void run() {
                        try {
                            nc = opts.connect();
                            assertTrue(!nc.isClosed());
                            assertNotNull(nc.getConnectedUrl());
                            currentUrl = nc.getConnectedUrl();
                            // System.err.println("Instance " + instance + " connected to " +
                            // currentUrl);
                            while (!shutdown.get()) {
                                sleep(10);
                            }
                            nc.close();
                        } catch (IOException e) {
                            e.printStackTrace();
                        }
                    }

                    public synchronized boolean isConnected() {
                        return (nc != null && !nc.isClosed());
                    }

                    public void shutdown() {
                        shutdown.set(true);
                    }
                }

                List<NATSClient> tasks = new ArrayList<NATSClient>(numClients);
                for (int i = 0; i < numClients; i++) {
                    NATSClient task = new NATSClient(i);
                    tasks.add(task);
                    executor.submit(task);
                }

                Map<String, Integer> cs = new HashMap<String, Integer>();

                int numReady = 0;
                while (numReady < numClients) {
                    numReady = 0;
                    for (NATSClient cli : tasks) {
                        if (cli.isConnected()) {
                            numReady++;
                        }
                    }
                    sleep(100);
                }

                s1.shutdown();
                sleep(1000);

                int disconnected = 0;
                // wait for disconnects
                while (dch.size() > 0 && disconnected < numClients) {
                    Integer instance = -1;
                    instance = dch.poll(5, TimeUnit.SECONDS);
                    assertNotNull("timed out waiting for disconnect signal", instance);
                    disconnected++;
                }
                assertTrue(disconnected > 0);

                int reconnected = 0;
                // wait for reconnects
                for (int i = 0; i < disconnected; i++) {
                    String url = null;
                    while (rch.size() == 0) {
                        sleep(50);
                    }
                    url = rch.poll(5, TimeUnit.SECONDS);
                    assertNotNull("timed out waiting for reconnect signal", url);
                    reconnected++;
                    Integer count = cs.get(url);
                    if (count != null) {
                        cs.put(url, ++count);
                    } else {
                        cs.put(url, 1);
                    }
                }

                for (NATSClient client : tasks) {
                    client.shutdown();
                }
                executor.shutdownNow();
                assertTrue(executor.awaitTermination(2, TimeUnit.SECONDS));

                assertEquals(disconnected, reconnected);

                int numServers = 2;

                assertEquals(numServers, cs.size());

                int expected = numClients / numServers;
                // We expect a 40 percent variance
                int var = (int) ((float) expected * 0.40);

                int delta = Math.abs(cs.get(testServers[2]) - cs.get(testServers[4]));
                // System.err.printf("var = %d, delta = %d\n", var, delta);
                if (delta > var) {
                    String str = String.format("Connected clients to servers out of range: %d/%d", delta, var);
                    fail(str);
                }
            }
        }
    }
}

From source file:com.nridge.connector.ws.con_ws.task.TaskConnectorWS.java

/**
  * If this task is scheduled to be executed (e.g. its run/test
  * name matches the command line arguments), then this method
  * is guaranteed to be executed prior to the thread being
  * started.
  *
  * @param anAppMgr Application manager instance.
  *
  * @throws com.nridge.core.base.std.NSException Application specific exception.
  */
@Override
public void init(AppMgr anAppMgr) throws NSException {
    mAppMgr = anAppMgr;
    Logger appLogger = mAppMgr.getLogger(this, "init");

    appLogger.trace(mAppMgr.LOGMSG_TRACE_ENTER);

    mIsAlive = new AtomicBoolean(false);

    // Write our configuration properties for troubleshooting purposes.

    mAppMgr.writeCfgProperties(appLogger);

    // Assign our between crawl sleep time.

    mSleepTimeInMinutes = 15;
    String sleepTimeString = mAppMgr.getString(Constants.CFG_PROPERTY_PREFIX + ".run_sleep_between");
    if (StringUtils.endsWithIgnoreCase(sleepTimeString, "m")) {
        String minuteString = StringUtils.stripEnd(sleepTimeString, "m");
        if ((StringUtils.isNotEmpty(minuteString)) && (StringUtils.isNumeric(minuteString)))
            mSleepTimeInMinutes = Integer.parseInt(minuteString);
    } else if ((StringUtils.isNotEmpty(sleepTimeString)) && (StringUtils.isNumeric(sleepTimeString)))
        mSleepTimeInMinutes = Integer.parseInt(sleepTimeString);

    // The extract queue holds documents that have been extracted from the content source.

    int extractQueueSize = mAppMgr.getInt(Constants.CFG_PROPERTY_PREFIX + ".extract.queue_length",
            Connector.QUEUE_LENGTH_DEFAULT);
    BlockingQueue extractQueue = new ArrayBlockingQueue(extractQueueSize);
    mAppMgr.addProperty(Connector.QUEUE_EXTRACT_NAME, extractQueue);

    // The transform queue holds documents that have been transformed after extraction.

    int transformQueueSize = mAppMgr.getInt(Constants.CFG_PROPERTY_PREFIX + ".transform.queue_length",
            Connector.QUEUE_LENGTH_DEFAULT);
    BlockingQueue transformQueue = new ArrayBlockingQueue(transformQueueSize);
    mAppMgr.addProperty(Connector.QUEUE_TRANSFORM_NAME, transformQueue);

    // The publish queue holds documents that have been published to the search index.

    int publishQueueSize = mAppMgr.getInt(Constants.CFG_PROPERTY_PREFIX + ".publish.queue_length",
            Connector.QUEUE_LENGTH_DEFAULT);
    BlockingQueue publishQueue = new ArrayBlockingQueue(publishQueueSize);
    mAppMgr.addProperty(Connector.QUEUE_PUBLISH_NAME, publishQueue);

    // Load our schema definition from the data source folder.

    DataBag schemaBag;
    String schemaPathFileName = String.format("%s%c%s", mAppMgr.getString(mAppMgr.APP_PROPERTY_DS_PATH),
            File.separatorChar, Constants.SCHEMA_FILE_NAME);
    DataBagXML dataBagXML = new DataBagXML();
    try {
        dataBagXML.load(schemaPathFileName);
        schemaBag = dataBagXML.getBag();
    } catch (Exception e) {
        String msgStr = String.format("%s: %s", schemaPathFileName, e.getMessage());
        appLogger.error(msgStr);
        appLogger.warn("Using internal document schema as alternative - data source schema ignored.");
        schemaBag = schemaBag();
    }

    mAppMgr.addProperty(Connector.PROPERTY_SCHEMA_NAME, schemaBag);

    // Create our mail manager instance.

    MailManager mailManager = new MailManager(mAppMgr, Constants.CFG_PROPERTY_PREFIX + ".mail");
    mAppMgr.addProperty(Connector.PROPERTY_MAIL_NAME, mailManager);

    // Create/Load service time tracking file.

    mServiceTimer = new ServiceTimer(mAppMgr);
    mServiceTimer.setPropertyPrefix(Constants.CFG_PROPERTY_PREFIX);
    String stPathFileName = mServiceTimer.createServicePathFileName();
    File stFile = new File(stPathFileName);
    if (stFile.exists())
        mServiceTimer.load();

    // Is there an explicit list of phases to execute?

    String propertyName = Constants.CFG_PROPERTY_PREFIX + ".phase_list";
    String phaseProperty = mAppMgr.getString(propertyName);
    if (StringUtils.isNotEmpty(phaseProperty)) {
        if (mAppMgr.isPropertyMultiValue(propertyName))
            mPhases = mAppMgr.getStringArray(propertyName);
        else {
            mPhases = new String[1];
            mPhases[0] = phaseProperty;
        }
    }

    // Load and assign our crawl follow and ignore instances.

    CrawlFollow crawlFollow = new CrawlFollow(mAppMgr);
    crawlFollow.setCfgPropertyPrefix(Constants.CFG_PROPERTY_PREFIX + ".extract");
    try {
        crawlFollow.load();
    } catch (NSException | IOException e) {
        String msgStr = String.format("Crawl Follow: %s", e.getMessage());
        appLogger.error(msgStr);
    }
    mAppMgr.addProperty(Constants.PROPERTY_CRAWL_FOLLOW, crawlFollow);

    CrawlIgnore crawlIgnore = new CrawlIgnore(mAppMgr);
    crawlIgnore.setCfgPropertyPrefix(Constants.CFG_PROPERTY_PREFIX + ".extract");
    try {
        crawlIgnore.load();
    } catch (NSException | IOException e) {
        String msgStr = String.format("Crawl Ignore: %s", e.getMessage());
        appLogger.error(msgStr);
    }
    mAppMgr.addProperty(Constants.PROPERTY_CRAWL_IGNORE, crawlIgnore);

    // Clear out crawl queue from previous service sessions.

    CrawlQueue crawlQueue = new CrawlQueue(mAppMgr);
    crawlQueue.reset();

    appLogger.trace(mAppMgr.LOGMSG_TRACE_DEPART);

    mIsAlive.set(true);
}

From source file:com.nridge.connector.fs.con_fs.task.TaskConnectorFS.java

/**
  * If this task is scheduled to be executed (e.g. its run/test
  * name matches the command line arguments), then this method
  * is guaranteed to be executed prior to the thread being
  * started.
  *
  * @param anAppMgr Application manager instance.
  *
  * @throws com.nridge.core.base.std.NSException Application specific exception.
  */
@Override
public void init(AppMgr anAppMgr) throws NSException {
    mAppMgr = anAppMgr;
    Logger appLogger = mAppMgr.getLogger(this, "init");

    appLogger.trace(mAppMgr.LOGMSG_TRACE_ENTER);

    mIsAlive = new AtomicBoolean(false);

    // Write our configuration properties for troubleshooting purposes.

    mAppMgr.writeCfgProperties(appLogger);

    // Assign our between crawl sleep time.

    mSleepTimeInMinutes = 15;
    String sleepTimeString = mAppMgr.getString(Constants.CFG_PROPERTY_PREFIX + ".run_sleep_between");
    if (StringUtils.endsWithIgnoreCase(sleepTimeString, "m")) {
        String minuteString = StringUtils.stripEnd(sleepTimeString, "m");
        if ((StringUtils.isNotEmpty(minuteString)) && (StringUtils.isNumeric(minuteString)))
            mSleepTimeInMinutes = Integer.parseInt(minuteString);
    } else if ((StringUtils.isNotEmpty(sleepTimeString)) && (StringUtils.isNumeric(sleepTimeString)))
        mSleepTimeInMinutes = Integer.parseInt(sleepTimeString);

    // The extract queue holds documents that have been extracted from the content source.

    int extractQueueSize = mAppMgr.getInt(Constants.CFG_PROPERTY_PREFIX + ".extract.queue_length",
            Connector.QUEUE_LENGTH_DEFAULT);
    BlockingQueue extractQueue = new ArrayBlockingQueue(extractQueueSize);
    mAppMgr.addProperty(Connector.QUEUE_EXTRACT_NAME, extractQueue);

    // The transform queue holds documents that have been transformed after extraction.

    int transformQueueSize = mAppMgr.getInt(Constants.CFG_PROPERTY_PREFIX + ".transform.queue_length",
            Connector.QUEUE_LENGTH_DEFAULT);
    BlockingQueue transformQueue = new ArrayBlockingQueue(transformQueueSize);
    mAppMgr.addProperty(Connector.QUEUE_TRANSFORM_NAME, transformQueue);

    // The publish queue holds documents that have been published to the search index.

    int publishQueueSize = mAppMgr.getInt(Constants.CFG_PROPERTY_PREFIX + ".publish.queue_length",
            Connector.QUEUE_LENGTH_DEFAULT);
    BlockingQueue publishQueue = new ArrayBlockingQueue(publishQueueSize);
    mAppMgr.addProperty(Connector.QUEUE_PUBLISH_NAME, publishQueue);

    // Load our schema definition from the data source folder.

    DataBag schemaBag;
    String schemaPathFileName = String.format("%s%c%s", mAppMgr.getString(mAppMgr.APP_PROPERTY_DS_PATH),
            File.separatorChar, Constants.SCHEMA_FILE_NAME);
    DataBagXML dataBagXML = new DataBagXML();
    try {
        dataBagXML.load(schemaPathFileName);
        schemaBag = dataBagXML.getBag();
    } catch (Exception e) {
        String msgStr = String.format("%s: %s", schemaPathFileName, e.getMessage());
        appLogger.error(msgStr);
        appLogger.warn("Using internal document schema as alternative - data source schema ignored.");
        schemaBag = schemaBag();
    }

    mAppMgr.addProperty(Connector.PROPERTY_SCHEMA_NAME, schemaBag);

    // Create our mail manager instance.

    MailManager mailManager = new MailManager(mAppMgr, Constants.CFG_PROPERTY_PREFIX + ".mail");
    mAppMgr.addProperty(Connector.PROPERTY_MAIL_NAME, mailManager);

    // Create/Load service time tracking file.

    mServiceTimer = new ServiceTimer(mAppMgr);
    mServiceTimer.setPropertyPrefix(Constants.CFG_PROPERTY_PREFIX);
    String stPathFileName = mServiceTimer.createServicePathFileName();
    File stFile = new File(stPathFileName);
    if (stFile.exists())
        mServiceTimer.load();

    // Is there an explicit list of phases to execute?

    String propertyName = Constants.CFG_PROPERTY_PREFIX + ".phase_list";
    String phaseProperty = mAppMgr.getString(propertyName);
    if (StringUtils.isNotEmpty(phaseProperty)) {
        if (mAppMgr.isPropertyMultiValue(propertyName))
            mPhases = mAppMgr.getStringArray(propertyName);
        else {
            mPhases = new String[1];
            mPhases[0] = phaseProperty;
        }
    }

    // Clear out crawl queue from previous service sessions.

    CrawlQueue crawlQueue = new CrawlQueue(mAppMgr);
    crawlQueue.reset();

    // Create Restlet server instance.

    int portNumber = mAppMgr.getInt(Constants.CFG_PROPERTY_PREFIX + ".restlet.port_number",
            Constants.APPLICATION_PORT_NUMBER_DEFAULT);
    RestletApplication restletApplication = new RestletApplication(mAppMgr);

    appLogger.info("Starting Restlet Server.");
    mServer = new Server(Protocol.HTTP, portNumber, restletApplication);
    try {
        mServer.start();
    } catch (Exception e) {
        appLogger.error("Restlet Server (start): " + e.getMessage(), e);
        throw new NSException(e.getMessage());
    }

    appLogger.trace(mAppMgr.LOGMSG_TRACE_DEPART);

    mIsAlive.set(true);
}

From source file:com.yahoo.sql4d.indexeragent.sql.DBAccessor.java

/**
 * Suitable for CRUD operations where no result set is expected.
 * @param params Named parameter values bound into the query.
 * @param query SQL statement to execute, using named (:name) parameters.
 * @return The value reported by PreparedStatement#execute(), or false if an exception occurred.
 */
public boolean execute(Map<String, String> params, String query) {
    final AtomicBoolean result = new AtomicBoolean(false);
    Tuple2<DataSource, Connection> conn = null;
    try {
        conn = getConnection();
        NamedParameterJdbcTemplate jdbcTemplate = new NamedParameterJdbcTemplate(conn._1());
        jdbcTemplate.execute(query, params, new PreparedStatementCallback<Void>() {
            @Override
            public Void doInPreparedStatement(PreparedStatement ps) {
                try {
                    result.set(ps.execute());
                } catch (SQLException e) {
                    result.set(false);
                }
                return null;
            }
        });
    } catch (Exception ex) {
        Logger.getLogger(DBAccessor.class.getName()).log(Level.SEVERE, null, ex);
        result.set(false);
    } finally {
        returnConnection(conn);
    }
    return result.get();
}
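
For orientation, a hedged call sketch against the method above. The DBAccessor instance, the table name, and the id column are assumptions; the :id placeholder follows the named-parameter syntax of Spring's NamedParameterJdbcTemplate used inside execute():

// Hypothetical caller; 'dao' is an already-configured DBAccessor instance,
// and java.util.Collections/java.util.Map are assumed to be imported.
public boolean deleteRow(DBAccessor dao, String id) {
    Map<String, String> params = Collections.singletonMap("id", id);
    // The return value is whatever PreparedStatement#execute() reported (true only when a
    // ResultSet was produced), or false if an exception occurred along the way.
    return dao.execute(params, "DELETE FROM metrics WHERE id = :id");
}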

From source file:eu.stratosphere.pact.runtime.task.ReduceTaskTest.java

@Test
public void testCancelReduceTaskWhileReducing() {

    final int keyCnt = 1000;
    final int valCnt = 2;

    addInput(new UniformRecordGenerator(keyCnt, valCnt, true));
    addInputComparator(this.comparator);
    setOutput(new NirvanaOutputList());
    getTaskConfig().setDriverStrategy(DriverStrategy.SORTED_GROUP_REDUCE);

    final GroupReduceDriver<Record, Record> testTask = new GroupReduceDriver<Record, Record>();

    final AtomicBoolean success = new AtomicBoolean(false);

    Thread taskRunner = new Thread() {
        @Override
        public void run() {
            try {
                testDriver(testTask, MockDelayingReduceStub.class);
                success.set(true);
            } catch (Exception ie) {
                ie.printStackTrace();
            }
        }
    };
    taskRunner.start();

    TaskCancelThread tct = new TaskCancelThread(2, taskRunner, this);
    tct.start();

    try {
        tct.join();
        taskRunner.join();
    } catch (InterruptedException ie) {
        Assert.fail("Joining threads failed");
    }

}

From source file:com.tencent.gaia.portal.util.Shell.java

/**
 * Run a command.
 */
private void runCommand() throws IOException {
    ProcessBuilder builder = new ProcessBuilder(getExecString());
    Timer timeOutTimer = null;
    ShellTimeoutTimerTask timeoutTimerTask = null;
    timedOut = new AtomicBoolean(false);
    completed = new AtomicBoolean(false);

    if (environment != null) {
        builder.environment().putAll(this.environment);
    }
    if (dir != null) {
        builder.directory(this.dir);
    }

    builder.redirectErrorStream(redirectErrorStream);

    if (Shell.WINDOWS) {
        synchronized (WindowsProcessLaunchLock) {
            // To workaround the race condition issue with child processes
            // inheriting unintended handles during process launch that can
            // lead to hangs on reading output and error streams, we
            // serialize process creation. More info available at:
            // http://support.microsoft.com/kb/315939
            process = builder.start();
        }
    } else {
        process = builder.start();
    }

    if (timeOutInterval > 0) {
        timeOutTimer = new Timer("Shell command timeout");
        timeoutTimerTask = new ShellTimeoutTimerTask(this);
        //One time scheduling.
        timeOutTimer.schedule(timeoutTimerTask, timeOutInterval);
    }
    final BufferedReader errReader = new BufferedReader(new InputStreamReader(process.getErrorStream()));
    BufferedReader inReader = new BufferedReader(new InputStreamReader(process.getInputStream()));
    final StringBuffer errMsg = new StringBuffer();

    // read error and input streams as this would free up the buffers
    // free the error stream buffer
    Thread errThread = new Thread() {
        @Override
        public void run() {
            try {
                String line = errReader.readLine();
                while ((line != null) && !isInterrupted()) {
                    errMsg.append(line);
                    errMsg.append(System.getProperty("line.separator"));
                    line = errReader.readLine();
                }
            } catch (IOException ioe) {
                LOG.warn("Error reading the error stream", ioe);
            }
        }
    };
    try {
        errThread.start();
    } catch (IllegalStateException ise) {
    }
    try {
        parseExecResult(inReader); // parse the output
        // clear the input stream buffer
        String line = inReader.readLine();
        while (line != null) {
            line = inReader.readLine();
        }
        // wait for the process to finish and check the exit code
        exitCode = process.waitFor();
        try {
            // make sure that the error thread exits
            errThread.join();
        } catch (InterruptedException ie) {
            LOG.warn("Interrupted while reading the error stream", ie);
        }
        completed.set(true);
        //the timeout thread handling
        //taken care in finally block
        if (exitCode != 0) {
            throw new ExitCodeException(exitCode, errMsg.toString());
        }
    } catch (InterruptedException ie) {
        throw new IOException(ie.toString());
    } finally {
        if (timeOutTimer != null) {
            timeOutTimer.cancel();
        }
        // close the input stream
        try {
            // JDK 7 tries to automatically drain the input streams for us
            // when the process exits, but since close is not synchronized,
            // it creates a race if we close the stream first and the same
            // fd is recycled.  the stream draining thread will attempt to
            // drain that fd!!  it may block, OOM, or cause bizarre behavior
            // see: https://bugs.openjdk.java.net/browse/JDK-8024521
            //      issue is fixed in build 7u60
            InputStream stdout = process.getInputStream();
            synchronized (stdout) {
                inReader.close();
            }
        } catch (IOException ioe) {
            LOG.warn("Error while closing the input stream", ioe);
        }
        try {
            if (!completed.get()) {
                errThread.interrupt();
                errThread.join();
            }
        } catch (InterruptedException ie) {
            LOG.warn("Interrupted while joining errThread");
        }
        try {
            InputStream stderr = process.getErrorStream();
            synchronized (stderr) {
                errReader.close();
            }
        } catch (IOException ioe) {
            LOG.warn("Error while closing the error stream", ioe);
        }
        process.destroy();
        lastTime = System.currentTimeMillis();
    }
}

From source file:org.springsource.ide.eclipse.commons.internal.configurator.ConfiguratorImporter.java

public List<ConfigurableExtension> detectExtensions(final IProgressMonitor monitor) {
    final List<ConfigurableExtension> result = new ArrayList<ConfigurableExtension>();
    Set<WorkspaceConfiguratorParticipant> participants = ParticipantExtensionPointReader.getParticipants();
    for (final WorkspaceConfiguratorParticipant participant : participants) {
        SafeRunner.run(new ISafeRunnable() {
            public void handleException(Throwable exception) {
                // logged by super class
            }

            public void run() throws Exception {
                List<ConfigurableExtension> extensions = participant.detectExtensions(ConfiguratorImporter.this,
                        monitor);
                result.addAll(extensions);
            }
        });
    }
    Set<InstallableItem> installableItems = ParticipantExtensionPointReader.getInstallableItems();
    for (final InstallableItem item : installableItems) {
        boolean found = false;
        for (ConfigurableExtension extension : result) {
            if (extension.getId().equals(item.getId())) {
                extension.setInstallableItem(item);
                found = true;
            }
        }
        if (!found) {
            for (final WorkspaceConfiguratorParticipant participant : participants) {
                if (participant.getId().equals(item.getConfiguratorId())) {
                    final AtomicBoolean added = new AtomicBoolean(false);
                    SafeRunner.run(new ISafeRunnable() {
                        public void handleException(Throwable exception) {
                            // logged by super class
                        }

                        public void run() throws Exception {
                            ConfigurableExtension extension = participant.createExtension(item, monitor);
                            if (extension != null) {
                                result.add(extension);
                                added.set(true);
                            }
                        }
                    });
                    if (added.get()) {
                        break;
                    }
                }
            }
        }
    }

    return result;
}

From source file:org.opennms.newts.gsod.ImportRunner.java

public void execute(String... args) throws Exception {

    CmdLineParser parser = new CmdLineParser(this);
    try {
        parser.parseArgument(args);
    } catch (CmdLineException e) {
        // handling of wrong arguments
        System.err.println(e.getMessage());
        parser.printUsage(System.err);
        return;
    }

    // Setup the slf4j metrics reporter
    MetricRegistry metrics = new MetricRegistry();

    final long start = System.currentTimeMillis();
    metrics.register("elapsed-seconds", new Gauge<Double>() {

        @Override
        public Double getValue() {
            return (System.currentTimeMillis() - start) / 1000.0;
        }

    });

    final ConsoleReporter reporter = ConsoleReporter.forRegistry(metrics).outputTo(System.err)
            .convertRatesTo(SECONDS).convertDurationsTo(MILLISECONDS).build();

    reporter.start(10, SECONDS);

    if (m_restUrl == null) {
        // we are using a direct importer so use a NewtsReporter for storing metrics
        NewtsReporter newtsReporter = NewtsReporter.forRegistry(metrics).name("importer")
                .convertRatesTo(SECONDS).convertDurationsTo(MILLISECONDS).build(repository());

        newtsReporter.start(1, SECONDS);
    }

    LOG.debug("Scanning {} for GSOD data files...", m_source);

    // walk the files in the directory given
    Observable<Sample> samples = fileTreeWalker(m_source.toPath()).subscribeOn(Schedulers.io())

            // set up a meter for each file processed
            .map(meter(metrics.meter("files"), Path.class))

            // report file
            .map(reportFile())

            // read all the files and convert them into lines
            .mergeMap(lines())
            // excluding the header lines
            .filter(exclude("YEARMODA"))

            // turn each line into a list of samples
            .mergeMap(samples())

            // adjust time on samples according to arguments
            .map(adjustTime())

            // meter the samples
            .map(meter(metrics.meter("samples"), Sample.class));

    Observable<List<Sample>> batches = samples
            // create batches each second or of size m_samplesPerBatch whichever comes first
            .buffer(m_samplesPerBatch);

    Observable<Boolean> doImport = m_restUrl != null ? restPoster(batches, metrics)
            : directPoster(batches, metrics);

    System.err.println("doImport = " + doImport);

    // GO!!!
    final AtomicReference<Subscription> subscription = new AtomicReference<>();
    final AtomicBoolean failed = new AtomicBoolean(false);

    final CountDownLatch latch = new CountDownLatch(1);

    Subscription s = doImport.subscribe(new Observer<Boolean>() {

        @Override
        public void onCompleted() {
            System.err.println("Finished Importing Everything!");
            reporter.report();
            latch.countDown();
            System.exit(0);
        }

        @Override
        public void onError(Throwable e) {
            failed.set(true);
            System.err.println("Error importing!");
            e.printStackTrace();
            try {
                //latch.await();
                Subscription s = subscription.get();
                if (s != null)
                    s.unsubscribe();

            } catch (Exception ex) {
                System.err.println("Failed to close httpClient!");
                ex.printStackTrace();
            } finally {
                //dumpThreads();
            }
        }

        @Override
        public void onNext(Boolean t) {
            System.err.println("Received a boolen: " + t);
        }
    });

    subscription.set(s);
    if (failed.get()) {
        s.unsubscribe();
    }
    //latch.countDown();
    System.err.println("Return from Subscribe!");

    latch.await();

    //dumpThreads();

}

From source file:dk.netarkivet.common.utils.ProcessUtils.java

/** Wait for the end of a process, but only for a limited time.  This
 * method takes care of the ways waitFor can get interrupted.
 *
 * @param p Process to wait for.
 * @param maxWait The maximum number of milliseconds to wait for the
 * process to exit.
 * @return Exit value for process, or null if the process didn't exit
 * within the expected time.
 */
public static Integer waitFor(final Process p, long maxWait) {
    ArgumentNotValid.checkNotNull(p, "Process p");
    ArgumentNotValid.checkPositive(maxWait, "long maxWait");
    long startTime = System.currentTimeMillis();
    Timer timer = new Timer(true);
    final Thread waitThread = Thread.currentThread();
    boolean wakeupScheduled = false;
    final AtomicBoolean doneWaiting = new AtomicBoolean(false);
    while (System.currentTimeMillis() < startTime + maxWait) {
        try {
            if (!wakeupScheduled) {
                // First time in here, we need to start the wakeup thread,
                // but be sure it doesn't notify us too early or too late.
                synchronized (waitThread) {
                    timer.schedule(new TimerTask() {
                        public void run() {
                            synchronized (waitThread) {
                                if (!doneWaiting.get()) {
                                    waitThread.interrupt();
                                }
                            }
                        }
                    }, maxWait);
                    wakeupScheduled = true;
                }
            }

            p.waitFor();
            break;
        } catch (InterruptedException e) {
            // May happen for a number of reasons.  We just check if we've
            // timed out yet when we go through the loop again.
        }
    }
    synchronized (waitThread) {
        timer.cancel();
        doneWaiting.set(true);
        Thread.interrupted(); // In case the timer task interrupted.
    }
    try {
        return p.exitValue();
    } catch (IllegalThreadStateException e) {
        log.warn("Process '" + p + "' did not exit within " + (System.currentTimeMillis() - startTime)
                + " milliseconds");
        return null;
    }
}
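
For completeness, a brief, hedged sketch of invoking the helper above; the launched command is only illustrative:

// Hypothetical caller: start a process and give it at most two seconds to finish.
public static void waitForExample() throws IOException {
    Process p = new ProcessBuilder("sleep", "5").start();
    Integer exitValue = ProcessUtils.waitFor(p, 2000L);
    if (exitValue == null) {
        // Timed out: the process is still running after maxWait milliseconds.
        p.destroy();
    } else {
        System.out.println("Process exited with code " + exitValue);
    }
}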