Example usage for java.util.concurrent.atomic AtomicLong set

Introduction

This page shows example usage of java.util.concurrent.atomic.AtomicLong#set(long), collected from open-source projects.

Prototype

public final void set(long newValue) 

Document

Sets the value to newValue, with memory effects as specified by VarHandle#setVolatile.
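
Before the project examples below, here is a minimal, self-contained sketch of the call (the class name AtomicLongSetExample and the timestamp value are illustrative, not taken from any of the projects): set publishes a new value with volatile-write semantics, so any thread that subsequently calls get sees it.

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongSetExample {
    public static void main(String[] args) throws InterruptedException {
        final AtomicLong lastUpdated = new AtomicLong(0L);

        // set(long) performs a volatile write, so the new value is
        // visible to any thread that subsequently calls get().
        Thread writer = new Thread(() -> lastUpdated.set(System.currentTimeMillis()));
        writer.start();
        writer.join();

        System.out.println("lastUpdated = " + lastUpdated.get());
    }
}

Note that set is a plain overwrite. When the new value depends on the old one and several threads may race, prefer atomic read-modify-write operations such as compareAndSet or addAndGet; the examples below either confine writes to a single thread or use those operations where it matters.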

Usage

From source file:com.netflix.curator.framework.imps.TestFrameworkBackground.java

@Test
public void testRetries() throws Exception {
    final int SLEEP = 1000;
    final int TIMES = 5;

    Timing timing = new Timing();
    CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), timing.session(),
            timing.connection(), new RetryNTimes(TIMES, SLEEP));
    try {
        client.start();
        client.getZookeeperClient().blockUntilConnectedOrTimedOut();

        final CountDownLatch latch = new CountDownLatch(TIMES);
        final List<Long> times = Lists.newArrayList();
        final AtomicLong start = new AtomicLong(System.currentTimeMillis());
        ((CuratorFrameworkImpl) client).debugListener = new CuratorFrameworkImpl.DebugBackgroundListener() {
            @Override
            public void listen(OperationAndData<?> data) {
                if (data.getOperation().getClass().getName().contains("CreateBuilderImpl")) {
                    long now = System.currentTimeMillis();
                    times.add(now - start.get());
                    start.set(now);
                    latch.countDown();
                }
            }
        };

        server.stop();
        client.create().inBackground().forPath("/one");

        latch.await();

        for (long elapsed : times.subList(1, times.size())) // first one isn't a retry
        {
            Assert.assertTrue(elapsed >= SLEEP, elapsed + ": " + times);
        }
    } finally {
        IOUtils.closeQuietly(client);
    }
}

From source file:be.dataminded.nifi.plugins.ExecuteOracleSQL.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile fileToProcess = null;
    if (context.hasIncomingConnection()) {
        fileToProcess = session.get();

        // If we have no FlowFile, and all incoming connections are self-loops then we can continue on.
        // However, if we have no FlowFile and we have connections coming from other Processors, then
        // we know that we should run only if we have a FlowFile.
        if (fileToProcess == null && context.hasNonLoopConnection()) {
            return;
        }
    }

    final ComponentLog logger = getLogger();
    final DBCPService dbcpService = context.getProperty(DBCP_SERVICE).asControllerService(DBCPService.class);
    final Integer queryTimeout = context.getProperty(QUERY_TIMEOUT).asTimePeriod(TimeUnit.SECONDS).intValue();
    final boolean convertNamesForAvro = context.getProperty(NORMALIZE_NAMES_FOR_AVRO).asBoolean();
    final Integer fetchSize = context.getProperty(FETCH_SIZE).asInteger();

    final StopWatch stopWatch = new StopWatch(true);
    final String selectQuery;
    if (context.getProperty(SQL_SELECT_QUERY).isSet()) {
        selectQuery = context.getProperty(SQL_SELECT_QUERY).evaluateAttributeExpressions(fileToProcess)
                .getValue();
    } else {
        // If the query is not set, then an incoming flow file is required, and expected to contain a valid SQL select query.
        // If there is no incoming connection, onTrigger will not be called as the processor will fail when scheduled.
        final StringBuilder queryContents = new StringBuilder();
        session.read(fileToProcess, in -> queryContents.append(IOUtils.toString(in, Charset.defaultCharset())));
        selectQuery = queryContents.toString();
    }

    try (final Connection con = dbcpService.getConnection(); final Statement st = con.createStatement()) {
        st.setQueryTimeout(queryTimeout); // timeout in seconds
        st.setFetchSize(fetchSize); // hint fetch size
        final AtomicLong nrOfRows = new AtomicLong(0L);
        if (fileToProcess == null) {
            fileToProcess = session.create();
        }
        fileToProcess = session.write(fileToProcess, out -> {
            try {
                logger.debug("Executing query {}", new Object[] { selectQuery });
                final ResultSet resultSet = st.executeQuery(selectQuery);
                nrOfRows.set(JdbcCommon.convertToAvroStream(resultSet, out, convertNamesForAvro));
            } catch (final SQLException e) {
                throw new ProcessException(e);
            }
        });

        // set attribute how many rows were selected
        fileToProcess = session.putAttribute(fileToProcess, RESULT_ROW_COUNT, String.valueOf(nrOfRows.get()));

        logger.info("{} contains {} Avro records; transferring to 'success'",
                new Object[] { fileToProcess, nrOfRows.get() });
        session.getProvenanceReporter().modifyContent(fileToProcess, "Retrieved " + nrOfRows.get() + " rows",
                stopWatch.getElapsed(TimeUnit.MILLISECONDS));
        session.transfer(fileToProcess, SUCCESS);
    } catch (final ProcessException | SQLException e) {
        if (fileToProcess == null) {
            // This can happen if any exceptions occur while setting up the connection, statement, etc.
            logger.error("Unable to execute SQL select query {} due to {}. No FlowFile to route to failure",
                    new Object[] { selectQuery, e });
            context.yield();
        } else {
            if (context.hasIncomingConnection()) {
                logger.error("Unable to execute SQL select query {} for {} due to {}; routing to failure",
                        new Object[] { selectQuery, fileToProcess, e });
                fileToProcess = session.penalize(fileToProcess);
            } else {
                logger.error("Unable to execute SQL select query {} due to {}; routing to failure",
                        new Object[] { selectQuery, e });
                context.yield();
            }
            session.transfer(fileToProcess, FAILURE);
        }
    }
}

From source file:org.apache.pulsar.compaction.CompactedTopicTest.java

/**
 * Build a compacted ledger and return the ledger id, the positions of the
 * entries in the ledger, and a list of gaps together with the entry that
 * should be returned after each gap.
 */
private Triple<Long, List<Pair<MessageIdData, Long>>, List<Pair<MessageIdData, Long>>> buildCompactedLedger(
        BookKeeper bk, int count) throws Exception {
    LedgerHandle lh = bk.createLedger(1, 1, Compactor.COMPACTED_TOPIC_LEDGER_DIGEST_TYPE,
            Compactor.COMPACTED_TOPIC_LEDGER_PASSWORD);
    List<Pair<MessageIdData, Long>> positions = new ArrayList<>();
    List<Pair<MessageIdData, Long>> idsInGaps = new ArrayList<>();

    AtomicLong ledgerIds = new AtomicLong(10L);
    AtomicLong entryIds = new AtomicLong(0L);
    CompletableFuture.allOf(IntStream.range(0, count).mapToObj((i) -> {
        List<MessageIdData> idsInGap = new ArrayList<MessageIdData>();
        if (r.nextInt(10) == 1) {
            long delta = r.nextInt(10) + 1;
            idsInGap.add(MessageIdData.newBuilder().setLedgerId(ledgerIds.get()).setEntryId(entryIds.get() + 1)
                    .build());
            ledgerIds.addAndGet(delta);
            entryIds.set(0);
        }
        long delta = r.nextInt(5);
        if (delta != 0) {
            idsInGap.add(MessageIdData.newBuilder().setLedgerId(ledgerIds.get()).setEntryId(entryIds.get() + 1)
                    .build());
        }
        MessageIdData id = MessageIdData.newBuilder().setLedgerId(ledgerIds.get())
                .setEntryId(entryIds.addAndGet(delta + 1)).build();

        @Cleanup
        RawMessage m = new RawMessageImpl(id, Unpooled.EMPTY_BUFFER);

        CompletableFuture<Void> f = new CompletableFuture<>();
        ByteBuf buffer = m.serialize();

        lh.asyncAddEntry(buffer, (rc, ledger, eid, ctx) -> {
            if (rc != BKException.Code.OK) {
                f.completeExceptionally(BKException.create(rc));
            } else {
                positions.add(Pair.of(id, eid));
                idsInGap.forEach((gid) -> idsInGaps.add(Pair.of(gid, eid)));
                f.complete(null);
            }
        }, null);
        return f;
    }).toArray(CompletableFuture[]::new)).get();
    lh.close();

    return Triple.of(lh.getId(), positions, idsInGaps);
}

From source file:org.lendingclub.mercator.docker.SwarmScanner.java

public void scan() {
    WebTarget t = extractWebTarget(dockerScanner.getDockerClient());
    logger.info("Scanning {}", t);
    JsonNode response = t.path("/info").request().buildGet().invoke(JsonNode.class);

    JsonNode swarm = response.path("Swarm");
    JsonNode cluster = swarm.path("Cluster");
    String swarmClusterId = cluster.path("ID").asText();

    // need to parse these dates
    String createdAt = cluster.path("CreatedAt").asText();
    String updatedAt = cluster.path("UpdatedAt").asText();
    ObjectNode props = mapper.createObjectNode();
    props.put("swarmClusterId", swarmClusterId);
    props.put("createdAt", createdAt);
    props.put("updatedAt", updatedAt);

    JsonNode swarmNode = dockerScanner.getNeoRxClient().execCypher(
            "merge (c:DockerSwarm {swarmClusterId:{id}}) set c+={props},c.updateTs=timestamp() return c", "id",
            swarmClusterId, "props", props).blockingFirst(MissingNode.getInstance());

    if (isUnixDomainScoket(t.getUri().toString())) {
        // Only set managerApiUrl to a unix domain socket if it has not
        // already been set.
        // This is useful for trident
        if (!isUnixDomainScoket(swarmNode.path("managerApiUrl").asText())) {

            String LOCAL_DOCKER_DAEMON_SOCKET_URL = "unix:///var/run/docker.sock";
            logger.info("setting managerApiUrl to {} for swarm {}", LOCAL_DOCKER_DAEMON_SOCKET_URL,
                    swarmClusterId);

            String name = "local";
            dockerScanner.getNeoRxClient()
                    .execCypher("match (c:DockerSwarm {name:{name}}) return c", "name", name).forEach(it -> {
                        String oldSwarmClusterId = it.path("swarmClusterId").asText();
                        if (!swarmClusterId.equals(oldSwarmClusterId)) {
                            dockerScanner.getNeoRxClient().execCypher(
                                    "match (c:DockerSwarm {swarmClusterId:{swarmClusterId}}) detach delete c",
                                    "swarmClusterId", oldSwarmClusterId);
                        }
                    });

            dockerScanner.getNeoRxClient().execCypher(
                    "match (c:DockerSwarm {swarmClusterId:{id}}) set c.managerApiUrl={managerApiUrl},c.name={name},c.tridentClusterId={name} return c",
                    "id", swarmClusterId, "managerApiUrl", LOCAL_DOCKER_DAEMON_SOCKET_URL, "name", name);

        }
    }

    AtomicBoolean fail = new AtomicBoolean(false);
    response = t.path("/nodes").request().buildGet().invoke(JsonNode.class);
    AtomicLong earliestTimestamp = new AtomicLong(Long.MAX_VALUE);
    response.elements().forEachRemaining(it -> {
        try {
            earliestTimestamp.set(
                    Math.min(earliestTimestamp.get(), saveDockerNode(swarmClusterId, flattenSwarmNode(it))));
        } catch (RuntimeException e) {
            logger.warn("problem", e);
            fail.set(true);
        }
    });

    if (!fail.get()) {
        if (earliestTimestamp.get() < System.currentTimeMillis()) {
            logger.info("deleting DockerHost nodes with updateTs<{}", earliestTimestamp.get());
            dockerScanner.getNeoRxClient().execCypher(
                    "match (s:DockerSwarm {swarmClusterId:{id}})--(x:DockerHost) where s.updateTs>x.updateTs detach delete x",
                    "id", swarmClusterId);
        }
    }
    scanServicesForSwarm(swarmClusterId);
    scanTasksForSwarm(swarmClusterId);
}

From source file:org.deeplearning4j.models.word2vec.Word2Vec.java

/**
 * Train on a list of vocab words
 * @param sentence the list of vocab words to train on
 */
public void trainSentence(final List<VocabWord> sentence, AtomicLong nextRandom, double alpha) {
    if (sentence == null || sentence.isEmpty())
        return;
    for (int i = 0; i < sentence.size(); i++) {
        // advance the LCG used by word2vec (same multiplier/increment as java.util.Random)
        nextRandom.set(nextRandom.get() * 25214903917L + 11);
        skipGram(i, sentence, (int) nextRandom.get() % window, nextRandom, alpha);
    }
}

From source file:org.lendingclub.mercator.docker.SwarmScanner.java

public void scanTasksForSwarm(String swarmClusterId) {

    logger.info("scanning tasks for swarm: {}", swarmClusterId);

    AtomicLong earliestUpdate = new AtomicLong(Long.MAX_VALUE);
    AtomicBoolean error = new AtomicBoolean(false);
    JsonNode response = getRestClient().getTasks();
    response.forEach(it -> {
        try {
            earliestUpdate.set(Math.min(earliestUpdate.get(), saveTask(it)));
        } catch (Exception e) {
            logger.warn("problem updating task", e);
            error.set(true);
        }
    });

    if (!error.get()) {
        if (earliestUpdate.get() < System.currentTimeMillis()) {
            dockerScanner.getNeoRxClient().execCypher(
                    "match (x:DockerTask) where x.swarmClusterId={swarmClusterId} and x.updateTs<{cutoff} detach delete x",
                    "cutoff", earliestUpdate.get(), "swarmClusterId", swarmClusterId);
        }
    }

}

From source file:com.opengamma.engine.cache.BerkeleyDBValueSpecificationIdentifierBinaryDataStoreTest.java

public void parallelPutGetTest() throws InterruptedException {
    final int numEntries = 5000;
    final int numCycles = 1;
    final int numGets = numCycles * numEntries;
    final Random random = new Random();

    File dbDir = createDbDir("parallelPutGetTest");
    Environment dbEnvironment = BerkeleyDBViewComputationCacheSource.constructDatabaseEnvironment(dbDir, false);

    final BerkeleyDBBinaryDataStore dataStore = new BerkeleyDBBinaryDataStore(dbEnvironment,
            "parallelPutGetTest");
    dataStore.start();

    final AtomicLong currentMaxIdentifier = new AtomicLong(0L);
    final byte[] bytes = new byte[100];
    random.nextBytes(bytes);
    Thread tPut = new Thread(new Runnable() {
        @Override
        public void run() {
            OperationTimer timer = new OperationTimer(s_logger, "Putting {} entries", numEntries);
            for (int i = 0; i < numEntries; i++) {
                random.nextBytes(bytes);
                dataStore.put(i, bytes);
                currentMaxIdentifier.set(i);
            }
            long numMillis = timer.finished();

            double msPerPut = ((double) numMillis) / ((double) numEntries);
            double putsPerSecond = 1000.0 / msPerPut;

            s_logger.info("for {} puts, {} ms/put, {} puts/sec",
                    new Object[] { numEntries, msPerPut, putsPerSecond });
        }

    }, "Putter");

    class GetRunner implements Runnable {
        @Override
        public void run() {
            OperationTimer timer = new OperationTimer(s_logger, "Getting {} entries", numGets);
            for (int i = 0; i < numGets; i++) {
                int maxIdentifier = (int) currentMaxIdentifier.get();
                // guard against nextInt(0) before the putter has stored anything
                long actualIdentifier = random.nextInt(Math.max(1, maxIdentifier));
                dataStore.get(actualIdentifier);
            }
            long numMillis = timer.finished();

            double msPerGet = ((double) numMillis) / ((double) numGets);
            double getsPerSecond = 1000.0 / msPerGet;

            s_logger.info("for {} gets, {} ms/get, {} gets/sec",
                    new Object[] { numGets, msPerGet, getsPerSecond });
        }
    }
    Thread tGet1 = new Thread(new GetRunner(), "getter-1");
    Thread tGet2 = new Thread(new GetRunner(), "getter-2");
    //Thread tGet3 = new Thread(new GetRunner(), "getter-3");
    //Thread tGet4 = new Thread(new GetRunner(), "getter-4");
    //Thread tGet5 = new Thread(new GetRunner(), "getter-5");

    tPut.start();
    Thread.sleep(5L);
    tGet1.start();
    tGet2.start();
    //tGet3.start();
    //tGet4.start();
    //tGet5.start();

    tPut.join();
    tGet1.join();
    tGet2.join();
    //tGet3.join();
    //tGet4.join();
    //tGet5.join();

    dataStore.delete();
    dataStore.stop();
    dbEnvironment.close();
}

From source file:com.antsdb.saltedfish.nosql.Gobbler.java

/**
 * Returns -1 if no valid sp is found, meaning this is an empty database.
 */
public long getLatestSp() {
    long sp = this.spaceman.getAllocationPointer();
    int spaceId = SpaceManager.getSpaceId(sp);
    long spaceStartSp = this.spaceman.getSpaceStartSp(spaceId);
    if (spaceStartSp == sp) {
        // if current space is empty, wait a little
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore interrupt status
        }
    }
    AtomicLong result = new AtomicLong(-1);
    try {
        this.replay(spaceStartSp, true, new ReplayHandler() {
            @Override
            public void all(LogEntry entry) {
                result.set(entry.getSpacePointer());
            }
        });
    } catch (Exception ignored) {
    }
    return result.get();
}

From source file:org.apache.nifi.processors.standard.ExecuteSQL.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile fileToProcess = null;
    if (context.hasIncomingConnection()) {
        fileToProcess = session.get();

        // If we have no FlowFile, and all incoming connections are self-loops then we can continue on.
        // However, if we have no FlowFile and we have connections coming from other Processors, then
        // we know that we should run only if we have a FlowFile.
        if (fileToProcess == null && context.hasNonLoopConnection()) {
            return;
        }
    }

    final ComponentLog logger = getLogger();
    final DBCPService dbcpService = context.getProperty(DBCP_SERVICE).asControllerService(DBCPService.class);
    final Integer queryTimeout = context.getProperty(QUERY_TIMEOUT).asTimePeriod(TimeUnit.SECONDS).intValue();
    final boolean convertNamesForAvro = context.getProperty(NORMALIZE_NAMES_FOR_AVRO).asBoolean();
    final StopWatch stopWatch = new StopWatch(true);
    final String selectQuery;
    if (context.getProperty(SQL_SELECT_QUERY).isSet()) {
        selectQuery = context.getProperty(SQL_SELECT_QUERY).evaluateAttributeExpressions(fileToProcess)
                .getValue();
    } else {
        // If the query is not set, then an incoming flow file is required, and expected to contain a valid SQL select query.
        // If there is no incoming connection, onTrigger will not be called as the processor will fail when scheduled.
        final StringBuilder queryContents = new StringBuilder();
        session.read(fileToProcess, new InputStreamCallback() {
            @Override
            public void process(InputStream in) throws IOException {
                queryContents.append(IOUtils.toString(in));
            }
        });
        selectQuery = queryContents.toString();
    }

    try (final Connection con = dbcpService.getConnection(); final Statement st = con.createStatement()) {
        st.setQueryTimeout(queryTimeout); // timeout in seconds
        final AtomicLong nrOfRows = new AtomicLong(0L);
        if (fileToProcess == null) {
            fileToProcess = session.create();
        }
        fileToProcess = session.write(fileToProcess, new OutputStreamCallback() {
            @Override
            public void process(final OutputStream out) throws IOException {
                try {
                    logger.debug("Executing query {}", new Object[] { selectQuery });
                    final ResultSet resultSet = st.executeQuery(selectQuery);
                    nrOfRows.set(JdbcCommon.convertToAvroStream(resultSet, out, convertNamesForAvro));
                } catch (final SQLException e) {
                    throw new ProcessException(e);
                }
            }
        });

        // set attribute how many rows were selected
        fileToProcess = session.putAttribute(fileToProcess, RESULT_ROW_COUNT, String.valueOf(nrOfRows.get()));

        logger.info("{} contains {} Avro records; transferring to 'success'",
                new Object[] { fileToProcess, nrOfRows.get() });
        session.getProvenanceReporter().modifyContent(fileToProcess, "Retrieved " + nrOfRows.get() + " rows",
                stopWatch.getElapsed(TimeUnit.MILLISECONDS));
        session.transfer(fileToProcess, REL_SUCCESS);
    } catch (final ProcessException | SQLException e) {
        if (fileToProcess == null) {
            // This can happen if any exceptions occur while setting up the connection, statement, etc.
            logger.error("Unable to execute SQL select query {} due to {}. No FlowFile to route to failure",
                    new Object[] { selectQuery, e });
            context.yield();
        } else {
            if (context.hasIncomingConnection()) {
                logger.error("Unable to execute SQL select query {} for {} due to {}; routing to failure",
                        new Object[] { selectQuery, fileToProcess, e });
                fileToProcess = session.penalize(fileToProcess);
            } else {
                logger.error("Unable to execute SQL select query {} due to {}; routing to failure",
                        new Object[] { selectQuery, e });
                context.yield();
            }
            session.transfer(fileToProcess, REL_FAILURE);
        }
    }
}

From source file:org.lendingclub.mercator.docker.SwarmScanner.java

protected long saveTask(JsonNode it) {

    ObjectNode n = flattenTask(it);

    n.put("swarmClusterId", getSwarmClusterId().get());

    String taskId = n.get("taskId").asText();
    String serviceId = n.path("serviceId").asText();
    String swarmNodeId = n.path("swarmNodeId").asText();
    checkNotEmpty(taskId, "taskId");
    checkNotEmpty(serviceId, "serviceId");
    checkNotEmpty(swarmNodeId, "swarmNodeId");

    AtomicLong timestamp = new AtomicLong(Long.MAX_VALUE);
    dockerScanner.getNeoRxClient()
            .execCypher(
                    "merge (x:DockerTask {taskId:{taskId}}) set x+={props}, x.updateTs=timestamp() return x",
                    "taskId", taskId, "props", n)
            .forEach(tt -> {

                timestamp.set(tt.path("updateTs").asLong(Long.MAX_VALUE));

                removeDockerLabels("DockerTask", "taskId", taskId, n, it);
            });

    {
        // it might be worth it to select these relationships and only
        // update if they are missing
        dockerScanner.getNeoRxClient().execCypher(
                "match (s:DockerService {serviceId:{serviceId}}),(t:DockerTask{taskId:{taskId}}) merge (s)-[x:CONTAINS]->(t) set x.updateTs=timestamp() return t,s",
                "serviceId", serviceId, "taskId", taskId);

        dockerScanner.getNeoRxClient().execCypher(
                "match (h:DockerHost {swarmNodeId:{swarmNodeId}}), (t:DockerTask {swarmNodeId:{swarmNodeId}}) merge (h)-[x:RUNS]->(t) set x.updateTs=timestamp()",
                "swarmNodeId", swarmNodeId);
    }
    return timestamp.get();
}