Example usage for java.util.concurrent.atomic AtomicLong set

List of usage examples for java.util.concurrent.atomic AtomicLong set

Introduction

On this page you can find usage examples for java.util.concurrent.atomic AtomicLong set.

Prototype

public final void set(long newValue) 

Document

Sets the value to newValue, with memory effects as specified by VarHandle#setVolatile.
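
To illustrate those semantics in isolation, here is a minimal, self-contained sketch (not drawn from any of the projects below): set performs a volatile write, so a value written by one thread is visible to another thread that reads after an appropriate happens-before edge such as Thread.join.

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongSetDemo {
    public static void main(String[] args) throws InterruptedException {
        AtomicLong value = new AtomicLong(0L);

        Thread writer = new Thread(() -> value.set(42L)); // volatile write
        writer.start();
        writer.join(); // establishes happens-before with the writer's actions

        System.out.println(value.get()); // prints 42
    }
}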

Usage

From source file: org.lendingclub.mercator.docker.SwarmScanner.java

long saveDockerNode(String swarmClusterId, JsonNode n) {

    String swarmNodeId = n.get("swarmNodeId").asText();
    AtomicLong updateTs = new AtomicLong(Long.MAX_VALUE);
    dockerScanner.getNeoRxClient().execCypher(
            "merge (n:DockerHost {swarmNodeId:{nodeId}}) set n+={props}, n.updateTs=timestamp() return n",
            "nodeId", swarmNodeId, "props", n).forEach(actual -> {
                removeDockerLabels("DockerHost", "swarmNodeId", swarmNodeId, n, actual);
                updateTs.set(Math.min(updateTs.get(), actual.path("updateTs").asLong(Long.MAX_VALUE)));
            });

    logger.info("connecting swarm={} to node={}", swarmClusterId, swarmNodeId);
    dockerScanner.getNeoRxClient().execCypher(
            "match (s:DockerSwarm {swarmClusterId:{swarmClusterId}}), (n:DockerHost {swarmNodeId:{nodeId}}) merge (s)-[x:CONTAINS]->(n) set x.updateTs=timestamp()",
            "swarmClusterId", swarmClusterId, "nodeId", swarmNodeId);
    return updateTs.get();

}
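
The idiom above, an AtomicLong seeded with Long.MAX_VALUE and lowered from inside a forEach lambda, works around the rule that lambdas may only capture effectively final locals. A stripped-down sketch of the same running-minimum pattern (class and method names here are illustrative, not from the project):

import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

class MinTimestampSketch {
    static long oldestTimestamp(List<Long> timestamps) {
        AtomicLong oldest = new AtomicLong(Long.MAX_VALUE);
        // Plain set() is sufficient here because forEach runs on one thread;
        // concurrent updaters would need accumulateAndGet(ts, Math::min).
        timestamps.forEach(ts -> oldest.set(Math.min(oldest.get(), ts)));
        return oldest.get();
    }
}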

From source file: org.apache.activemq.usecases.NetworkBridgeProducerFlowControlTest.java

public void doTestSendFailIfNoSpaceDoesNotBlockNetwork(ActiveMQDestination slowDestination,
        ActiveMQDestination fastDestination) throws Exception {

    final int NUM_MESSAGES = 100;
    final long TEST_MESSAGE_SIZE = 1024;
    final long SLOW_CONSUMER_DELAY_MILLIS = 100;

    // Start a local and a remote broker.
    createBroker(new URI("broker:(tcp://localhost:0" + ")?brokerName=broker0&persistent=false&useJmx=true"));
    BrokerService remoteBroker = createBroker(
            new URI("broker:(tcp://localhost:0" + ")?brokerName=broker1&persistent=false&useJmx=true"));
    remoteBroker.getSystemUsage().setSendFailIfNoSpace(true);

    // Set a policy on the remote broker that limits the maximum size of the
    // slow shared queue.
    PolicyEntry policyEntry = new PolicyEntry();
    policyEntry.setMemoryLimit(5 * TEST_MESSAGE_SIZE);
    PolicyMap policyMap = new PolicyMap();
    policyMap.put(slowDestination, policyEntry);
    remoteBroker.setDestinationPolicy(policyMap);

    // Create an outbound bridge from the local broker to the remote broker.
    // The bridge is configured with the remoteDispatchType enhancement.
    NetworkConnector nc = bridgeBrokers("broker0", "broker1");
    nc.setAlwaysSyncSend(true);
    nc.setPrefetchSize(1);

    startAllBrokers();
    waitForBridgeFormation();

    // Start two asynchronous consumers on the remote broker, one for each
    // of the two shared queues, and keep track of how long it takes for
    // each of the consumers to receive all the messages.
    final CountDownLatch fastConsumerLatch = new CountDownLatch(NUM_MESSAGES);
    final CountDownLatch slowConsumerLatch = new CountDownLatch(NUM_MESSAGES);

    final long startTimeMillis = System.currentTimeMillis();
    final AtomicLong fastConsumerTime = new AtomicLong();
    final AtomicLong slowConsumerTime = new AtomicLong();

    Thread fastWaitThread = new Thread() {
        @Override
        public void run() {
            try {
                fastConsumerLatch.await();
                fastConsumerTime.set(System.currentTimeMillis() - startTimeMillis);
            } catch (InterruptedException ex) {
                exceptions.add(ex);
                Assert.fail(ex.getMessage());
            }
        }
    };

    Thread slowWaitThread = new Thread() {
        @Override
        public void run() {
            try {
                slowConsumerLatch.await();
                slowConsumerTime.set(System.currentTimeMillis() - startTimeMillis);
            } catch (InterruptedException ex) {
                exceptions.add(ex);
                Assert.fail(ex.getMessage());
            }
        }
    };

    fastWaitThread.start();
    slowWaitThread.start();

    createConsumer("broker1", fastDestination, fastConsumerLatch);
    MessageConsumer slowConsumer = createConsumer("broker1", slowDestination, slowConsumerLatch);
    MessageIdList messageIdList = brokers.get("broker1").consumers.get(slowConsumer);
    messageIdList.setProcessingDelay(SLOW_CONSUMER_DELAY_MILLIS);

    // Send the test messages to the local broker's shared queues. The
    // messages are either persistent or non-persistent to demonstrate the
    // difference between synchronous and asynchronous dispatch.
    persistentDelivery = false;
    sendMessages("broker0", fastDestination, NUM_MESSAGES);
    sendMessages("broker0", slowDestination, NUM_MESSAGES);

    fastWaitThread.join(TimeUnit.SECONDS.toMillis(60));
    slowWaitThread.join(TimeUnit.SECONDS.toMillis(60));

    assertTrue("no exceptions on the wait threads:" + exceptions, exceptions.isEmpty());

    LOG.info("Fast consumer duration (ms): " + fastConsumerTime.get());
    LOG.info("Slow consumer duration (ms): " + slowConsumerTime.get());

    assertTrue("fast time set", fastConsumerTime.get() > 0);
    assertTrue("slow time set", slowConsumerTime.get() > 0);

    // Verify the behaviour as described in the description of this class.
    Assert.assertTrue(fastConsumerTime.get() < slowConsumerTime.get() / 10);
}
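
The wait threads above follow a reusable pattern: block on a CountDownLatch, then publish the elapsed time with set(), whose volatile write (combined with Thread.join in the main thread) makes the measurement safely visible. A minimal sketch of that pattern with no ActiveMQ dependencies:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicLong;

class LatchTimingSketch {
    public static void main(String[] args) throws InterruptedException {
        CountDownLatch done = new CountDownLatch(1);
        long start = System.currentTimeMillis();
        AtomicLong elapsedMillis = new AtomicLong();

        Thread waiter = new Thread(() -> {
            try {
                done.await();
                elapsedMillis.set(System.currentTimeMillis() - start); // publish result
            } catch (InterruptedException ex) {
                Thread.currentThread().interrupt();
            }
        });
        waiter.start();

        Thread.sleep(50); // stand-in for the real work being timed
        done.countDown();
        waiter.join(); // after join, the set() above is guaranteed visible
        System.out.println("elapsed ms: " + elapsedMillis.get());
    }
}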

From source file: org.apache.activemq.usecases.NetworkBridgeProducerFlowControlTest.java

public void testSendFailIfNoSpaceReverseDoesNotBlockQueueNetwork() throws Exception {
    final int NUM_MESSAGES = 100;
    final long TEST_MESSAGE_SIZE = 1024;
    final long SLOW_CONSUMER_DELAY_MILLIS = 100;

    final ActiveMQQueue slowDestination = new ActiveMQQueue(
            NetworkBridgeProducerFlowControlTest.class.getSimpleName()
                    + ".slow.shared?consumer.prefetchSize=1");

    final ActiveMQQueue fastDestination = new ActiveMQQueue(
            NetworkBridgeProducerFlowControlTest.class.getSimpleName()
                    + ".fast.shared?consumer.prefetchSize=1");

    // Start a local and a remote broker.
    BrokerService localBroker = createBroker(
            new URI("broker:(tcp://localhost:0" + ")?brokerName=broker0&persistent=false&useJmx=true"));
    createBroker(new URI("broker:(tcp://localhost:0" + ")?brokerName=broker1&persistent=false&useJmx=true"));
    localBroker.getSystemUsage().setSendFailIfNoSpace(true);

    // Set a policy on the local broker that limits the maximum size of the
    // slow shared queue.
    PolicyEntry policyEntry = new PolicyEntry();
    policyEntry.setMemoryLimit(5 * TEST_MESSAGE_SIZE);
    PolicyMap policyMap = new PolicyMap();
    policyMap.put(slowDestination, policyEntry);
    localBroker.setDestinationPolicy(policyMap);

    // Create an outbound bridge from the local broker to the remote broker.
    // The bridge is configured with the remoteDispatchType enhancement.
    NetworkConnector nc = bridgeBrokers("broker0", "broker1");
    nc.setAlwaysSyncSend(true);
    nc.setPrefetchSize(1);
    nc.setDuplex(true);

    startAllBrokers();
    waitForBridgeFormation();

    // Start two asynchronous consumers on the local broker, one for each
    // of the two shared queues, and keep track of how long it takes for
    // each of the consumers to receive all the messages.
    final CountDownLatch fastConsumerLatch = new CountDownLatch(NUM_MESSAGES);
    final CountDownLatch slowConsumerLatch = new CountDownLatch(NUM_MESSAGES);

    final long startTimeMillis = System.currentTimeMillis();
    final AtomicLong fastConsumerTime = new AtomicLong();
    final AtomicLong slowConsumerTime = new AtomicLong();

    Thread fastWaitThread = new Thread() {
        @Override
        public void run() {
            try {
                fastConsumerLatch.await();
                fastConsumerTime.set(System.currentTimeMillis() - startTimeMillis);
            } catch (InterruptedException ex) {
                exceptions.add(ex);
                Assert.fail(ex.getMessage());
            }
        }
    };

    Thread slowWaitThread = new Thread() {
        @Override
        public void run() {
            try {
                slowConsumerLatch.await();
                slowConsumerTime.set(System.currentTimeMillis() - startTimeMillis);
            } catch (InterruptedException ex) {
                exceptions.add(ex);
                Assert.fail(ex.getMessage());
            }
        }
    };

    fastWaitThread.start();
    slowWaitThread.start();

    createConsumer("broker0", fastDestination, fastConsumerLatch);
    MessageConsumer slowConsumer = createConsumer("broker0", slowDestination, slowConsumerLatch);
    MessageIdList messageIdList = brokers.get("broker0").consumers.get(slowConsumer);
    messageIdList.setProcessingDelay(SLOW_CONSUMER_DELAY_MILLIS);

    // Send the test messages to the local broker's shared queues. The
    // messages are either persistent or non-persistent to demonstrate the
    // difference between synchronous and asynchronous dispatch.
    persistentDelivery = false;
    sendMessages("broker1", fastDestination, NUM_MESSAGES);
    sendMessages("broker1", slowDestination, NUM_MESSAGES);

    fastWaitThread.join(TimeUnit.SECONDS.toMillis(60));
    slowWaitThread.join(TimeUnit.SECONDS.toMillis(60));

    assertTrue("no exceptions on the wait threads:" + exceptions, exceptions.isEmpty());

    LOG.info("Fast consumer duration (ms): " + fastConsumerTime.get());
    LOG.info("Slow consumer duration (ms): " + slowConsumerTime.get());

    assertTrue("fast time set", fastConsumerTime.get() > 0);
    assertTrue("slow time set", slowConsumerTime.get() > 0);

    // Verify the behaviour as described in the description of this class.
    Assert.assertTrue(fastConsumerTime.get() < slowConsumerTime.get() / 10);
}

From source file: org.apache.activemq.usecases.NetworkBridgeProducerFlowControlTest.java

/**
 * This test is parameterized by {@link #persistentTestMessages}, which
 * determines whether the producer on broker0 sends persistent or
 * non-persistent messages, and {@link #networkIsAlwaysSendSync}, which
 * determines how the bridge will forward both persistent and non-persistent
 * messages to broker1.
 *
 * @see #initCombosForTestFastAndSlowRemoteConsumers()
 */
public void testFastAndSlowRemoteConsumers() throws Exception {
    final int NUM_MESSAGES = 100;
    final long TEST_MESSAGE_SIZE = 1024;
    final long SLOW_CONSUMER_DELAY_MILLIS = 100;

    // Consumer prefetch is disabled for broker1's consumers.
    final ActiveMQQueue SLOW_SHARED_QUEUE = new ActiveMQQueue(
            NetworkBridgeProducerFlowControlTest.class.getSimpleName()
                    + ".slow.shared?consumer.prefetchSize=1");

    final ActiveMQQueue FAST_SHARED_QUEUE = new ActiveMQQueue(
            NetworkBridgeProducerFlowControlTest.class.getSimpleName()
                    + ".fast.shared?consumer.prefetchSize=1");

    // Start a local and a remote broker.
    createBroker(new URI("broker:(tcp://localhost:0" + ")?brokerName=broker0&persistent=false&useJmx=true"));
    BrokerService remoteBroker = createBroker(
            new URI("broker:(tcp://localhost:0" + ")?brokerName=broker1&persistent=false&useJmx=true"));

    // Set a policy on the remote broker that limits the maximum size of the
    // slow shared queue.
    PolicyEntry policyEntry = new PolicyEntry();
    policyEntry.setMemoryLimit(5 * TEST_MESSAGE_SIZE);
    PolicyMap policyMap = new PolicyMap();
    policyMap.put(SLOW_SHARED_QUEUE, policyEntry);
    remoteBroker.setDestinationPolicy(policyMap);

    // Create an outbound bridge from the local broker to the remote broker.
    // The bridge is configured with the remoteDispatchType enhancement.
    NetworkConnector nc = bridgeBrokers("broker0", "broker1");
    nc.setAlwaysSyncSend(networkIsAlwaysSendSync);
    nc.setPrefetchSize(1);

    startAllBrokers();
    waitForBridgeFormation();

    // Send the test messages to the local broker's shared queues. The
    // messages are either persistent or non-persistent to demonstrate the
    // difference between synchronous and asynchronous dispatch.
    persistentDelivery = persistentTestMessages;
    sendMessages("broker0", FAST_SHARED_QUEUE, NUM_MESSAGES);
    sendMessages("broker0", SLOW_SHARED_QUEUE, NUM_MESSAGES);

    // Start two asynchronous consumers on the remote broker, one for each
    // of the two shared queues, and keep track of how long it takes for
    // each of the consumers to receive all the messages.
    final CountDownLatch fastConsumerLatch = new CountDownLatch(NUM_MESSAGES);
    final CountDownLatch slowConsumerLatch = new CountDownLatch(NUM_MESSAGES);

    final long startTimeMillis = System.currentTimeMillis();
    final AtomicLong fastConsumerTime = new AtomicLong();
    final AtomicLong slowConsumerTime = new AtomicLong();

    Thread fastWaitThread = new Thread() {
        @Override
        public void run() {
            try {
                fastConsumerLatch.await();
                fastConsumerTime.set(System.currentTimeMillis() - startTimeMillis);
            } catch (InterruptedException ex) {
                exceptions.add(ex);
                Assert.fail(ex.getMessage());
            }
        }
    };

    Thread slowWaitThread = new Thread() {
        @Override
        public void run() {
            try {
                slowConsumerLatch.await();
                slowConsumerTime.set(System.currentTimeMillis() - startTimeMillis);
            } catch (InterruptedException ex) {
                exceptions.add(ex);
                Assert.fail(ex.getMessage());
            }
        }
    };

    fastWaitThread.start();
    slowWaitThread.start();

    createConsumer("broker1", FAST_SHARED_QUEUE, fastConsumerLatch);
    MessageConsumer slowConsumer = createConsumer("broker1", SLOW_SHARED_QUEUE, slowConsumerLatch);
    MessageIdList messageIdList = brokers.get("broker1").consumers.get(slowConsumer);
    messageIdList.setProcessingDelay(SLOW_CONSUMER_DELAY_MILLIS);

    fastWaitThread.join();
    slowWaitThread.join();

    assertTrue("no exceptions on the wait threads:" + exceptions, exceptions.isEmpty());

    LOG.info("Fast consumer duration (ms): " + fastConsumerTime.get());
    LOG.info("Slow consumer duration (ms): " + slowConsumerTime.get());

    // Verify the behaviour as described in the description of this class.
    if (networkIsAlwaysSendSync) {
        Assert.assertTrue(fastConsumerTime.get() < slowConsumerTime.get() / 20);

    } else {
        Assert.assertEquals(persistentTestMessages, fastConsumerTime.get() < slowConsumerTime.get() / 10);
    }
}

From source file: org.apache.nifi.processors.standard.AbstractQueryDatabaseTable.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSessionFactory sessionFactory)
        throws ProcessException {
    // Fetch the column/table info once
    if (!setupComplete.get()) {
        super.setup(context);
    }
    ProcessSession session = sessionFactory.createSession();
    final List<FlowFile> resultSetFlowFiles = new ArrayList<>();

    final ComponentLog logger = getLogger();

    final DBCPService dbcpService = context.getProperty(DBCP_SERVICE).asControllerService(DBCPService.class);
    final DatabaseAdapter dbAdapter = dbAdapters.get(context.getProperty(DB_TYPE).getValue());
    final String tableName = context.getProperty(TABLE_NAME).evaluateAttributeExpressions().getValue();
    final String columnNames = context.getProperty(COLUMN_NAMES).evaluateAttributeExpressions().getValue();
    final String sqlQuery = context.getProperty(SQL_QUERY).evaluateAttributeExpressions().getValue();
    final String maxValueColumnNames = context.getProperty(MAX_VALUE_COLUMN_NAMES)
            .evaluateAttributeExpressions().getValue();
    final String customWhereClause = context.getProperty(WHERE_CLAUSE).evaluateAttributeExpressions()
            .getValue();
    final Integer fetchSize = context.getProperty(FETCH_SIZE).evaluateAttributeExpressions().asInteger();
    final Integer maxRowsPerFlowFile = context.getProperty(MAX_ROWS_PER_FLOW_FILE)
            .evaluateAttributeExpressions().asInteger();
    final Integer outputBatchSizeField = context.getProperty(OUTPUT_BATCH_SIZE).evaluateAttributeExpressions()
            .asInteger();
    final int outputBatchSize = outputBatchSizeField == null ? 0 : outputBatchSizeField;
    final Integer maxFragments = context.getProperty(MAX_FRAGMENTS).isSet()
            ? context.getProperty(MAX_FRAGMENTS).evaluateAttributeExpressions().asInteger()
            : 0;

    SqlWriter sqlWriter = configureSqlWriter(session, context);

    final StateManager stateManager = context.getStateManager();
    final StateMap stateMap;

    try {
        stateMap = stateManager.getState(Scope.CLUSTER);
    } catch (final IOException ioe) {
        getLogger().error("Failed to retrieve observed maximum values from the State Manager. Will not perform "
                + "query until this is accomplished.", ioe);
        context.yield();
        return;
    }
    // Make a mutable copy of the current state property map. This will be updated by the result row callback, and eventually
    // set as the current state map (after the session has been committed)
    final Map<String, String> statePropertyMap = new HashMap<>(stateMap.toMap());

    //If an initial max value for column(s) has been specified using properties, and this column is not in the state manager, sync them to the state property map
    for (final Map.Entry<String, String> maxProp : maxValueProperties.entrySet()) {
        String maxPropKey = maxProp.getKey().toLowerCase();
        String fullyQualifiedMaxPropKey = getStateKey(tableName, maxPropKey, dbAdapter);
        if (!statePropertyMap.containsKey(fullyQualifiedMaxPropKey)) {
            String newMaxPropValue;
            // If we can't find the value at the fully-qualified key name, it is possible (under a previous scheme)
            // the value has been stored under a key that is only the column name. Fall back to check the column name,
            // but store the new initial max value under the fully-qualified key.
            if (statePropertyMap.containsKey(maxPropKey)) {
                newMaxPropValue = statePropertyMap.get(maxPropKey);
            } else {
                newMaxPropValue = maxProp.getValue();
            }
            statePropertyMap.put(fullyQualifiedMaxPropKey, newMaxPropValue);

        }
    }

    List<String> maxValueColumnNameList = StringUtils.isEmpty(maxValueColumnNames) ? null
            : Arrays.asList(maxValueColumnNames.split("\\s*,\\s*"));
    final String selectQuery = getQuery(dbAdapter, tableName, sqlQuery, columnNames, maxValueColumnNameList,
            customWhereClause, statePropertyMap);
    final StopWatch stopWatch = new StopWatch(true);
    final String fragmentIdentifier = UUID.randomUUID().toString();

    try (final Connection con = dbcpService.getConnection(Collections.emptyMap());
            final Statement st = con.createStatement()) {

        if (fetchSize != null && fetchSize > 0) {
            try {
                st.setFetchSize(fetchSize);
            } catch (SQLException se) {
                // Not all drivers support this, just log the error (at debug level) and move on
                logger.debug("Cannot set fetch size to {} due to {}",
                        new Object[] { fetchSize, se.getLocalizedMessage() }, se);
            }
        }

        String jdbcURL = "DBCPService";
        try {
            DatabaseMetaData databaseMetaData = con.getMetaData();
            if (databaseMetaData != null) {
                jdbcURL = databaseMetaData.getURL();
            }
        } catch (SQLException se) {
            // Ignore and use default JDBC URL. This shouldn't happen unless the driver doesn't implement getMetaData() properly
        }

        final Integer queryTimeout = context.getProperty(QUERY_TIMEOUT).evaluateAttributeExpressions()
                .asTimePeriod(TimeUnit.SECONDS).intValue();
        st.setQueryTimeout(queryTimeout); // timeout in seconds
        if (logger.isDebugEnabled()) {
            logger.debug("Executing query {}", new Object[] { selectQuery });
        }
        try (final ResultSet resultSet = st.executeQuery(selectQuery)) {
            int fragmentIndex = 0;
            // Max values will be updated in the state property map by the callback
            final MaxValueResultSetRowCollector maxValCollector = new MaxValueResultSetRowCollector(tableName,
                    statePropertyMap, dbAdapter);

            while (true) {
                final AtomicLong nrOfRows = new AtomicLong(0L);

                FlowFile fileToProcess = session.create();
                try {
                    fileToProcess = session.write(fileToProcess, out -> {
                        try {
                            nrOfRows.set(
                                    sqlWriter.writeResultSet(resultSet, out, getLogger(), maxValCollector));
                        } catch (Exception e) {
                            throw new ProcessException("Error during database query or conversion of records.",
                                    e);
                        }
                    });
                } catch (ProcessException e) {
                    // Add flowfile to results before rethrowing so it will be removed from session in outer catch
                    resultSetFlowFiles.add(fileToProcess);
                    throw e;
                }

                if (nrOfRows.get() > 0) {
                    // set attributes
                    final Map<String, String> attributesToAdd = new HashMap<>();
                    attributesToAdd.put(RESULT_ROW_COUNT, String.valueOf(nrOfRows.get()));
                    attributesToAdd.put(RESULT_TABLENAME, tableName);

                    if (maxRowsPerFlowFile > 0) {
                        attributesToAdd.put(FRAGMENT_ID, fragmentIdentifier);
                        attributesToAdd.put(FRAGMENT_INDEX, String.valueOf(fragmentIndex));
                    }

                    attributesToAdd.putAll(sqlWriter.getAttributesToAdd());
                    fileToProcess = session.putAllAttributes(fileToProcess, attributesToAdd);
                    sqlWriter.updateCounters(session);

                    logger.info("{} contains {} records; transferring to 'success'",
                            new Object[] { fileToProcess, nrOfRows.get() });

                    session.getProvenanceReporter().receive(fileToProcess, jdbcURL,
                            stopWatch.getElapsed(TimeUnit.MILLISECONDS));
                    resultSetFlowFiles.add(fileToProcess);
                    // If we've reached the batch size, send out the flow files
                    if (outputBatchSize > 0 && resultSetFlowFiles.size() >= outputBatchSize) {
                        session.transfer(resultSetFlowFiles, REL_SUCCESS);
                        session.commit();
                        resultSetFlowFiles.clear();
                    }
                } else {
                    // If there were no rows returned, don't send the flowfile
                    session.remove(fileToProcess);
                    // If no rows and this was first FlowFile, yield
                    if (fragmentIndex == 0) {
                        context.yield();
                    }
                    break;
                }

                fragmentIndex++;
                if (maxFragments > 0 && fragmentIndex >= maxFragments) {
                    break;
                }

                // If we aren't splitting up the data into flow files or fragments, then the result set has been entirely fetched so don't loop back around
                if (maxFragments == 0 && maxRowsPerFlowFile == 0) {
                    break;
                }

                // If we are splitting up the data into flow files, don't loop back around if we've gotten all results
                if (maxRowsPerFlowFile > 0 && nrOfRows.get() < maxRowsPerFlowFile) {
                    break;
                }
            }

            // Apply state changes from the Max Value tracker
            maxValCollector.applyStateChanges();

            // Even though the maximum value and total count are known at this point, to maintain consistent behavior if Output Batch Size is set, do not store the attributes
            if (outputBatchSize == 0) {
                for (int i = 0; i < resultSetFlowFiles.size(); i++) {
                    // Add maximum values as attributes
                    for (Map.Entry<String, String> entry : statePropertyMap.entrySet()) {
                        // Get just the column name from the key
                        String key = entry.getKey();
                        String colName = key
                                .substring(key.lastIndexOf(NAMESPACE_DELIMITER) + NAMESPACE_DELIMITER.length());
                        resultSetFlowFiles.set(i, session.putAttribute(resultSetFlowFiles.get(i),
                                "maxvalue." + colName, entry.getValue()));
                    }

                    //set count on all FlowFiles
                    if (maxRowsPerFlowFile > 0) {
                        resultSetFlowFiles.set(i, session.putAttribute(resultSetFlowFiles.get(i),
                                FRAGMENT_COUNT, Integer.toString(fragmentIndex)));
                    }
                }
            }
        } catch (final SQLException e) {
            throw e;
        }

        session.transfer(resultSetFlowFiles, REL_SUCCESS);

    } catch (final ProcessException | SQLException e) {
        logger.error("Unable to execute SQL select query {} due to {}", new Object[] { selectQuery, e });
        if (!resultSetFlowFiles.isEmpty()) {
            session.remove(resultSetFlowFiles);
        }
        context.yield();
    } finally {
        session.commit();
        try {
            // Update the state
            stateManager.setState(statePropertyMap, Scope.CLUSTER);
        } catch (IOException ioe) {
            getLogger().error("{} failed to update State Manager, maximum observed values will not be recorded",
                    new Object[] { this, ioe });
        }
    }
}
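
In the processor above, nrOfRows exists only so the session.write callback can hand a row count back to the enclosing method; a plain long local could not be reassigned inside the lambda. A reduced sketch of that single-slot holder pattern (the Runnable stands in for NiFi's OutputStreamCallback):

import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.LongStream;

class RowCountHolderSketch {
    public static void main(String[] args) {
        AtomicLong rowCount = new AtomicLong(0L);
        // The lambda cannot assign to a captured local, so the AtomicLong
        // serves as a mutable container it can write through.
        Runnable writeTask = () -> rowCount.set(LongStream.rangeClosed(1, 100).count());
        writeTask.run();
        System.out.println("rows written: " + rowCount.get()); // 100
    }
}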

From source file: org.lendingclub.mercator.aws.ELBScanner.java

protected void mapElbToInstance(JsonNode instances, String elbArn, String region) {

    AtomicLong oldestRelationshipTs = new AtomicLong(Long.MAX_VALUE);
    for (JsonNode i : instances) {

        String instanceName = i.path("instanceId").asText();
        String instanceArn = String.format("arn:aws:ec2:%s:%s:instance/%s", region, getAccountId(),
                instanceName);
        // logger.info("{} instanceArn: {}",elbArn,instanceArn);
        String cypher = "match (x:AwsElb {aws_arn:{elbArn}}), (y:AwsEc2Instance {aws_arn:{instanceArn}}) "
                + "merge (x)-[r:DISTRIBUTES_TRAFFIC_TO]->(y) set r.updateTs=timestamp() return x,r,y";
        getNeoRxClient().execCypher(cypher, "elbArn", elbArn, "instanceArn", instanceArn).forEach(r -> {
            oldestRelationshipTs
                    .set(Math.min(r.path("r").path("updateTs").asLong(), oldestRelationshipTs.get()));
        });

        if (oldestRelationshipTs.get() > 0 && oldestRelationshipTs.get() < Long.MAX_VALUE) {
            cypher = "match (x:AwsElb {aws_arn:{elbArn}})-[r:DISTRIBUTES_TRAFFIC_TO]-(y:AwsEc2Instance) where r.updateTs<{oldest}  delete r";
            getNeoRxClient().execCypher(cypher, "elbArn", elbArn, "oldest", oldestRelationshipTs.get());
        }
    }
}

From source file: org.apache.hadoop.hdfs.TestAutoEditRollWhenAvatarFailover.java

/**
 * Test if we can get block locations after killing primary avatar,
 * failing over to standby avatar (making it the new primary),
 * restarting a new standby avatar, killing the new primary avatar and
 * failing over to the restarted standby.
 *
 * Write logs for a while to make sure automatic rolls are triggered.
 */
@Test
public void testDoubleFailOverWithAutomaticRoll() throws Exception {
    setUp(false, "testDoubleFailOverWithAutomaticRoll");

    // To make sure it's never the case that both primary and standby
    // issue rolling, we use an injection handler.
    final AtomicBoolean startKeepThread = new AtomicBoolean(true);
    final AtomicInteger countAutoRolled = new AtomicInteger(0);
    final AtomicBoolean needFail = new AtomicBoolean(false);
    final AtomicLong currentThreadId = new AtomicLong(-1);
    final Object waitFor10Rolls = new Object();
    InjectionHandler.set(new InjectionHandler() {
        @Override
        protected void _processEvent(InjectionEventI event, Object... args) {
            if (event == InjectionEvent.FSEDIT_AFTER_AUTOMATIC_ROLL) {
                countAutoRolled.incrementAndGet();
                if (countAutoRolled.get() >= 10) {
                    synchronized (waitFor10Rolls) {
                        waitFor10Rolls.notifyAll();
                    }
                }

                if (!startKeepThread.get()) {
                    currentThreadId.set(-1);
                } else if (currentThreadId.get() == -1) {
                    currentThreadId.set(Thread.currentThread().getId());
                } else if (currentThreadId.get() != Thread.currentThread().getId()) {
                    LOG.warn("[Thread " + Thread.currentThread().getId() + "] expected: " + currentThreadId);
                    needFail.set(true);
                }

                LOG.info("[Thread " + Thread.currentThread().getId() + "] finish automatic log rolling, count "
                        + countAutoRolled.get());

                // Increase the rolling time a little bit once after 7 auto rolls 
                if (countAutoRolled.get() % 7 == 3) {
                    DFSTestUtil.waitNMilliSecond(75);
                }
            }
        }
    });

    FileSystem fs = cluster.getFileSystem();

    // Add some transactions during a period of time before failing over.
    long startTime = System.currentTimeMillis();
    for (int i = 0; i < 100; i++) {
        fs.setTimes(new Path("/"), 0, 0);
        DFSTestUtil.waitNMilliSecond(100);
        if (i % 10 == 0) {
            LOG.info("================== executed " + i + " queries");
        }
        if (countAutoRolled.get() >= 10) {
            LOG.info("Automatic rolled 10 times.");
            long duration = System.currentTimeMillis() - startTime;
            TestCase.assertTrue("Automatic rolled 10 times in just " + duration + " msecs, which is too short",
                    duration > 4500);
            break;
        }
    }
    TestCase.assertTrue("Only " + countAutoRolled + " automatic rolls triggered, which is lower than expected.",
            countAutoRolled.get() >= 10);

    // Tune the rolling timeout temporarily to avoid race conditions
    // only triggered in tests
    cluster.getPrimaryAvatar(0).avatar.namesystem.getFSImage().getEditLog().setTimeoutRollEdits(5000);
    cluster.getStandbyAvatar(0).avatar.namesystem.getFSImage().getEditLog().setTimeoutRollEdits(5000);

    LOG.info("================== killing primary 1");

    cluster.killPrimary();

    // Fail over and make sure after fail over, automatic edits roll still
    // will happen.
    countAutoRolled.set(0);
    startKeepThread.set(false);
    currentThreadId.set(-1);
    LOG.info("================== failing over 1");
    cluster.failOver();
    cluster.getPrimaryAvatar(0).avatar.namesystem.getFSImage().getEditLog().setTimeoutRollEdits(1000);
    LOG.info("================== restarting standby");
    cluster.restartStandby();
    cluster.getStandbyAvatar(0).avatar.namesystem.getFSImage().getEditLog().setTimeoutRollEdits(1000);
    LOG.info("================== Finish restarting standby");

    // Wait for automatic rolling to happen if there is no new transaction.
    startKeepThread.set(true);

    startTime = System.currentTimeMillis();
    long waitDeadLine = startTime + 20000;
    synchronized (waitFor10Rolls) {
        while (System.currentTimeMillis() < waitDeadLine && countAutoRolled.get() < 10) {
            waitFor10Rolls.wait(waitDeadLine - System.currentTimeMillis());
        }
    }
    TestCase.assertTrue("Only " + countAutoRolled + " automatic rolls triggered, which is lower than expected.",
            countAutoRolled.get() >= 10);
    long duration = System.currentTimeMillis() - startTime;
    TestCase.assertTrue("Automatic rolled 10 times in just " + duration + " msecs", duration > 9000);

    // failover back 
    countAutoRolled.set(0);
    startKeepThread.set(false);
    currentThreadId.set(-1);

    cluster.getPrimaryAvatar(0).avatar.namesystem.getFSImage().getEditLog().setTimeoutRollEdits(6000);
    cluster.getStandbyAvatar(0).avatar.namesystem.getFSImage().getEditLog().setTimeoutRollEdits(6000);

    LOG.info("================== killing primary 2");
    cluster.killPrimary();
    LOG.info("================== failing over 2");
    cluster.failOver();

    cluster.getPrimaryAvatar(0).avatar.namesystem.getFSImage().getEditLog().setTimeoutRollEdits(1000);

    // Make sure after failover back, automatic rolling can still happen.
    startKeepThread.set(true);

    for (int i = 0; i < 100; i++) {
        fs.setTimes(new Path("/"), 0, 0);
        DFSTestUtil.waitNMilliSecond(200);
        if (i % 10 == 0) {
            LOG.info("================== executed " + i + " queries");
        }
        if (countAutoRolled.get() > 10) {
            LOG.info("Automatic rolled 10 times.");
            duration = System.currentTimeMillis() - startTime;
            TestCase.assertTrue("Automatic rolled 10 times in just " + duration + " msecs, which is too short",
                    duration > 9000);
            break;
        }
    }
    TestCase.assertTrue("Only " + countAutoRolled + " automatic rolls triggered, which is lower than expected.",
            countAutoRolled.get() >= 10);

    InjectionHandler.clear();

    if (needFail.get()) {
        TestCase.fail("Automatic rolling doesn't happen in the same thread when should.");
    }
}
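
Here currentThreadId is an AtomicLong used as a thread-id slot with -1 as a "no owner" sentinel; set(-1) resets the slot between phases so the handler can detect rolls arriving from more than one thread. A distilled sketch of that sentinel check (in a genuinely concurrent setting the claim would need compareAndSet(-1, me)):

import java.util.concurrent.atomic.AtomicLong;

class SingleThreadCheckSketch {
    private final AtomicLong ownerId = new AtomicLong(-1);

    // Returns false once a second, different thread shows up.
    boolean recordCaller() {
        long me = Thread.currentThread().getId();
        long current = ownerId.get();
        if (current == -1) {
            ownerId.set(me); // claim ownership; fine when callers are serialized
            return true;
        }
        return current == me;
    }

    void reset() {
        ownerId.set(-1); // back to the sentinel
    }
}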

From source file: io.hummer.util.ws.WebServiceClient.java

private void pauseToAvoidSpamming() throws Exception {
    long minIntervalMS = 5000;
    long otherwiseSleepMS = 1500;
    long maxStoredHosts = 20;

    String host = new URL(this.endpointURL).getHost();
    synchronized (lastRequestedHosts) {
        if (!lastRequestedHosts.containsKey(host)) {
            lastRequestedHosts.put(host, new AtomicLong(System.currentTimeMillis()));
            return;
        }
    }
    AtomicLong time = lastRequestedHosts.get(host);
    synchronized (time) {
        if ((System.currentTimeMillis() - time.get()) < minIntervalMS) {
            logger.info("Sleeping some time to avoid spamming host '" + host + "'");
            Thread.sleep(otherwiseSleepMS);
            time.set(System.currentTimeMillis());
        }
    }
    if (lastRequestedHosts.size() > maxStoredHosts) {
        new CollectionsUtil().removeKeyWithSmallestValue(lastRequestedHosts);
    }
}
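
The helper above keeps one AtomicLong timestamp per host and refreshes it with set() after sleeping. A compact sketch of that per-key throttle (interval constants are illustrative):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

class PerHostThrottleSketch {
    private static final long MIN_INTERVAL_MS = 5000;
    private final Map<String, AtomicLong> lastRequest = new ConcurrentHashMap<>();

    void throttle(String host) throws InterruptedException {
        AtomicLong last = lastRequest.computeIfAbsent(host, h -> new AtomicLong(0L));
        synchronized (last) { // serialize per-host checks, as the original does
            long sinceLast = System.currentTimeMillis() - last.get();
            if (sinceLast < MIN_INTERVAL_MS) {
                Thread.sleep(MIN_INTERVAL_MS - sinceLast);
            }
            last.set(System.currentTimeMillis()); // record this request's time
        }
    }
}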

From source file: org.lol.reddit.reddit.api.RedditAPIIndividualSubredditDataRequester.java

public void performRequest(final Collection<String> subredditCanonicalIds, final TimestampBound timestampBound,
        final RequestResponseHandler<HashMap<String, RedditSubreddit>, SubredditRequestFailure> handler) {

    // TODO if there's a bulk API to do this, that would be good... :)

    final HashMap<String, RedditSubreddit> result = new HashMap<String, RedditSubreddit>();
    final AtomicBoolean stillOkay = new AtomicBoolean(true);
    final AtomicInteger requestsToGo = new AtomicInteger(subredditCanonicalIds.size());
    final AtomicLong oldestResult = new AtomicLong(Long.MAX_VALUE);

    final RequestResponseHandler<RedditSubreddit, SubredditRequestFailure> innerHandler = new RequestResponseHandler<RedditSubreddit, SubredditRequestFailure>() {
        @Override
        public void onRequestFailed(SubredditRequestFailure failureReason) {
            synchronized (result) {
                if (stillOkay.get()) {
                    stillOkay.set(false);
                    handler.onRequestFailed(failureReason);
                }
            }
        }

        @Override
        public void onRequestSuccess(RedditSubreddit innerResult, long timeCached) {
            synchronized (result) {
                if (stillOkay.get()) {

                    result.put(innerResult.getKey(), innerResult);
                    oldestResult.set(Math.min(oldestResult.get(), timeCached));

                    if (requestsToGo.decrementAndGet() == 0) {
                        handler.onRequestSuccess(result, oldestResult.get());
                    }
                }
            }
        }
    };

    for (String subredditCanonicalId : subredditCanonicalIds) {
        performRequest(subredditCanonicalId, timestampBound, innerHandler);
    }
}

From source file: net.dv8tion.jda.core.utils.PermissionUtil.java

/**
 * Pushes the computed allow/deny bit masks into the given AtomicLong holders.
 * <br>The allow argument receives the allowed bits, deny the denied bits
 */
private static void getExplicitOverrides(Channel channel, Member member, AtomicLong allow, AtomicLong deny) {
    PermissionOverride override = channel.getPermissionOverride(member.getGuild().getPublicRole());
    long allowRaw = 0;
    long denyRaw = 0;
    if (override != null) {
        denyRaw = override.getDeniedRaw();
        allowRaw = override.getAllowedRaw();
    }

    long allowRole = 0;
    long denyRole = 0;
    // create temporary bit containers for role cascade
    for (Role role : member.getRoles()) {
        override = channel.getPermissionOverride(role);
        if (override != null) {
            // important to update role cascade not others
            denyRole |= override.getDeniedRaw();
            allowRole |= override.getAllowedRaw();
        }
    }
    // Override the raw values of public role then apply role cascade
    allowRaw = (allowRaw & ~denyRole) | allowRole;
    denyRaw = (denyRaw & ~allowRole) | denyRole;

    override = channel.getPermissionOverride(member);
    if (override != null) {
        // finally override the role cascade with member overrides
        final long oDeny = override.getDeniedRaw();
        final long oAllow = override.getAllowedRaw();
        allowRaw = (allowRaw & ~oDeny) | oAllow;
        denyRaw = (denyRaw & ~oAllow) | oDeny;
        // this time we need to exclude new allowed bits from old denied ones and OR the new denied bits as final overrides
    }
    // set as resulting values
    allow.set(allowRaw);
    deny.set(denyRaw);
}
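
getExplicitOverrides treats its two AtomicLong parameters purely as out-parameters: the masks are computed in plain locals and published once with set() at the end. A hedged sketch of the same calling convention, applied to an unrelated computation:

import java.util.concurrent.atomic.AtomicLong;

class OutParameterSketch {
    // Returns both quotient and remainder through AtomicLong "out" slots.
    static void divMod(long a, long b, AtomicLong quotient, AtomicLong remainder) {
        quotient.set(a / b);
        remainder.set(a % b);
    }

    public static void main(String[] args) {
        AtomicLong q = new AtomicLong();
        AtomicLong r = new AtomicLong();
        divMod(17, 5, q, r);
        System.out.println(q.get() + " remainder " + r.get()); // 3 remainder 2
    }
}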