Example usage for java.util.concurrent.atomic AtomicLong AtomicLong

Introduction

This page collects example usages of the java.util.concurrent.atomic AtomicLong(long initialValue) constructor, drawn from open source projects.

Prototype

public AtomicLong(long initialValue) 

Document

Creates a new AtomicLong with the given initial value.
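
Before the project examples below, here is a minimal standalone sketch (not taken from any of the listed projects) showing the constructor together with the atomic operations the usage examples rely on most often:

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongExample {
    public static void main(String[] args) {
        // Start the counter at 100 instead of the default 0.
        final AtomicLong counter = new AtomicLong(100L);

        long next = counter.incrementAndGet();   // atomically bumps the value to 101 and returns it
        long bumped = counter.addAndGet(10L);    // atomically adds 10, returning 111

        // Conditional update: succeeds only if the current value is still 111.
        boolean swapped = counter.compareAndSet(111L, 0L);

        System.out.println(next + ", " + bumped + ", " + swapped + ", " + counter.get());
        // prints: 101, 111, true, 0
    }
}

Because these operations are atomic, a single AtomicLong can safely be shared across threads and callbacks, which is how the examples below use it: counting result rows inside a stream callback, or handing out write-ahead-log sequence ids across writers.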

Usage

From source file:org.apache.hadoop.hbase.regionserver.wal.TestWALReplay.java

/**
 * Tests for hbase-2727.
 * @throws Exception
 * @see https://issues.apache.org/jira/browse/HBASE-2727
 */
@Test
public void test2727() throws Exception {
    // Test being able to have > 1 set of edits in the recovered.edits directory.
    // Ensure edits are replayed properly.
    final TableName tableName = TableName.valueOf("test2727");
    HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    Path basedir = FSUtils.getTableDir(hbaseRootDir, tableName);
    deleteDir(basedir);

    HTableDescriptor htd = createBasic3FamilyHTD(tableName);
    HRegion region2 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd);
    HRegion.closeHRegion(region2);
    final byte[] rowName = tableName.getName();

    HLog wal1 = createWAL(this.conf);
    // Add 1k to each family.
    final int countPerFamily = 1000;
    final AtomicLong sequenceId = new AtomicLong(1);
    for (HColumnDescriptor hcd : htd.getFamilies()) {
        addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily, ee, wal1, htd, sequenceId);
    }
    wal1.close();
    runWALSplit(this.conf);

    HLog wal2 = createWAL(this.conf);
    // Add 1k to each family.
    for (HColumnDescriptor hcd : htd.getFamilies()) {
        addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily, ee, wal2, htd, sequenceId);
    }
    wal2.close();
    runWALSplit(this.conf);

    HLog wal3 = createWAL(this.conf);
    try {
        HRegion region = HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, htd, wal3);
        long seqid = region.getOpenSeqNum();
        // The region opens with sequenceId 1. After the 6k edits above, the shared sequenceId reaches 6k + 1,
        // and on open the region replays those edits and advances its open sequence number one past it.
        assertTrue(seqid > sequenceId.get());
        assertEquals(seqid - 1, sequenceId.get());
        LOG.debug("region.getOpenSeqNum(): " + region.getOpenSeqNum() + ", wal3.id: " + sequenceId.get());

        // TODO: Scan all.
        region.close();
    } finally {
        wal3.closeAndDelete();
    }
}

From source file:org.apache.nifi.processors.cassandra.QueryCassandra.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile fileToProcess = null;
    if (context.hasIncomingConnection()) {
        fileToProcess = session.get();

        // If we have no FlowFile, and all incoming connections are self-loops then we can continue on.
        // However, if we have no FlowFile and we have connections coming from other Processors, then
        // we know that we should run only if we have a FlowFile.
        if (fileToProcess == null && context.hasNonLoopConnection()) {
            return;
        }
    }

    final ComponentLog logger = getLogger();
    final String selectQuery = context.getProperty(CQL_SELECT_QUERY).evaluateAttributeExpressions(fileToProcess)
            .getValue();
    final long queryTimeout = context.getProperty(QUERY_TIMEOUT).asTimePeriod(TimeUnit.MILLISECONDS);
    final String outputFormat = context.getProperty(OUTPUT_FORMAT).getValue();
    final Charset charset = Charset.forName(context.getProperty(CHARSET).getValue());
    final StopWatch stopWatch = new StopWatch(true);

    if (fileToProcess == null) {
        fileToProcess = session.create();
    }

    try {
        // The documentation for the driver recommends the session remain open the entire time the processor is running
        // and states that it is thread-safe. This is why connectionSession is not in a try-with-resources.
        final Session connectionSession = cassandraSession.get();
        final ResultSetFuture queryFuture = connectionSession.executeAsync(selectQuery);
        final AtomicLong nrOfRows = new AtomicLong(0L);

        fileToProcess = session.write(fileToProcess, new OutputStreamCallback() {
            @Override
            public void process(final OutputStream out) throws IOException {
                try {
                    logger.debug("Executing CQL query {}", new Object[] { selectQuery });
                    final ResultSet resultSet;
                    if (queryTimeout > 0) {
                        resultSet = queryFuture.getUninterruptibly(queryTimeout, TimeUnit.MILLISECONDS);
                        if (AVRO_FORMAT.equals(outputFormat)) {
                            nrOfRows.set(
                                    convertToAvroStream(resultSet, out, queryTimeout, TimeUnit.MILLISECONDS));
                        } else if (JSON_FORMAT.equals(outputFormat)) {
                            nrOfRows.set(convertToJsonStream(resultSet, out, charset, queryTimeout,
                                    TimeUnit.MILLISECONDS));
                        }
                    } else {
                        resultSet = queryFuture.getUninterruptibly();
                        if (AVRO_FORMAT.equals(outputFormat)) {
                            nrOfRows.set(convertToAvroStream(resultSet, out, 0, null));
                        } else if (JSON_FORMAT.equals(outputFormat)) {
                            nrOfRows.set(convertToJsonStream(resultSet, out, charset, 0, null));
                        }
                    }

                } catch (final TimeoutException | InterruptedException | ExecutionException e) {
                    throw new ProcessException(e);
                }
            }
        });

        // Set an attribute recording how many rows were selected
        fileToProcess = session.putAttribute(fileToProcess, RESULT_ROW_COUNT, String.valueOf(nrOfRows.get()));

        logger.info("{} contains {} Avro records; transferring to 'success'",
                new Object[] { fileToProcess, nrOfRows.get() });
        session.getProvenanceReporter().modifyContent(fileToProcess, "Retrieved " + nrOfRows.get() + " rows",
                stopWatch.getElapsed(TimeUnit.MILLISECONDS));
        session.transfer(fileToProcess, REL_SUCCESS);

    } catch (final NoHostAvailableException nhae) {
        getLogger().error(
                "No host in the Cassandra cluster can be contacted successfully to execute this query", nhae);
        // Log up to 10 error messages. Otherwise if a 1000-node cluster was specified but there was no connectivity,
        // a thousand error messages would be logged. However we would like information from Cassandra itself, so
        // cap the error limit at 10, format the messages, and don't include the stack trace (it is displayed by the
        // logger message above).
        getLogger().error(nhae.getCustomMessage(10, true, false));
        fileToProcess = session.penalize(fileToProcess);
        session.transfer(fileToProcess, REL_RETRY);

    } catch (final QueryExecutionException qee) {
        logger.error("Cannot execute the query with the requested consistency level successfully", qee);
        fileToProcess = session.penalize(fileToProcess);
        session.transfer(fileToProcess, REL_RETRY);

    } catch (final QueryValidationException qve) {
        if (context.hasIncomingConnection()) {
            logger.error(
                    "The CQL query {} is invalid due to syntax error, authorization issue, or another "
                            + "validation problem; routing {} to failure",
                    new Object[] { selectQuery, fileToProcess }, qve);
            fileToProcess = session.penalize(fileToProcess);
            session.transfer(fileToProcess, REL_FAILURE);
        } else {
            // This can happen if any exceptions occur while setting up the connection, statement, etc.
            logger.error("The CQL query {} is invalid due to syntax error, authorization issue, or another "
                    + "validation problem", new Object[] { selectQuery }, qve);
            session.remove(fileToProcess);
            context.yield();
        }
    } catch (final ProcessException e) {
        if (context.hasIncomingConnection()) {
            logger.error("Unable to execute CQL select query {} for {} due to {}; routing to failure",
                    new Object[] { selectQuery, fileToProcess, e });
            fileToProcess = session.penalize(fileToProcess);
            session.transfer(fileToProcess, REL_FAILURE);
        } else {
            logger.error("Unable to execute CQL select query {} due to {}", new Object[] { selectQuery, e });
            session.remove(fileToProcess);
            context.yield();
        }
    }
}

From source file:org.apache.nifi.processors.standard.AbstractExecuteSQL.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile fileToProcess = null;
    if (context.hasIncomingConnection()) {
        fileToProcess = session.get();

        // If we have no FlowFile, and all incoming connections are self-loops then we can continue on.
        // However, if we have no FlowFile and we have connections coming from other Processors, then
        // we know that we should run only if we have a FlowFile.
        if (fileToProcess == null && context.hasNonLoopConnection()) {
            return;
        }
    }

    final List<FlowFile> resultSetFlowFiles = new ArrayList<>();

    final ComponentLog logger = getLogger();
    final Integer queryTimeout = context.getProperty(QUERY_TIMEOUT).asTimePeriod(TimeUnit.SECONDS).intValue();
    final Integer maxRowsPerFlowFile = context.getProperty(MAX_ROWS_PER_FLOW_FILE)
            .evaluateAttributeExpressions().asInteger();
    final Integer outputBatchSizeField = context.getProperty(OUTPUT_BATCH_SIZE).evaluateAttributeExpressions()
            .asInteger();
    final int outputBatchSize = outputBatchSizeField == null ? 0 : outputBatchSizeField;

    SqlWriter sqlWriter = configureSqlWriter(session, context, fileToProcess);

    final String selectQuery;
    if (context.getProperty(SQL_SELECT_QUERY).isSet()) {
        selectQuery = context.getProperty(SQL_SELECT_QUERY).evaluateAttributeExpressions(fileToProcess)
                .getValue();
    } else {
        // If the query is not set, then an incoming flow file is required, and expected to contain a valid SQL select query.
        // If there is no incoming connection, onTrigger will not be called as the processor will fail when scheduled.
        final StringBuilder queryContents = new StringBuilder();
        session.read(fileToProcess, in -> queryContents.append(IOUtils.toString(in, Charset.defaultCharset())));
        selectQuery = queryContents.toString();
    }

    int resultCount = 0;
    try (final Connection con = dbcpService
            .getConnection(fileToProcess == null ? Collections.emptyMap() : fileToProcess.getAttributes());
            final PreparedStatement st = con.prepareStatement(selectQuery)) {
        st.setQueryTimeout(queryTimeout); // timeout in seconds

        if (fileToProcess != null) {
            JdbcCommon.setParameters(st, fileToProcess.getAttributes());
        }
        logger.debug("Executing query {}", new Object[] { selectQuery });

        int fragmentIndex = 0;
        final String fragmentId = UUID.randomUUID().toString();

        final StopWatch executionTime = new StopWatch(true);

        boolean hasResults = st.execute();

        long executionTimeElapsed = executionTime.getElapsed(TimeUnit.MILLISECONDS);

        boolean hasUpdateCount = st.getUpdateCount() != -1;

        while (hasResults || hasUpdateCount) {
            //getMoreResults() and execute() return false to indicate that the result of the statement is just a number and not a ResultSet
            if (hasResults) {
                final AtomicLong nrOfRows = new AtomicLong(0L);

                try {
                    final ResultSet resultSet = st.getResultSet();
                    do {
                        final StopWatch fetchTime = new StopWatch(true);

                        FlowFile resultSetFF;
                        if (fileToProcess == null) {
                            resultSetFF = session.create();
                        } else {
                            resultSetFF = session.create(fileToProcess);
                            resultSetFF = session.putAllAttributes(resultSetFF, fileToProcess.getAttributes());
                        }

                        try {
                            resultSetFF = session.write(resultSetFF, out -> {
                                try {
                                    nrOfRows.set(sqlWriter.writeResultSet(resultSet, out, getLogger(), null));
                                } catch (Exception e) {
                                    throw (e instanceof ProcessException) ? (ProcessException) e
                                            : new ProcessException(e);
                                }
                            });

                            long fetchTimeElapsed = fetchTime.getElapsed(TimeUnit.MILLISECONDS);

                            // set attributes
                            final Map<String, String> attributesToAdd = new HashMap<>();
                            attributesToAdd.put(RESULT_ROW_COUNT, String.valueOf(nrOfRows.get()));
                            attributesToAdd.put(RESULT_QUERY_DURATION,
                                    String.valueOf(executionTimeElapsed + fetchTimeElapsed));
                            attributesToAdd.put(RESULT_QUERY_EXECUTION_TIME,
                                    String.valueOf(executionTimeElapsed));
                            attributesToAdd.put(RESULT_QUERY_FETCH_TIME, String.valueOf(fetchTimeElapsed));
                            attributesToAdd.put(RESULTSET_INDEX, String.valueOf(resultCount));
                            attributesToAdd.putAll(sqlWriter.getAttributesToAdd());
                            resultSetFF = session.putAllAttributes(resultSetFF, attributesToAdd);
                            sqlWriter.updateCounters(session);

                            // if fragmented ResultSet, determine if we should keep this fragment; set fragment attributes
                            if (maxRowsPerFlowFile > 0) {
                                // if row count is zero and this is not the first fragment, drop it instead of committing it.
                                if (nrOfRows.get() == 0 && fragmentIndex > 0) {
                                    session.remove(resultSetFF);
                                    break;
                                }

                                resultSetFF = session.putAttribute(resultSetFF, FRAGMENT_ID, fragmentId);
                                resultSetFF = session.putAttribute(resultSetFF, FRAGMENT_INDEX,
                                        String.valueOf(fragmentIndex));
                            }

                            logger.info("{} contains {} records; transferring to 'success'",
                                    new Object[] { resultSetFF, nrOfRows.get() });
                            // Report a FETCH event if there was an incoming flow file, or a RECEIVE event otherwise
                            if (context.hasIncomingConnection()) {
                                session.getProvenanceReporter().fetch(resultSetFF,
                                        "Retrieved " + nrOfRows.get() + " rows",
                                        executionTimeElapsed + fetchTimeElapsed);
                            } else {
                                session.getProvenanceReporter().receive(resultSetFF,
                                        "Retrieved " + nrOfRows.get() + " rows",
                                        executionTimeElapsed + fetchTimeElapsed);
                            }
                            resultSetFlowFiles.add(resultSetFF);

                            // If we've reached the batch size, send out the flow files
                            if (outputBatchSize > 0 && resultSetFlowFiles.size() >= outputBatchSize) {
                                session.transfer(resultSetFlowFiles, REL_SUCCESS);
                                session.commit();
                                resultSetFlowFiles.clear();
                            }

                            fragmentIndex++;
                        } catch (Exception e) {
                            // Remove the result set flow file and propagate the exception
                            session.remove(resultSetFF);
                            if (e instanceof ProcessException) {
                                throw (ProcessException) e;
                            } else {
                                throw new ProcessException(e);
                            }
                        }
                    } while (maxRowsPerFlowFile > 0 && nrOfRows.get() == maxRowsPerFlowFile);

                    // If we are splitting results but not outputting batches, set count on all FlowFiles
                    if (outputBatchSize == 0 && maxRowsPerFlowFile > 0) {
                        for (int i = 0; i < resultSetFlowFiles.size(); i++) {
                            resultSetFlowFiles.set(i, session.putAttribute(resultSetFlowFiles.get(i),
                                    FRAGMENT_COUNT, Integer.toString(fragmentIndex)));
                        }
                    }
                } catch (final SQLException e) {
                    throw new ProcessException(e);
                }

                resultCount++;
            }

            // Are there any more result sets?
            try {
                hasResults = st.getMoreResults(Statement.CLOSE_CURRENT_RESULT);
                hasUpdateCount = st.getUpdateCount() != -1;
            } catch (SQLException ex) {
                hasResults = false;
                hasUpdateCount = false;
            }
        }

        // Transfer any remaining files to SUCCESS
        session.transfer(resultSetFlowFiles, REL_SUCCESS);
        resultSetFlowFiles.clear();

        //If we had at least one result then it's OK to drop the original file, but if we had no results then
        //  pass the original flow file down the line to trigger downstream processors
        if (fileToProcess != null) {
            if (resultCount > 0) {
                session.remove(fileToProcess);
            } else {
                fileToProcess = session.write(fileToProcess,
                        out -> sqlWriter.writeEmptyResultSet(out, getLogger()));
                fileToProcess = session.putAttribute(fileToProcess, RESULT_ROW_COUNT, "0");
                fileToProcess = session.putAttribute(fileToProcess, CoreAttributes.MIME_TYPE.key(),
                        sqlWriter.getMimeType());
                session.transfer(fileToProcess, REL_SUCCESS);
            }
        } else if (resultCount == 0) {
            //If we had no inbound FlowFile, no exceptions, and the SQL generated no result sets (Insert/Update/Delete statements only)
            // Then generate an empty Output FlowFile
            FlowFile resultSetFF = session.create();

            resultSetFF = session.write(resultSetFF, out -> sqlWriter.writeEmptyResultSet(out, getLogger()));
            resultSetFF = session.putAttribute(resultSetFF, RESULT_ROW_COUNT, "0");
            resultSetFF = session.putAttribute(resultSetFF, CoreAttributes.MIME_TYPE.key(),
                    sqlWriter.getMimeType());
            session.transfer(resultSetFF, REL_SUCCESS);
        }
    } catch (final ProcessException | SQLException e) {
        //If we had at least one result then it's OK to drop the original file, but if we had no results then
        //  pass the original flow file down the line to trigger downstream processors
        if (fileToProcess == null) {
            // This can happen if any exceptions occur while setting up the connection, statement, etc.
            logger.error("Unable to execute SQL select query {} due to {}. No FlowFile to route to failure",
                    new Object[] { selectQuery, e });
            context.yield();
        } else {
            if (context.hasIncomingConnection()) {
                logger.error("Unable to execute SQL select query {} for {} due to {}; routing to failure",
                        new Object[] { selectQuery, fileToProcess, e });
                fileToProcess = session.penalize(fileToProcess);
            } else {
                logger.error("Unable to execute SQL select query {} due to {}; routing to failure",
                        new Object[] { selectQuery, e });
                context.yield();
            }
            session.transfer(fileToProcess, REL_FAILURE);
        }
    }
}

From source file:de.hybris.platform.jdbcwrapper.ConnectionPoolTest.java

@Test
public void testJndiDataSource() throws SQLException {
    TestUtils.disableFileAnalyzer("log error expected");
    final Collection<TestConnectionImpl> allConnections = new ConcurrentLinkedQueue<TestConnectionImpl>();
    final AtomicLong connectionCounter = new AtomicLong(0);
    final HybrisDataSource dataSource = createDataSource(Registry.getCurrentTenantNoFallback(), allConnections,
            connectionCounter, false, true);

    final Connection conn = dataSource.getConnection();
    conn.close();

    // kill data source
    dataSource.destroy();
    assertTrue(dataSource.getConnectionPool().isPoolClosed());
    LOG.info("data source destroyed");
    TestUtils.enableFileAnalyzer();
}

From source file:io.druid.server.namespace.cache.NamespaceExtractionCacheManagerExecutorsTest.java

@Test(timeout = 50_000)
public void testShutdown()
        throws NoSuchFieldException, IllegalAccessException, InterruptedException, ExecutionException {
    final CountDownLatch latch = new CountDownLatch(1);
    final ListenableFuture future;
    final AtomicLong runs = new AtomicLong(0);
    long prior = 0;
    try {

        final URIExtractionNamespace namespace = new URIExtractionNamespace("ns", tmpFile.toURI(),
                new URIExtractionNamespace.ObjectMapperFlatDataParser(
                        URIExtractionNamespaceTest.registerTypes(new ObjectMapper())),
                new Period(1l), null);
        final String cacheId = UUID.randomUUID().toString();
        final Runnable runnable = manager.getPostRunnable(namespace, factory, cacheId);
        future = manager.schedule(namespace, factory, new Runnable() {
            @Override
            public void run() {
                runnable.run();
                latch.countDown();
                runs.incrementAndGet();
            }
        }, cacheId);

        latch.await();
        Assert.assertFalse(future.isCancelled());
        Assert.assertFalse(future.isDone());
        prior = runs.get();
        while (runs.get() <= prior) {
            Thread.sleep(50);
        }
        Assert.assertTrue(runs.get() > prior);
    } finally {
        lifecycle.stop();
    }
    manager.waitForServiceToEnd(1_000, TimeUnit.MILLISECONDS);

    prior = runs.get();
    Thread.sleep(50);
    Assert.assertEquals(prior, runs.get());

    Field execField = NamespaceExtractionCacheManager.class
            .getDeclaredField("listeningScheduledExecutorService");
    execField.setAccessible(true);
    Assert.assertTrue(((ListeningScheduledExecutorService) execField.get(manager)).isShutdown());
    Assert.assertTrue(((ListeningScheduledExecutorService) execField.get(manager)).isTerminated());
}

From source file:org.lendingclub.mercator.docker.SwarmScanner.java

public void scanServicesForSwarm(String swarmClusterId) {

    JsonNode response = getRestClient().getServices();

    AtomicLong earlistUpdate = new AtomicLong(Long.MAX_VALUE);
    AtomicBoolean error = new AtomicBoolean(false);
    response.forEach(it -> {
        try {
            ObjectNode n = flattenService(it);
            n.put("swarmClusterId", swarmClusterId);
            dockerScanner.getNeoRxClient().execCypher(
                    "merge (x:DockerService {serviceId:{serviceId}}) set x+={props}, x.updateTs=timestamp() return x",
                    "serviceId", n.get("serviceId").asText(), "props", n).forEach(svc -> {
                        removeDockerLabels("DockerService", "serviceId", n.get("serviceId").asText(), n, svc);
                        earlistUpdate.set(
                                Math.min(earlistUpdate.get(), svc.path("updateTs").asLong(Long.MAX_VALUE)));
                    });
            dockerScanner.getNeoRxClient().execCypher(
                    "match (swarm:DockerSwarm {swarmClusterId:{swarmClusterId}}),(service:DockerService{serviceId:{serviceId}}) merge (swarm)-[x:CONTAINS]->(service) set x.updateTs=timestamp()",
                    "swarmClusterId", swarmClusterId, "serviceId", n.path("serviceId").asText());

        } catch (Exception e) {
            logger.warn("problem updating service", e);
            error.set(true);
        }
    });
    if (error.get() == false) {
        if (earlistUpdate.get() < System.currentTimeMillis()) {
            dockerScanner.getNeoRxClient().execCypher(
                    "match (x:DockerService) where x.swarmClusterId={swarmClusterId} and x.updateTs<{cutoff} detach delete x",
                    "cutoff", earlistUpdate.get(), "swarmClusterId", swarmClusterId);
        }
    }

}

From source file:org.apache.hadoop.hbase.wal.TestDefaultWALProvider.java

/**
 * Tests wal archiving by adding data, doing flushing/rolling and checking we archive old logs
 * and also don't archive "live logs" (that is, a log with un-flushed entries).
 * <p>
 * This is what it does:
 * It creates two regions, and does a series of inserts along with log rolling.
 * Whenever a WAL is rolled, HLogBase checks previous WALs for archiving. A WAL is eligible for
 * archiving if every region that has entries in that WAL file has flushed past its maximum
 * sequence id recorded in that WAL file.
 * <p>
 * @throws IOException
 */
@Test
public void testWALArchiving() throws IOException {
    LOG.debug("testWALArchiving");
    HTableDescriptor table1 = new HTableDescriptor(TableName.valueOf("t1"))
            .addFamily(new HColumnDescriptor("row"));
    HTableDescriptor table2 = new HTableDescriptor(TableName.valueOf("t2"))
            .addFamily(new HColumnDescriptor("row"));
    final Configuration localConf = new Configuration(conf);
    localConf.set(WALFactory.WAL_PROVIDER, DefaultWALProvider.class.getName());
    final WALFactory wals = new WALFactory(localConf, null, currentTest.getMethodName());
    try {
        final WAL wal = wals.getWAL(UNSPECIFIED_REGION);
        assertEquals(0, DefaultWALProvider.getNumRolledLogFiles(wal));
        HRegionInfo hri1 = new HRegionInfo(table1.getTableName(), HConstants.EMPTY_START_ROW,
                HConstants.EMPTY_END_ROW);
        HRegionInfo hri2 = new HRegionInfo(table2.getTableName(), HConstants.EMPTY_START_ROW,
                HConstants.EMPTY_END_ROW);
        // ensure that we don't split the regions.
        hri1.setSplit(false);
        hri2.setSplit(false);
        // variables to mock region sequenceIds.
        final AtomicLong sequenceId1 = new AtomicLong(1);
        final AtomicLong sequenceId2 = new AtomicLong(1);
        // start with the testing logic: insert a waledit, and roll writer
        addEdits(wal, hri1, table1, 1, sequenceId1);
        wal.rollWriter();
        // assert that the wal is rolled
        assertEquals(1, DefaultWALProvider.getNumRolledLogFiles(wal));
        // add edits in the second wal file, and roll writer.
        addEdits(wal, hri1, table1, 1, sequenceId1);
        wal.rollWriter();
        // assert that the wal is rolled
        assertEquals(2, DefaultWALProvider.getNumRolledLogFiles(wal));
        // add a waledit to table1, and flush the region.
        addEdits(wal, hri1, table1, 3, sequenceId1);
        flushRegion(wal, hri1.getEncodedNameAsBytes(), table1.getFamiliesKeys());
        // roll log; all old logs should be archived.
        wal.rollWriter();
        assertEquals(0, DefaultWALProvider.getNumRolledLogFiles(wal));
        // add an edit to table2, and roll writer
        addEdits(wal, hri2, table2, 1, sequenceId2);
        wal.rollWriter();
        assertEquals(1, DefaultWALProvider.getNumRolledLogFiles(wal));
        // add edits for table1, and roll writer
        addEdits(wal, hri1, table1, 2, sequenceId1);
        wal.rollWriter();
        assertEquals(2, DefaultWALProvider.getNumRolledLogFiles(wal));
        // add edits for table2, and flush hri1.
        addEdits(wal, hri2, table2, 2, sequenceId2);
        flushRegion(wal, hri1.getEncodedNameAsBytes(), table2.getFamiliesKeys());
        // the log : region-sequenceId map is
        // log1: region2 (unflushed)
        // log2: region1 (flushed)
        // log3: region2 (unflushed)
        // roll the writer; log2 should be archived.
        wal.rollWriter();
        assertEquals(2, DefaultWALProvider.getNumRolledLogFiles(wal));
        // flush region2, and all logs should be archived.
        addEdits(wal, hri2, table2, 2, sequenceId2);
        flushRegion(wal, hri2.getEncodedNameAsBytes(), table2.getFamiliesKeys());
        wal.rollWriter();
        assertEquals(0, DefaultWALProvider.getNumRolledLogFiles(wal));
    } finally {
        if (wals != null) {
            wals.close();
        }
    }
}

From source file:org.apache.nifi.processors.hive.SelectHiveQL.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    final FlowFile fileToProcess = (context.hasIncomingConnection() ? session.get() : null);
    FlowFile flowfile = null;

    // If we have no FlowFile, and all incoming connections are self-loops then we can continue on.
    // However, if we have no FlowFile and we have connections coming from other Processors, then
    // we know that we should run only if we have a FlowFile.
    if (context.hasIncomingConnection()) {
        if (fileToProcess == null && context.hasNonLoopConnection()) {
            return;
        }
    }

    final ComponentLog logger = getLogger();
    final HiveDBCPService dbcpService = context.getProperty(HIVE_DBCP_SERVICE)
            .asControllerService(HiveDBCPService.class);
    final Charset charset = Charset.forName(context.getProperty(CHARSET).getValue());

    final boolean flowbased = !(context.getProperty(HIVEQL_SELECT_QUERY).isSet());

    // Source the SQL
    final String selectQuery;

    if (context.getProperty(HIVEQL_SELECT_QUERY).isSet()) {
        selectQuery = context.getProperty(HIVEQL_SELECT_QUERY).evaluateAttributeExpressions(fileToProcess)
                .getValue();
    } else {
        // If the query is not set, then an incoming flow file is required, and expected to contain a valid SQL select query.
        // If there is no incoming connection, onTrigger will not be called as the processor will fail when scheduled.
        final StringBuilder queryContents = new StringBuilder();
        session.read(fileToProcess, new InputStreamCallback() {
            @Override
            public void process(InputStream in) throws IOException {
                queryContents.append(IOUtils.toString(in));
            }
        });
        selectQuery = queryContents.toString();
    }

    final String outputFormat = context.getProperty(HIVEQL_OUTPUT_FORMAT).getValue();
    final StopWatch stopWatch = new StopWatch(true);
    final boolean header = context.getProperty(HIVEQL_CSV_HEADER).asBoolean();
    final String altHeader = context.getProperty(HIVEQL_CSV_ALT_HEADER)
            .evaluateAttributeExpressions(fileToProcess).getValue();
    final String delimiter = context.getProperty(HIVEQL_CSV_DELIMITER)
            .evaluateAttributeExpressions(fileToProcess).getValue();
    final boolean quote = context.getProperty(HIVEQL_CSV_QUOTE).asBoolean();
    final boolean escape = context.getProperty(HIVEQL_CSV_HEADER).asBoolean();

    try (final Connection con = dbcpService.getConnection();
            final Statement st = (flowbased ? con.prepareStatement(selectQuery) : con.createStatement())) {

        final AtomicLong nrOfRows = new AtomicLong(0L);
        if (fileToProcess == null) {
            flowfile = session.create();
        } else {
            flowfile = fileToProcess;
        }

        flowfile = session.write(flowfile, new OutputStreamCallback() {
            @Override
            public void process(final OutputStream out) throws IOException {
                try {
                    logger.debug("Executing query {}", new Object[] { selectQuery });
                    if (flowbased) {
                        // Hive JDBC Doesn't Support this yet:
                        // ParameterMetaData pmd = ((PreparedStatement)st).getParameterMetaData();
                        // int paramCount = pmd.getParameterCount();

                        // Alternate way to determine number of params in SQL.
                        int paramCount = StringUtils.countMatches(selectQuery, "?");

                        if (paramCount > 0) {
                            setParameters(1, (PreparedStatement) st, paramCount, fileToProcess.getAttributes());
                        }
                    }

                    final ResultSet resultSet = (flowbased ? ((PreparedStatement) st).executeQuery()
                            : st.executeQuery(selectQuery));

                    if (AVRO.equals(outputFormat)) {
                        nrOfRows.set(HiveJdbcCommon.convertToAvroStream(resultSet, out));
                    } else if (CSV.equals(outputFormat)) {
                        CsvOutputOptions options = new CsvOutputOptions(header, altHeader, delimiter, quote,
                                escape);
                        nrOfRows.set(HiveJdbcCommon.convertToCsvStream(resultSet, out, options));
                    } else {
                        nrOfRows.set(0L);
                        throw new ProcessException("Unsupported output format: " + outputFormat);
                    }
                } catch (final SQLException e) {
                    throw new ProcessException(e);
                }
            }
        });

        // Set attribute for how many rows were selected
        flowfile = session.putAttribute(flowfile, RESULT_ROW_COUNT, String.valueOf(nrOfRows.get()));

        // Set MIME type on output document and add extension to filename
        if (AVRO.equals(outputFormat)) {
            flowfile = session.putAttribute(flowfile, CoreAttributes.MIME_TYPE.key(), AVRO_MIME_TYPE);
            flowfile = session.putAttribute(flowfile, CoreAttributes.FILENAME.key(),
                    flowfile.getAttribute(CoreAttributes.FILENAME.key()) + ".avro");
        } else if (CSV.equals(outputFormat)) {
            flowfile = session.putAttribute(flowfile, CoreAttributes.MIME_TYPE.key(), CSV_MIME_TYPE);
            flowfile = session.putAttribute(flowfile, CoreAttributes.FILENAME.key(),
                    flowfile.getAttribute(CoreAttributes.FILENAME.key()) + ".csv");
        }

        logger.info("{} contains {} Avro records; transferring to 'success'",
                new Object[] { flowfile, nrOfRows.get() });

        if (context.hasIncomingConnection()) {
            // If the flow file came from an incoming connection, issue a Modify Content provenance event

            session.getProvenanceReporter().modifyContent(flowfile, "Retrieved " + nrOfRows.get() + " rows",
                    stopWatch.getElapsed(TimeUnit.MILLISECONDS));
        } else {
            // If we created a flow file from rows received from Hive, issue a Receive provenance event
            session.getProvenanceReporter().receive(flowfile, dbcpService.getConnectionURL(),
                    stopWatch.getElapsed(TimeUnit.MILLISECONDS));
        }
        session.transfer(flowfile, REL_SUCCESS);
    } catch (final ProcessException | SQLException e) {
        logger.error("Issue processing SQL {} due to {}.", new Object[] { selectQuery, e });
        if (flowfile == null) {
            // This can happen if any exceptions occur while setting up the connection, statement, etc.
            logger.error("Unable to execute HiveQL select query {} due to {}. No FlowFile to route to failure",
                    new Object[] { selectQuery, e });
            context.yield();
        } else {
            if (context.hasIncomingConnection()) {
                logger.error("Unable to execute HiveQL select query {} for {} due to {}; routing to failure",
                        new Object[] { selectQuery, flowfile, e });
                flowfile = session.penalize(flowfile);
            } else {
                logger.error("Unable to execute HiveQL select query {} due to {}; routing to failure",
                        new Object[] { selectQuery, e });
                context.yield();
            }
            session.transfer(flowfile, REL_FAILURE);
        }
    } finally {

    }
}

From source file:io.hummer.util.ws.WebServiceClient.java

private void pauseToAvoidSpamming() throws Exception {
    long minIntervalMS = 5000;
    long otherwiseSleepMS = 1500;
    long maxStoredHosts = 20;

    String host = new URL(this.endpointURL).getHost();
    synchronized (lastRequestedHosts) {
        if (!lastRequestedHosts.containsKey(host)) {
            lastRequestedHosts.put(host, new AtomicLong(System.currentTimeMillis()));
            return;
        }
    }
    AtomicLong time = lastRequestedHosts.get(host);
    synchronized (time) {
        if ((System.currentTimeMillis() - time.get()) < minIntervalMS) {
            logger.info("Sleeping some time to avoid spamming host '" + host + "'");
            Thread.sleep(otherwiseSleepMS);
            time.set(System.currentTimeMillis());
        }
    }
    if (lastRequestedHosts.size() > maxStoredHosts) {
        new CollectionsUtil().removeKeyWithSmallestValue(lastRequestedHosts);
    }
}

From source file:org.apache.hadoop.hbase.regionserver.TestAtomicOperation.java

/**
 * Test multi-threaded row mutations.
 */
@Test
public void testRowMutationMultiThreads() throws IOException {

    LOG.info("Starting test testRowMutationMultiThreads");
    initHRegion(tableName, name.getMethodName(), fam1);

    // create 10 threads, each will alternate between adding and
    // removing a column
    int numThreads = 10;
    int opsPerThread = 500;
    AtomicOperation[] all = new AtomicOperation[numThreads];

    AtomicLong timeStamps = new AtomicLong(0);
    AtomicInteger failures = new AtomicInteger(0);
    // create all threads
    for (int i = 0; i < numThreads; i++) {
        all[i] = new AtomicOperation(region, opsPerThread, timeStamps, failures) {
            @Override
            public void run() {
                boolean op = true;
                for (int i = 0; i < numOps; i++) {
                    try {
                        // throw in some flushes
                        if (i % 10 == 0) {
                            synchronized (region) {
                                LOG.debug("flushing");
                                region.flushcache();
                                if (i % 100 == 0) {
                                    region.compactStores();
                                }
                            }
                        }
                        long ts = timeStamps.incrementAndGet();
                        RowMutations rm = new RowMutations(row);
                        if (op) {
                            Put p = new Put(row, ts);
                            p.add(fam1, qual1, value1);
                            rm.add(p);
                            Delete d = new Delete(row);
                            d.deleteColumns(fam1, qual2, ts);
                            rm.add(d);
                        } else {
                            Delete d = new Delete(row);
                            d.deleteColumns(fam1, qual1, ts);
                            rm.add(d);
                            Put p = new Put(row, ts);
                            p.add(fam1, qual2, value2);
                            rm.add(p);
                        }
                        region.mutateRow(rm);
                        op ^= true;
                        // check: should always see exactly one column
                        Get g = new Get(row);
                        Result r = region.get(g);
                        if (r.size() != 1) {
                            LOG.debug(r);
                            failures.incrementAndGet();
                            fail();
                        }
                    } catch (IOException e) {
                        e.printStackTrace();
                        failures.incrementAndGet();
                        fail();
                    }
                }
            }
        };
    }

    // run all threads
    for (int i = 0; i < numThreads; i++) {
        all[i].start();
    }

    // wait for all threads to finish
    for (int i = 0; i < numThreads; i++) {
        try {
            all[i].join();
        } catch (InterruptedException e) {
        }
    }
    assertEquals(0, failures.get());
}