Example usage for java.util.concurrent.atomic AtomicLong AtomicLong

Introduction

On this page you can find example usage for the java.util.concurrent.atomic.AtomicLong(long) constructor.

Prototype

public AtomicLong(long initialValue) 

Document

Creates a new AtomicLong with the given initial value.
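
A minimal, self-contained sketch (not taken from the examples below) showing the constructor together with a few common operations on the resulting instance:

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongConstructorExample {
    public static void main(String[] args) {
        // Create a counter with an explicit initial value.
        AtomicLong counter = new AtomicLong(100L);

        System.out.println(counter.get());              // 100
        System.out.println(counter.incrementAndGet());  // 101
        System.out.println(counter.addAndGet(9L));      // 110
        counter.set(0L);                                // reset
        System.out.println(counter.get());              // 0
    }
}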

Usage

From source file: com.thinkbiganalytics.nifi.v2.ingest.GetTableData.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = null;
    if (context.hasIncomingConnection()) {
        flowFile = session.get();

        // If we have no FlowFile, and all incoming connections are self-loops then we can continue on.
        // However, if we have no FlowFile and we have connections coming from other Processors, then
        // we know that we should run only if we have a FlowFile.
        if (flowFile == null && context.hasNonLoopConnection()) {
            return;
        }
    }

    final FlowFile incoming = flowFile;
    final ComponentLog logger = getLog();

    final DBCPService dbcpService = context.getProperty(JDBC_SERVICE).asControllerService(DBCPService.class);
    final MetadataProviderService metadataService = context.getProperty(METADATA_SERVICE)
            .asControllerService(MetadataProviderService.class);
    final String loadStrategy = context.getProperty(LOAD_STRATEGY).getValue();
    final String categoryName = context.getProperty(FEED_CATEGORY).evaluateAttributeExpressions(incoming)
            .getValue();
    final String feedName = context.getProperty(FEED_NAME).evaluateAttributeExpressions(incoming).getValue();
    final String tableName = context.getProperty(TABLE_NAME).evaluateAttributeExpressions(incoming).getValue();
    final String fieldSpecs = context.getProperty(TABLE_SPECS).evaluateAttributeExpressions(incoming)
            .getValue();
    final String dateField = context.getProperty(DATE_FIELD).evaluateAttributeExpressions(incoming).getValue();
    final Integer queryTimeout = context.getProperty(QUERY_TIMEOUT).asTimePeriod(TimeUnit.SECONDS).intValue();
    final Integer overlapTime = context.getProperty(OVERLAP_TIME).evaluateAttributeExpressions(incoming)
            .asTimePeriod(TimeUnit.SECONDS).intValue();
    final Integer backoffTime = context.getProperty(BACKOFF_PERIOD).asTimePeriod(TimeUnit.SECONDS).intValue();
    final String unitSize = context.getProperty(UNIT_SIZE).getValue();
    final String outputType = context.getProperty(OUTPUT_TYPE).getValue();
    String outputDelimiter = context.getProperty(OUTPUT_DELIMITER).evaluateAttributeExpressions(incoming)
            .getValue();
    final String delimiter = StringUtils.isBlank(outputDelimiter) ? "," : outputDelimiter;

    final PropertyValue waterMarkPropName = context.getProperty(HIGH_WATER_MARK_PROP)
            .evaluateAttributeExpressions(incoming);

    final String[] selectFields = parseFields(fieldSpecs);

    final LoadStrategy strategy = LoadStrategy.valueOf(loadStrategy);
    final StopWatch stopWatch = new StopWatch(true);

    try (final Connection conn = dbcpService.getConnection()) {

        FlowFile outgoing = (incoming == null ? session.create() : incoming);
        final AtomicLong nrOfRows = new AtomicLong(0L);
        final LastFieldVisitor visitor = new LastFieldVisitor(dateField, null);
        final FlowFile current = outgoing;

        outgoing = session.write(outgoing, new OutputStreamCallback() {
            @Override
            public void process(final OutputStream out) throws IOException {
                ResultSet rs = null;
                try {
                    GetTableDataSupport support = new GetTableDataSupport(conn, queryTimeout);
                    if (strategy == LoadStrategy.FULL_LOAD) {
                        rs = support.selectFullLoad(tableName, selectFields);
                    } else if (strategy == LoadStrategy.INCREMENTAL) {
                        String waterMarkValue = getIncrementalWaterMarkValue(current, waterMarkPropName);
                        LocalDateTime waterMarkTime = LocalDateTime.parse(waterMarkValue, DATE_TIME_FORMAT);
                        Date lastLoadDate = toDate(waterMarkTime);
                        visitor.setLastModifyDate(lastLoadDate);
                        rs = support.selectIncremental(tableName, selectFields, dateField, overlapTime,
                                lastLoadDate, backoffTime, GetTableDataSupport.UnitSizes.valueOf(unitSize));
                    } else {
                        throw new RuntimeException("Unsupported loadStrategy [" + loadStrategy + "]");
                    }

                    if (GetTableDataSupport.OutputType.DELIMITED
                            .equals(GetTableDataSupport.OutputType.valueOf(outputType))) {
                        nrOfRows.set(JdbcCommon.convertToDelimitedStream(rs, out,
                                (strategy == LoadStrategy.INCREMENTAL ? visitor : null), delimiter));
                    } else if (GetTableDataSupport.OutputType.AVRO
                            .equals(GetTableDataSupport.OutputType.valueOf(outputType))) {
                        avroSchema = JdbcCommon.createSchema(rs);
                        nrOfRows.set(JdbcCommon.convertToAvroStream(rs, out,
                                (strategy == LoadStrategy.INCREMENTAL ? visitor : null), avroSchema));
                    } else {
                        throw new RuntimeException("Unsupported output format type [" + outputType + "]");
                    }
                } catch (final SQLException e) {
                    throw new IOException("SQL execution failure", e);
                } finally {
                    if (rs != null) {
                        try {
                            if (rs.getStatement() != null) {
                                rs.getStatement().close();
                            }
                            rs.close();
                        } catch (SQLException e) {
                            getLog().error("Error closing sql statement and resultset");
                        }
                    }
                }
            }
        });

        // set attribute how many rows were selected
        outgoing = session.putAttribute(outgoing, RESULT_ROW_COUNT, Long.toString(nrOfRows.get()));

        //set output format type and avro schema for feed setup, if available
        outgoing = session.putAttribute(outgoing, "db.table.output.format", outputType);
        String avroSchemaForFeedSetup = (avroSchema != null) ? JdbcCommon.getAvroSchemaForFeedSetup(avroSchema)
                : EMPTY_STRING;
        outgoing = session.putAttribute(outgoing, "db.table.avro.schema", avroSchemaForFeedSetup);

        session.getProvenanceReporter().modifyContent(outgoing, "Retrieved " + nrOfRows.get() + " rows",
                stopWatch.getElapsed(TimeUnit.MILLISECONDS));

        // Terminate flow file if no work
        Long rowcount = nrOfRows.get();
        outgoing = session.putAttribute(outgoing, ComponentAttributes.NUM_SOURCE_RECORDS.key(),
                String.valueOf(rowcount));

        if (nrOfRows.get() == 0L) {
            logger.info("{} contains no data; transferring to 'nodata'", new Object[] { outgoing });
            session.transfer(outgoing, REL_NO_DATA);
        } else {

            logger.info("{} contains {} records; transferring to 'success'",
                    new Object[] { outgoing, nrOfRows.get() });

            if (strategy == LoadStrategy.INCREMENTAL) {
                String newWaterMarkStr = format(visitor.getLastModifyDate());
                outgoing = setIncrementalWaterMarkValue(session, outgoing, waterMarkPropName, newWaterMarkStr);

                logger.info("Recorded load status feed {} date {}", new Object[] { feedName, newWaterMarkStr });
            }
            session.transfer(outgoing, REL_SUCCESS);
        }
    } catch (final Exception e) {
        if (incoming == null) {
            logger.error(
                    "Unable to execute SQL select from table due to {}. No incoming flow file to route to failure",
                    new Object[] { e });
        } else {
            logger.error("Unable to execute SQL select from table due to {}; routing to failure",
                    new Object[] { incoming, e });
            session.transfer(incoming, REL_FAILURE);
        }
    }
}
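
In the example above, nrOfRows is declared final so the anonymous OutputStreamCallback can capture it, yet its value is only known inside the callback; the AtomicLong acts as a thread-safe mutable holder that carries the row count back out. A minimal sketch of the same idiom, using a plain Runnable in place of NiFi's callback (the count of 42 is made up):

import java.util.concurrent.atomic.AtomicLong;

public class CallbackRowCountSketch {
    public static void main(String[] args) {
        // A captured local must be final (or effectively final),
        // but the value inside the AtomicLong can still change.
        final AtomicLong nrOfRows = new AtomicLong(0L);

        Runnable writeCallback = new Runnable() {
            @Override
            public void run() {
                // Stand-in for the row-writing loop; 42 is a made-up count.
                nrOfRows.set(42L);
            }
        };

        writeCallback.run();
        System.out.println("rows written: " + nrOfRows.get()); // 42
    }
}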

From source file: com.facebook.LinkBench.LinkBenchDriver.java

/**
 * Start all runnables at the same time. Then block till all
 * tasks are completed. Returns the elapsed time (in millisec)
 * since the start of the first task to the completion of all tasks.
 */
static long concurrentExec(final List<? extends Runnable> tasks) throws Throwable {
    final CountDownLatch startSignal = new CountDownLatch(tasks.size());
    final CountDownLatch doneSignal = new CountDownLatch(tasks.size());
    final AtomicLong startTime = new AtomicLong(0);
    for (final Runnable task : tasks) {
        new Thread(new Runnable() {
            @Override
            public void run() {
                /*
                 * Run a task.  If an uncaught exception occurs, bail
                 * out of the benchmark immediately, since any results
                 * of the benchmark will no longer be valid anyway
                 */
                try {
                    startSignal.countDown();
                    startSignal.await();
                    long now = System.currentTimeMillis();
                    startTime.compareAndSet(0, now);
                    task.run();
                } catch (Throwable e) {
                    Logger threadLog = Logger.getLogger(ConfigUtil.LINKBENCH_LOGGER);
                    threadLog.error("Unrecoverable exception in worker thread:", e);
                    Runtime.getRuntime().halt(1);
                }
                doneSignal.countDown();
            }
        }).start();
    }
    doneSignal.await(); // wait for all threads to finish
    long endTime = System.currentTimeMillis();
    return endTime - startTime.get();
}
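
The startTime.compareAndSet(0, now) call above records the start time of whichever thread begins first: only the first compare-and-set against the initial value 0 succeeds, so later threads leave the value untouched. A minimal sketch of that one-shot initialization idiom:

import java.util.concurrent.atomic.AtomicLong;

public class FirstStartTimeSketch {
    public static void main(String[] args) throws InterruptedException {
        final AtomicLong startTime = new AtomicLong(0);

        Runnable worker = () -> {
            // Only the first thread to reach this line wins;
            // later calls see a non-zero value and the CAS fails.
            startTime.compareAndSet(0, System.currentTimeMillis());
        };

        Thread t1 = new Thread(worker);
        Thread t2 = new Thread(worker);
        t1.start();
        t2.start();
        t1.join();
        t2.join();

        System.out.println("first start: " + startTime.get());
    }
}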

From source file: org.apache.nifi.processors.standard.QueryDatabaseTable.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSessionFactory sessionFactory)
        throws ProcessException {
    ProcessSession session = sessionFactory.createSession();
    final List<FlowFile> resultSetFlowFiles = new ArrayList<>();

    final ComponentLog logger = getLogger();

    final DBCPService dbcpService = context.getProperty(DBCP_SERVICE).asControllerService(DBCPService.class);
    final DatabaseAdapter dbAdapter = dbAdapters.get(context.getProperty(DB_TYPE).getValue());
    final String tableName = context.getProperty(TABLE_NAME).evaluateAttributeExpressions().getValue();
    final String columnNames = context.getProperty(COLUMN_NAMES).evaluateAttributeExpressions().getValue();
    final String maxValueColumnNames = context.getProperty(MAX_VALUE_COLUMN_NAMES)
            .evaluateAttributeExpressions().getValue();
    final Integer fetchSize = context.getProperty(FETCH_SIZE).evaluateAttributeExpressions().asInteger();
    final Integer maxRowsPerFlowFile = context.getProperty(MAX_ROWS_PER_FLOW_FILE)
            .evaluateAttributeExpressions().asInteger();
    final Integer maxFragments = context.getProperty(MAX_FRAGMENTS).isSet()
            ? context.getProperty(MAX_FRAGMENTS).evaluateAttributeExpressions().asInteger()
            : 0;
    final boolean convertNamesForAvro = context.getProperty(NORMALIZE_NAMES_FOR_AVRO).asBoolean();

    final Map<String, String> maxValueProperties = getDefaultMaxValueProperties(context.getProperties());

    final StateManager stateManager = context.getStateManager();
    final StateMap stateMap;

    try {
        stateMap = stateManager.getState(Scope.CLUSTER);
    } catch (final IOException ioe) {
        getLogger().error("Failed to retrieve observed maximum values from the State Manager. Will not perform "
                + "query until this is accomplished.", ioe);
        context.yield();
        return;
    }
    // Make a mutable copy of the current state property map. This will be updated by the result row callback, and eventually
    // set as the current state map (after the session has been committed)
    final Map<String, String> statePropertyMap = new HashMap<>(stateMap.toMap());

    //If an initial max value for column(s) has been specified using properties, and this column is not in the state manager, sync them to the state property map
    for (final Map.Entry<String, String> maxProp : maxValueProperties.entrySet()) {
        String maxPropKey = maxProp.getKey().toLowerCase();
        String fullyQualifiedMaxPropKey = getStateKey(tableName, maxPropKey);
        if (!statePropertyMap.containsKey(fullyQualifiedMaxPropKey)) {
            String newMaxPropValue;
            // If we can't find the value at the fully-qualified key name, it is possible (under a previous scheme)
            // the value has been stored under a key that is only the column name. Fall back to check the column name,
            // but store the new initial max value under the fully-qualified key.
            if (statePropertyMap.containsKey(maxPropKey)) {
                newMaxPropValue = statePropertyMap.get(maxPropKey);
            } else {
                newMaxPropValue = maxProp.getValue();
            }
            statePropertyMap.put(fullyQualifiedMaxPropKey, newMaxPropValue);

        }
    }

    List<String> maxValueColumnNameList = StringUtils.isEmpty(maxValueColumnNames) ? null
            : Arrays.asList(maxValueColumnNames.split("\\s*,\\s*"));
    final String selectQuery = getQuery(dbAdapter, tableName, columnNames, maxValueColumnNameList,
            statePropertyMap);
    final StopWatch stopWatch = new StopWatch(true);
    final String fragmentIdentifier = UUID.randomUUID().toString();

    try (final Connection con = dbcpService.getConnection(); final Statement st = con.createStatement()) {

        if (fetchSize != null && fetchSize > 0) {
            try {
                st.setFetchSize(fetchSize);
            } catch (SQLException se) {
                // Not all drivers support this, just log the error (at debug level) and move on
                logger.debug("Cannot set fetch size to {} due to {}",
                        new Object[] { fetchSize, se.getLocalizedMessage() }, se);
            }
        }

        String jdbcURL = "DBCPService";
        try {
            DatabaseMetaData databaseMetaData = con.getMetaData();
            if (databaseMetaData != null) {
                jdbcURL = databaseMetaData.getURL();
            }
        } catch (SQLException se) {
            // Ignore and use default JDBC URL. This shouldn't happen unless the driver doesn't implement getMetaData() properly
        }

        final Integer queryTimeout = context.getProperty(QUERY_TIMEOUT).evaluateAttributeExpressions()
                .asTimePeriod(TimeUnit.SECONDS).intValue();
        st.setQueryTimeout(queryTimeout); // timeout in seconds
        try {
            logger.debug("Executing query {}", new Object[] { selectQuery });
            final ResultSet resultSet = st.executeQuery(selectQuery);
            int fragmentIndex = 0;
            while (true) {
                final AtomicLong nrOfRows = new AtomicLong(0L);

                FlowFile fileToProcess = session.create();
                try {
                    fileToProcess = session.write(fileToProcess, out -> {
                        // Max values will be updated in the state property map by the callback
                        final MaxValueResultSetRowCollector maxValCollector = new MaxValueResultSetRowCollector(
                                tableName, statePropertyMap, dbAdapter);
                        try {
                            nrOfRows.set(JdbcCommon.convertToAvroStream(resultSet, out, tableName,
                                    maxValCollector, maxRowsPerFlowFile, convertNamesForAvro));
                        } catch (SQLException | RuntimeException e) {
                            throw new ProcessException(
                                    "Error during database query or conversion of records to Avro.", e);
                        }
                    });
                } catch (ProcessException e) {
                    // Add flowfile to results before rethrowing so it will be removed from session in outer catch
                    resultSetFlowFiles.add(fileToProcess);
                    throw e;
                }

                if (nrOfRows.get() > 0) {
                    // set attribute how many rows were selected
                    fileToProcess = session.putAttribute(fileToProcess, RESULT_ROW_COUNT,
                            String.valueOf(nrOfRows.get()));
                    fileToProcess = session.putAttribute(fileToProcess, RESULT_TABLENAME, tableName);
                    if (maxRowsPerFlowFile > 0) {
                        fileToProcess = session.putAttribute(fileToProcess, "fragment.identifier",
                                fragmentIdentifier);
                        fileToProcess = session.putAttribute(fileToProcess, "fragment.index",
                                String.valueOf(fragmentIndex));
                    }

                    logger.info("{} contains {} Avro records; transferring to 'success'",
                            new Object[] { fileToProcess, nrOfRows.get() });

                    session.getProvenanceReporter().receive(fileToProcess, jdbcURL,
                            stopWatch.getElapsed(TimeUnit.MILLISECONDS));
                    resultSetFlowFiles.add(fileToProcess);
                } else {
                    // If there were no rows returned, don't send the flowfile
                    session.remove(fileToProcess);
                    context.yield();
                    break;
                }

                fragmentIndex++;
                if (maxFragments > 0 && fragmentIndex >= maxFragments) {
                    break;
                }
            }

            for (int i = 0; i < resultSetFlowFiles.size(); i++) {
                // Add maximum values as attributes
                for (Map.Entry<String, String> entry : statePropertyMap.entrySet()) {
                    // Get just the column name from the key
                    String key = entry.getKey();
                    String colName = key
                            .substring(key.lastIndexOf(NAMESPACE_DELIMITER) + NAMESPACE_DELIMITER.length());
                    resultSetFlowFiles.set(i, session.putAttribute(resultSetFlowFiles.get(i),
                            "maxvalue." + colName, entry.getValue()));
                }

                //set count on all FlowFiles
                if (maxRowsPerFlowFile > 0) {
                    resultSetFlowFiles.set(i, session.putAttribute(resultSetFlowFiles.get(i), "fragment.count",
                            Integer.toString(fragmentIndex)));
                }
            }

        } catch (final SQLException e) {
            throw e;
        }

        session.transfer(resultSetFlowFiles, REL_SUCCESS);

    } catch (final ProcessException | SQLException e) {
        logger.error("Unable to execute SQL select query {} due to {}", new Object[] { selectQuery, e });
        if (!resultSetFlowFiles.isEmpty()) {
            session.remove(resultSetFlowFiles);
        }
        context.yield();
    } finally {
        session.commit();
        try {
            // Update the state
            stateManager.setState(statePropertyMap, Scope.CLUSTER);
        } catch (IOException ioe) {
            getLogger().error("{} failed to update State Manager, maximum observed values will not be recorded",
                    new Object[] { this, ioe });
        }
    }
}

From source file: de.javakaffee.web.msm.serializer.javolution.JavolutionTranscoderTest.java

@DataProvider(name = "typesAsSessionAttributesProvider")
protected Object[][] createTypesAsSessionAttributesData() {
    return new Object[][] { { int.class, 42 }, { long.class, 42 }, { Boolean.class, Boolean.TRUE },
            { String.class, "42" }, { StringBuilder.class, new StringBuilder("42") },
            { StringBuffer.class, new StringBuffer("42") }, { Class.class, String.class },
            { Long.class, Long.valueOf(42) }, { Integer.class, Integer.valueOf(42) },
            { Character.class, Character.valueOf('c') }, { Byte.class, Byte.valueOf("b".getBytes()[0]) },
            { Double.class, Double.valueOf(42d) }, { Float.class, Float.valueOf(42f) },
            { Short.class, Short.valueOf((short) 42) }, { BigDecimal.class, new BigDecimal(42) },
            { AtomicInteger.class, new AtomicInteger(42) }, { AtomicLong.class, new AtomicLong(42) },
            { MutableInt.class, new MutableInt(42) }, { Integer[].class, new Integer[] { 42 } },
            { Date.class, new Date(System.currentTimeMillis() - 10000) },
            { Calendar.class, Calendar.getInstance() }, { Currency.class, Currency.getInstance("EUR") },
            { ArrayList.class, new ArrayList<String>(Arrays.asList("foo")) },
            { int[].class, new int[] { 1, 2 } }, { long[].class, new long[] { 1, 2 } },
            { short[].class, new short[] { 1, 2 } }, { float[].class, new float[] { 1, 2 } },
            { double[].class, new double[] { 1, 2 } }, { int[].class, new int[] { 1, 2 } },
            { byte[].class, "42".getBytes() }, { char[].class, "42".toCharArray() },
            { String[].class, new String[] { "23", "42" } },
            { Person[].class, new Person[] { createPerson("foo bar", Gender.MALE, 42) } } };
}
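
One thing worth noting when round-tripping AtomicLong through a serializer, as in the data provider above: AtomicLong does not override equals() or hashCode(), so a deserialized copy is never equal() to the original, and comparisons generally have to go through get(). A quick illustration:

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongEqualitySketch {
    public static void main(String[] args) {
        AtomicLong a = new AtomicLong(42);
        AtomicLong b = new AtomicLong(42);

        System.out.println(a.equals(b));        // false: identity-based equals from Object
        System.out.println(a.get() == b.get()); // true: compare the underlying values
    }
}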

From source file: org.apache.hadoop.hbase.regionserver.TestAtomicOperation.java

/**
 * Test multi-threaded region mutations.
 */
@Test
public void testMultiRowMutationMultiThreads() throws IOException {

    LOG.info("Starting test testMultiRowMutationMultiThreads");
    initHRegion(tableName, name.getMethodName(), fam1);

    // create 10 threads, each will alternate between adding and
    // removing a column
    int numThreads = 10;
    int opsPerThread = 500;
    AtomicOperation[] all = new AtomicOperation[numThreads];

    AtomicLong timeStamps = new AtomicLong(0);
    AtomicInteger failures = new AtomicInteger(0);
    final List<byte[]> rowsToLock = Arrays.asList(row, row2);
    // create all threads
    for (int i = 0; i < numThreads; i++) {
        all[i] = new AtomicOperation(region, opsPerThread, timeStamps, failures) {
            @Override
            public void run() {
                boolean op = true;
                for (int i = 0; i < numOps; i++) {
                    try {
                        // throw in some flushes
                        if (i % 10 == 0) {
                            synchronized (region) {
                                LOG.debug("flushing");
                                region.flushcache();
                                if (i % 100 == 0) {
                                    region.compactStores();
                                }
                            }
                        }
                        long ts = timeStamps.incrementAndGet();
                        List<Mutation> mrm = new ArrayList<Mutation>();
                        if (op) {
                            Put p = new Put(row2, ts);
                            p.add(fam1, qual1, value1);
                            mrm.add(p);
                            Delete d = new Delete(row);
                            d.deleteColumns(fam1, qual1, ts);
                            mrm.add(d);
                        } else {
                            Delete d = new Delete(row2);
                            d.deleteColumns(fam1, qual1, ts);
                            mrm.add(d);
                            Put p = new Put(row, ts);
                            p.add(fam1, qual1, value2);
                            mrm.add(p);
                        }
                        region.mutateRowsWithLocks(mrm, rowsToLock);
                        op ^= true;
                        // check: should always see exactly one column
                        Scan s = new Scan(row);
                        RegionScanner rs = region.getScanner(s);
                        List<Cell> r = new ArrayList<Cell>();
                        while (rs.next(r))
                            ;
                        rs.close();
                        if (r.size() != 1) {
                            LOG.debug(r);
                            failures.incrementAndGet();
                            fail();
                        }
                    } catch (IOException e) {
                        e.printStackTrace();
                        failures.incrementAndGet();
                        fail();
                    }
                }
            }
        };
    }

    // run all threads
    for (int i = 0; i < numThreads; i++) {
        all[i].start();
    }

    // wait for all threads to finish
    for (int i = 0; i < numThreads; i++) {
        try {
            all[i].join();
        } catch (InterruptedException e) {
        }
    }
    assertEquals(0, failures.get());
}

From source file: org.archive.modules.writer.WARCWriterProcessor.java

protected void addStats(Map<String, Map<String, Long>> substats) {
    for (String key : substats.keySet()) {
        // intentionally redundant here -- if statement avoids creating
        // unused empty map every time; putIfAbsent() ensures thread safety
        if (stats.get(key) == null) {
            stats.putIfAbsent(key, new ConcurrentHashMap<String, AtomicLong>());
        }

        for (String subkey : substats.get(key).keySet()) {
            AtomicLong oldValue = stats.get(key).get(subkey);
            if (oldValue == null) {
                oldValue = stats.get(key).putIfAbsent(subkey, new AtomicLong(substats.get(key).get(subkey)));
            }
            if (oldValue != null) {
                oldValue.addAndGet(substats.get(key).get(subkey));
            }
        }
    }
}
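
The putIfAbsent/addAndGet dance above is the pre-Java-8 way to maintain per-key counters in a ConcurrentHashMap; on Java 8+ the same thing can be written more compactly with computeIfAbsent. A sketch of both approaches, assuming a simple ConcurrentHashMap<String, AtomicLong> of counters:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

public class PerKeyCounterSketch {
    private static final ConcurrentMap<String, AtomicLong> COUNTS = new ConcurrentHashMap<>();

    // Pre-Java-8 style, mirroring the example above: only addAndGet when
    // another thread's counter was already (or concurrently) installed.
    static void addPutIfAbsent(String key, long delta) {
        AtomicLong existing = COUNTS.get(key);
        if (existing == null) {
            existing = COUNTS.putIfAbsent(key, new AtomicLong(delta));
        }
        if (existing != null) {
            existing.addAndGet(delta);
        }
    }

    // Java 8+ style: computeIfAbsent always returns the mapped counter.
    static void addComputeIfAbsent(String key, long delta) {
        COUNTS.computeIfAbsent(key, k -> new AtomicLong(0)).addAndGet(delta);
    }

    public static void main(String[] args) {
        addPutIfAbsent("bytes", 10);
        addPutIfAbsent("bytes", 5);
        addComputeIfAbsent("records", 3);
        System.out.println(COUNTS); // {bytes=15, records=3} (order may vary)
    }
}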

From source file: com.linkedin.pinot.tools.perf.QueryRunner.java

/**
 * Use multiple threads to run queries as fast as possible.
 * <p>Use a concurrent linked queue to buffer the queries to be sent. Use the main thread to insert queries into the
 * queue whenever the queue length is low, and start <code>numThreads</code> worker threads to fetch queries from the
 * queue and send them.
 * <p>The main thread is responsible for collecting and logging the statistic information periodically.
 * <p>Queries are picked sequentially from the query file.
 * <p>The query runner stops when all queries in the query file have been executed the configured number of times.
 *
 * @param conf perf benchmark driver config.
 * @param queryFile query file.
 * @param numTimesToRunQueries number of times to run all queries in the query file, 0 means infinite times.
 * @param numThreads number of threads sending queries.
 * @param reportIntervalMs report interval in milliseconds.
 * @param numIntervalsToReportAndClearStatistics number of report intervals to report detailed statistics and clear
 *                                               them, 0 means never.
 * @throws Exception
 */
public static void multiThreadedQueryRunner(PerfBenchmarkDriverConf conf, String queryFile,
        int numTimesToRunQueries, int numThreads, int reportIntervalMs,
        int numIntervalsToReportAndClearStatistics) throws Exception {
    List<String> queries;
    try (FileInputStream input = new FileInputStream(new File(queryFile))) {
        queries = IOUtils.readLines(input);
    }

    PerfBenchmarkDriver driver = new PerfBenchmarkDriver(conf);
    ConcurrentLinkedQueue<String> queryQueue = new ConcurrentLinkedQueue<>();
    AtomicInteger numQueriesExecuted = new AtomicInteger(0);
    AtomicLong totalBrokerTime = new AtomicLong(0L);
    AtomicLong totalClientTime = new AtomicLong(0L);
    List<Statistics> statisticsList = Collections.singletonList(new Statistics(CLIENT_TIME_STATISTICS));

    ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
    for (int i = 0; i < numThreads; i++) {
        executorService.submit(new Worker(driver, queryQueue, numQueriesExecuted, totalBrokerTime,
                totalClientTime, statisticsList));
    }
    executorService.shutdown();

    long startTime = System.currentTimeMillis();
    long reportStartTime = startTime;
    int numReportIntervals = 0;
    int numTimesExecuted = 0;
    while (numTimesToRunQueries == 0 || numTimesExecuted < numTimesToRunQueries) {
        if (executorService.isTerminated()) {
            LOGGER.error("All threads got exception and already dead.");
            return;
        }

        for (String query : queries) {
            queryQueue.add(query);

            // Keep 20 queries inside the query queue.
            while (queryQueue.size() == 20) {
                Thread.sleep(1);

                long currentTime = System.currentTimeMillis();
                if (currentTime - reportStartTime >= reportIntervalMs) {
                    long timePassed = currentTime - startTime;
                    int numQueriesExecutedInt = numQueriesExecuted.get();
                    LOGGER.info(
                            "Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, Average Broker Time: {}ms, "
                                    + "Average Client Time: {}ms.",
                            timePassed, numQueriesExecutedInt,
                            numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
                            totalBrokerTime.get() / (double) numQueriesExecutedInt,
                            totalClientTime.get() / (double) numQueriesExecutedInt);
                    reportStartTime = currentTime;
                    numReportIntervals++;

                    if ((numIntervalsToReportAndClearStatistics != 0)
                            && (numReportIntervals == numIntervalsToReportAndClearStatistics)) {
                        numReportIntervals = 0;
                        startTime = currentTime;
                        reportAndClearStatistics(numQueriesExecuted, totalBrokerTime, totalClientTime,
                                statisticsList);
                    }
                }
            }
        }
        numTimesExecuted++;
    }

    // Wait for all queries getting executed.
    while (queryQueue.size() != 0) {
        Thread.sleep(1);
    }
    executorService.shutdownNow();
    while (!executorService.isTerminated()) {
        Thread.sleep(1);
    }

    long timePassed = System.currentTimeMillis() - startTime;
    int numQueriesExecutedInt = numQueriesExecuted.get();
    LOGGER.info("--------------------------------------------------------------------------------");
    LOGGER.info("FINAL REPORT:");
    LOGGER.info(
            "Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, Average Broker Time: {}ms, "
                    + "Average Client Time: {}ms.",
            timePassed, numQueriesExecutedInt,
            numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
            totalBrokerTime.get() / (double) numQueriesExecutedInt,
            totalClientTime.get() / (double) numQueriesExecutedInt);
    for (Statistics statistics : statisticsList) {
        statistics.report();
    }
}
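
The totalBrokerTime and totalClientTime accumulators above are shared between the worker threads (which add measured times to them) and the main thread (which reads them to log averages). A stripped-down sketch of that pattern, with a made-up fixed query time standing in for real measurements:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;

public class SharedAccumulatorSketch {
    public static void main(String[] args) throws InterruptedException {
        AtomicInteger numQueries = new AtomicInteger(0);
        AtomicLong totalClientTimeMs = new AtomicLong(0L);

        ExecutorService pool = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 100; i++) {
            pool.submit(() -> {
                long elapsed = 5; // stand-in for a measured per-query time
                totalClientTimeMs.addAndGet(elapsed);
                numQueries.incrementAndGet();
            });
        }
        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);

        int n = numQueries.get();
        System.out.printf("queries: %d, avg client time: %.2f ms%n",
                n, totalClientTimeMs.get() / (double) n);
    }
}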

From source file: org.apache.hadoop.hbase.wal.TestWALFactory.java

@Test(timeout = 300000)
public void testAppendClose() throws Exception {
    TableName tableName = TableName.valueOf(currentTest.getMethodName());
    HRegionInfo regioninfo = new HRegionInfo(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
            false);

    final WAL wal = wals.getWAL(regioninfo.getEncodedNameAsBytes());
    final AtomicLong sequenceId = new AtomicLong(1);
    final int total = 20;

    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor(tableName.getName()));

    for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
        kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
        wal.append(htd, regioninfo,
                new WALKey(regioninfo.getEncodedNameAsBytes(), tableName, System.currentTimeMillis()), kvs,
                sequenceId, true, null);
    }
    // Now call sync to send the data to HDFS datanodes
    wal.sync();
    int namenodePort = cluster.getNameNodePort();
    final Path walPath = DefaultWALProvider.getCurrentFileName(wal);

    // Stop the cluster.  (ensure restart since we're sharing MiniDFSCluster)
    try {
        DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
        dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
        TEST_UTIL.shutdownMiniDFSCluster();
        try {
            // wal.writer.close() will throw an exception,
            // but still call this since it closes the LogSyncer thread first
            wal.shutdown();
        } catch (IOException e) {
            LOG.info(e);
        }
        fs.close(); // closing FS last so DFSOutputStream can't call close
        LOG.info("STOPPED first instance of the cluster");
    } finally {
        // Restart the cluster
        while (cluster.isClusterUp()) {
            LOG.error("Waiting for cluster to go down");
            Thread.sleep(1000);
        }
        assertFalse(cluster.isClusterUp());
        cluster = null;
        for (int i = 0; i < 100; i++) {
            try {
                cluster = TEST_UTIL.startMiniDFSClusterForTestWAL(namenodePort);
                break;
            } catch (BindException e) {
                LOG.info("Sleeping.  BindException bringing up new cluster");
                Threads.sleep(1000);
            }
        }
        cluster.waitActive();
        fs = cluster.getFileSystem();
        LOG.info("STARTED second instance.");
    }

    // set the lease period to be 1 second so that the
    // namenode triggers lease recovery upon append request
    Method setLeasePeriod = cluster.getClass().getDeclaredMethod("setLeasePeriod",
            new Class[] { Long.TYPE, Long.TYPE });
    setLeasePeriod.setAccessible(true);
    setLeasePeriod.invoke(cluster, 1000L, 1000L);
    try {
        Thread.sleep(1000);
    } catch (InterruptedException e) {
        LOG.info(e);
    }

    // Now try recovering the log, like the HMaster would do
    final FileSystem recoveredFs = fs;
    final Configuration rlConf = conf;

    class RecoverLogThread extends Thread {
        public Exception exception = null;

        public void run() {
            try {
                FSUtils.getInstance(fs, rlConf).recoverFileLease(recoveredFs, walPath, rlConf, null);
            } catch (IOException e) {
                exception = e;
            }
        }
    }

    RecoverLogThread t = new RecoverLogThread();
    t.start();
    // Timeout after 60 sec. Without correct patches, would be an infinite loop
    t.join(60 * 1000);
    if (t.isAlive()) {
        t.interrupt();
        throw new Exception("Timed out waiting for WAL.recoverLog()");
    }

    if (t.exception != null)
        throw t.exception;

    // Make sure you can read all the content
    WAL.Reader reader = wals.createReader(fs, walPath);
    int count = 0;
    WAL.Entry entry = new WAL.Entry();
    while (reader.next(entry) != null) {
        count++;
        assertTrue("Should be one KeyValue per WALEdit", entry.getEdit().getCells().size() == 1);
    }
    assertEquals(total, count);
    reader.close();

    // Reset the lease period
    setLeasePeriod.invoke(cluster, new Object[] { new Long(60000), new Long(3600000) });
}

From source file: com.amazon.kinesis.streaming.agent.Agent.java

private Map<String, Object> globalMetrics(Map<String, Map<String, Object>> metrics) {
    Map<String, Object> globalMetrics = new HashMap<>();
    long bytesBehind = 0;
    int filesBehind = 0;
    long bytesConsumed = 0;
    long recordsParsed = 0;
    long recordsSent = 0;
    AtomicLong zero = new AtomicLong(0);
    for (Map.Entry<String, Map<String, Object>> tailerMetrics : metrics.entrySet()) {
        bytesBehind += Metrics.getMetric(tailerMetrics.getValue(), Metrics.FILE_TAILER_BYTES_BEHIND_METRIC, 0L);
        filesBehind += Metrics.getMetric(tailerMetrics.getValue(), Metrics.FILE_TAILER_FILES_BEHIND_METRIC, 0);
        bytesConsumed += Metrics
                .getMetric(tailerMetrics.getValue(), Metrics.PARSER_TOTAL_BYTES_CONSUMED_METRIC, zero).get();
        recordsParsed += Metrics
                .getMetric(tailerMetrics.getValue(), Metrics.PARSER_TOTAL_RECORDS_PARSED_METRIC, zero).get();
        recordsSent += Metrics
                .getMetric(tailerMetrics.getValue(), Metrics.SENDER_TOTAL_RECORDS_SENT_METRIC, zero).get();
    }
    globalMetrics.put("TotalBytesBehind", bytesBehind);
    globalMetrics.put("TotalFilesBehind", filesBehind);
    globalMetrics.put("TotalBytesConsumed", bytesConsumed);
    globalMetrics.put("TotalRecordsParsed", recordsParsed);
    globalMetrics.put("TotalRecordsSent", recordsSent);
    globalMetrics.put("MaxSendingThreads", agentContext.maxSendingThreads());
    globalMetrics.put("UpTimeMillis", uptime.elapsed(TimeUnit.MILLISECONDS));
    globalMetrics.put("ActiveSendingThreads", sendingExecutor.getActiveCount());
    globalMetrics.put("SendingThreadsAlive", sendingExecutor.getPoolSize());
    globalMetrics.put("MaxSendingThreadsAlive", sendingExecutor.getLargestPoolSize());
    return globalMetrics;
}

From source file: org.lendingclub.mercator.docker.SwarmScanner.java

protected long saveTask(JsonNode it) {

    ObjectNode n = flattenTask(it);

    n.put("swarmClusterId", getSwarmClusterId().get());

    String taskId = n.get("taskId").asText();
    String serviceId = n.path("serviceId").asText();
    String swarmNodeId = n.path("swarmNodeId").asText();
    checkNotEmpty(taskId, "taskId");
    checkNotEmpty(serviceId, "serviceId");
    checkNotEmpty(swarmNodeId, "swarmNodeId");

    AtomicLong timestamp = new AtomicLong(Long.MAX_VALUE);
    dockerScanner.getNeoRxClient()
            .execCypher(
                    "merge (x:DockerTask {taskId:{taskId}}) set x+={props}, x.updateTs=timestamp() return x",
                    "taskId", taskId, "props", n)
            .forEach(tt -> {

                timestamp.set(tt.path("updateTs").asLong(Long.MAX_VALUE));

                removeDockerLabels("DockerTask", "taskId", taskId, n, it);
            });

    {
        // it might be worth it to select these relationships and only
        // update if they are missing
        dockerScanner.getNeoRxClient().execCypher(
                "match (s:DockerService {serviceId:{serviceId}}),(t:DockerTask{taskId:{taskId}}) merge (s)-[x:CONTAINS]->(t) set x.updateTs=timestamp() return t,s",
                "serviceId", serviceId, "taskId", taskId);

        dockerScanner.getNeoRxClient().execCypher(
                "match (h:DockerHost {swarmNodeId:{swarmNodeId}}), (t:DockerTask {swarmNodeId:{swarmNodeId}}) merge (h)-[x:RUNS]->(t) set x.updateTs=timestamp()",
                "swarmNodeId", swarmNodeId);
    }
    return timestamp.get();
}