Example usage for java.util.concurrent.atomic AtomicLong AtomicLong

Introduction

On this page you can find example usage for the java.util.concurrent.atomic.AtomicLong constructor AtomicLong(long initialValue).

Prototype

public AtomicLong(long initialValue) 

Document

Creates a new AtomicLong with the given initial value.
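
Before the real-world examples below, here is a minimal, self-contained sketch of the constructor itself (class and variable names are illustrative):

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongConstructorDemo {
    public static void main(String[] args) {
        // Start the counter at 100 instead of the default 0.
        AtomicLong counter = new AtomicLong(100L);

        long afterIncrement = counter.incrementAndGet(); // 101
        long afterAdd = counter.addAndGet(9L);           // 110

        System.out.println(afterIncrement + " " + afterAdd + " " + counter.get());
    }
}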

Usage

From source file:org.archive.crawler.reporting.StatisticsTracker.java

/**
 * Increment a counter for a key in a given ConcurrentMap by an arbitrary amount.
 * Used for various aggregate data. The increment amount can be negative.
 *
 * @param map
 *            The ConcurrentMap
 * @param key
 *            The key for the counter to be incremented; if it does not exist
 *            it will be added (set equal to <code>increment</code>).
 *            If null, the counter "unknown" is incremented.
 * @param increment
 *            The amount by which to increment the counter for <code>key</code>.
 */
protected static void incrementMapCount(ConcurrentMap<String, AtomicLong> map, String key, long increment) {
    if (key == null) {
        key = "unknown";
    }
    AtomicLong lw = map.get(key);
    if (lw == null) {
        lw = new AtomicLong(0);
        AtomicLong prevVal = map.putIfAbsent(key, lw);
        if (prevVal != null) {
            lw = prevVal;
        }
    }
    lw.addAndGet(increment);
}
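
The get/putIfAbsent sequence above is the standard lock-free way to lazily create a per-key counter: if another thread wins the race, the loser adopts the winner's AtomicLong. A standalone sketch of the same pattern (map contents and key names are illustrative, not from the crawler):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

public class MapCounterDemo {
    static void increment(ConcurrentMap<String, AtomicLong> map, String key, long amount) {
        AtomicLong counter = map.get(key);
        if (counter == null) {
            AtomicLong fresh = new AtomicLong(0);
            AtomicLong existing = map.putIfAbsent(key, fresh);
            counter = (existing != null) ? existing : fresh;
        }
        counter.addAndGet(amount);
    }

    public static void main(String[] args) {
        ConcurrentMap<String, AtomicLong> hits = new ConcurrentHashMap<>();
        increment(hits, "text/html", 1);
        increment(hits, "text/html", 2);
        increment(hits, "image/png", 5);
        System.out.println(hits); // e.g. {image/png=5, text/html=3}
    }
}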

From source file:org.apache.nifi.processors.hive.SelectHive3QL.java

private void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile fileToProcess = (context.hasIncomingConnection() ? session.get() : null);
    FlowFile flowfile = null;

    // If we have no FlowFile, and all incoming connections are self-loops then we can continue on.
    // However, if we have no FlowFile and we have connections coming from other Processors, then
    // we know that we should run only if we have a FlowFile.
    if (context.hasIncomingConnection()) {
        if (fileToProcess == null && context.hasNonLoopConnection()) {
            return;
        }
    }

    final ComponentLog logger = getLogger();
    final Hive3DBCPService dbcpService = context.getProperty(HIVE_DBCP_SERVICE)
            .asControllerService(Hive3DBCPService.class);
    final Charset charset = Charset.forName(context.getProperty(CHARSET).getValue());

    List<String> preQueries = getQueries(
            context.getProperty(HIVEQL_PRE_QUERY).evaluateAttributeExpressions(fileToProcess).getValue());
    List<String> postQueries = getQueries(
            context.getProperty(HIVEQL_POST_QUERY).evaluateAttributeExpressions(fileToProcess).getValue());

    final boolean flowbased = !(context.getProperty(HIVEQL_SELECT_QUERY).isSet());

    // Source the SQL
    String hqlStatement;

    if (context.getProperty(HIVEQL_SELECT_QUERY).isSet()) {
        hqlStatement = context.getProperty(HIVEQL_SELECT_QUERY).evaluateAttributeExpressions(fileToProcess)
                .getValue();
    } else {
        // If the query is not set, then an incoming flow file is required, and expected to contain a valid SQL select query.
        // If there is no incoming connection, onTrigger will not be called as the processor will fail when scheduled.
        final StringBuilder queryContents = new StringBuilder();
        session.read(fileToProcess, in -> queryContents.append(IOUtils.toString(in, charset)));
        hqlStatement = queryContents.toString();
    }

    final Integer fetchSize = context.getProperty(FETCH_SIZE).evaluateAttributeExpressions(fileToProcess)
            .asInteger();
    final Integer maxRowsPerFlowFile = context.getProperty(MAX_ROWS_PER_FLOW_FILE)
            .evaluateAttributeExpressions(fileToProcess).asInteger();
    final Integer maxFragments = context.getProperty(MAX_FRAGMENTS).isSet()
            ? context.getProperty(MAX_FRAGMENTS).evaluateAttributeExpressions(fileToProcess).asInteger()
            : 0;
    final String outputFormat = context.getProperty(HIVEQL_OUTPUT_FORMAT).getValue();
    final boolean convertNamesForAvro = context.getProperty(NORMALIZE_NAMES_FOR_AVRO).asBoolean();
    final StopWatch stopWatch = new StopWatch(true);
    final boolean header = context.getProperty(HIVEQL_CSV_HEADER).asBoolean();
    final String altHeader = context.getProperty(HIVEQL_CSV_ALT_HEADER)
            .evaluateAttributeExpressions(fileToProcess).getValue();
    final String delimiter = context.getProperty(HIVEQL_CSV_DELIMITER)
            .evaluateAttributeExpressions(fileToProcess).getValue();
    final boolean quote = context.getProperty(HIVEQL_CSV_QUOTE).asBoolean();
    final boolean escape = context.getProperty(HIVEQL_CSV_ESCAPE).asBoolean();
    final String fragmentIdentifier = UUID.randomUUID().toString();

    try (final Connection con = dbcpService
            .getConnection(fileToProcess == null ? Collections.emptyMap() : fileToProcess.getAttributes());
            final Statement st = (flowbased ? con.prepareStatement(hqlStatement) : con.createStatement())) {
        Pair<String, SQLException> failure = executeConfigStatements(con, preQueries);
        if (failure != null) {
            // If a pre-query fails, assign it to hqlStatement so the existing error handling reports it
            hqlStatement = failure.getLeft();
            flowfile = (fileToProcess == null) ? session.create() : fileToProcess;
            fileToProcess = null;
            throw failure.getRight();
        }
        st.setQueryTimeout(
                context.getProperty(QUERY_TIMEOUT).evaluateAttributeExpressions(fileToProcess).asInteger());

        if (fetchSize != null && fetchSize > 0) {
            try {
                st.setFetchSize(fetchSize);
            } catch (SQLException se) {
                // Not all drivers support this, just log the error (at debug level) and move on
                logger.debug("Cannot set fetch size to {} due to {}",
                        new Object[] { fetchSize, se.getLocalizedMessage() }, se);
            }
        }

        final List<FlowFile> resultSetFlowFiles = new ArrayList<>();
        try {
            logger.debug("Executing query {}", new Object[] { hqlStatement });
            if (flowbased) {
                // Hive JDBC Doesn't Support this yet:
                // ParameterMetaData pmd = ((PreparedStatement)st).getParameterMetaData();
                // int paramCount = pmd.getParameterCount();

                // Alternate way to determine number of params in SQL.
                int paramCount = StringUtils.countMatches(hqlStatement, "?");

                if (paramCount > 0) {
                    setParameters(1, (PreparedStatement) st, paramCount, fileToProcess.getAttributes());
                }
            }

            final ResultSet resultSet;

            try {
                resultSet = (flowbased ? ((PreparedStatement) st).executeQuery()
                        : st.executeQuery(hqlStatement));
            } catch (SQLException se) {
                // If an error occurs during the query, a flowfile is expected to be routed to failure, so ensure one here
                flowfile = (fileToProcess == null) ? session.create() : fileToProcess;
                fileToProcess = null;
                throw se;
            }

            int fragmentIndex = 0;
            String baseFilename = (fileToProcess != null)
                    ? fileToProcess.getAttribute(CoreAttributes.FILENAME.key())
                    : null;
            while (true) {
                final AtomicLong nrOfRows = new AtomicLong(0L);
                flowfile = (fileToProcess == null) ? session.create() : session.create(fileToProcess);
                if (baseFilename == null) {
                    baseFilename = flowfile.getAttribute(CoreAttributes.FILENAME.key());
                }
                try {
                    flowfile = session.write(flowfile, out -> {
                        try {
                            if (AVRO.equals(outputFormat)) {
                                nrOfRows.set(HiveJdbcCommon.convertToAvroStream(resultSet, out,
                                        maxRowsPerFlowFile, convertNamesForAvro));
                            } else if (CSV.equals(outputFormat)) {
                                CsvOutputOptions options = new CsvOutputOptions(header, altHeader, delimiter,
                                        quote, escape, maxRowsPerFlowFile);
                                nrOfRows.set(HiveJdbcCommon.convertToCsvStream(resultSet, out, options));
                            } else {
                                nrOfRows.set(0L);
                                throw new ProcessException("Unsupported output format: " + outputFormat);
                            }
                        } catch (final SQLException | RuntimeException e) {
                            throw new ProcessException("Error during database query or conversion of records.",
                                    e);
                        }
                    });
                } catch (ProcessException e) {
                    // Add flowfile to results before rethrowing so it will be removed from session in outer catch
                    resultSetFlowFiles.add(flowfile);
                    throw e;
                }

                if (nrOfRows.get() > 0 || resultSetFlowFiles.isEmpty()) {
                    final Map<String, String> attributes = new HashMap<>();
                    // Set attribute for how many rows were selected
                    attributes.put(RESULT_ROW_COUNT, String.valueOf(nrOfRows.get()));

                    try {
                        // Set input/output table names by parsing the query
                        attributes.putAll(toQueryTableAttributes(findTableNames(hqlStatement)));
                    } catch (Exception e) {
                        // If failed to parse the query, just log a warning message, but continue.
                        getLogger().warn("Failed to parse query: {} due to {}",
                                new Object[] { hqlStatement, e }, e);
                    }

                    // Set MIME type on output document and add extension to filename
                    if (AVRO.equals(outputFormat)) {
                        attributes.put(CoreAttributes.MIME_TYPE.key(), MIME_TYPE_AVRO_BINARY);
                        attributes.put(CoreAttributes.FILENAME.key(),
                                baseFilename + "." + fragmentIndex + ".avro");
                    } else if (CSV.equals(outputFormat)) {
                        attributes.put(CoreAttributes.MIME_TYPE.key(), CSV_MIME_TYPE);
                        attributes.put(CoreAttributes.FILENAME.key(),
                                baseFilename + "." + fragmentIndex + ".csv");
                    }

                    if (maxRowsPerFlowFile > 0) {
                        attributes.put("fragment.identifier", fragmentIdentifier);
                        attributes.put("fragment.index", String.valueOf(fragmentIndex));
                    }

                    flowfile = session.putAllAttributes(flowfile, attributes);

                    logger.info("{} contains {} " + outputFormat + " records; transferring to 'success'",
                            new Object[] { flowfile, nrOfRows.get() });

                    if (context.hasIncomingConnection()) {
                        // If the flow file came from an incoming connection, issue a Fetch provenance event
                        session.getProvenanceReporter().fetch(flowfile, dbcpService.getConnectionURL(),
                                "Retrieved " + nrOfRows.get() + " rows",
                                stopWatch.getElapsed(TimeUnit.MILLISECONDS));
                    } else {
                        // If we created a flow file from rows received from Hive, issue a Receive provenance event
                        session.getProvenanceReporter().receive(flowfile, dbcpService.getConnectionURL(),
                                stopWatch.getElapsed(TimeUnit.MILLISECONDS));
                    }
                    resultSetFlowFiles.add(flowfile);
                } else {
                    // If there were no rows returned (and the first flow file has been sent), we're done processing, so remove the flowfile and carry on
                    session.remove(flowfile);
                    if (resultSetFlowFiles != null && resultSetFlowFiles.size() > 0) {
                        flowfile = resultSetFlowFiles.get(resultSetFlowFiles.size() - 1);
                    }
                    break;
                }

                fragmentIndex++;
                if (maxFragments > 0 && fragmentIndex >= maxFragments) {
                    break;
                }
            }

            for (int i = 0; i < resultSetFlowFiles.size(); i++) {
                // Set count on all FlowFiles
                if (maxRowsPerFlowFile > 0) {
                    resultSetFlowFiles.set(i, session.putAttribute(resultSetFlowFiles.get(i), "fragment.count",
                            Integer.toString(fragmentIndex)));
                }
            }

        } catch (final SQLException e) {
            throw e;
        }

        failure = executeConfigStatements(con, postQueries);
        if (failure != null) {
            hqlStatement = failure.getLeft();
            if (resultSetFlowFiles != null) {
                resultSetFlowFiles.forEach(ff -> session.remove(ff));
            }
            flowfile = (fileToProcess == null) ? session.create() : fileToProcess;
            fileToProcess = null;
            throw failure.getRight();
        }

        session.transfer(resultSetFlowFiles, REL_SUCCESS);
        if (fileToProcess != null) {
            session.remove(fileToProcess);
        }

    } catch (final ProcessException | SQLException e) {
        logger.error("Issue processing SQL {} due to {}.", new Object[] { hqlStatement, e });
        if (flowfile == null) {
            // This can happen if any exceptions occur while setting up the connection, statement, etc.
            logger.error("Unable to execute HiveQL select query {} due to {}. No FlowFile to route to failure",
                    new Object[] { hqlStatement, e });
            context.yield();
        } else {
            if (context.hasIncomingConnection()) {
                logger.error("Unable to execute HiveQL select query {} for {} due to {}; routing to failure",
                        new Object[] { hqlStatement, flowfile, e });
                flowfile = session.penalize(flowfile);
            } else {
                logger.error("Unable to execute HiveQL select query {} due to {}; routing to failure",
                        new Object[] { hqlStatement, e });
                context.yield();
            }
            session.transfer(flowfile, REL_FAILURE);
        }
    }
}
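
Note how nrOfRows is used above: a lambda passed to session.write may only capture effectively final locals, so the running row count lives inside an AtomicLong created with new AtomicLong(0L) and is read back after the callback returns. A minimal sketch of that holder pattern outside NiFi (the callback interface here is illustrative):

import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Consumer;

public class LambdaCounterDemo {
    public static void main(String[] args) {
        final AtomicLong nrOfRows = new AtomicLong(0L);

        // The lambda cannot reassign a captured local, but it can mutate the AtomicLong.
        Consumer<long[]> writeRows = rows -> nrOfRows.set(rows.length);

        writeRows.accept(new long[] { 1L, 2L, 3L });
        System.out.println("rows written: " + nrOfRows.get()); // rows written: 3
    }
}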

From source file:com.linkedin.pinot.tools.perf.QueryRunner.java

/**
 * Use multiple threads to run queries at a target QPS.
 * <p>Use a concurrent linked queue to buffer the queries to be sent. Use the main thread to insert queries into the
 * queue at the target QPS, and start <code>numThreads</code> worker threads to fetch queries from the queue and send
 * them.
 * <p>The main thread is responsible for collecting and logging the statistic information periodically.
 * <p>Queries are picked sequentially from the query file.
 * <p>The query runner will stop when all queries in the query file have been executed the configured number of times.
 *
 * @param conf perf benchmark driver config.
 * @param queryFile query file.
 * @param numTimesToRunQueries number of times to run all queries in the query file, 0 means infinite times.
 * @param numThreads number of threads sending queries.
 * @param startQPS start QPS (target QPS).
 * @param reportIntervalMs report interval in milliseconds.
 * @param numIntervalsToReportAndClearStatistics number of report intervals to report detailed statistics and clear
 *                                               them, 0 means never.
 * @throws Exception
 */
public static void targetQPSQueryRunner(PerfBenchmarkDriverConf conf, String queryFile,
        int numTimesToRunQueries, int numThreads, double startQPS, int reportIntervalMs,
        int numIntervalsToReportAndClearStatistics) throws Exception {
    List<String> queries;
    try (FileInputStream input = new FileInputStream(new File(queryFile))) {
        queries = IOUtils.readLines(input);
    }

    PerfBenchmarkDriver driver = new PerfBenchmarkDriver(conf);
    ConcurrentLinkedQueue<String> queryQueue = new ConcurrentLinkedQueue<>();
    AtomicInteger numQueriesExecuted = new AtomicInteger(0);
    AtomicLong totalBrokerTime = new AtomicLong(0L);
    AtomicLong totalClientTime = new AtomicLong(0L);
    List<Statistics> statisticsList = Collections.singletonList(new Statistics(CLIENT_TIME_STATISTICS));

    ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
    for (int i = 0; i < numThreads; i++) {
        executorService.submit(new Worker(driver, queryQueue, numQueriesExecuted, totalBrokerTime,
                totalClientTime, statisticsList));
    }
    executorService.shutdown();

    int queryIntervalMs = (int) (MILLIS_PER_SECOND / startQPS);
    long startTime = System.currentTimeMillis();
    long reportStartTime = startTime;
    int numReportIntervals = 0;
    int numTimesExecuted = 0;
    while (numTimesToRunQueries == 0 || numTimesExecuted < numTimesToRunQueries) {
        if (executorService.isTerminated()) {
            LOGGER.error("All threads got exception and already dead.");
            return;
        }

        for (String query : queries) {
            queryQueue.add(query);
            Thread.sleep(queryIntervalMs);

            long currentTime = System.currentTimeMillis();
            if (currentTime - reportStartTime >= reportIntervalMs) {
                long timePassed = currentTime - startTime;
                int numQueriesExecutedInt = numQueriesExecuted.get();
                LOGGER.info(
                        "Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
                                + "Average Broker Time: {}ms, Average Client Time: {}ms, Queries Queued: {}.",
                        startQPS, timePassed, numQueriesExecutedInt,
                        numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
                        totalBrokerTime.get() / (double) numQueriesExecutedInt,
                        totalClientTime.get() / (double) numQueriesExecutedInt, queryQueue.size());
                reportStartTime = currentTime;
                numReportIntervals++;

                if ((numIntervalsToReportAndClearStatistics != 0)
                        && (numReportIntervals == numIntervalsToReportAndClearStatistics)) {
                    numReportIntervals = 0;
                    startTime = currentTime;
                    reportAndClearStatistics(numQueriesExecuted, totalBrokerTime, totalClientTime,
                            statisticsList);
                }
            }
        }
        numTimesExecuted++;
    }

    // Wait for all queries to finish executing.
    while (queryQueue.size() != 0) {
        Thread.sleep(1);
    }
    executorService.shutdownNow();
    while (!executorService.isTerminated()) {
        Thread.sleep(1);
    }

    long timePassed = System.currentTimeMillis() - startTime;
    int numQueriesExecutedInt = numQueriesExecuted.get();
    LOGGER.info("--------------------------------------------------------------------------------");
    LOGGER.info("FINAL REPORT:");
    LOGGER.info(
            "Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
                    + "Average Broker Time: {}ms, Average Client Time: {}ms.",
            startQPS, timePassed, numQueriesExecutedInt,
            numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
            totalBrokerTime.get() / (double) numQueriesExecutedInt,
            totalClientTime.get() / (double) numQueriesExecutedInt);
    for (Statistics statistics : statisticsList) {
        statistics.report();
    }
}
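
The Worker class referenced above is not shown on this page. A plausible shape for such a worker, aggregating per-query timings into the shared AtomicLong totals, might look like the sketch below (class, method, and field names are assumptions, not the Pinot implementation):

import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;

// Hypothetical worker loop: drains queries from the shared queue and adds
// timings into counters that the reporting thread reads concurrently.
class SimpleWorker implements Runnable {
    private final ConcurrentLinkedQueue<String> queryQueue;
    private final AtomicInteger numQueriesExecuted;
    private final AtomicLong totalClientTime;

    SimpleWorker(ConcurrentLinkedQueue<String> queryQueue, AtomicInteger numQueriesExecuted,
            AtomicLong totalClientTime) {
        this.queryQueue = queryQueue;
        this.numQueriesExecuted = numQueriesExecuted;
        this.totalClientTime = totalClientTime;
    }

    @Override
    public void run() {
        String query;
        while ((query = queryQueue.poll()) != null) {
            long start = System.currentTimeMillis();
            // runQuery(query); // placeholder for the actual query execution
            totalClientTime.addAndGet(System.currentTimeMillis() - start);
            numQueriesExecuted.incrementAndGet();
        }
    }
}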

From source file:org.apache.druid.java.util.common.CompressionUtilsTest.java

@Test
public void testGoodGzipWithException() throws Exception {
    final AtomicLong flushes = new AtomicLong(0);
    final File tmpDir = temporaryFolder.newFolder("testGoodGzipByteSource");
    final File gzFile = new File(tmpDir, testFile.getName() + ".gz");
    Assert.assertFalse(gzFile.exists());
    CompressionUtils.gzip(Files.asByteSource(testFile), new ByteSink() {
        @Override
        public OutputStream openStream() throws IOException {
            return new FilterOutputStream(new FileOutputStream(gzFile)) {
                @Override
                public void flush() throws IOException {
                    if (flushes.getAndIncrement() > 0) {
                        super.flush();
                    } else {
                        throw new IOException("Haven't flushed enough");
                    }
                }
            };
        }
    }, Predicates.alwaysTrue());
    Assert.assertTrue(gzFile.exists());
    try (final InputStream inputStream = CompressionUtils.decompress(new FileInputStream(gzFile), "file.gz")) {
        assertGoodDataStream(inputStream);
    }
    if (!testFile.delete()) {
        throw new IOE("Unable to delete file [%s]", testFile.getAbsolutePath());
    }
    Assert.assertFalse(testFile.exists());
    CompressionUtils.gunzip(Files.asByteSource(gzFile), testFile);
    Assert.assertTrue(testFile.exists());
    try (final InputStream inputStream = new FileInputStream(testFile)) {
        assertGoodDataStream(inputStream);
    }
    Assert.assertEquals(4, flushes.get()); // 2 for suppressed closes, 2 for manual calls to shake out errors
}
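
The flushes counter works because getAndIncrement returns the value before the increment, so only the very first flush sees 0 and throws, while every later flush succeeds. A small standalone sketch of that fail-once pattern (names are illustrative):

import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;

public class FailOnceDemo {
    // Throws only on the first call; succeeds on every call after that.
    static void flushOnce(AtomicLong flushes) throws IOException {
        if (flushes.getAndIncrement() > 0) {
            return; // the real code would call super.flush() here
        }
        throw new IOException("Haven't flushed enough");
    }

    public static void main(String[] args) {
        final AtomicLong flushes = new AtomicLong(0);
        for (int i = 0; i < 3; i++) {
            try {
                flushOnce(flushes);
                System.out.println("flush " + i + " succeeded");
            } catch (IOException e) {
                System.out.println("flush " + i + " failed: " + e.getMessage());
            }
        }
    }
}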

From source file:io.druid.java.util.common.CompressionUtilsTest.java

@Test(expected = IOException.class)
public void testStreamErrorGzip() throws Exception {
    final File tmpDir = temporaryFolder.newFolder("testGoodGzipByteSource");
    final File gzFile = new File(tmpDir, testFile.getName() + ".gz");
    Assert.assertFalse(gzFile.exists());
    final AtomicLong flushes = new AtomicLong(0L);
    CompressionUtils.gzip(new FileInputStream(testFile), new FileOutputStream(gzFile) {
        @Override
        public void flush() throws IOException {
            if (flushes.getAndIncrement() > 0) {
                super.flush();
            } else {
                throw new IOException("Test exception");
            }
        }
    });
}

From source file:org.apache.hadoop.hbase.wal.TestWALFactory.java

/**
 * @throws IOException
 */
@Test
public void testAppend() throws IOException {
    final int COL_COUNT = 10;
    final HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("tablename"))
            .addFamily(new HColumnDescriptor("column"));
    final byte[] row = Bytes.toBytes("row");
    WAL.Reader reader = null;
    final AtomicLong sequenceId = new AtomicLong(1);
    try {
        // Write columns named 1, 2, 3, etc. and then values of single byte
        // 1, 2, 3...
        long timestamp = System.currentTimeMillis();
        WALEdit cols = new WALEdit();
        for (int i = 0; i < COL_COUNT; i++) {
            cols.add(new KeyValue(row, Bytes.toBytes("column"), Bytes.toBytes(Integer.toString(i)), timestamp,
                    new byte[] { (byte) (i + '0') }));
        }
        HRegionInfo hri = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW,
                HConstants.EMPTY_END_ROW);
        final WAL log = wals.getWAL(hri.getEncodedNameAsBytes());
        final long txid = log.append(htd, hri,
                new WALKey(hri.getEncodedNameAsBytes(), htd.getTableName(), System.currentTimeMillis()), cols,
                sequenceId, true, null);
        log.sync(txid);
        log.startCacheFlush(hri.getEncodedNameAsBytes(), htd.getFamiliesKeys());
        log.completeCacheFlush(hri.getEncodedNameAsBytes());
        log.shutdown();
        Path filename = DefaultWALProvider.getCurrentFileName(log);
        // Now open a reader on the log and assert append worked.
        reader = wals.createReader(fs, filename);
        WAL.Entry entry = reader.next();
        assertEquals(COL_COUNT, entry.getEdit().size());
        int idx = 0;
        for (Cell val : entry.getEdit().getCells()) {
            assertTrue(Bytes.equals(hri.getEncodedNameAsBytes(), entry.getKey().getEncodedRegionName()));
            assertTrue(htd.getTableName().equals(entry.getKey().getTablename()));
            assertTrue(Bytes.equals(row, 0, row.length, val.getRowArray(), val.getRowOffset(),
                    val.getRowLength()));
            assertEquals((byte) (idx + '0'), CellUtil.cloneValue(val)[0]);
            System.out.println(entry.getKey() + " " + val);
            idx++;
        }
    } finally {
        if (reader != null) {
            reader.close();
        }
    }
}

From source file:io.druid.java.util.common.CompressionUtilsTest.java

@Test(expected = IOException.class)
public void testStreamErrorGunzip() throws Exception {
    final File tmpDir = temporaryFolder.newFolder("testGoodGzipByteSource");
    final File gzFile = new File(tmpDir, testFile.getName() + ".gz");
    Assert.assertFalse(gzFile.exists());
    CompressionUtils.gzip(Files.asByteSource(testFile), Files.asByteSink(gzFile),
            Predicates.<Throwable>alwaysTrue());
    Assert.assertTrue(gzFile.exists());
    try (final InputStream inputStream = CompressionUtils.decompress(new FileInputStream(gzFile), "file.gz")) {
        assertGoodDataStream(inputStream);
    }
    if (testFile.exists() && !testFile.delete()) {
        throw new RE("Unable to delete file [%s]", testFile.getAbsolutePath());
    }
    Assert.assertFalse(testFile.exists());
    final AtomicLong flushes = new AtomicLong(0L);
    CompressionUtils.gunzip(new FileInputStream(gzFile), new FilterOutputStream(new FileOutputStream(testFile) {
        @Override
        public void flush() throws IOException {
            if (flushes.getAndIncrement() > 0) {
                super.flush();
            } else {
                throw new IOException("Test exception");
            }
        }
    }));
}

From source file:org.killbill.queue.TestDBBackedQueue.java

@Test(groups = "slow")
public void testWithOneReaderOneWriter() throws InterruptedException {

    final PersistentBusConfig config = createConfig(7, 100, false, true);
    queue = new DBBackedQueue<BusEventModelDao>(clock, sqlDao, config, "oneReaderOneWriter-bus_event",
            metricRegistry, databaseTransactionNotificationApi);
    queue.initialize();

    Thread writer = new Thread(new WriterRunnable(0, 1000, queue));
    final AtomicLong consumed = new AtomicLong(0);
    final ReaderRunnable readerRunnable = new ReaderRunnable(0, consumed, 1000, queue);
    final Thread reader = new Thread(readerRunnable);

    writer.start();
    while (queue.isQueueOpenForWrite()) {
        try {
            Thread.sleep(10);
        } catch (InterruptedException e) {
        }
    }
    reader.start();

    try {
        writer.join();
        reader.join();
    } catch (InterruptedException e) {
        Assert.fail("InterruptedException ", e);
    }

    final List<BusEventModelDao> ready = sqlDao.getReadyEntries(clock.getUTCNow().toDate(), 1000, OWNER,
            "bus_events");
    assertEquals(ready.size(), 0);

    log.info("Got inflightProcessed = " + queue.getTotalInflightFetched() + "/1000, inflightWritten = "
            + queue.getTotalInflightInsert() + "/1000");
    assertEquals(queue.getTotalInsert(), 1000L);

    // Verify ordering
    long expected = 999;
    for (Long cur : readerRunnable.getSearch1()) {
        assertEquals(cur.longValue(), expected);
        expected--;
    }
}
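
ReaderRunnable and WriterRunnable are not shown on this page. A plausible reader shape, advancing the shared consumed counter so the test can tell when all events have been seen, could look like the sketch below (a sketch under assumptions, not the Kill Bill test helper):

import java.util.Queue;
import java.util.concurrent.atomic.AtomicLong;

// Hypothetical reader: polls events and bumps a counter shared with the test thread.
class SimpleReader implements Runnable {
    private final Queue<Long> events;
    private final AtomicLong consumed;
    private final long expected;

    SimpleReader(Queue<Long> events, AtomicLong consumed, long expected) {
        this.events = events;
        this.consumed = consumed;
        this.expected = expected;
    }

    @Override
    public void run() {
        while (consumed.get() < expected) {
            if (events.poll() != null) {
                consumed.incrementAndGet();
            }
        }
    }
}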

From source file:net.dv8tion.jda.core.utils.PermissionUtil.java

/**
 * Retrieves the explicit permissions of the specified {@link net.dv8tion.jda.core.entities.Member Member}
 * in its hosting {@link net.dv8tion.jda.core.entities.Guild Guild} and specific {@link net.dv8tion.jda.core.entities.Channel Channel}.
 * <br>This method does not factor in the owner.
 * <b>Allowed permissions override denied permissions of {@link net.dv8tion.jda.core.entities.PermissionOverride PermissionOverrides}!</b>
 *
 * <p>All permissions returned are explicitly granted to this Member via its {@link net.dv8tion.jda.core.entities.Role Roles}.
 * <br>Permissions like {@link net.dv8tion.jda.core.Permission#ADMINISTRATOR Permission.ADMINISTRATOR} do not
 * grant other permissions in this value.
 * <p>This factors in all {@link net.dv8tion.jda.core.entities.PermissionOverride PermissionOverrides} that affect this member
 * and only grants the ones that are explicitly given.
 *
 * @param  channel
 *         The target channel of which to check {@link net.dv8tion.jda.core.entities.PermissionOverride PermissionOverrides}
 * @param  member
 *         The non-null {@link net.dv8tion.jda.core.entities.Member Member} for which to get explicit permissions
 *
 * @throws IllegalArgumentException
 *         If any of the arguments is {@code null}
 *         or the specified entities are not from the same {@link net.dv8tion.jda.core.entities.Guild Guild}
 *
 * @return Primitive (unsigned) long value with the explicit permissions of the specified member in the specified channel
 *
 * @since  3.1
 */
public static long getExplicitPermission(Channel channel, Member member) {
    Checks.notNull(channel, "Channel");
    Checks.notNull(member, "Member");

    final Guild guild = member.getGuild();
    checkGuild(channel.getGuild(), guild, "Member");

    long permission = getExplicitPermission(member);

    AtomicLong allow = new AtomicLong(0);
    AtomicLong deny = new AtomicLong(0);

    // populates allow/deny
    getExplicitOverrides(channel, member, allow, deny);

    return apply(permission, allow.get(), deny.get());
}
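
Here the two zero-initialized AtomicLongs act as out-parameters: getExplicitOverrides fills them in, and the caller then combines the results. A standalone sketch of that out-parameter pattern, with an illustrative way of applying the masks (the method and bit layout are assumptions, not JDA's implementation):

import java.util.concurrent.atomic.AtomicLong;

public class OutParameterDemo {
    // The callee writes its two results into the holders it was given.
    static void collectOverrides(AtomicLong allow, AtomicLong deny) {
        allow.set(0b0001L);
        deny.set(0b0100L);
    }

    public static void main(String[] args) {
        AtomicLong allow = new AtomicLong(0);
        AtomicLong deny = new AtomicLong(0);

        collectOverrides(allow, deny);

        long basePermission = 0b0110L;
        // One common way to apply overrides: clear denied bits, then add allowed bits.
        long effective = (basePermission & ~deny.get()) | allow.get();
        System.out.println(Long.toBinaryString(effective)); // 11
    }
}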

From source file:jduagui.Controller.java

public static long getSize(String startPath, Map<String, Long> dirs, Map<String, Long> files)
        throws IOException {
    final AtomicLong size = new AtomicLong(0);
    final AtomicLong subdirs = new AtomicLong(0);
    final AtomicLong fs = new AtomicLong(0);
    final File f = new File(startPath);
    Path path = Paths.get(startPath);

    Files.walkFileTree(path, new SimpleFileVisitor<Path>() {
        @Override
        public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) {
            subdirs.incrementAndGet();
            return FileVisitResult.CONTINUE;
        }

        @Override
        public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
            fs.incrementAndGet();
            size.addAndGet(attrs.size());
            return FileVisitResult.CONTINUE;
        }

        @Override
        public FileVisitResult visitFileFailed(Path file, IOException exc) throws IOException {
            fs.incrementAndGet();
            return FileVisitResult.CONTINUE;
        }
    });
    // preVisitDirectory also counts the start directory itself, so exclude it
    // from the subdirectory count (without going below zero).
    if (subdirs.decrementAndGet() == -1)
        subdirs.incrementAndGet();

    if (f.isDirectory()) {
        dirs.put(startPath, subdirs.get());
        files.put(startPath, fs.get());
    }
    return size.get();
}
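
A short usage example for the method above (the Controller class comes from this example; the path and map names are illustrative):

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import jduagui.Controller;

public class GetSizeDemo {
    public static void main(String[] args) throws IOException {
        Map<String, Long> dirCounts = new HashMap<>();
        Map<String, Long> fileCounts = new HashMap<>();

        // Point this at any directory you want to measure.
        long totalBytes = Controller.getSize("/tmp", dirCounts, fileCounts);

        System.out.println("total bytes:    " + totalBytes);
        System.out.println("subdirectories: " + dirCounts.get("/tmp"));
        System.out.println("files:          " + fileCounts.get("/tmp"));
    }
}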