Example usage for java.util.concurrent.atomic AtomicLong get

Introduction

This page lists example usages of java.util.concurrent.atomic.AtomicLong.get(), collected from open-source projects.

Prototype

public final long get() 

Document

Returns the current value, with memory effects as specified by VarHandle#getVolatile.
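
A minimal, self-contained sketch of the method in isolation (not taken from the projects below; the class and variable names are mine): a value published by one thread is read back with get(), which has volatile read semantics.

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongGetDemo {
    public static void main(String[] args) throws InterruptedException {
        final AtomicLong value = new AtomicLong(0);

        // set() writes the value with volatile semantics in another thread.
        final Thread writer = new Thread(() -> value.set(42L));
        writer.start();
        writer.join(); // join() also establishes a happens-before edge

        // get() returns the current value with volatile read semantics,
        // so the writer thread's update is visible here.
        System.out.println(value.get()); // prints 42
    }
}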

Usage

From source file:com.spectralogic.ds3client.helpers.FileSystemHelper_Test.java
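The helper below totals file sizes into an AtomicLong with getAndAdd(), then reads the total with get() inside the object-completed callback to verify the storage-space calculation.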

private void putObjectThenRunVerification(final FileSystemHelper fileSystemHelper,
        final ResultVerifier resultVerifier) throws IOException, URISyntaxException {
    try {
        final String DIR_NAME = "largeFiles/";
        final String[] FILE_NAMES = new String[] { "lesmis-copies.txt" };

        final Path dirPath = ResourceUtils.loadFileResource(DIR_NAME);

        final AtomicLong totalBookSizes = new AtomicLong(0);

        final List<String> bookTitles = new ArrayList<>();
        final List<Ds3Object> objects = new ArrayList<>();
        for (final String book : FILE_NAMES) {
            final Path objPath = ResourceUtils.loadFileResource(DIR_NAME + book);
            final long bookSize = Files.size(objPath);
            totalBookSizes.getAndAdd(bookSize);
            final Ds3Object obj = new Ds3Object(book, bookSize);

            bookTitles.add(book);
            objects.add(obj);
        }

        final int maxNumBlockAllocationRetries = 1;
        final int maxNumObjectTransferAttempts = 1;
        final int retryDelay = -1;
        final Ds3ClientHelpers ds3ClientHelpers = new Ds3ClientHelpersImpl(client, maxNumBlockAllocationRetries,
                maxNumObjectTransferAttempts, retryDelay, new SameThreadEventRunner(), fileSystemHelper);

        final AtomicInteger numTimesCallbackCalled = new AtomicInteger(0);

        final Ds3ClientHelpers.Job writeJob = ds3ClientHelpers.startWriteJob(BUCKET_NAME, objects);
        writeJob.attachObjectCompletedListener(new ObjectCompletedListener() {
            @Override
            public void objectCompleted(final String name) {
                numTimesCallbackCalled.getAndIncrement();

                final ObjectStorageSpaceVerificationResult result = ds3ClientHelpers
                        .objectsFromBucketWillFitInDirectory(BUCKET_NAME, Arrays.asList(FILE_NAMES),
                                Paths.get("."));

                resultVerifier.verifyResult(result, totalBookSizes.get());
            }
        });

        writeJob.transfer(new FileObjectPutter(dirPath));

        assertEquals(1, numTimesCallbackCalled.get());
    } finally {
        deleteAllContents(client, BUCKET_NAME);
    }
}

From source file:org.apache.usergrid.persistence.index.impl.EntityIndexTest.java
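Here get() is checked under a lock before set() so that failTime records only the first worker failure, and the final assertion reads get() to confirm it is still zero.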

@Test
public void testIndexThreads() throws IOException {

    long now = System.currentTimeMillis();
    final int threads = 20;
    final int size = 30;

    final String entityType = "thing";

    final CountDownLatch latch = new CountDownLatch(threads);
    final AtomicLong failTime = new AtomicLong(0);
    InputStream is = this.getClass().getResourceAsStream("/sample-large.json");
    ObjectMapper mapper = new ObjectMapper();
    final List<Object> sampleJson = mapper.readValue(is, new TypeReference<List<Object>>() {
    });
    for (int i = 0; i < threads; i++) {

        final IndexEdge indexEdge = new IndexEdgeImpl(appId, "things", SearchEdge.NodeType.SOURCE, i);

        Thread thread = new Thread(() -> {
            try {

                EntityIndexBatch batch = entityIndex.createBatch();
                insertJsonBlob(sampleJson, batch, entityType, indexEdge, size, 0);
                indexProducer.put(batch.build()).subscribe();
            } catch (Exception e) {
                synchronized (failTime) {
                    if (failTime.get() == 0) {
                        failTime.set(System.currentTimeMillis());
                    }
                }
                System.out.println(e.toString());
                fail("threw exception");
            } finally {
                latch.countDown();
            }
        });
        thread.start();
    }
    try {
        latch.await();
    } catch (InterruptedException ie) {
        throw new RuntimeException(ie);
    }
    assertTrue("system must have failed at " + (failTime.get() - now), failTime.get() == 0);
}
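
A side note on the synchronized block in the catch clause above: AtomicLong also offers compareAndSet, so the first-failure timestamp could be recorded without a lock. A hypothetical lock-free equivalent:

    // Only the first failing thread's CAS succeeds; later failures leave the timestamp alone.
    failTime.compareAndSet(0, System.currentTimeMillis());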

From source file:io.druid.server.namespace.cache.NamespaceExtractionCacheManagerExecutorsTest.java
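The scheduled runnable bumps runCount with incrementAndGet(), and the test asserts via get() that it executed more than five times.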

@Test(timeout = 50_000)
public void testRunCount() throws InterruptedException, ExecutionException {
    final Lifecycle lifecycle = new Lifecycle();
    final NamespaceExtractionCacheManager onHeap;
    final AtomicLong runCount = new AtomicLong(0);
    final CountDownLatch latch = new CountDownLatch(1);
    try {
        onHeap = new OnHeapNamespaceExtractionCacheManager(lifecycle,
                new ConcurrentHashMap<String, Function<String, String>>(), new NoopServiceEmitter(),
                ImmutableMap.<Class<? extends ExtractionNamespace>, ExtractionNamespaceFunctionFactory<?>>of(
                        URIExtractionNamespace.class,
                        new URIExtractionNamespaceFunctionFactory(
                                ImmutableMap.<String, SearchableVersionedDataFinder>of("file",
                                        new LocalFileTimestampVersionFinder()))));

        final URIExtractionNamespace namespace = new URIExtractionNamespace("ns", tmpFile.toURI(),
                new URIExtractionNamespace.ObjectMapperFlatDataParser(
                        URIExtractionNamespaceTest.registerTypes(new ObjectMapper())),
                new Period(1L), null);
        final String cacheId = UUID.randomUUID().toString();
        ListenableFuture<?> future = onHeap.schedule(namespace, factory, new Runnable() {
            @Override
            public void run() {
                manager.getPostRunnable(namespace, factory, cacheId).run();
                latch.countDown();
                runCount.incrementAndGet();
            }
        }, cacheId);
        latch.await();
        Thread.sleep(20);
    } finally {
        lifecycle.stop();
    }
    onHeap.waitForServiceToEnd(1_000, TimeUnit.MILLISECONDS);
    Assert.assertTrue(runCount.get() > 5);
}

From source file:io.druid.server.namespace.cache.NamespaceExtractionCacheManagerExecutorsTest.java
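get() snapshots the run counter at several points, showing that the scheduled task keeps running until the namespace is deleted and stops afterwards.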

public void testDelete(final String ns) throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(5);
    final CountDownLatch latchMore = new CountDownLatch(10);

    final AtomicLong runs = new AtomicLong(0);
    long prior = 0;
    final URIExtractionNamespace namespace = new URIExtractionNamespace(ns, tmpFile.toURI(),
            new URIExtractionNamespace.ObjectMapperFlatDataParser(
                    URIExtractionNamespaceTest.registerTypes(new ObjectMapper())),
            new Period(1L), null);
    final String cacheId = UUID.randomUUID().toString();
    final CountDownLatch latchBeforeMore = new CountDownLatch(1);
    ListenableFuture<?> future = manager.schedule(namespace, factory, new Runnable() {
        @Override
        public void run() {
            try {
                if (!Thread.interrupted()) {
                    manager.getPostRunnable(namespace, factory, cacheId).run();
                } else {
                    Thread.currentThread().interrupt();
                }
                if (!Thread.interrupted()) {
                    runs.incrementAndGet();
                } else {
                    Thread.currentThread().interrupt();
                }
            } finally {
                latch.countDown();
                try {
                    if (latch.getCount() == 0) {
                        latchBeforeMore.await();
                    }
                } catch (InterruptedException e) {
                    log.debug("Interrupted");
                    Thread.currentThread().interrupt();
                } finally {
                    latchMore.countDown();
                }
            }
        }
    }, cacheId);
    latch.await();
    prior = runs.get();
    latchBeforeMore.countDown();
    Assert.assertFalse(future.isCancelled());
    Assert.assertFalse(future.isDone());
    Assert.assertTrue(fnCache.containsKey(ns));
    latchMore.await();
    Assert.assertTrue(runs.get() > prior);

    Assert.assertTrue(manager.implData.containsKey(ns));

    manager.delete("ns");
    Assert.assertFalse(manager.implData.containsKey(ns));
    Assert.assertFalse(fnCache.containsKey(ns));
    Assert.assertTrue(future.isCancelled());
    Assert.assertTrue(future.isDone());
    prior = runs.get();
    Thread.sleep(20);
    Assert.assertEquals(prior, runs.get());
}

From source file:org.apache.nifi.processors.standard.AbstractExecuteSQL.java
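nrOfRows is written inside the session.write() callback and read back with get() for attributes, logging, and the loop condition; the AtomicLong serves as a mutable holder where a captured local variable would have to be effectively final.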

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile fileToProcess = null;
    if (context.hasIncomingConnection()) {
        fileToProcess = session.get();

        // If we have no FlowFile, and all incoming connections are self-loops then we can continue on.
        // However, if we have no FlowFile and we have connections coming from other Processors, then
        // we know that we should run only if we have a FlowFile.
        if (fileToProcess == null && context.hasNonLoopConnection()) {
            return;
        }
    }

    final List<FlowFile> resultSetFlowFiles = new ArrayList<>();

    final ComponentLog logger = getLogger();
    final Integer queryTimeout = context.getProperty(QUERY_TIMEOUT).asTimePeriod(TimeUnit.SECONDS).intValue();
    final Integer maxRowsPerFlowFile = context.getProperty(MAX_ROWS_PER_FLOW_FILE)
            .evaluateAttributeExpressions().asInteger();
    final Integer outputBatchSizeField = context.getProperty(OUTPUT_BATCH_SIZE).evaluateAttributeExpressions()
            .asInteger();
    final int outputBatchSize = outputBatchSizeField == null ? 0 : outputBatchSizeField;

    SqlWriter sqlWriter = configureSqlWriter(session, context, fileToProcess);

    final String selectQuery;
    if (context.getProperty(SQL_SELECT_QUERY).isSet()) {
        selectQuery = context.getProperty(SQL_SELECT_QUERY).evaluateAttributeExpressions(fileToProcess)
                .getValue();
    } else {
        // If the query is not set, then an incoming flow file is required, and expected to contain a valid SQL select query.
        // If there is no incoming connection, onTrigger will not be called as the processor will fail when scheduled.
        final StringBuilder queryContents = new StringBuilder();
        session.read(fileToProcess, in -> queryContents.append(IOUtils.toString(in, Charset.defaultCharset())));
        selectQuery = queryContents.toString();
    }

    int resultCount = 0;
    try (final Connection con = dbcpService
            .getConnection(fileToProcess == null ? Collections.emptyMap() : fileToProcess.getAttributes());
            final PreparedStatement st = con.prepareStatement(selectQuery)) {
        st.setQueryTimeout(queryTimeout); // timeout in seconds

        if (fileToProcess != null) {
            JdbcCommon.setParameters(st, fileToProcess.getAttributes());
        }
        logger.debug("Executing query {}", new Object[] { selectQuery });

        int fragmentIndex = 0;
        final String fragmentId = UUID.randomUUID().toString();

        final StopWatch executionTime = new StopWatch(true);

        boolean hasResults = st.execute();

        long executionTimeElapsed = executionTime.getElapsed(TimeUnit.MILLISECONDS);

        boolean hasUpdateCount = st.getUpdateCount() != -1;

        while (hasResults || hasUpdateCount) {
            //getMoreResults() and execute() return false to indicate that the result of the statement is just a number and not a ResultSet
            if (hasResults) {
                final AtomicLong nrOfRows = new AtomicLong(0L);

                try {
                    final ResultSet resultSet = st.getResultSet();
                    do {
                        final StopWatch fetchTime = new StopWatch(true);

                        FlowFile resultSetFF;
                        if (fileToProcess == null) {
                            resultSetFF = session.create();
                        } else {
                            resultSetFF = session.create(fileToProcess);
                            resultSetFF = session.putAllAttributes(resultSetFF, fileToProcess.getAttributes());
                        }

                        try {
                            resultSetFF = session.write(resultSetFF, out -> {
                                try {
                                    nrOfRows.set(sqlWriter.writeResultSet(resultSet, out, getLogger(), null));
                                } catch (Exception e) {
                                    throw (e instanceof ProcessException) ? (ProcessException) e
                                            : new ProcessException(e);
                                }
                            });

                            long fetchTimeElapsed = fetchTime.getElapsed(TimeUnit.MILLISECONDS);

                            // set attributes
                            final Map<String, String> attributesToAdd = new HashMap<>();
                            attributesToAdd.put(RESULT_ROW_COUNT, String.valueOf(nrOfRows.get()));
                            attributesToAdd.put(RESULT_QUERY_DURATION,
                                    String.valueOf(executionTimeElapsed + fetchTimeElapsed));
                            attributesToAdd.put(RESULT_QUERY_EXECUTION_TIME,
                                    String.valueOf(executionTimeElapsed));
                            attributesToAdd.put(RESULT_QUERY_FETCH_TIME, String.valueOf(fetchTimeElapsed));
                            attributesToAdd.put(RESULTSET_INDEX, String.valueOf(resultCount));
                            attributesToAdd.putAll(sqlWriter.getAttributesToAdd());
                            resultSetFF = session.putAllAttributes(resultSetFF, attributesToAdd);
                            sqlWriter.updateCounters(session);

                            // if fragmented ResultSet, determine if we should keep this fragment; set fragment attributes
                            if (maxRowsPerFlowFile > 0) {
                                // if row count is zero and this is not the first fragment, drop it instead of committing it.
                                if (nrOfRows.get() == 0 && fragmentIndex > 0) {
                                    session.remove(resultSetFF);
                                    break;
                                }

                                resultSetFF = session.putAttribute(resultSetFF, FRAGMENT_ID, fragmentId);
                                resultSetFF = session.putAttribute(resultSetFF, FRAGMENT_INDEX,
                                        String.valueOf(fragmentIndex));
                            }

                            logger.info("{} contains {} records; transferring to 'success'",
                                    new Object[] { resultSetFF, nrOfRows.get() });
                            // Report a FETCH event if there was an incoming flow file, or a RECEIVE event otherwise
                            if (context.hasIncomingConnection()) {
                                session.getProvenanceReporter().fetch(resultSetFF,
                                        "Retrieved " + nrOfRows.get() + " rows",
                                        executionTimeElapsed + fetchTimeElapsed);
                            } else {
                                session.getProvenanceReporter().receive(resultSetFF,
                                        "Retrieved " + nrOfRows.get() + " rows",
                                        executionTimeElapsed + fetchTimeElapsed);
                            }
                            resultSetFlowFiles.add(resultSetFF);

                            // If we've reached the batch size, send out the flow files
                            if (outputBatchSize > 0 && resultSetFlowFiles.size() >= outputBatchSize) {
                                session.transfer(resultSetFlowFiles, REL_SUCCESS);
                                session.commit();
                                resultSetFlowFiles.clear();
                            }

                            fragmentIndex++;
                        } catch (Exception e) {
                            // Remove the result set flow file and propagate the exception
                            session.remove(resultSetFF);
                            if (e instanceof ProcessException) {
                                throw (ProcessException) e;
                            } else {
                                throw new ProcessException(e);
                            }
                        }
                    } while (maxRowsPerFlowFile > 0 && nrOfRows.get() == maxRowsPerFlowFile);

                    // If we are splitting results but not outputting batches, set count on all FlowFiles
                    if (outputBatchSize == 0 && maxRowsPerFlowFile > 0) {
                        for (int i = 0; i < resultSetFlowFiles.size(); i++) {
                            resultSetFlowFiles.set(i, session.putAttribute(resultSetFlowFiles.get(i),
                                    FRAGMENT_COUNT, Integer.toString(fragmentIndex)));
                        }
                    }
                } catch (final SQLException e) {
                    throw new ProcessException(e);
                }

                resultCount++;
            }

            // are there any more result sets?
            try {
                hasResults = st.getMoreResults(Statement.CLOSE_CURRENT_RESULT);
                hasUpdateCount = st.getUpdateCount() != -1;
            } catch (SQLException ex) {
                hasResults = false;
                hasUpdateCount = false;
            }
        }

        // Transfer any remaining files to SUCCESS
        session.transfer(resultSetFlowFiles, REL_SUCCESS);
        resultSetFlowFiles.clear();

        //If we had at least one result then it's OK to drop the original file, but if we had no results then
        //  pass the original flow file down the line to trigger downstream processors
        if (fileToProcess != null) {
            if (resultCount > 0) {
                session.remove(fileToProcess);
            } else {
                fileToProcess = session.write(fileToProcess,
                        out -> sqlWriter.writeEmptyResultSet(out, getLogger()));
                fileToProcess = session.putAttribute(fileToProcess, RESULT_ROW_COUNT, "0");
                fileToProcess = session.putAttribute(fileToProcess, CoreAttributes.MIME_TYPE.key(),
                        sqlWriter.getMimeType());
                session.transfer(fileToProcess, REL_SUCCESS);
            }
        } else if (resultCount == 0) {
            //If we had no inbound FlowFile, no exceptions, and the SQL generated no result sets (Insert/Update/Delete statements only)
            // Then generate an empty Output FlowFile
            FlowFile resultSetFF = session.create();

            resultSetFF = session.write(resultSetFF, out -> sqlWriter.writeEmptyResultSet(out, getLogger()));
            resultSetFF = session.putAttribute(resultSetFF, RESULT_ROW_COUNT, "0");
            resultSetFF = session.putAttribute(resultSetFF, CoreAttributes.MIME_TYPE.key(),
                    sqlWriter.getMimeType());
            session.transfer(resultSetFF, REL_SUCCESS);
        }
    } catch (final ProcessException | SQLException e) {
        if (fileToProcess == null) {
            // This can happen if any exceptions occur while setting up the connection, statement, etc.
            logger.error("Unable to execute SQL select query {} due to {}. No FlowFile to route to failure",
                    new Object[] { selectQuery, e });
            context.yield();
        } else {
            if (context.hasIncomingConnection()) {
                logger.error("Unable to execute SQL select query {} for {} due to {}; routing to failure",
                        new Object[] { selectQuery, fileToProcess, e });
                fileToProcess = session.penalize(fileToProcess);
            } else {
                logger.error("Unable to execute SQL select query {} due to {}; routing to failure",
                        new Object[] { selectQuery, e });
                context.yield();
            }
            session.transfer(fileToProcess, REL_FAILURE);
        }
    }
}

From source file:io.druid.java.util.common.CompressionUtilsTest.java
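flushes counts flush() calls with getAndIncrement(), deliberately failing the first one, and the final get() asserts that flush() was invoked exactly four times.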

@Test
public void testGoodGzipWithException() throws Exception {
    final AtomicLong flushes = new AtomicLong(0);
    final File tmpDir = temporaryFolder.newFolder("testGoodGzipByteSource");
    final File gzFile = new File(tmpDir, testFile.getName() + ".gz");
    Assert.assertFalse(gzFile.exists());
    CompressionUtils.gzip(Files.asByteSource(testFile), new ByteSink() {
        @Override
        public OutputStream openStream() throws IOException {
            return new FilterOutputStream(new FileOutputStream(gzFile)) {
                @Override
                public void flush() throws IOException {
                    if (flushes.getAndIncrement() > 0) {
                        super.flush();
                    } else {
                        throw new IOException("Haven't flushed enough");
                    }
                }
            };
        }
    }, Predicates.<Throwable>alwaysTrue());
    Assert.assertTrue(gzFile.exists());
    try (final InputStream inputStream = CompressionUtils.decompress(new FileInputStream(gzFile), "file.gz")) {
        assertGoodDataStream(inputStream);
    }
    if (!testFile.delete()) {
        throw new IOE("Unable to delete file [%s]", testFile.getAbsolutePath());
    }
    Assert.assertFalse(testFile.exists());
    CompressionUtils.gunzip(Files.asByteSource(gzFile), testFile);
    Assert.assertTrue(testFile.exists());
    try (final InputStream inputStream = new FileInputStream(testFile)) {
        assertGoodDataStream(inputStream);
    }
    Assert.assertEquals(4, flushes.get()); // 2 for suppressed closes, 2 for manual calls to shake out errors
}

From source file:org.voltdb.TableHelper.java
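Asynchronous procedure callbacks accumulate into deleteCount with addAndGet(), and get() reads the final tally once all outstanding calls have returned.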

/**
 * Delete every nth row (starting at pkey 1) in a VoltDB table that has a bigint
 * pkey; with n = 2 this removes the odd-valued pkeys.
 * Works best when pkey values are contiguous and start around 0.
 *
 * Exists mostly to force compaction on tables loaded with fillTableWithBigintPkey.
 * Though if you have an even number of sites, this won't work. It'll need to be
 * updated to delete some other pattern that's a bit more generic. Right now it
 * works great for my one-site testing.
 *
 */
public static long deleteEveryNRows(VoltTable table, Client client, int n) throws Exception {
    // find the primary key, assume first col if not found
    int pkeyColIndex = getBigintPrimaryKeyIndexIfExists(table);
    if (pkeyColIndex == -1) {
        pkeyColIndex = 0;
        assert (table.getColumnType(0).isInteger());
    }
    String pkeyColName = table.getColumnName(pkeyColIndex);

    VoltTable result = client
            .callProcedure("@AdHoc", String.format("select %s from %s order by %s desc limit 1;", pkeyColName,
                    TableHelper.getTableName(table), pkeyColName))
            .getResults()[0];
    long maxId = result.getRowCount() > 0 ? result.asScalarLong() : 0;
    System.out.printf("Deleting odd rows with pkey ids in the range 0-%d\n", maxId);

    // track outstanding responses so no more than 1000 calls are in flight at a time
    final AtomicInteger outstanding = new AtomicInteger(0);
    final AtomicLong deleteCount = new AtomicLong(0);

    ProcedureCallback callback = new ProcedureCallback() {
        @Override
        public void clientCallback(ClientResponse clientResponse) throws Exception {
            outstanding.decrementAndGet();
            if (clientResponse.getStatus() != ClientResponse.SUCCESS) {
                System.out.println("Error in deleter callback:");
                System.out.println(((ClientResponseImpl) clientResponse).toJSONString());
                assert (false);
            }
            VoltTable result = clientResponse.getResults()[0];
            long modified = result.asScalarLong();
            assert (modified <= 1);
            deleteCount.addAndGet(modified);
        }
    };

    // send asynchronous deletes across the pkey range, logging progress every 100k invocations
    long deleted = 0;
    final String deleteProcName = table.m_extraMetadata.name.toUpperCase() + ".delete";
    for (int i = 1; i <= maxId; i += n) {
        client.callProcedure(callback, deleteProcName, i);
        outstanding.incrementAndGet();
        deleted++;
        if ((deleted % 100000) == 0) {
            System.out.printf("Sent %d total delete invocations (%.1f%% of range).\n", deleted,
                    (i * 100.0) / maxId);
        }
        // block while 1000 txns are outstanding
        while (outstanding.get() >= 1000) {
            Thread.yield();
        }
    }
    // block until all calls have returned
    while (outstanding.get() > 0) {
        Thread.yield();
    }
    System.out.printf("Deleted %d odd rows\n", deleteCount.get());

    return deleteCount.get();
}

From source file:com.facebook.LinkBench.LinkBenchDriverInj.java
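compareAndSet(0, now) records the first task's start time exactly once, and get() reads it at the end to compute the elapsed time.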

/**
 * Start all runnables at the same time. Then block till all
 * tasks are completed. Returns the elapsed time (in millisec)
 * since the start of the first task to the completion of all tasks.
 */
static long concurrentExec(final List<? extends Runnable> tasks, boolean runReq, Random rng) throws Throwable {
    final CountDownLatch startSignal = new CountDownLatch(tasks.size());
    final CountDownLatch doneSignal = new CountDownLatch(tasks.size());
    final AtomicLong startTime = new AtomicLong(0);
    for (final Runnable task : tasks) {
        new Thread(new Runnable() {
            @Override
            public void run() {
                /*
                 * Run a task.  If an uncaught exception occurs, bail
                 * out of the benchmark immediately, since any results
                 * of the benchmark will no longer be valid anyway
                 */
                try {
                    startSignal.countDown();
                    startSignal.await();
                    long now = System.currentTimeMillis();
                    startTime.compareAndSet(0, now);
                    task.run();
                } catch (Throwable e) {
                    Logger threadLog = Logger.getLogger(ConfigUtil.LINKBENCH_LOGGER);
                    threadLog.error("Unrecoverable exception in worker thread:", e);
                    Runtime.getRuntime().halt(1);
                }
                doneSignal.countDown();
            }
        }).start();
    }

    if (runReq) {
        /* Drive the request injection rate; the worker threads started above consume timestamps from genQueue */
        long reqTime_ns = System.nanoTime();
        double requestrate_ns = ((double) requestrate) / 1e9;
        long numRequests = ConfigUtil.getLong(props, Config.NUM_REQUESTS);
        System.out.println("Processing Requests:" + genQueue);

        try {
            long runStartTime = System.currentTimeMillis();
            long curTime = runStartTime;
            for (int i = 0; i < numRequests; i++) {

                reqTime_ns = Timer.waitExpInterval(rng, reqTime_ns, requestrate_ns);
                //       System.out.println("Request time: "+System.currentTimeMillis());
                genQueue.put(System.nanoTime());
                curTime = System.currentTimeMillis();
                if (curTime > runStartTime + maxTime * 1000) {
                    System.out.println("Time limit elapsed");
                    break;
                }
            }

            // Send stop signal to all requesters
            for (int i = 0; i < nrequesters; i++) {
                genQueue.put((long) 0);
            }

        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    doneSignal.await(); // wait for all threads to finish
    long endTime = System.currentTimeMillis();
    return endTime - startTime.get();
}

From source file:org.apache.druid.java.util.common.CompressionUtilsTest.java
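The same flush-counting test as the io.druid example above, under the org.apache.druid package.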

@Test
public void testGoodGzipWithException() throws Exception {
    final AtomicLong flushes = new AtomicLong(0);
    final File tmpDir = temporaryFolder.newFolder("testGoodGzipByteSource");
    final File gzFile = new File(tmpDir, testFile.getName() + ".gz");
    Assert.assertFalse(gzFile.exists());
    CompressionUtils.gzip(Files.asByteSource(testFile), new ByteSink() {
        @Override
        public OutputStream openStream() throws IOException {
            return new FilterOutputStream(new FileOutputStream(gzFile)) {
                @Override
                public void flush() throws IOException {
                    if (flushes.getAndIncrement() > 0) {
                        super.flush();
                    } else {
                        throw new IOException("Haven't flushed enough");
                    }
                }
            };
        }
    }, Predicates.alwaysTrue());
    Assert.assertTrue(gzFile.exists());
    try (final InputStream inputStream = CompressionUtils.decompress(new FileInputStream(gzFile), "file.gz")) {
        assertGoodDataStream(inputStream);
    }
    if (!testFile.delete()) {
        throw new IOE("Unable to delete file [%s]", testFile.getAbsolutePath());
    }
    Assert.assertFalse(testFile.exists());
    CompressionUtils.gunzip(Files.asByteSource(gzFile), testFile);
    Assert.assertTrue(testFile.exists());
    try (final InputStream inputStream = new FileInputStream(testFile)) {
        assertGoodDataStream(inputStream);
    }
    Assert.assertEquals(4, flushes.get()); // 2 for suppressed closes, 2 for manual calls to shake out errors
}

From source file:org.nanoframework.orm.jedis.sharded.RedisClientImpl.java
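Pipelined DEL responses are summed into dels with addAndGet(), and dels.get() returns the total number of keys removed.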

@Override
public long del(final String... keys) {
    if (keys.length == 0) {
        return 0;
    }

    ShardedJedis jedis = null;
    try {
        jedis = POOL.getJedis(config.getRedisType());
        final ShardedJedisPipeline pipeline = jedis.pipelined();
        final List<Response<Long>> responses = new ArrayList<>();
        for (String key : keys) {
            responses.add(pipeline.del(key));
        }

        pipeline.sync();

        final AtomicLong dels = new AtomicLong(0);
        if (!CollectionUtils.isEmpty(responses)) {
            responses.forEach(res -> dels.addAndGet(res.get()));
        }

        return dels.get();
    } catch (final Throwable e) {
        throw new RedisClientException(e.getMessage(), e);
    } finally {
        POOL.close(jedis);
    }
}