Example usage for java.util.concurrent.atomic AtomicLong getAndIncrement

List of usage examples for java.util.concurrent.atomic AtomicLong getAndIncrement

Introduction

On this page you can find example usages of java.util.concurrent.atomic.AtomicLong.getAndIncrement().

Prototype

public final long getAndIncrement() 

Document

Atomically increments the current value, with memory effects as specified by VarHandle#getAndAdd, and returns the previous value.
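
Because getAndIncrement() returns the value before the increment, it is commonly used to hand out unique, monotonically increasing IDs across threads. Below is a minimal, hypothetical sketch (the class and field names are illustrative only, not taken from the projects listed further down):

import java.util.concurrent.atomic.AtomicLong;

public class GetAndIncrementExample {
    // Shared counter; getAndIncrement() is atomic, so no external synchronization is needed.
    private static final AtomicLong ID_GENERATOR = new AtomicLong(0L);

    public static void main(String[] args) {
        long first = ID_GENERATOR.getAndIncrement();  // returns 0; counter is now 1
        long second = ID_GENERATOR.getAndIncrement(); // returns 1; counter is now 2
        System.out.println(first + ", " + second + ", current = " + ID_GENERATOR.get());
    }
}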

Usage

From source file:io.druid.java.util.common.CompressionUtilsTest.java

@Test(expected = IOException.class)
public void testStreamErrorGunzip() throws Exception {
    final File tmpDir = temporaryFolder.newFolder("testGoodGzipByteSource");
    final File gzFile = new File(tmpDir, testFile.getName() + ".gz");
    Assert.assertFalse(gzFile.exists());
    CompressionUtils.gzip(Files.asByteSource(testFile), Files.asByteSink(gzFile),
            Predicates.<Throwable>alwaysTrue());
    Assert.assertTrue(gzFile.exists());
    try (final InputStream inputStream = CompressionUtils.decompress(new FileInputStream(gzFile), "file.gz")) {
        assertGoodDataStream(inputStream);
    }
    if (testFile.exists() && !testFile.delete()) {
        throw new RE("Unable to delete file [%s]", testFile.getAbsolutePath());
    }
    Assert.assertFalse(testFile.exists());
    final AtomicLong flushes = new AtomicLong(0L);
    CompressionUtils.gunzip(new FileInputStream(gzFile), new FilterOutputStream(new FileOutputStream(testFile) {
        @Override
        public void flush() throws IOException {
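            // getAndIncrement() returns the pre-increment count, so the first flush throws and later flushes succeed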
            if (flushes.getAndIncrement() > 0) {
                super.flush();
            } else {
                throw new IOException("Test exception");
            }
        }
    }));
}

From source file:com.hubcap.task.TaskRunner.java

/**
 * Starts the ThreadPoolExecutor which builds a set of TaskRunner instances
 * which will wait for inputs (from the user)
 */
public static void startThreadPool() {
    if (!isTaskSystemReady) {
        System.out.println("startThreadPool()");

        isTaskSystemReady = true;

        // used to id the threads 'atomically'
        final AtomicLong count = new AtomicLong(0);
        if (TaskRunner.taskRunnerThreadFactory == null) {

            TaskRunner.taskRunnerThreadFactory = new ThreadFactory() {

                @Override
                public Thread newThread(Runnable r) {
                    if (runningTasks.contains(r)) {
                        throw new IllegalStateException("Cannot add duplicate runnable to running tasks");
                    }

                    Thread thread = new Thread(r);
                    thread.setDaemon(false);
                    thread.setName("HubcapTaskRunnerThread-" + count.getAndIncrement());
                    taskThreads.add(thread);
                    return thread;
                }
            };

            // Calculates the current stable thread count based on the assumption
            // that it takes 'X' times as long to transfer data (from GitHub) as it
            // does to process said data (including Gson transformation), and on a
            // limit of Y% CPU use. MAX_THREADS provides a safe and stable cap for
            // systems powerful enough to break the cap
            // (e.g. 32 cores and 12 disks = 2*32*12*(1+5/1) = ~4600 threads, a bit high...)
            int numThreads = ThreadUtils.getStableThreadCount(CPU_LOAD_TR, CPU_WAIT_TR, CPU_COMPUTE_TR,
                    Constants.MAX_TASK_RUNNER_THREADS);

            System.out.println("creating: " + numThreads + " threads for hubcap");
            TaskRunner.taskRunnerThreadPool = Executors.newFixedThreadPool(numThreads,
                    TaskRunner.taskRunnerThreadFactory);
            for (int i = 0; i < numThreads; ++i) {
                TaskRunner tr = new TaskRunner();
                taskRunnerThreadPool.execute(tr);
            }

            // pass the monitoring code to another thread
            // so we don't block the REPL loop
            monitorThread = new Thread(new Runnable() {

                @Override
                public void run() {

                    while (!taskRunnerThreadPool.isShutdown()) {
                        try {
                            TaskRunner.rebalance();
                            Thread.sleep(Constants.POOL_SHUTDOWN_CHECK_INTERVAL);

                        } catch (InterruptedException ex) {
                            if (ProcessModel.instance().getVerbose()) {
                                ErrorUtils.printStackTrace(ex);
                            }
                            break;
                        }
                    }

                    System.out.println("Thread Pool was shutdown");

                    while (!taskRunnerThreadPool.isTerminated()) {
                        try {
                            Thread.sleep(Constants.POOL_TERM_CHECK_INTERVAL);
                        } catch (InterruptedException ex) {
                            ErrorUtils.printStackTrace(ex);
                            break;
                        }
                    }

                    System.out.println("Thread pool terminated.");
                }
            });

            monitorThread.setName("TaskMonDaemon");
            monitorThread.setDaemon(false);

            // start monitoring
            monitorThread.start();

            System.out.println("Thread pool started!");
        }
    } else {
        throw new IllegalStateException("Hubcap task runner can only be initialized once!");
    }
}

From source file:com.btoddb.fastpersitentqueue.flume.FpqChannelTest.java

@Test
public void testThreading() throws Exception {
    final int numEntries = 1000;
    final int numPushers = 4;
    final int numPoppers = 4;
    final int entrySize = 1000;
    channel.setMaxTransactionSize(2000);
    final int popBatchSize = 100;
    channel.setMaxMemorySegmentSizeInBytes(10000000);
    channel.setMaxJournalFileSize(10000000);
    channel.setMaxJournalDurationInMs(30000);
    channel.setFlushPeriodInMs(1000);
    channel.setNumberOfFlushWorkers(4);

    final Random pushRand = new Random(1000L);
    final Random popRand = new Random(1000000L);
    final AtomicInteger pusherFinishCount = new AtomicInteger();
    final AtomicInteger numPops = new AtomicInteger();
    final AtomicLong counter = new AtomicLong();
    final AtomicLong pushSum = new AtomicLong();
    final AtomicLong popSum = new AtomicLong();

    channel.start();

    ExecutorService execSrvc = Executors.newFixedThreadPool(numPushers + numPoppers);

    Set<Future> futures = new HashSet<Future>();

    // start pushing
    for (int i = 0; i < numPushers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                for (int i = 0; i < numEntries; i++) {
                    try {
                        long x = counter.getAndIncrement();
                        pushSum.addAndGet(x);
                        ByteBuffer bb = ByteBuffer.wrap(new byte[entrySize]);
                        bb.putLong(x);

                        Transaction tx = channel.getTransaction();
                        tx.begin();
                        MyEvent event1 = new MyEvent();
                        event1.addHeader("x", String.valueOf(x)).setBody(new byte[numEntries - 8]); // take out size of long
                        channel.put(event1);
                        tx.commit();
                        tx.close();

                        Thread.sleep(pushRand.nextInt(5));
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
                pusherFinishCount.incrementAndGet();
            }
        });
        futures.add(future);
    }

    // start popping
    for (int i = 0; i < numPoppers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                while (pusherFinishCount.get() < numPushers || !channel.isEmpty()) {
                    try {
                        Transaction tx = channel.getTransaction();
                        tx.begin();

                        Event event;
                        int count = popBatchSize;
                        while (null != (event = channel.take()) && count-- > 0) {
                            popSum.addAndGet(Long.valueOf(event.getHeaders().get("x")));
                            numPops.incrementAndGet();
                        }

                        tx.commit();
                        tx.close();

                        Thread.sleep(popRand.nextInt(10));
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
            }
        });
        futures.add(future);
    }

    boolean finished = false;
    while (!finished) {
        try {
            for (Future f : futures) {
                f.get();
            }
            finished = true;
        } catch (InterruptedException e) {
            // ignore
            Thread.interrupted();
        }
    }

    assertThat(numPops.get(), is(numEntries * numPushers));
    assertThat(channel.isEmpty(), is(true));
    assertThat(pushSum.get(), is(popSum.get()));
}

From source file:com.github.naoghuman.testdata.abclist.service.LinkMappingService.java

@Override
protected Task<Void> createTask() {
    return new Task<Void>() {
        {
            updateProgress(0, saveMaxEntities);
        }

        @Override
        protected Void call() throws Exception {
            LoggerFacade.getDefault().deactivate(Boolean.TRUE);

            final StopWatch stopWatch = new StopWatch();
            stopWatch.start();

            /*
             1) over all links
             2) if random > 0.005d then do
             3) otherwise create a link without parent
             4) get 1-10 terms, create LinkMapping foreach of them
             - means a link is mapped to 1-10 terms
             5) get 0-10 topics, create LinkMapping foreach of them
             - means a link is mapped to 0-10 topics
            */

            final ObservableList<Link> links = SqlProvider.getDefault().findAllLinks();
            final ObservableList<Term> terms = SqlProvider.getDefault().findAllTerms();
            final int sizeTerms = terms.size();
            final ObservableList<Topic> topics = SqlProvider.getDefault().findAllTopics();
            final int sizeTopics = topics.size();
            final AtomicInteger index = new AtomicInteger(0);

            final CrudService crudService = DatabaseFacade.getDefault().getCrudService(entityName);
            final AtomicLong id = new AtomicLong(
                    -1_000_000_000L + DatabaseFacade.getDefault().getCrudService().count(entityName));
            links.stream() // 1
                    .forEach(link -> {
                        // 2) Should the [Link] have a parent
                        final double random = TestdataGenerator.RANDOM.nextDouble();
                        if (random > 0.005d) {
                            // 4) Create [Link]s with parent [Term]
                            final int maxTerms = TestdataGenerator.RANDOM.nextInt(10) + 1;
                            for (int i = 0; i < maxTerms; i++) {
                                final LinkMapping lm = ModelProvider.getDefault().getLinkMapping();
                                lm.setId(id.getAndIncrement());

                                final Term term = terms.get(TestdataGenerator.RANDOM.nextInt(sizeTerms));
                                lm.setParentId(term.getId());
                                lm.setParentType(LinkMappingType.TERM);

                                lm.setChildId(link.getId());
                                lm.setChildType(LinkMappingType.LINK);

                                crudService.create(lm);
                            }

                            // 5) Create [Link]s with parent [Topic]
                            final int maxTopics = TestdataGenerator.RANDOM.nextInt(11);
                            for (int i = 0; i < maxTopics; i++) {
                                final LinkMapping lm = ModelProvider.getDefault().getLinkMapping();
                                lm.setId(id.getAndIncrement());

                                final Topic topic = topics.get(TestdataGenerator.RANDOM.nextInt(sizeTopics));
                                lm.setParentId(topic.getId());
                                lm.setParentType(LinkMappingType.TOPIC);

                                lm.setChildId(link.getId());
                                lm.setChildType(LinkMappingType.LINK);

                                crudService.create(lm);
                            }
                        } else {
                            // 3) Some [Link]s don't have a parent
                            final LinkMapping lm = ModelProvider.getDefault().getLinkMapping();
                            lm.setId(id.getAndIncrement());
                            lm.setParentId(IDefaultConfiguration.DEFAULT_ID);
                            lm.setParentType(LinkMappingType.NOT_DEFINED);
                            lm.setChildId(link.getId());
                            lm.setChildType(LinkMappingType.LINK);

                            crudService.create(lm);
                        }

                        updateProgress(index.getAndIncrement(), saveMaxEntities);
                    });

            LoggerFacade.getDefault().deactivate(Boolean.FALSE);
            stopWatch.split();
            LoggerFacade.getDefault().debug(this.getClass(),
                    "  + " + stopWatch.toSplitString() + " for " + saveMaxEntities + " LinkMappings."); // NOI18N
            stopWatch.stop();

            return null;
        }
    };
}

From source file:com.github.naoghuman.testdata.abclist.service.ExerciseTermService.java

@Override
protected Task<Void> createTask() {
    return new Task<Void>() {
        {
            updateProgress(0, saveMaxEntities);
        }

        @Override
        protected Void call() throws Exception {
            LoggerFacade.getDefault().deactivate(Boolean.TRUE);

            final StopWatch stopWatch = new StopWatch();
            stopWatch.start();

            final ObservableList<Topic> topics = SqlProvider.getDefault().findAllTopics();
            final ObservableList<Term> terms = SqlProvider.getDefault().findAllTerms();
            final int sizeTerms = terms.size();
            final AtomicInteger index = new AtomicInteger(0);

            final CrudService crudService = DatabaseFacade.getDefault().getCrudService(entityName);
            final AtomicLong id = new AtomicLong(
                    -1_000_000_000L + DatabaseFacade.getDefault().getCrudService().count(entityName));
            topics.stream().forEach(topic -> {
                final ObservableList<Exercise> exercises = SqlProvider.getDefault()
                        .findAllExercisesWithTopicId(topic.getId());
                exercises.stream().filter(exercise -> exercise.isReady()).forEach(exercise -> {
                    final int maxExerciseTerms = TestdataGenerator.RANDOM.nextInt(70) + 10;
                    for (int i = 0; i < maxExerciseTerms; i++) {
                        final Term term = terms.get(TestdataGenerator.RANDOM.nextInt(sizeTerms));
                        final ExerciseTerm exerciseTerm = ModelProvider.getDefault().getExerciseTerm();
                        exerciseTerm.setExerciseId(exercise.getId());
                        exerciseTerm.setId(id.getAndIncrement());
                        exerciseTerm.setTermId(term.getId());

                        crudService.create(exerciseTerm);
                    }
                });

                updateProgress(index.getAndIncrement(), saveMaxEntities);
            });

            LoggerFacade.getDefault().deactivate(Boolean.FALSE);
            stopWatch.split();
            LoggerFacade.getDefault().debug(this.getClass(),
                    "  + " + stopWatch.toSplitString() + " for " + saveMaxEntities + " ExerciseTerms."); // NOI18N
            stopWatch.stop();

            return null;
        }
    };
}

From source file:com.streamsets.pipeline.stage.bigquery.destination.BigQueryTarget.java

@Override
public void write(Batch batch) throws StageException {
    Map<TableId, List<Record>> tableIdToRecords = new LinkedHashMap<>();
    Map<Long, Record> requestIndexToRecords = new LinkedHashMap<>();

    if (batch.getRecords().hasNext()) {
        ELVars elVars = getContext().createELVars();
        batch.getRecords().forEachRemaining(record -> {
            RecordEL.setRecordInContext(elVars, record);
            try {
                String datasetName = dataSetEval.eval(elVars, conf.datasetEL, String.class);
                String tableName = tableNameELEval.eval(elVars, conf.tableNameEL, String.class);
                TableId tableId = TableId.of(datasetName, tableName);
                if (tableIdExistsCache.get(tableId)) {
                    List<Record> tableIdRecords = tableIdToRecords.computeIfAbsent(tableId,
                            t -> new ArrayList<>());
                    tableIdRecords.add(record);
                } else {
                    getContext().toError(record, Errors.BIGQUERY_17, datasetName, tableName,
                            conf.credentials.projectId);
                }
            } catch (ELEvalException e) {
                LOG.error("Error evaluating DataSet/TableName EL", e);
                getContext().toError(record, Errors.BIGQUERY_10, e);
            } catch (ExecutionException e) {
                LOG.error("Error when checking exists for tableId, Reason : {}", e);
                Throwable rootCause = Throwables.getRootCause(e);
                getContext().toError(record, Errors.BIGQUERY_13, rootCause);
            }
        });

        tableIdToRecords.forEach((tableId, records) -> {
            final AtomicLong index = new AtomicLong(0);
            final AtomicBoolean areThereRecordsToWrite = new AtomicBoolean(false);
            InsertAllRequest.Builder insertAllRequestBuilder = InsertAllRequest.newBuilder(tableId);
            records.forEach(record -> {
                try {
                    String insertId = getInsertIdForRecord(elVars, record);
                    Map<String, ?> rowContent = convertToRowObjectFromRecord(record);
                    if (rowContent.isEmpty()) {
                        throw new OnRecordErrorException(record, Errors.BIGQUERY_14);
                    }
                    insertAllRequestBuilder.addRow(insertId, rowContent);
                    areThereRecordsToWrite.set(true);
                    requestIndexToRecords.put(index.getAndIncrement(), record);
                } catch (OnRecordErrorException e) {
                    LOG.error("Error when converting record {} to row, Reason : {} ",
                            record.getHeader().getSourceId(), e.getMessage());
                    getContext().toError(record, e.getErrorCode(), e.getParams());
                }
            });

            if (areThereRecordsToWrite.get()) {
                insertAllRequestBuilder.setIgnoreUnknownValues(conf.ignoreInvalidColumn);
                insertAllRequestBuilder.setSkipInvalidRows(false);

                InsertAllRequest request = insertAllRequestBuilder.build();

                if (!request.getRows().isEmpty()) {
                    try {
                        InsertAllResponse response = bigQuery.insertAll(request);
                        if (response.hasErrors()) {
                            response.getInsertErrors().forEach((requestIdx, errors) -> {
                                Record record = requestIndexToRecords.get(requestIdx);
                                String messages = COMMA_JOINER.join(errors.stream()
                                        .map(BigQueryError::getMessage).collect(Collectors.toList()));
                                String reasons = COMMA_JOINER.join(errors.stream().map(BigQueryError::getReason)
                                        .collect(Collectors.toList()));
                                LOG.error("Error when inserting record {}, Reasons : {}, Messages : {}",
                                        record.getHeader().getSourceId(), reasons, messages);
                                getContext().toError(record, Errors.BIGQUERY_11, reasons, messages);
                            });
                        }
                    } catch (BigQueryException e) {
                        LOG.error(Errors.BIGQUERY_13.getMessage(), e);
                        //Put all records to error.
                        for (long i = 0; i < request.getRows().size(); i++) {
                            Record record = requestIndexToRecords.get(i);
                            getContext().toError(record, Errors.BIGQUERY_13, e);
                        }
                    }
                }
            }
        });
    }
}

From source file:io.druid.java.util.common.CompressionUtilsTest.java

@Test
public void testGoodGzipWithException() throws Exception {
    final AtomicLong flushes = new AtomicLong(0);
    final File tmpDir = temporaryFolder.newFolder("testGoodGzipByteSource");
    final File gzFile = new File(tmpDir, testFile.getName() + ".gz");
    Assert.assertFalse(gzFile.exists());
    CompressionUtils.gzip(Files.asByteSource(testFile), new ByteSink() {
        @Override
        public OutputStream openStream() throws IOException {
            return new FilterOutputStream(new FileOutputStream(gzFile)) {
                @Override
                public void flush() throws IOException {
                    if (flushes.getAndIncrement() > 0) {
                        super.flush();
                    } else {
                        throw new IOException("Haven't flushed enough");
                    }
                }
            };
        }
    }, Predicates.<Throwable>alwaysTrue());
    Assert.assertTrue(gzFile.exists());
    try (final InputStream inputStream = CompressionUtils.decompress(new FileInputStream(gzFile), "file.gz")) {
        assertGoodDataStream(inputStream);
    }
    if (!testFile.delete()) {
        throw new IOE("Unable to delete file [%s]", testFile.getAbsolutePath());
    }
    Assert.assertFalse(testFile.exists());
    CompressionUtils.gunzip(Files.asByteSource(gzFile), testFile);
    Assert.assertTrue(testFile.exists());
    try (final InputStream inputStream = new FileInputStream(testFile)) {
        assertGoodDataStream(inputStream);
    }
    Assert.assertEquals(4, flushes.get()); // 2 for suppressed closes, 2 for manual calls to shake out errors
}

From source file:com.btoddb.fastpersitentqueue.FpqIT.java

@Test
public void testThreading() throws Exception {
    final int numEntries = 1000;
    final int numPushers = 4;
    final int numPoppers = 4;
    final int entrySize = 1000;
    fpq1.setMaxTransactionSize(2000);
    final int popBatchSize = 100;
    fpq1.setMaxMemorySegmentSizeInBytes(10000000);
    fpq1.setMaxJournalFileSize(10000000);
    fpq1.setMaxJournalDurationInMs(30000);
    fpq1.setFlushPeriodInMs(1000);
    fpq1.setNumberOfFlushWorkers(4);

    final Random pushRand = new Random(1000L);
    final Random popRand = new Random(1000000L);
    final AtomicInteger pusherFinishCount = new AtomicInteger();
    final AtomicInteger numPops = new AtomicInteger();
    final AtomicLong counter = new AtomicLong();
    final AtomicLong pushSum = new AtomicLong();
    final AtomicLong popSum = new AtomicLong();

    fpq1.init();

    ExecutorService execSrvc = Executors.newFixedThreadPool(numPushers + numPoppers);

    Set<Future> futures = new HashSet<Future>();

    // start pushing
    for (int i = 0; i < numPushers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                for (int i = 0; i < numEntries; i++) {
                    try {
                        long x = counter.getAndIncrement();
                        pushSum.addAndGet(x);
                        ByteBuffer bb = ByteBuffer.wrap(new byte[entrySize]);
                        bb.putLong(x);

                        fpq1.beginTransaction();
                        fpq1.push(bb.array());
                        fpq1.commit();
                        if ((x + 1) % 500 == 0) {
                            System.out.println("pushed ID = " + x);
                        }
                        Thread.sleep(pushRand.nextInt(5));
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
                pusherFinishCount.incrementAndGet();
            }
        });
        futures.add(future);
    }

    // start popping
    for (int i = 0; i < numPoppers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                while (pusherFinishCount.get() < numPushers || !fpq1.isEmpty()) {
                    try {
                        fpq1.beginTransaction();
                        try {
                            Collection<FpqEntry> entries = fpq1.pop(popBatchSize);
                            if (null == entries) {
                                Thread.sleep(100);
                                continue;
                            }

                            for (FpqEntry entry : entries) {
                                ByteBuffer bb = ByteBuffer.wrap(entry.getData());
                                popSum.addAndGet(bb.getLong());
                                if (entry.getId() % 500 == 0) {
                                    System.out.println("popped ID = " + entry.getId());
                                }
                            }
                            numPops.addAndGet(entries.size());
                            fpq1.commit();
                            entries.clear();
                        } finally {
                            if (fpq1.isTransactionActive()) {
                                fpq1.rollback();
                            }
                        }
                        Thread.sleep(popRand.nextInt(10));
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
            }
        });
        futures.add(future);
    }

    boolean finished = false;
    while (!finished) {
        try {
            for (Future f : futures) {
                f.get();
            }
            finished = true;
        } catch (InterruptedException e) {
            // ignore
            Thread.interrupted();
        }
    }

    assertThat(numPops.get(), is(numEntries * numPushers));
    assertThat(fpq1.getNumberOfEntries(), is(0L));
    assertThat(pushSum.get(), is(popSum.get()));
    assertThat(fpq1.getMemoryMgr().getNumberOfActiveSegments(), is(1));
    assertThat(fpq1.getMemoryMgr().getSegments(), hasSize(1));
    assertThat(fpq1.getJournalMgr().getJournalFiles().entrySet(), hasSize(1));
    assertThat(FileUtils.listFiles(fpq1.getPagingDirectory(), TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE),
            is(empty()));
    assertThat(
            FileUtils.listFiles(fpq1.getJournalDirectory(), TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE),
            hasSize(1));
}

From source file:com.seajas.search.profiler.service.repository.RepositoryService.java

/**
 * Process a paged list of all resources within the repository.
 *
 * @param collection
 * @param sourceId
 * @param taxonomyMatch
 * @param url
 * @param startDate
 * @param endDate
 * @param parameters
 * @param rangeStart
 * @param rangeEnd
 * @param processor
 * @return boolean
 */
public boolean processResources(final String collection, final Integer sourceId, final String taxonomyMatch,
        final String url, final Date startDate, final Date endDate, final Map<String, String> parameters,
        final Integer rangeStart, final Integer rangeEnd, final RepositoryProcessor processor) {
    Query query = createQuery(true, collection, sourceId, taxonomyMatch, startDate, endDate, url, parameters);

    query.fields().include("_id");
    query.fields().include("currentState");
    query.fields().include("element.hostname");

    // Determine the total number of documents this affects

    final AtomicLong currentResult = new AtomicLong(0L);

    // Then skip to it and get going

    query.skip(rangeStart);

    if (rangeEnd != null)
        query.limit(rangeEnd - rangeStart);

    if (logger.isInfoEnabled())
        logger.info(String.format("Processing ranges %d to %s of (unknown) results through the given processor",
                rangeStart, rangeEnd != null ? rangeEnd.toString() : "end"));

    mongoTemplate.executeQuery(query, defaultCollection, new DocumentCallbackHandler() {
        @Override
        public void processDocument(final DBObject dbObject) throws MongoException, DataAccessException {
            CompositeState currentState = CompositeState.valueOf((String) dbObject.get("currentState"));

            if (!EnumSet.of(CompositeState.Content, CompositeState.CompletedDocument,
                    CompositeState.InitialDocument).contains(currentState)) {
                if (logger.isDebugEnabled()) {
                    ObjectId id = (ObjectId) dbObject.get("_id");

                    logger.debug("Skipping over element with ID '" + id + "' and current state '" + currentState
                            + "'");
                }

                return;
            }

            ObjectId id = (ObjectId) dbObject.get("_id");
            String hostname = (String) ((BasicDBObject) dbObject.get("element")).get("hostname");

            if (logger.isInfoEnabled())
                logger.info("Processing re-indexing entry " + currentResult.getAndIncrement()
                        + " / (unknown) with ID '" + id + "' and hostname '" + hostname + "'");

            processor.process(id, hostname);
        }
    });

    return true;
}

From source file:org.apache.druid.java.util.common.CompressionUtilsTest.java

@Test
public void testGoodGzipWithException() throws Exception {
    final AtomicLong flushes = new AtomicLong(0);
    final File tmpDir = temporaryFolder.newFolder("testGoodGzipByteSource");
    final File gzFile = new File(tmpDir, testFile.getName() + ".gz");
    Assert.assertFalse(gzFile.exists());
    CompressionUtils.gzip(Files.asByteSource(testFile), new ByteSink() {
        @Override
        public OutputStream openStream() throws IOException {
            return new FilterOutputStream(new FileOutputStream(gzFile)) {
                @Override
                public void flush() throws IOException {
                    if (flushes.getAndIncrement() > 0) {
                        super.flush();
                    } else {
                        throw new IOException("Haven't flushed enough");
                    }
                }
            };
        }
    }, Predicates.alwaysTrue());
    Assert.assertTrue(gzFile.exists());
    try (final InputStream inputStream = CompressionUtils.decompress(new FileInputStream(gzFile), "file.gz")) {
        assertGoodDataStream(inputStream);
    }
    if (!testFile.delete()) {
        throw new IOE("Unable to delete file [%s]", testFile.getAbsolutePath());
    }
    Assert.assertFalse(testFile.exists());
    CompressionUtils.gunzip(Files.asByteSource(gzFile), testFile);
    Assert.assertTrue(testFile.exists());
    try (final InputStream inputStream = new FileInputStream(testFile)) {
        assertGoodDataStream(inputStream);
    }
    Assert.assertEquals(4, flushes.get()); // 2 for suppressed closes, 2 for manual calls to shake out errors
}