Example usage for org.apache.commons.lang.mutable MutableLong longValue

Introduction

On this page you can find example usage of org.apache.commons.lang.mutable.MutableLong.longValue().

Prototype

public long longValue() 

Document

Returns the value of this MutableLong as a long.
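
Before the project examples below, here is a minimal, self-contained sketch of the method (class name and values are illustrative):

import org.apache.commons.lang.mutable.MutableLong;

public class MutableLongDemo {
    public static void main(String[] args) {
        // MutableLong wraps a primitive long that can be changed in place.
        MutableLong counter = new MutableLong(0);
        counter.increment();                  // value is now 1
        counter.add(41);                      // value is now 42
        long primitive = counter.longValue(); // read the value back as a primitive long
        System.out.println(primitive);        // prints 42
    }
}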

Usage

From source file:com.jivesoftware.os.filer.queue.guaranteed.delivery.FileQueueBackGuaranteedDeliveryFactory.java

/**
 * Builds a guaranteed delivery service backed by a file queue.
 *
 * @param serviceConfig configuration for the backing file queue
 * @param deliveryCallback callback invoked to deliver queued entries
 * @return the configured delivery service
 * @throws IOException if the backing queue cannot be created
 */
public static GuaranteedDeliveryService createService(
        FileQueueBackGuaranteedDeliveryServiceConfig serviceConfig, DeliveryCallback deliveryCallback)
        throws IOException {

    final MutableLong undelivered = new MutableLong(0);
    final MutableLong delivered = new MutableLong(0);
    final FileQueueImpl queue = new FileQueueImpl(serviceConfig.getPathToQueueFiles(),
            serviceConfig.getQueueName(), serviceConfig.getTakableWhenCreationTimestampIsOlderThanXMillis(),
            serviceConfig.getTakableWhenLastAppendedIsOlderThanXMillis(),
            serviceConfig.getTakableWhenLargerThanXEntries(), serviceConfig.getMaxPageSizeInBytes(),
            serviceConfig.getPushbackAtEnqueuedSize(), serviceConfig.isDeleteOnExit(), undelivered,
            serviceConfig.isTakeFullQueuesOnly());

    final GuaranteedDeliveryServiceStatus status = new GuaranteedDeliveryServiceStatus() {
        @Override
        public long undelivered() {
            return undelivered.longValue();
        }

        @Override
        public long delivered() {
            return delivered.longValue();
        }
    };

    final QueueProcessorPool processorPool = new QueueProcessorPool(queue, serviceConfig.getNumberOfConsumers(),
            serviceConfig.getQueueProcessorConfig(), delivered, deliveryCallback);

    GuaranteedDeliveryService service = new GuaranteedDeliveryService() {
        private final AtomicBoolean running = new AtomicBoolean(false);

        @Override
        public void add(List<byte[]> add) throws DeliveryServiceException {
            if (running.compareAndSet(false, true)) {
                processorPool.start();
            }

            if (add == null) {
                return;
            }
            for (int i = 0; i < add.size(); i++) {
                byte[] value = add.get(i);
                if (value == null) {
                    continue;
                }
                try {
                    queue.add(PhasedQueueConstants.ENQUEUED, System.currentTimeMillis(), value);
                } catch (Exception ex) {
                    throw new DeliveryServiceException(add, "failed to deliver the following items", ex);
                }
            }
        }

        @Override
        public GuaranteedDeliveryServiceStatus getStatus() {
            return status;
        }

        @Override
        public void close() {
            if (running.compareAndSet(true, false)) {
                processorPool.stop();
            }
        }
    };

    return service;
}
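
The status object above exposes live views of the two counters: every call to undelivered() or delivered() re-reads the current value through longValue() rather than copying it once. A trimmed sketch of the same pattern (interface and class names are illustrative):

import org.apache.commons.lang.mutable.MutableLong;

public class LiveStatusDemo {
    interface Status {
        long delivered();
    }

    public static void main(String[] args) {
        final MutableLong delivered = new MutableLong(0);
        Status status = new Status() {
            @Override
            public long delivered() {
                return delivered.longValue(); // always reports the latest value
            }
        };
        delivered.add(5);
        System.out.println(status.delivered()); // prints 5
    }
}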

From source file:com.datatorrent.lib.io.block.BlockWriter.java

/**
 * Transfers counters from a removed partition into the target during repartitioning.
 *
 * @param target
 *          target counter
 * @param source
 *          removed counter
 */
protected void addCounters(BasicCounters<MutableLong> target, BasicCounters<MutableLong> source) {
    for (Enum<BlockWriter.Counters> key : BlockWriter.Counters.values()) {
        MutableLong tcounter = target.getCounter(key);
        if (tcounter == null) {
            tcounter = new MutableLong();
            target.setCounter(key, tcounter);
        }
        MutableLong scounter = source.getCounter(key);
        if (scounter != null) {
            tcounter.add(scounter.longValue());
        }
    }
}
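
The merge step reduces to adding one counter's primitive value into another via add(longValue()). As a tiny self-contained illustration:

MutableLong total = new MutableLong(10);
MutableLong removedPartition = new MutableLong(32);
total.add(removedPartition.longValue()); // total is now 42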

From source file:com.palantir.atlasdb.schema.TransactionRangeMigrator.java

private boolean internalCopyRow(RowResult<byte[]> rr, long maxBytes, Transaction writeT,
        @Output MutableLong bytesPut, @Output Mutable<byte[]> lastRowName) {
    Map<Cell, byte[]> values = rowTransform.apply(rr);
    writeT.put(destTable, values);

    for (Map.Entry<Cell, byte[]> e : values.entrySet()) {
        bytesPut.add(e.getValue().length + Cells.getApproxSizeOfCell(e.getKey()));
    }

    if (bytesPut.longValue() >= maxBytes) {
        lastRowName.set(rr.getRowName());
        return false;
    }
    return true;
}

From source file:com.palantir.atlasdb.schema.KvsRangeMigrator.java

private boolean internalCopyRow(RowResult<byte[]> rr, long maxBytes, @Output Map<Cell, byte[]> writeMap,
        @Output MutableLong bytesPut, @Output Mutable<byte[]> lastRowName) {
    Map<Cell, byte[]> values = rowTransform.apply(rr);
    writeMap.putAll(values);

    for (Map.Entry<Cell, byte[]> e : values.entrySet()) {
        bytesPut.add(e.getValue().length + Cells.getApproxSizeOfCell(e.getKey()));
    }

    if (bytesPut.longValue() >= maxBytes) {
        lastRowName.set(rr.getRowName());
        return false;
    }
    return true;
}
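
Both migrator variants use the same idiom: a MutableLong passed as an out-parameter accumulates a byte count across iterations, and longValue() is compared against a budget to decide when to stop. A self-contained sketch of that idiom (names are illustrative):

import java.util.List;
import org.apache.commons.lang.mutable.MutableLong;

public class BatchLimiter {
    /** Copies rows until the byte budget is exhausted; returns false when stopping early. */
    static boolean copyUpTo(List<byte[]> rows, long maxBytes, MutableLong bytesPut) {
        for (byte[] row : rows) {
            // ... write the row to the destination ...
            bytesPut.add(row.length);
            if (bytesPut.longValue() >= maxBytes) {
                return false; // budget reached; the caller records where to resume
            }
        }
        return true;
    }
}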

From source file:com.jivesoftware.os.upena.amza.transport.http.replication.endpoints.UpenaAmzaReplicationRestEndpoints.java

@POST
@Consumes("application/json")
@Path("/changes/take")
public Response take(final RowUpdates rowUpdates) {
    try {

        final BinaryRowMarshaller rowMarshaller = new BinaryRowMarshaller();
        final List<byte[]> rows = new ArrayList<>();
        final MutableLong highestTransactionId = new MutableLong();
        amzaInstance.takeRowUpdates(rowUpdates.getTableName(), rowUpdates.getHighestTransactionId(),
                new RowScan() {
                    @Override
                    public boolean row(long orderId, RowIndexKey key, RowIndexValue value) throws Exception {
                        rows.add(rowMarshaller.toRow(orderId, key, value));
                        if (orderId > highestTransactionId.longValue()) {
                            highestTransactionId.setValue(orderId);
                        }
                        return true;
                    }
                });

        return ResponseHelper.INSTANCE.jsonResponse(
                new RowUpdates(highestTransactionId.longValue(), rowUpdates.getTableName(), rows));
    } catch (Exception x) {
        LOG.warn("Failed to apply changeset: " + rowUpdates, x);
        return ResponseHelper.INSTANCE.errorResponse("Failed to apply changeset " + rowUpdates, x);
    }
}
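
Here MutableLong acts as a running maximum that the inner-class callback can update: compare each incoming id against longValue() and overwrite with setValue(). In isolation (values are illustrative):

final MutableLong highest = new MutableLong(Long.MIN_VALUE);
for (long orderId : new long[] { 3, 9, 4 }) {
    if (orderId > highest.longValue()) {
        highest.setValue(orderId);
    }
}
// highest.longValue() is now 9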

From source file:com.datatorrent.lib.io.block.AbstractBlockReader.java

/**
 * Transfers counters from a removed partition into the target during repartitioning.
 *
 * @param target target counter
 * @param source removed counter
 */
protected void addCounters(BasicCounters<MutableLong> target, BasicCounters<MutableLong> source) {
    for (Enum<ReaderCounterKeys> key : ReaderCounterKeys.values()) {
        MutableLong tcounter = target.getCounter(key);
        if (tcounter == null) {
            tcounter = new MutableLong();
            target.setCounter(key, tcounter);
        }
        MutableLong scounter = source.getCounter(key);
        if (scounter != null) {
            tcounter.add(scounter.longValue());
        }
    }
}

From source file:com.datatorrent.lib.io.jms.AbstractJMSInputOperator.java

/**
 * This method is called when a message is added to {@link #holdingBuffer} and can be overridden by
 * subclasses if required. It is called by the JMS thread, not the operator thread.
 *
 * @param message the consumed JMS message
 * @return true if the message is accepted.
 * @throws javax.jms.JMSException
 */
protected boolean messageConsumed(Message message) throws JMSException {
    if (message.getJMSRedelivered() && pendingAck.contains(message.getJMSMessageID())) {
        counters.getCounter(CounterKeys.REDELIVERED).increment();
        LOG.warn("IGNORING: Redelivered Message {}", message.getJMSMessageID());
        return false;
    }
    pendingAck.add(message.getJMSMessageID());
    MutableLong receivedCt = counters.getCounter(CounterKeys.RECEIVED);
    receivedCt.increment();
    LOG.debug("message id: {} buffer size: {} received: {}", message.getJMSMessageID(), holdingBuffer.size(),
            receivedCt.longValue());
    return true;
}
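
Note that MutableLong itself performs no synchronization, so in a setup like this one, where the JMS thread writes and the operator thread reads, visibility must be guaranteed elsewhere. If no external synchronization is available, java.util.concurrent.AtomicLong is the safe alternative; a minimal sketch (class and method names are illustrative):

import java.util.concurrent.atomic.AtomicLong;

public class CrossThreadCounter {
    // Unlike MutableLong, AtomicLong guarantees that updates made on one
    // thread (e.g. a JMS listener) are visible to readers on other threads.
    private final AtomicLong received = new AtomicLong();

    public void onMessage() {
        received.incrementAndGet();
    }

    public long receivedCount() {
        return received.get();
    }
}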

From source file:eu.project.ttc.engines.morpho.CompostAE.java

@Override
public void collectionProcessComplete() throws AnalysisEngineProcessException {
    SubTaskObserver observer = observerResource.getTaskObserver(TASK_NAME);
    observer.setTotalTaskWork(termIndexResource.getTermIndex().getWords().size());
    LOGGER.info("Starting morphologyical compound detection for TermIndex {}",
            this.termIndexResource.getTermIndex().getName());
    LOGGER.debug(this.toString());
    wrMeasure = termIndexResource.getTermIndex().getWRMeasure();
    swtLemmaIndex = termIndexResource.getTermIndex().getCustomIndex(TermIndexes.SINGLE_WORD_LEMMA);
    buildCompostIndex();

    final MutableLong cnt = new MutableLong(0);

    Timer progressLoggerTimer = new Timer("Morphosyntactic splitter AE");
    progressLoggerTimer.schedule(new TimerTask() {
        @Override
        public void run() {
            int total = termIndexResource.getTermIndex().getWords().size();
            CompostAE.LOGGER.info("Progress: {}% ({} on {})",
                    String.format("%.2f", ((float) cnt.longValue() * 100) / total), cnt.longValue(), total);
        }
    }, 5000l, 5000l);

    int observingStep = 100;
    for (Term swt : termIndexResource.getTermIndex().getTerms()) {
        if (!swt.isSingleWord())
            continue;
        cnt.increment();
        if (cnt.longValue() % observingStep == 0) {
            observer.work(observingStep);
        }

        /*
         * Do not do native morphology splitting 
         * if a composition already exists.
         */
        Word word = swt.getWords().get(0).getWord();
        if (word.isCompound())
            continue;

        Map<Segmentation, Double> scores = computeScores(word.getLemma());
        if (scores.size() > 0) {

            List<Segmentation> segmentations = Lists.newArrayList(scores.keySet());

            /*
             *  compare segmentations in a deterministic way.
             */
            segmentations.sort(new Comparator<Segmentation>() {
                @Override
                public int compare(Segmentation o1, Segmentation o2) {
                    int comp = Double.compare(scores.get(o2), scores.get(o1));
                    if (comp != 0)
                        return comp;
                    comp = Integer.compare(o1.getSegments().size(), o2.getSegments().size());
                    if (comp != 0)
                        return comp;
                    for (int i = 0; i < o1.getSegments().size(); i++) {
                        comp = Integer.compare(o2.getSegments().get(i).getEnd(),
                                o1.getSegments().get(i).getEnd());
                        if (comp != 0)
                            return comp;
                    }
                    return 0;
                }
            });

            Segmentation bestSegmentation = segmentations.get(0);

            // build the word component from segmentation
            WordBuilder builder = new WordBuilder(word);

            for (Segment seg : bestSegmentation.getSegments()) {
                String lemma = segmentLemmaCache.getUnchecked(seg.getLemma());
                builder.addComponent(seg.getBegin(), seg.getEnd(), lemma);
                if (seg.isNeoclassical())
                    builder.setCompoundType(CompoundType.NEOCLASSICAL);
                else
                    builder.setCompoundType(CompoundType.NATIVE);
            }
            builder.create();

            // log the word composition
            if (LOGGER.isTraceEnabled()) {
                List<String> componentStrings = Lists.newArrayList();
                for (Component component : word.getComponents())
                    componentStrings.add(component.toString());
                LOGGER.trace("{} [{}]", word.getLemma(), Joiner.on(' ').join(componentStrings));
            }
        }
    }

    //finalize
    progressLoggerTimer.cancel();

    LOGGER.debug("segment score cache size: {}", segmentScoreEntries.size());
    LOGGER.debug("segment score hit count: " + segmentScoreEntries.stats().hitCount());
    LOGGER.debug("segment score hit rate: " + segmentScoreEntries.stats().hitRate());
    LOGGER.debug("segment score eviction count: " + segmentScoreEntries.stats().evictionCount());
    termIndexResource.getTermIndex().dropCustomIndex(TermIndexes.SINGLE_WORD_LEMMA);
    segmentScoreEntries.invalidateAll();
    segmentLemmaCache.invalidateAll();
}
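
The progress logger above leans on a common trick: anonymous inner classes may only capture final variables, so a final MutableLong gives the TimerTask a fixed reference whose wrapped value the work loop can still change. A stripped-down sketch of the same idea (names are illustrative):

import java.util.Timer;
import java.util.TimerTask;
import org.apache.commons.lang.mutable.MutableLong;

public class ProgressDemo {
    public static void main(String[] args) throws InterruptedException {
        final MutableLong processed = new MutableLong(0); // final reference, mutable value
        Timer timer = new Timer("progress", true);
        timer.schedule(new TimerTask() {
            @Override
            public void run() {
                // The timer thread reads the latest count via longValue().
                System.out.println("processed so far: " + processed.longValue());
            }
        }, 1000L, 1000L);

        for (int i = 0; i < 10000; i++) {
            processed.increment(); // the work loop updates the shared counter
            Thread.sleep(1);
        }
        timer.cancel();
    }
}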

From source file:it.unimi.dsi.sux4j.mph.CHDMinimalPerfectHashFunction.java

/**
 * Creates a new CHD minimal perfect hash function for the given keys.
 *
 * @param keys the keys to hash, or {@code null}.
 * @param transform a transformation strategy for the keys.
 * @param lambda the average bucket size.
 * @param loadFactor the load factor.
 * @param signatureWidth a signature width, or 0 for no signature.
 * @param tempDir a temporary directory for the store files, or {@code null} for the standard temporary directory.
 * @param chunkedHashStore a chunked hash store containing the keys, or {@code null}; the store
 * can be unchecked, but in this case <code>keys</code> and <code>transform</code> must be non-{@code null}. 
 */
protected CHDMinimalPerfectHashFunction(final Iterable<? extends T> keys,
        final TransformationStrategy<? super T> transform, final int lambda, double loadFactor,
        final int signatureWidth, final File tempDir, ChunkedHashStore<T> chunkedHashStore) throws IOException {
    this.transform = transform;

    final ProgressLogger pl = new ProgressLogger(LOGGER);
    pl.displayLocalSpeed = true;
    pl.displayFreeMemory = true;
    final RandomGenerator r = new XorShift1024StarRandomGenerator();
    pl.itemsName = "keys";

    final boolean givenChunkedHashStore = chunkedHashStore != null;
    if (!givenChunkedHashStore) {
        chunkedHashStore = new ChunkedHashStore<T>(transform, tempDir, pl);
        chunkedHashStore.reset(r.nextLong());
        chunkedHashStore.addAll(keys.iterator());
    }
    n = chunkedHashStore.size();

    defRetValue = -1; // For the very few cases in which we can decide

    int log2NumChunks = Math.max(0, Fast.mostSignificantBit(n >> LOG2_CHUNK_SIZE));
    chunkShift = chunkedHashStore.log2Chunks(log2NumChunks);
    final int numChunks = 1 << log2NumChunks;

    LOGGER.debug("Number of chunks: " + numChunks);
    LOGGER.debug("Average chunk size: " + (double) n / numChunks);

    offsetNumBucketsSeed = new long[(numChunks + 1) * 3 + 2];

    int duplicates = 0;
    final LongArrayList holes = new LongArrayList();

    @SuppressWarnings("resource")
    final OfflineIterable<MutableLong, MutableLong> coefficients = new OfflineIterable<MutableLong, MutableLong>(
            new Serializer<MutableLong, MutableLong>() {

                @Override
                public void write(final MutableLong a, final DataOutput dos) throws IOException {
                    long x = a.longValue();
                    while ((x & ~0x7FL) != 0) {
                        dos.writeByte((int) (x | 0x80));
                        x >>>= 7;
                    }
                    dos.writeByte((int) x);
                }

                @Override
                public void read(final DataInput dis, final MutableLong x) throws IOException {
                    byte b = dis.readByte();
                    long t = b & 0x7F;
                    for (int shift = 7; (b & 0x80) != 0; shift += 7) {
                        b = dis.readByte();
                        t |= (b & 0x7FL) << shift;
                    }
                    x.setValue(t);
                }
            }, new MutableLong());

    for (;;) {
        LOGGER.debug("Generating minimal perfect hash function...");

        holes.clear();
        coefficients.clear();
        pl.expectedUpdates = numChunks;
        pl.itemsName = "chunks";
        pl.start("Analysing chunks... ");

        try {
            int chunkNumber = 0;

            for (ChunkedHashStore.Chunk chunk : chunkedHashStore) {
                /* We treat a chunk as a single hash function. The number of bins is thus
                 * the first prime larger than the chunk size divided by the load factor. */
                final int p = Primes.nextPrime((int) Math.ceil(chunk.size() / loadFactor) + 1);
                final boolean used[] = new boolean[p];

                final int numBuckets = (chunk.size() + lambda - 1) / lambda;
                numBuckets(chunkNumber + 1, numBuckets(chunkNumber) + numBuckets);
                final int[] cc0 = new int[numBuckets];
                final int[] cc1 = new int[numBuckets];
                @SuppressWarnings("unchecked")
                final ArrayList<long[]>[] bucket = new ArrayList[numBuckets];
                for (int i = bucket.length; i-- != 0;)
                    bucket[i] = new ArrayList<long[]>();

                tryChunk: for (;;) {
                    for (ArrayList<long[]> b : bucket)
                        b.clear();
                    Arrays.fill(used, false);

                    /* At each try, the allocation of keys to buckets is randomized differently. */
                    final long seed = r.nextLong();
                    // System.err.println( "Number of keys: " + chunk.size()  + " Number of bins: " + p + " seed: " + seed );
                    /* We distribute the keys in this chunk into the buckets. */
                    for (Iterator<long[]> iterator = chunk.iterator(); iterator.hasNext();) {
                        final long[] triple = iterator.next();
                        final long[] h = new long[3];
                        Hashes.spooky4(triple, seed, h);
                        final ArrayList<long[]> b = bucket[(int) ((h[0] >>> 1) % numBuckets)];
                        h[1] = (int) ((h[1] >>> 1) % p);
                        h[2] = (int) ((h[2] >>> 1) % (p - 1)) + 1;

                        // All elements in a bucket must have either different h[ 1 ] or different h[ 2 ]
                        for (long[] t : b)
                            if (t[1] == h[1] && t[2] == h[2]) {
                                LOGGER.info("Duplicate index" + Arrays.toString(t));
                                continue tryChunk;
                            }
                        b.add(h);
                    }

                    final int[] perm = Util.identity(bucket.length);
                    IntArrays.quickSort(perm, new AbstractIntComparator() {
                        private static final long serialVersionUID = 1L;

                        @Override
                        public int compare(int a0, int a1) {
                            return Integer.compare(bucket[a1].size(), bucket[a0].size());
                        }
                    });

                    for (int i = 0; i < perm.length;) {
                        final LinkedList<Integer> bucketsToDo = new LinkedList<Integer>();
                        final int size = bucket[perm[i]].size();
                        //System.err.println( "Bucket size: " + size );
                        int j;
                        // Gather indices of all buckets with the same size
                        for (j = i; j < perm.length && bucket[perm[j]].size() == size; j++)
                            bucketsToDo.add(Integer.valueOf(perm[j]));

                        // Examine for each pair (c0,c1) the buckets still to do
                        ext: for (int c1 = 0; c1 < p; c1++)
                            for (int c0 = 0; c0 < p; c0++) {
                                //System.err.println( "Testing " + c0 + ", " + c1 + " (to do: " + bucketsToDo.size() + ")" );
                                for (Iterator<Integer> iterator = bucketsToDo.iterator(); iterator.hasNext();) {
                                    final int k = iterator.next().intValue();
                                    final ArrayList<long[]> b = bucket[k];
                                    boolean completed = true;
                                    final IntArrayList done = new IntArrayList();
                                    // Try to see whether the necessary entries are not used
                                    for (long[] h : b) {
                                        //assert k == h[ 0 ];

                                        int pos = (int) ((h[1] + c0 * h[2] + c1) % p);
                                        //System.err.println( "Testing pos " + pos + " for " + Arrays.toString( e  ));
                                        if (used[pos]) {
                                            completed = false;
                                            break;
                                        } else {
                                            used[pos] = true;
                                            done.add(pos);
                                        }
                                    }

                                    if (completed) {
                                        // All positions were free
                                        cc0[k] = c0;
                                        cc1[k] = c1;
                                        iterator.remove();
                                    } else
                                        for (int d : done)
                                            used[d] = false;
                                }
                                if (bucketsToDo.isEmpty())
                                    break ext;
                            }
                        if (!bucketsToDo.isEmpty())
                            continue tryChunk;

                        seed(chunkNumber, seed);
                        i = j;
                    }
                    break;
                }

                // System.err.println("DONE!");

                if (ASSERTS) {
                    final IntOpenHashSet pos = new IntOpenHashSet();
                    final long h[] = new long[3];
                    for (Iterator<long[]> iterator = chunk.iterator(); iterator.hasNext();) {
                        final long[] triple = iterator.next();
                        Hashes.spooky4(triple, seed(chunkNumber), h);
                        h[0] = (h[0] >>> 1) % numBuckets;
                        h[1] = (int) ((h[1] >>> 1) % p);
                        h[2] = (int) ((h[2] >>> 1) % (p - 1)) + 1;
                        //System.err.println( Arrays.toString(  e  ) );
                        assert pos.add((int) ((h[1] + cc0[(int) (h[0])] * h[2] + cc1[(int) (h[0])]) % p));
                    }
                }

                final MutableLong l = new MutableLong();
                for (int i = 0; i < numBuckets; i++) {
                    l.setValue(cc0[i] + cc1[i] * p);
                    coefficients.add(l);
                }

                for (int i = 0; i < p; i++)
                    if (!used[i])
                        holes.add(offset(chunkNumber) + i);

                offset(chunkNumber + 1, offset(chunkNumber) + p);
                chunkNumber++;
                pl.update();
            }

            pl.done();
            break;
        } catch (ChunkedHashStore.DuplicateException e) {
            if (keys == null)
                throw new IllegalStateException(
                        "You provided no keys, but the chunked hash store was not checked");
            if (duplicates++ > 3)
                throw new IllegalArgumentException("The input list contains duplicates");
            LOGGER.warn("Found duplicate. Recomputing triples...");
            chunkedHashStore.reset(r.nextLong());
            chunkedHashStore.addAll(keys.iterator());
        }
    }

    rank = new SparseRank(offset(offsetNumBucketsSeed.length / 3 - 1), holes.size(), holes.iterator());

    globalSeed = chunkedHashStore.seed();

    this.coefficients = new EliasFanoLongBigList(new AbstractLongIterator() {
        final OfflineIterator<MutableLong, MutableLong> iterator = coefficients.iterator();

        @Override
        public boolean hasNext() {
            return iterator.hasNext();
        }

        public long nextLong() {
            return iterator.next().longValue();
        }
    }, 0, true);

    coefficients.close();

    LOGGER.info("Completed.");
    LOGGER.info("Actual bit cost per key: " + (double) numBits() / n);

    if (signatureWidth != 0) {
        signatureMask = -1L >>> Long.SIZE - signatureWidth;
        (signatures = LongArrayBitVector.getInstance().asLongBigList(signatureWidth)).size(n);
        pl.expectedUpdates = n;
        pl.itemsName = "signatures";
        pl.start("Signing...");
        for (ChunkedHashStore.Chunk chunk : chunkedHashStore) {
            Iterator<long[]> iterator = chunk.iterator();
            for (int i = chunk.size(); i-- != 0;) {
                final long[] triple = iterator.next();
                long t = getLongByTripleNoCheck(triple);
                signatures.set(t, signatureMask & triple[0]);
                pl.lightUpdate();
            }
        }
        pl.done();
    } else {
        signatureMask = 0;
        signatures = null;
    }

    if (!givenChunkedHashStore)
        chunkedHashStore.close();
}
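
The Serializer in this example stores each MutableLong as a variable-length quantity: seven payload bits per byte, with the high bit flagging a continuation. A round-trip sketch of that encoding with plain streams (class and method names are illustrative):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.commons.lang.mutable.MutableLong;

public class VarIntRoundTrip {
    static void write(long x, DataOutput dos) throws IOException {
        while ((x & ~0x7FL) != 0) {
            dos.writeByte((int) (x | 0x80)); // low 7 bits, continuation bit set
            x >>>= 7;
        }
        dos.writeByte((int) x); // final byte, continuation bit clear
    }

    static long read(DataInput dis) throws IOException {
        byte b = dis.readByte();
        long t = b & 0x7F;
        for (int shift = 7; (b & 0x80) != 0; shift += 7) {
            b = dis.readByte();
            t |= (b & 0x7FL) << shift;
        }
        return t;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        write(300L, new DataOutputStream(bos)); // 300 encodes as 0xAC 0x02
        long decoded = read(new DataInputStream(new ByteArrayInputStream(bos.toByteArray())));
        System.out.println(new MutableLong(decoded).longValue()); // prints 300
    }
}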

From source file:com.datatorrent.lib.io.fs.AbstractFSWriter.java

/**
 * This method processes received tuples.
 * Tuples are written out to the appropriate files as determined by the getFileName method.
 * If the output port is connected, incoming tuples are also converted and emitted on the appropriate output port.
 * @param tuple An incoming tuple which needs to be processed.
 */
protected void processTuple(INPUT tuple) {
    String fileName = getFileName(tuple);

    if (Strings.isNullOrEmpty(fileName)) {
        return;
    }

    LOG.debug("file {}, hash {}, filecount {}", fileName, fileName.hashCode(), this.openPart.get(fileName));

    try {
        LOG.debug("end-offsets {}", endOffsets);

        FSDataOutputStream fsOutput = streamsCache.get(fileName);
        byte[] tupleBytes = getBytesForTuple(tuple);
        fsOutput.write(tupleBytes);
        totalBytesWritten += tupleBytes.length;
        MutableLong currentOffset = endOffsets.get(fileName);

        if (currentOffset == null) {
            currentOffset = new MutableLong(0);
            endOffsets.put(fileName, currentOffset);
        }

        currentOffset.add(tupleBytes.length);

        LOG.debug("end-offsets {}", endOffsets);
        LOG.debug("tuple: {}", tuple.toString());
        LOG.debug("current position {}, max length {}", currentOffset.longValue(), maxLength);

        if (rollingFile && currentOffset.longValue() > maxLength) {
            LOG.debug("Rotating file {} {}", fileName, currentOffset.longValue());
            rotate(fileName);
        }

        MutableLong count = counts.get(fileName);
        if (count == null) {
            count = new MutableLong(0);
            counts.put(fileName, count);
        }

        count.add(1);

        LOG.debug("count of {} =  {}", fileName, count);
    } catch (IOException ex) {
        throw new RuntimeException(ex);
    } catch (ExecutionException ex) {
        throw new RuntimeException(ex);
    }

    if (output.isConnected()) {
        output.emit(convert(tuple));
    }
}
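
The per-file bookkeeping above is the classic get-or-create counter idiom: look a MutableLong up in a map, create it on first use, then mutate it in place so the map entry never needs to be replaced. A minimal sketch (names are illustrative):

import java.util.HashMap;
import java.util.Map;
import org.apache.commons.lang.mutable.MutableLong;

public class OffsetTracker {
    private final Map<String, MutableLong> endOffsets = new HashMap<String, MutableLong>();

    void record(String fileName, int bytesWritten) {
        MutableLong offset = endOffsets.get(fileName);
        if (offset == null) {
            offset = new MutableLong(0);
            endOffsets.put(fileName, offset);
        }
        offset.add(bytesWritten); // mutate in place; no re-put needed
    }

    long offsetOf(String fileName) {
        MutableLong offset = endOffsets.get(fileName);
        return offset == null ? 0L : offset.longValue();
    }
}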