Example usage for org.apache.commons.lang.mutable MutableLong add

List of usage examples for org.apache.commons.lang.mutable MutableLong add

Introduction

On this page you can find example usages of org.apache.commons.lang.mutable MutableLong add.

Prototype

public void add(Number operand) 

Source Link

Document

Adds a value.

Usage

From source file:com.palantir.atlasdb.schema.TransactionRangeMigrator.java

/**
 * Transforms one source row into destination cells, writes them in the given
 * transaction, and accumulates an approximate byte count of what was written.
 *
 * @param rr          the source row to copy
 * @param maxBytes    byte budget; once reached, copying should pause
 * @param writeT      transaction used to write into the destination table
 * @param bytesPut    out-param: running approximate byte count across calls
 * @param lastRowName out-param: set to this row's name when the budget is hit
 * @return true to continue copying, false once {@code maxBytes} has been reached
 */
private boolean internalCopyRow(RowResult<byte[]> rr, long maxBytes, Transaction writeT,
        @Output MutableLong bytesPut, @Output Mutable<byte[]> lastRowName) {
    Map<Cell, byte[]> transformed = rowTransform.apply(rr);
    writeT.put(destTable, transformed);

    // Approximate size: value bytes plus an estimate for each cell's key.
    for (Map.Entry<Cell, byte[]> entry : transformed.entrySet()) {
        bytesPut.add(entry.getValue().length + Cells.getApproxSizeOfCell(entry.getKey()));
    }

    if (bytesPut.longValue() < maxBytes) {
        return true;
    }
    // Budget exhausted: remember where we stopped so the caller can resume here.
    lastRowName.set(rr.getRowName());
    return false;
}

From source file:com.palantir.atlasdb.schema.KvsRangeMigrator.java

/**
 * Transforms one source row into destination cells, stages them into the
 * provided write map, and accumulates an approximate byte count.
 *
 * @param rr          the source row to copy
 * @param maxBytes    byte budget; once reached, copying should pause
 * @param writeMap    out-param: staged destination cells for a later flush
 * @param bytesPut    out-param: running approximate byte count across calls
 * @param lastRowName out-param: set to this row's name when the budget is hit
 * @return true to continue copying, false once {@code maxBytes} has been reached
 */
private boolean internalCopyRow(RowResult<byte[]> rr, long maxBytes, @Output Map<Cell, byte[]> writeMap,
        @Output MutableLong bytesPut, @Output Mutable<byte[]> lastRowName) {
    Map<Cell, byte[]> transformed = rowTransform.apply(rr);
    writeMap.putAll(transformed);

    // Approximate size: value bytes plus an estimate for each cell's key.
    for (Map.Entry<Cell, byte[]> entry : transformed.entrySet()) {
        bytesPut.add(entry.getValue().length + Cells.getApproxSizeOfCell(entry.getKey()));
    }

    if (bytesPut.longValue() < maxBytes) {
        return true;
    }
    // Budget exhausted: remember where we stopped so the caller can resume here.
    lastRowName.set(rr.getRowName());
    return false;
}

From source file:com.datatorrent.lib.io.block.BlockWriter.java

/**
 * Transfers the counters in partitioning.
 *
 * @param target/*from w ww  .ja  v a  2  s  .  co  m*/
 *          target counter
 * @param source
 *          removed counter
 */
/**
 * Transfers the counters in partitioning: folds every counter from the removed
 * partition ({@code source}) into the surviving one ({@code target}).
 *
 * @param target
 *          target counter
 * @param source
 *          removed counter
 */
protected void addCounters(BasicCounters<MutableLong> target, BasicCounters<MutableLong> source) {
    for (Enum<BlockWriter.Counters> counterKey : BlockWriter.Counters.values()) {
        MutableLong merged = target.getCounter(counterKey);
        if (merged == null) {
            // Target has never recorded this key; start it at zero.
            merged = new MutableLong();
            target.setCounter(counterKey, merged);
        }
        MutableLong removed = source.getCounter(counterKey);
        if (removed != null) {
            merged.add(removed.longValue());
        }
    }
}

From source file:com.datatorrent.lib.io.block.AbstractBlockReader.java

/**
 * Transfers the counters in partitioning.
 *
 * @param target target counter// ww w . j  a v  a  2s . c om
 * @param source removed counter
 */
/**
 * Transfers the counters in partitioning: folds every counter from the removed
 * partition ({@code source}) into the surviving one ({@code target}).
 *
 * @param target target counter
 * @param source removed counter
 */
protected void addCounters(BasicCounters<MutableLong> target, BasicCounters<MutableLong> source) {
    for (Enum<ReaderCounterKeys> counterKey : ReaderCounterKeys.values()) {
        MutableLong merged = target.getCounter(counterKey);
        if (merged == null) {
            // Target has never recorded this key; start it at zero.
            merged = new MutableLong();
            target.setCounter(counterKey, merged);
        }
        MutableLong removed = source.getCounter(counterKey);
        if (removed != null) {
            merged.add(removed.longValue());
        }
    }
}

From source file:com.datatorrent.lib.io.fs.AbstractFSWriter.java

/**
 * This method processes received tuples.
 * Tuples are written out to the appropriate files as determined by the getFileName method.
 * If the output port is connected incoming tuples are also converted and emitted on the appropriate output port.
 * @param tuple An incoming tuple which needs to be processed.
 *//*from   www.j a v  a 2 s .  c  o  m*/
/**
 * This method processes received tuples.
 * Tuples are written out to the appropriate files as determined by the getFileName method.
 * If the output port is connected incoming tuples are also converted and emitted on the appropriate output port.
 * Tuples that map to a null/empty file name are silently dropped.
 * @param tuple An incoming tuple which needs to be processed.
 */
protected void processTuple(INPUT tuple) {
    String fileName = getFileName(tuple);

    if (Strings.isNullOrEmpty(fileName)) {
        return;
    }

    LOG.debug("file {}, hash {}, filecount {}", fileName, fileName.hashCode(), this.openPart.get(fileName));

    try {
        LOG.debug("end-offsets {}", endOffsets);

        FSDataOutputStream fsOutput = streamsCache.get(fileName);
        byte[] tupleBytes = getBytesForTuple(tuple);
        fsOutput.write(tupleBytes);
        totalBytesWritten += tupleBytes.length;

        // Running length of this file; counter is created lazily on first write.
        MutableLong currentOffset = endOffsets.get(fileName);
        if (currentOffset == null) {
            currentOffset = new MutableLong(0);
            endOffsets.put(fileName, currentOffset);
        }
        currentOffset.add(tupleBytes.length);

        LOG.debug("end-offsets {}", endOffsets);
        // Pass the tuple directly so toString() is only invoked when debug is enabled.
        LOG.debug("tuple: {}", tuple);
        LOG.debug("current position {}, max length {}", currentOffset.longValue(), maxLength);

        // Roll over once the file grows past its configured maximum length.
        if (rollingFile && currentOffset.longValue() > maxLength) {
            LOG.debug("Rotating file {} {}", fileName, currentOffset.longValue());
            rotate(fileName);
        }

        // Per-file tuple count, also created lazily.
        MutableLong count = counts.get(fileName);
        if (count == null) {
            count = new MutableLong(0);
            counts.put(fileName, count);
        }
        count.add(1);

        LOG.debug("count of {} =  {}", fileName, count);
    } catch (IOException | ExecutionException ex) {
        // Multi-catch (both wrapped identically) matches AbstractFileOutputOperator.processTuple.
        throw new RuntimeException(ex);
    }

    if (output.isConnected()) {
        output.emit(convert(tuple));
    }
}

From source file:com.jivesoftware.os.amza.service.AmzaService.java

/**
 * Writes a "bootstrap" (not-online) response for a partition to {@code dos}.
 * Always emits the leadership token, a -1 partition version, and four
 * single-byte markers (not online, two last-entry markers, streamedToEnd).
 *
 * @param bytes accumulator for bytes written (only the four marker bytes are
 *              counted here — NOTE(review): the two leading longs are not; verify intent)
 * @return true when this node has no state for the partition (caller thinks we
 *         are a member); false after scheduling partition creation
 */
private boolean streamBootstrap(long leadershipToken, DataOutputStream dos, MutableLong bytes,
        VersionedPartitionName versionedPartitionName, int stripe, LivelyEndState livelyEndState)
        throws Exception {

    dos.writeLong(leadershipToken);
    dos.writeLong(-1);
    dos.writeByte(0); // not online
    dos.writeByte(0); // last entry marker
    dos.writeByte(0); // last entry marker
    dos.writeByte(0); // streamedToEnd marker
    bytes.add(4);

    boolean unknownPartition = versionedPartitionName == null || livelyEndState == null;
    if (unknownPartition) {
        // someone thinks we're a member for this partition
        return true;
    }
    // BOOTSTRAP'S BOOTSTRAPS!
    partitionCreator.get(versionedPartitionName, stripe);
    return false;
}

From source file:com.datatorrent.lib.io.fs.AbstractFileOutputOperator.java

/**
 * This method processes received tuples.
 * Tuples are written out to the appropriate files as determined by the getFileName method.
 * If the output port is connected incoming tuples are also converted and emitted on the appropriate output port.
 * @param tuple An incoming tuple which needs to be processed.
 *//*from   www  .ja  v a2s  . co m*/
/**
 * This method processes received tuples.
 * Tuples are written out to the appropriate files as determined by the getFileName method.
 * If the output port is connected incoming tuples are also converted and emitted on the appropriate output port.
 * Tuples that map to a null/empty file name are silently dropped.
 * @param tuple An incoming tuple which needs to be processed.
 */
protected void processTuple(INPUT tuple) {
    String fileName = getFileName(tuple);

    if (Strings.isNullOrEmpty(fileName)) {
        return;
    }

    try {
        FilterOutputStream stream = streamsCache.get(fileName).getFilterStream();
        byte[] payload = getBytesForTuple(tuple);

        // Time only the actual write so totalWritingTime reflects I/O cost.
        long writeStart = System.currentTimeMillis();
        stream.write(payload);
        totalWritingTime += System.currentTimeMillis() - writeStart;
        totalBytesWritten += payload.length;

        // Running length of this file; counter is created lazily on first write.
        MutableLong offset = endOffsets.get(fileName);
        if (offset == null) {
            offset = new MutableLong(0);
            endOffsets.put(fileName, offset);
        }
        offset.add(payload.length);

        if (rotationWindows > 0) {
            getRotationState(fileName).notEmpty = true;
        }

        // Roll over once the file grows past its configured maximum length.
        if (rollingFile && offset.longValue() > maxLength) {
            LOG.debug("Rotating file {} {} {}", fileName, openPart.get(fileName), offset.longValue());
            rotate(fileName);
        }

        // Per-file tuple count, also created lazily.
        MutableLong tupleCount = counts.get(fileName);
        if (tupleCount == null) {
            tupleCount = new MutableLong(0);
            counts.put(fileName, tupleCount);
        }
        tupleCount.add(1);
    } catch (IOException | ExecutionException ex) {
        throw new RuntimeException(ex);
    }
}

From source file:com.jivesoftware.os.amza.service.AmzaService.java

/**
 * Streams the fully-online state of a partition to {@code dos}: a header
 * (leadership token, partition version, online flag), a highwater mark for every
 * non-root ring member, then rows supplied by {@code streamer} until either the
 * stream ends or {@code limit} rows have been written (rows sharing the last
 * transaction id are still flushed past the limit so a tx is never split).
 * {@code bytes} accumulates a count of bytes written after the header longs.
 *
 * @return false always — NOTE(review): callers apparently interpret the return
 *         differently from streamBootstrap; confirm against the call site.
 */
private boolean streamOnline(RingMember ringMember, VersionedPartitionName versionedPartitionName,
        long highestTransactionId, long leadershipToken, long limit, DataOutputStream dos, MutableLong bytes,
        HighwaterStorage highwaterStorage, PartitionStripe.RowStreamer streamer) throws Exception {

    // Record the taker's progress before replying.
    ackWaters.set(ringMember, versionedPartitionName, highestTransactionId, leadershipToken);
    dos.writeLong(leadershipToken);
    dos.writeLong(versionedPartitionName.getPartitionVersion());
    dos.writeByte(1); // fully online
    bytes.increment();
    // Send each non-root ring member's highwater mark: flag, member id, mark.
    RingTopology ring = ringStoreReader.getRing(versionedPartitionName.getPartitionName().getRingName(), -1);
    for (int i = 0; i < ring.entries.size(); i++) {
        if (ring.rootMemberIndex != i) {
            RingMemberAndHost entry = ring.entries.get(i);
            long highwatermark = highwaterStorage.get(entry.ringMember, versionedPartitionName);
            byte[] ringMemberBytes = entry.ringMember.toBytes();
            dos.writeByte(1);
            dos.writeInt(ringMemberBytes.length);
            dos.write(ringMemberBytes);
            dos.writeLong(highwatermark);
            bytes.add(1 + 4 + ringMemberBytes.length + 8);
        }
    }

    dos.writeByte(0); // last entry marker
    bytes.increment();

    // Single-element arrays give the lambda mutable state across invocations.
    long[] limited = new long[1];
    long[] lastRowTxId = { -1 };
    boolean streamedToEnd = streamer.stream((rowFP, rowTxId, rowType, row) -> {
        // Stop only when past the limit AND on a new tx id, so a tx isn't split.
        if (limited[0] >= limit && lastRowTxId[0] < rowTxId) {
            return false;
        }
        lastRowTxId[0] = rowTxId;
        // Row frame: present flag, tx id, row type, length-prefixed payload.
        dos.writeByte(1);
        dos.writeLong(rowTxId);
        dos.writeByte(rowType.toByte());
        dos.writeInt(row.length);
        dos.write(row);
        bytes.add(1 + 8 + 1 + 4 + row.length);
        limited[0]++;
        return true;
    });

    dos.writeByte(0); // last entry marker
    bytes.increment();
    dos.writeByte(streamedToEnd ? 1 : 0); // streamedToEnd marker
    bytes.increment();
    return false;
}

From source file:com.datatorrent.lib.io.fs.AbstractFileInputOperator.java

@Override
/**
 * Repartitions this operator: collects the processed/failed/pending/unfinished
 * file state and retry/failure counters from all existing partitions, then
 * redistributes that state across {@code totalCount} freshly cloned operators
 * whose directory scanners partition the input space.
 *
 * @param partitions the current partitions
 * @param context    partitioning context used to compute the new partition count
 * @return the existing partitions unchanged when the count is unchanged,
 *         otherwise the newly built partitions
 */
@Override
public Collection<Partition<AbstractFileInputOperator<T>>> definePartitions(
        Collection<Partition<AbstractFileInputOperator<T>>> partitions, PartitioningContext context) {
    lastRepartition = System.currentTimeMillis();

    int totalCount = getNewPartitionCount(partitions, context);

    LOG.debug("Computed new partitions: {}", totalCount);

    if (totalCount == partitions.size()) {
        return partitions;
    }

    AbstractFileInputOperator<T> tempOperator = partitions.iterator().next().getPartitionedInstance();

    // FIX: the failures temp previously read globalNumberOfRetries (copy-paste),
    // which silently discarded the global failure count on every repartition.
    MutableLong tempGlobalNumberOfRetries = tempOperator.globalNumberOfRetries;
    MutableLong tempGlobalNumberOfFailures = tempOperator.globalNumberOfFailures;

    /*
     * Build collective state from all instances of the operator.
     */
    Set<String> totalProcessedFiles = Sets.newHashSet();
    Set<FailedFile> currentFiles = Sets.newHashSet();
    List<DirectoryScanner> oldscanners = Lists.newLinkedList();
    List<FailedFile> totalFailedFiles = Lists.newLinkedList();
    List<String> totalPendingFiles = Lists.newLinkedList();
    Set<Integer> deletedOperators = Sets.newHashSet();

    for (Partition<AbstractFileInputOperator<T>> partition : partitions) {
        AbstractFileInputOperator<T> oper = partition.getPartitionedInstance();
        totalProcessedFiles.addAll(oper.processedFiles);
        totalFailedFiles.addAll(oper.failedFiles);
        totalPendingFiles.addAll(oper.pendingFiles);
        // FIX: previously added the unqualified field (this operator's set) on
        // every iteration instead of each partition's own unfinished files.
        currentFiles.addAll(oper.unfinishedFiles);
        tempGlobalNumberOfRetries.add(oper.localNumberOfRetries);
        tempGlobalNumberOfFailures.add(oper.localNumberOfFailures);
        if (oper.currentFile != null) {
            currentFiles.add(new FailedFile(oper.currentFile, oper.offset));
        }
        oldscanners.add(oper.getScanner());
        deletedOperators.add(oper.operatorId);
    }

    /*
     * Create partitions of scanners, scanner's partition method will do state
     * transfer for DirectoryScanner objects.
     */
    List<DirectoryScanner> scanners = scanner.partition(totalCount, oldscanners);

    Collection<Partition<AbstractFileInputOperator<T>>> newPartitions = Lists
            .newArrayListWithExpectedSize(totalCount);
    List<WindowDataManager> newManagers = windowDataManager.partition(totalCount, deletedOperators);

    KryoCloneUtils<AbstractFileInputOperator<T>> cloneUtils = KryoCloneUtils.createCloneUtils(this);
    for (int i = 0; i < scanners.size(); i++) {

        @SuppressWarnings("unchecked")
        AbstractFileInputOperator<T> oper = cloneUtils.getClone();

        DirectoryScanner scn = scanners.get(i);
        oper.setScanner(scn);

        // Do state transfer for processed files.
        oper.processedFiles.addAll(totalProcessedFiles);
        // FIX: retries/failures were previously cross-assigned (retries temp into
        // the failures field and vice versa); assign each aggregate to its own field.
        oper.globalNumberOfRetries = tempGlobalNumberOfRetries;
        oper.localNumberOfRetries.setValue(0);
        oper.globalNumberOfFailures = tempGlobalNumberOfFailures;
        oper.localNumberOfFailures.setValue(0);

        /* redistribute unfinished files properly */
        oper.unfinishedFiles.clear();
        oper.currentFile = null;
        oper.offset = 0;
        Iterator<FailedFile> unfinishedIter = currentFiles.iterator();
        while (unfinishedIter.hasNext()) {
            FailedFile unfinishedFile = unfinishedIter.next();
            if (scn.acceptFile(unfinishedFile.path)) {
                oper.unfinishedFiles.add(unfinishedFile);
                unfinishedIter.remove();
            }
        }

        /* transfer failed files */
        oper.failedFiles.clear();
        Iterator<FailedFile> iter = totalFailedFiles.iterator();
        while (iter.hasNext()) {
            FailedFile ff = iter.next();
            if (scn.acceptFile(ff.path)) {
                oper.failedFiles.add(ff);
                iter.remove();
            }
        }

        /* redistribute pending files properly */
        oper.pendingFiles.clear();
        Iterator<String> pendingFilesIterator = totalPendingFiles.iterator();
        while (pendingFilesIterator.hasNext()) {
            String pathString = pendingFilesIterator.next();
            if (scn.acceptFile(pathString)) {
                oper.pendingFiles.add(pathString);
                pendingFilesIterator.remove();
            }
        }
        oper.setWindowDataManager(newManagers.get(i));
        newPartitions.add(new DefaultPartition<AbstractFileInputOperator<T>>(oper));
    }

    LOG.info("definePartitions called returning {} partitions", newPartitions.size());
    return newPartitions;
}

From source file:edu.harvard.iq.dataverse.harvest.client.HarvesterServiceBean.java

@TransactionAttribute(TransactionAttributeType.NOT_SUPPORTED)
/**
 * Fetches a single OAI record by identifier and either deletes the matching
 * harvested dataset (when the server marks the record deleted) or imports it.
 * On any error, sets {@code recordErrorOccurred} when provided, otherwise
 * throws an {@link EJBException} — so one bad record only aborts the batch
 * when the caller has not supplied an error flag.
 *
 * @param processedSizeThisBatch accumulates the metadata file size of each
 *                               successfully imported record
 * @param deletedIdentifiers     collects identifiers whose datasets were deleted
 * @return the imported dataset's id, or null when nothing was imported
 */
@TransactionAttribute(TransactionAttributeType.NOT_SUPPORTED)
public Long processRecord(DataverseRequest dataverseRequest, Logger hdLogger, PrintWriter importCleanupLog,
        OaiHandler oaiHandler, String identifier, MutableBoolean recordErrorOccurred,
        MutableLong processedSizeThisBatch, List<String> deletedIdentifiers) {
    String errMessage = null;
    Dataset harvestedDataset = null;
    logGetRecord(hdLogger, oaiHandler, identifier);
    File tempFile = null;

    try {
        FastGetRecord record = oaiHandler.runGetRecord(identifier);
        errMessage = record.getErrorMessage();

        if (errMessage != null) {
            hdLogger.log(Level.SEVERE, "Error calling GetRecord - " + errMessage);
        } else if (record.isDeleted()) {
            hdLogger.info(
                    "Deleting harvesting dataset for " + identifier + ", per the OAI server's instructions.");

            Dataset dataset = datasetService
                    .getDatasetByHarvestInfo(oaiHandler.getHarvestingClient().getDataverse(), identifier);
            if (dataset != null) {
                hdLogger.info("Deleting dataset " + dataset.getGlobalId());
                deleteHarvestedDataset(dataset, dataverseRequest, hdLogger);
                // TODO:
                // check the status of that Delete - see if it actually succeeded
                deletedIdentifiers.add(identifier);
            } else {
                hdLogger.info("No dataset found for " + identifier + ", skipping delete. ");
            }

        } else {
            hdLogger.info("Successfully retrieved GetRecord response.");

            tempFile = record.getMetadataFile();
            harvestedDataset = importService.doImportHarvestedDataset(dataverseRequest,
                    oaiHandler.getHarvestingClient(), identifier, oaiHandler.getMetadataPrefix(),
                    record.getMetadataFile(), importCleanupLog);

            hdLogger.fine("Harvest Successful for identifier " + identifier);
            hdLogger.fine("Size of this record: " + record.getMetadataFile().length());
            processedSizeThisBatch.add(record.getMetadataFile().length());
        }
    } catch (Throwable e) {
        // Deliberately broad: a failure on one record must not kill the harvest run.
        logGetRecordException(hdLogger, oaiHandler, identifier, e);
        errMessage = "Caught exception while executing GetRecord on " + identifier;
        //logException(e, hdLogger);

    } finally {
        // temporary - let's not delete the temp metadata file if anything went wrong, for now:
        if (tempFile != null && errMessage == null) {
            try {
                tempFile.delete();
            } catch (Throwable ignored) {
                // best-effort cleanup; a leftover temp file is harmless
            }
        }
    }

    // TODO: the message below is taken from DVN3; - figure out what it means...
    //
    // If we got an Error from the OAI server or an exception happened during import, then
    // set recordErrorOccurred to true (if recordErrorOccurred is being used)
    // otherwise throw an exception (if recordErrorOccurred is not used, i.e null)

    if (errMessage != null) {
        if (recordErrorOccurred != null) {
            recordErrorOccurred.setValue(true);
        } else {
            throw new EJBException(errMessage);
        }
    }

    return harvestedDataset != null ? harvestedDataset.getId() : null;
}