Example usage for org.apache.commons.lang3.mutable MutableLong add

Introduction

On this page you can find example usages of org.apache.commons.lang3.mutable.MutableLong.add, collected from open source projects.

Prototype

public void add(final long operand)
public void add(final Number operand)

MutableLong provides two add overloads; the examples below use both.

Document

Adds a value to the value of this instance.
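For orientation before the project examples, here is a minimal, self-contained sketch of both add overloads (the wrapper class MutableLongAddExample is ours for illustration; the MutableLong calls themselves are the stock Commons Lang 3 API):

import org.apache.commons.lang3.mutable.MutableLong;

public class MutableLongAddExample {
    public static void main(String[] args) {
        MutableLong counter = new MutableLong();  // starts at 0
        counter.add(5L);                          // add(long) overload
        counter.add(Integer.valueOf(3));          // add(Number) overload
        System.out.println(counter.longValue());  // prints 8
    }
}

A pattern to watch for in the examples below: because MutableLong is an object rather than a primitive, it can serve as an accumulator that is updated from inside an anonymous inner class (ThrottleWriterTest) or filled in by a callee as an out parameter (FileCollector, FileRecord), which a plain long cannot do.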

Usage

From source file:gobblin.writer.ThrottleWriterTest.java

public void testThrottleBytes() throws IOException {
    DataWriter<Void> writer = mock(DataWriter.class);
    final MutableLong mockBytes = new MutableLong();
    when(writer.bytesWritten()).thenAnswer(new Answer<Long>() {
        @Override
        public Long answer(InvocationOnMock invocation) throws Throwable {
            mockBytes.add(1L); //Delta bytes
            return mockBytes.getValue();
        }
    });

    int parallelism = 2;
    int bps = 2;
    DataWriter<Void> throttleWriter = setup(writer, parallelism, bps, ThrottleType.Bytes);

    int count = 0;
    long duration = 10L;
    Stopwatch stopwatch = Stopwatch.createStarted();
    while (stopwatch.elapsed(TimeUnit.SECONDS) <= duration) {
        throttleWriter.write(null);
        count++;
    }

    int expected = (int) (bps * duration);
    Assert.assertTrue(count <= expected + bps * 2);
    Assert.assertTrue(count >= expected - bps * 2);
}

From source file:info.mikaelsvensson.devtools.analysis.db2eventlog.QueryStatistics.java

void addSampleData(Db2EventLogSample sample) {
    MutableLong sum = operationsTime.get(sample.getOperation());
    if (sum == null) {
        sum = new MutableLong(sample.getResponseTime());
        operationsTime.put(sample.getOperation(), sum);
    } else {
        sum.add(sample.getResponseTime());
    }
    totalSamples++;
    fetchCount += sample.getFetchCount();
    sorts += sample.getSorts();
    totalSortTime += sample.getTotalSortTime();
    sortOverflows += sample.getSortOverflows();
    rowsRead += sample.getRowsRead();
    rowsWritten += sample.getRowsWritten();

    if (lastTimeStamp == null || sample.getTimeStamp().after(lastTimeStamp)) {
        lastTimeStamp = sample.getTimeStamp();
    }
    if (firstTimeStamp == null || sample.getTimeStamp().before(firstTimeStamp)) {
        firstTimeStamp = sample.getTimeStamp();
    }
}

From source file:cc.kave.commons.pointsto.evaluation.TimeEvaluation.java

private DescriptiveStatistics measurePointerAnalysis(List<Context> contexts, PointsToAnalysisFactory ptFactory,
        MutableLong sink) {
    DescriptiveStatistics stats = new DescriptiveStatistics();

    for (Context context : contexts) {
        PointsToAnalysis ptAnalysis = ptFactory.create();
        Stopwatch watch = Stopwatch.createStarted();
        PointsToContext ptContext = ptAnalysis.compute(context);
        watch.stop();
        sink.add(ptContext.hashCode());
        long time = watch.elapsed(TimeUnit.MICROSECONDS);
        stats.addValue(time);

        analysisTimes.add(new AnalysisTimeEntry(ptFactory.getName(),
                context.getTypeShape().getTypeHierarchy().getElement(), stmtCounts.get(context), time));
    }

    return stats;
}

From source file:fr.duminy.jbackup.core.archive.FileCollector.java

private void collectFilesImpl(List<SourceWithPath> collectedFiles, Collection<ArchiveParameters.Source> sources,
        MutableLong totalSize, Cancellable cancellable) throws IOException {
    totalSize.setValue(0L);

    for (ArchiveParameters.Source source : sources) {
        Path sourcePath = source.getSource();
        if (!sourcePath.isAbsolute()) {
            throw new IllegalArgumentException(String.format("The file '%s' is relative.", sourcePath));
        }

        long size;

        if (Files.isDirectory(sourcePath)) {
            size = collect(collectedFiles, sourcePath, source.getDirFilter(), source.getFileFilter(),
                    cancellable);
        } else {
            collectedFiles.add(new SourceWithPath(sourcePath, sourcePath));
            size = Files.size(sourcePath);
        }

        totalSize.add(size);
    }
}

From source file:nl.opengeogroep.filesetsync.FileRecord.java

public static String calculateHash(File f, MutableLong hashTimeMillisAccumulator)
        throws FileNotFoundException, IOException {

    long startTime = hashTimeMillisAccumulator == null ? 0 : System.currentTimeMillis();

    // On Windows do not use memory mapped files, because the client may
    // want to overwrite a file it has just calculated the checksum of.
    // http://bugs.java.com/view_bug.do?bug_id=4724038

    // Performance difference is minimal or negative in some tests

    //String hash = SystemUtils.IS_OS_WINDOWS ? calculateHashNormalIO(f) : calculateHashMappedIO(f);
    String hash = calculateHashNormalIO(f);

    if (hashTimeMillisAccumulator != null) {
        hashTimeMillisAccumulator.add(System.currentTimeMillis() - startTime);
    }
    return hash;
}

From source file:nl.opengeogroep.filesetsync.server.FileHashCache.java

public static String getCachedFileHash(ServerFileset fileset, File f, long fileLastModified,
        MutableLong hashBytesAccumulator, MutableLong hashTimeMillisAccumulator) throws IOException {
    Cache cache = caches.get(fileset.getName());
    if (cache == null) {
        // fileset is one file or something went wrong during initialization...
        String hash = FileRecord.calculateHash(f, hashTimeMillisAccumulator);
        hashBytesAccumulator.add(f.length());
        return hash;
    }
    String canonicalPath = f.getCanonicalPath();
    Element e = cache.get(canonicalPath);
    String hash = null;
    if (e != null) {
        String[] parts = ((String) e.getObjectValue()).split(",", 2);
        long lastModified = Long.parseLong(parts[0]);
        if (lastModified == fileLastModified) {
            hash = parts[1];
        }
    }
    if (hash == null) {
        hash = FileRecord.calculateHash(f, hashTimeMillisAccumulator);
        hashBytesAccumulator.add(f.length());
        cache.put(new Element(canonicalPath, fileLastModified + "," + hash));
    }
    return hash;
}

From source file:org.apache.apex.malhar.lib.window.accumulation.Count.java

@Override
public MutableLong merge(MutableLong accumulatedValue1, MutableLong accumulatedValue2) {
    accumulatedValue1.add(accumulatedValue2);
    return accumulatedValue1;
}

From source file:org.apache.apex.malhar.lib.window.accumulation.SumLong.java

@Override
public MutableLong accumulate(MutableLong accumulatedValue, Long input) {
    accumulatedValue.add(input);
    return accumulatedValue;
}

From source file:org.apache.drill.exec.store.parquet.ParquetGroupScanStatistics.java

public void collect(List<RowGroupInfo> rowGroupInfos, ParquetTableMetadataBase parquetTableMetadata) {
    resetHolders();
    boolean first = true;
    for (RowGroupInfo rowGroup : rowGroupInfos) {
        long rowCount = rowGroup.getRowCount();
        for (ColumnMetadata column : rowGroup.getColumns()) {
            SchemaPath schemaPath = SchemaPath.getCompoundPath(column.getName());
            MutableLong emptyCount = new MutableLong();
            MutableLong previousCount = columnValueCounts.putIfAbsent(schemaPath, emptyCount);
            if (previousCount == null) {
                previousCount = emptyCount;
            }
            if (previousCount.longValue() != GroupScan.NO_COLUMN_STATS && column.isNumNullsSet()) {
                previousCount.add(rowCount - column.getNulls());
            } else {
                previousCount.setValue(GroupScan.NO_COLUMN_STATS);
            }
            boolean partitionColumn = checkForPartitionColumn(column, first, rowCount, parquetTableMetadata);
            if (partitionColumn) {
                Map<SchemaPath, Object> map = partitionValueMap.computeIfAbsent(rowGroup.getPath(),
                        key -> new HashMap<>());
                Object value = map.get(schemaPath);
                Object currentValue = column.getMaxValue();
                if (value != null) {
                    if (value != currentValue) {
                        partitionColTypeMap.remove(schemaPath);
                    }
                } else {
                    // a column with a primitive type cannot hold null values,
                    // so store null only when every row in the group is null
                    if (rowCount == column.getNulls()) {
                        map.put(schemaPath, null);
                    } else {
                        map.put(schemaPath, currentValue);
                    }
                }
            } else {
                partitionColTypeMap.remove(schemaPath);
            }
        }
        this.rowCount += rowGroup.getRowCount();
        first = false;
    }
}

From source file:org.apache.flink.streaming.connectors.kinesis.internals.KinesisDataFetcherTest.java

@Test
public void testPeriodicWatermark() {
    final MutableLong clock = new MutableLong();
    final MutableBoolean isTemporaryIdle = new MutableBoolean();
    final List<Watermark> watermarks = new ArrayList<>();

    String fakeStream1 = "fakeStream1";
    StreamShardHandle shardHandle = new StreamShardHandle(fakeStream1,
            new Shard().withShardId(KinesisShardIdGenerator.generateFromShardOrder(0)));

    TestSourceContext<String> sourceContext = new TestSourceContext<String>() {
        @Override
        public void emitWatermark(Watermark mark) {
            watermarks.add(mark);
        }

        @Override
        public void markAsTemporarilyIdle() {
            isTemporaryIdle.setTrue();
        }
    };

    HashMap<String, String> subscribedStreamsToLastSeenShardIdsUnderTest = new HashMap<>();

    final KinesisDataFetcher<String> fetcher = new TestableKinesisDataFetcher<String>(
            Collections.singletonList(fakeStream1), sourceContext, new java.util.Properties(),
            new KinesisDeserializationSchemaWrapper<>(
                    new org.apache.flink.streaming.util.serialization.SimpleStringSchema()),
            1, 1, new AtomicReference<>(), new LinkedList<>(), subscribedStreamsToLastSeenShardIdsUnderTest,
            FakeKinesisBehavioursFactory.nonReshardedStreamsBehaviour(new HashMap<>())) {

        @Override
        protected long getCurrentTimeMillis() {
            return clock.getValue();
        }
    };
    Whitebox.setInternalState(fetcher, "periodicWatermarkAssigner", watermarkAssigner);

    SequenceNumber seq = new SequenceNumber("fakeSequenceNumber");
    // register shards to subsequently emit records
    int shardIndex = fetcher.registerNewSubscribedShardState(new KinesisStreamShardState(
            KinesisDataFetcher.convertToStreamShardMetadata(shardHandle), shardHandle, seq));

    StreamRecord<String> record1 = new StreamRecord<>(String.valueOf(Long.MIN_VALUE), Long.MIN_VALUE);
    fetcher.emitRecordAndUpdateState(record1.getValue(), record1.getTimestamp(), shardIndex, seq);
    Assert.assertEquals(record1, sourceContext.getCollectedOutputs().poll());

    fetcher.emitWatermark();
    Assert.assertTrue("potential watermark equals previous watermark", watermarks.isEmpty());

    StreamRecord<String> record2 = new StreamRecord<>(String.valueOf(1), 1);
    fetcher.emitRecordAndUpdateState(record2.getValue(), record2.getTimestamp(), shardIndex, seq);
    Assert.assertEquals(record2, sourceContext.getCollectedOutputs().poll());

    fetcher.emitWatermark();
    Assert.assertFalse("watermark advanced", watermarks.isEmpty());
    Assert.assertEquals(new Watermark(record2.getTimestamp()), watermarks.remove(0));
    Assert.assertFalse("not idle", isTemporaryIdle.booleanValue());

    // test idle timeout
    long idleTimeout = 10;
    // advance the clock beyond idleTimeout
    clock.add(idleTimeout + 1);
    fetcher.emitWatermark();
    Assert.assertFalse("not idle", isTemporaryIdle.booleanValue());
    Assert.assertTrue("not idle, no new watermark", watermarks.isEmpty());

    // activate idle timeout
    Whitebox.setInternalState(fetcher, "shardIdleIntervalMillis", idleTimeout);
    fetcher.emitWatermark();
    Assert.assertTrue("idle", isTemporaryIdle.booleanValue());
    Assert.assertTrue("idle, no watermark", watermarks.isEmpty());
}