Example usage for org.apache.commons.lang3.mutable.MutableLong: MutableLong()

Introduction

On this page you can find example usage for the no-argument constructor of org.apache.commons.lang3.mutable.MutableLong, MutableLong().

Prototype

public MutableLong() 

Document

Constructs a new MutableLong with the default value of zero.
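
Because the no-argument constructor starts at zero, MutableLong is most often created this way when it serves as a counter or accumulator that is updated from inside a loop, callback, or anonymous class, as the examples below show. Here is a minimal, self-contained sketch (not taken from any of the projects below):

import org.apache.commons.lang3.mutable.MutableLong;

public class MutableLongExample {
    public static void main(String[] args) {
        // The default constructor initializes the value to zero.
        MutableLong counter = new MutableLong();
        System.out.println(counter.longValue()); // prints 0

        // Typical counter/accumulator usage: mutate the value in place,
        // for example from code that captures the variable.
        for (int i = 0; i < 3; i++) {
            counter.increment();
        }
        counter.add(10L);
        System.out.println(counter.longValue()); // prints 13
    }
}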

Usage

From source file:com.hazelcast.jet.benchmark.trademonitor.FlinkTradeMonitor.java

public static void main(String[] args) throws Exception {
    if (args.length != 13) {
        System.err.println("Usage:");
        System.err.println("  " + FlinkTradeMonitor.class.getSimpleName()
                + " <bootstrap.servers> <topic> <offset-reset> <maxLagMs> <windowSizeMs> <slideByMs> <outputPath> <checkpointInterval> <checkpointUri> <doAsyncSnapshot> <stateBackend> <kafkaParallelism> <windowParallelism>");
        System.err.println("<stateBackend> - fs | rocksDb");
        System.exit(1);
    }
    String brokerUri = args[0];
    String topic = args[1];
    String offsetReset = args[2];
    int lagMs = Integer.parseInt(args[3]);
    int windowSize = Integer.parseInt(args[4]);
    int slideBy = Integer.parseInt(args[5]);
    String outputPath = args[6];
    int checkpointInt = Integer.parseInt(args[7]);
    String checkpointUri = args[8];
    boolean doAsyncSnapshot = Boolean.parseBoolean(args[9]);
    String stateBackend = args[10];
    int kafkaParallelism = Integer.parseInt(args[11]);
    int windowParallelism = Integer.parseInt(args[12]);

    System.out.println("bootstrap.servers: " + brokerUri);
    System.out.println("topic: " + topic);
    System.out.println("offset-reset: " + offsetReset);
    System.out.println("lag: " + lagMs);
    System.out.println("windowSize: " + windowSize);
    System.out.println("slideBy: " + slideBy);
    System.out.println("outputPath: " + outputPath);
    System.out.println("checkpointInt: " + checkpointInt);
    System.out.println("checkpointUri: " + checkpointUri);
    System.out.println("doAsyncSnapshot: " + doAsyncSnapshot);
    System.out.println("stateBackend: " + stateBackend);
    System.out.println("kafkaParallelism: " + kafkaParallelism);
    System.out.println("windowParallelism: " + windowParallelism);

    // set up the execution environment
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
    if (checkpointInt > 0) {
        env.enableCheckpointing(checkpointInt);
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(checkpointInt);
    }
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 5000));
    if ("fs".equalsIgnoreCase(stateBackend)) {
        env.setStateBackend(new FsStateBackend(checkpointUri, doAsyncSnapshot));
    } else if ("rocksDb".equalsIgnoreCase(stateBackend)) {
        env.setStateBackend(new RocksDBStateBackend(checkpointUri));
    } else {
        System.err.println("Bad value for stateBackend: " + stateBackend);
        System.exit(1);
    }

    DeserializationSchema<Trade> schema = new AbstractDeserializationSchema<Trade>() {
        TradeDeserializer deserializer = new TradeDeserializer();

        @Override
        public Trade deserialize(byte[] message) throws IOException {
            return deserializer.deserialize(null, message);
        }
    };

    DataStreamSource<Trade> trades = env
            .addSource(new FlinkKafkaConsumer010<>(topic, schema, getKafkaProperties(brokerUri, offsetReset)))
            .setParallelism(kafkaParallelism);
    AssignerWithPeriodicWatermarks<Trade> timestampExtractor = new BoundedOutOfOrdernessTimestampExtractor<Trade>(
            Time.milliseconds(lagMs)) {
        @Override
        public long extractTimestamp(Trade element) {
            return element.getTime();
        }
    };

    WindowAssigner window = windowSize == slideBy ? TumblingEventTimeWindows.of(Time.milliseconds(windowSize))
            : SlidingEventTimeWindows.of(Time.milliseconds(windowSize), Time.milliseconds(slideBy));

    trades.assignTimestampsAndWatermarks(timestampExtractor).keyBy((Trade t) -> t.getTicker()).window(window)
            .aggregate(new AggregateFunction<Trade, MutableLong, Long>() {

                @Override
                public MutableLong createAccumulator() {
                    return new MutableLong();
                }

                @Override
                public MutableLong add(Trade value, MutableLong accumulator) {
                    accumulator.increment();
                    return accumulator;
                }

                @Override
                public MutableLong merge(MutableLong a, MutableLong b) {
                    a.setValue(Math.addExact(a.longValue(), b.longValue()));
                    return a;
                }

                @Override
                public Long getResult(MutableLong accumulator) {
                    return accumulator.longValue();
                }
            }, new WindowFunction<Long, Tuple5<String, String, Long, Long, Long>, String, TimeWindow>() {
                @Override
                public void apply(String key, TimeWindow window, Iterable<Long> input,
                        Collector<Tuple5<String, String, Long, Long, Long>> out) throws Exception {
                    long timeMs = System.currentTimeMillis();
                    long count = input.iterator().next();
                    long latencyMs = timeMs - window.getEnd() - lagMs;
                    out.collect(
                            new Tuple5<>(Instant.ofEpochMilli(window.getEnd()).atZone(ZoneId.systemDefault())
                                    .toLocalTime().toString(), key, count, timeMs, latencyMs));
                }
            }).setParallelism(windowParallelism).writeAsCsv(outputPath, WriteMode.OVERWRITE);

    env.execute("Trade Monitor Example");
}

From source file:com.addthis.hydra.data.tree.concurrent.ConcurrentTreeDeletionTask.java

@Override
public void run() {
    try {
        Map.Entry<DBKey, ConcurrentTreeNode> entry;
        MutableLong totalCount = new MutableLong();
        MutableLong nodeCount = new MutableLong();
        do {
            entry = dataTreeNodes.nextTrashNode();
            if (entry != null) {
                ConcurrentTreeNode node = entry.getValue();
                ConcurrentTreeNode prev = dataTreeNodes.source.remove(entry.getKey());
                if (prev != null) {
                    dataTreeNodes.deleteSubTree(node, totalCount, nodeCount, terminationCondition,
                            deletionLogger);
                    nodeCount.increment();
                    dataTreeNodes.treeTrashNode.incrementCounter();
                }
            }
        } while ((entry != null) && !terminationCondition.getAsBoolean());
    } catch (Exception ex) {
        ConcurrentTree.log.warn("{}", "Uncaught exception in concurrent tree background deletion thread", ex);
    }
}

From source file:fr.duminy.jbackup.core.archive.FileCollector.java

public void collectFiles(List<SourceWithPath> collectedFiles, ArchiveParameters archiveParameters,
        TaskListener listener, Cancellable cancellable) throws ArchiveException {
    MutableLong totalSize = new MutableLong();
    try {
        collectFilesImpl(collectedFiles, archiveParameters.getSources(), totalSize, cancellable);
    } catch (IOException ioe) {
        throw new ArchiveException(ioe);
    }
    if (listener != null) {
        listener.totalSizeComputed(totalSize.longValue());
    }
}

From source file:fr.duminy.jbackup.core.archive.Compressor.java

public void compress(ArchiveParameters archiveParameters, List<SourceWithPath> files,
        final TaskListener listener, Cancellable cancellable) throws ArchiveException {
    final String name = archiveParameters.getArchive().toString();
    final MutableLong processedSize = new MutableLong();

    try (OutputStream fos = Files.newOutputStream(archiveParameters.getArchive());
            ArchiveOutputStream output = factory.create(fos)) {
        LOG.info("Backup '{}': creating archive {}", name, archiveParameters.getArchive());
        for (final SourceWithPath file : files) {
            if ((cancellable != null) && cancellable.isCancelled()) {
                break;
            }

            LOG.info("Backup '{}': compressing file {}", name, file.getPath().toAbsolutePath());
            try (InputStream input = createCountingInputStream(listener, processedSize,
                    Files.newInputStream(file.getPath()))) {
                final String path;
                if (archiveParameters.isRelativeEntries()) {
                    Path source = file.getSource();
                    if (Files.isDirectory(source)) {
                        if (source.getParent() == null) {
                            path = source.relativize(file.getPath()).toString();
                        } else {
                            path = source.getParent().relativize(file.getPath()).toString();
                        }
                    } else {
                        path = file.getPath().getFileName().toString();
                    }
                } else {
                    path = file.getPath().toString();
                }
                LOG.info("Backup '{}': adding entry {}", new Object[] { name, path });
                output.addEntry(path, input);
            }
        }
        LOG.info("Backup '{}': archive {} created ({})", new Object[] { name, archiveParameters.getArchive(),
                FileUtils.byteCountToDisplaySize(Files.size(archiveParameters.getArchive())) });
    } catch (IOException e) {
        throw new ArchiveException(e);
    } catch (Exception e) {
        throw new ArchiveException(e);
    }
}

From source file:fr.duminy.jbackup.core.archive.Decompressor.java

public void decompress(Path archive, Path targetDirectory, TaskListener listener, Cancellable cancellable)
        throws ArchiveException {
    if (listener != null) {
        try {
            listener.totalSizeComputed(Files.size(archive));
        } catch (IOException ioe) {
            throw new ArchiveException(ioe);
        }
    }

    targetDirectory = (targetDirectory == null) ? Paths.get(".") : targetDirectory;
    if (!Files.exists(targetDirectory)) {
        throw new IllegalArgumentException(
                String.format("The target directory '%s' doesn't exist.", targetDirectory));
    }

    MutableLong processedSize = new MutableLong();

    try (InputStream archiveStream = Files.newInputStream(archive);
            ArchiveInputStream input = factory.create(archiveStream)) {
        ArchiveInputStream.Entry entry = getNextEntryIfNotCancelled(input, cancellable);
        while (entry != null) {
            InputStream entryStream = createCountingInputStream(listener, processedSize, entry.getInput());
            try {
                Path file = targetDirectory.resolve(entry.getName());
                Files.createDirectories(file.getParent());
                Files.copy(entryStream, file);
            } finally {
                entry.close();
            }

            entry = getNextEntryIfNotCancelled(input, cancellable);
        }
    } catch (IOException e) {
        throw new ArchiveException(e);
    } catch (Exception e) {
        throw new ArchiveException(e);
    }
}

From source file:gobblin.writer.ThrottleWriterTest.java

public void testThrottleBytes() throws IOException {
    DataWriter<Void> writer = mock(DataWriter.class);
    final MutableLong mockBytes = new MutableLong();
    when(writer.bytesWritten()).thenAnswer(new Answer<Long>() {
        @Override
        public Long answer(InvocationOnMock invocation) throws Throwable {
            mockBytes.add(1L); //Delta bytes
            return mockBytes.getValue();
        }
    });

    int parallelism = 2;
    int bps = 2;
    DataWriter<Void> throttleWriter = setup(writer, parallelism, bps, ThrottleType.Bytes);

    int count = 0;
    long duration = 10L;
    Stopwatch stopwatch = Stopwatch.createStarted();
    while (stopwatch.elapsed(TimeUnit.SECONDS) <= duration) {
        throttleWriter.write(null);
        count++;
    }

    int expected = (int) (bps * duration);
    Assert.assertTrue(count <= expected + bps * 2);
    Assert.assertTrue(count >= expected - bps * 2);
}

From source file:cc.kave.commons.pointsto.evaluation.TimeEvaluation.java

public void run(List<Context> contexts, List<PointsToAnalysisFactory> ptFactories) throws IOException {
    initializeStmtCountTimes(ptFactories, contexts);
    log("Using %d contexts for time measurement\n", contexts.size());

    Map<Pair<String, ITypeName>, AnalysisTimeEntry> timesRegistry = new LinkedHashMap<>(
            contexts.size() * ptFactories.size());
    for (int i = 0; i < WARM_UP_RUNS + MEASUREMENT_RUNS; ++i) {
        if (i == WARM_UP_RUNS) {
            timesRegistry.clear();
        }

        for (PointsToAnalysisFactory ptFactory : ptFactories) {
            analysisStatistics.put(ptFactory.getName(),
                    measurePointerAnalysis(contexts, ptFactory, new MutableLong()));
        }
        updateTimesRegistry(timesRegistry);
    }

    analysisTimes = timesRegistry.values().stream().map(entry -> new AnalysisTimeEntry(entry.analysisName,
            entry.contextType, entry.numStmts, entry.time / MEASUREMENT_RUNS)).collect(Collectors.toList());
}

From source file:com.norconex.committer.core.AbstractFileQueueCommitter.java

@Override
protected long getInitialQueueDocCount() {
    final MutableLong fileCount = new MutableLong();

    // --- Additions and Deletions ---
    FileUtil.visitAllFiles(new File(queue.getDirectory()), new IFileVisitor() {
        @Override
        public void visit(File file) {
            fileCount.increment();
        }
    }, REF_FILTER);
    return fileCount.longValue();
}

From source file:de.biomedical_imaging.ij.steger.Convol.java

public void convolve_gauss(float[] image, float[] k, int width, int height, double sigma, int deriv_type) {
    double[] hr = null, hc = null;
    double[] maskr, maskc;
    MutableLong nr = new MutableLong(), nc = new MutableLong();
    float[] h;

    h = new float[(width * height)];

    switch (deriv_type) {
    case LinesUtil.DERIV_R:
        hr = compute_gauss_mask_1(nr, sigma);
        hc = compute_gauss_mask_0(nc, sigma);
        break;
    case LinesUtil.DERIV_C:
        hr = compute_gauss_mask_0(nr, sigma);
        hc = compute_gauss_mask_1(nc, sigma);
        break;
    case LinesUtil.DERIV_RR:
        hr = compute_gauss_mask_2(nr, sigma);
        hc = compute_gauss_mask_0(nc, sigma);
        break;
    case LinesUtil.DERIV_RC:
        hr = compute_gauss_mask_1(nr, sigma);
        hc = compute_gauss_mask_1(nc, sigma);
        break;
    case LinesUtil.DERIV_CC:
        hr = compute_gauss_mask_0(nr, sigma);
        hc = compute_gauss_mask_2(nc, sigma);
        break;
    }

    maskr = hr; // + nr; handled in the actual functions instead, e.g. convolve_rows_gauss always adds n when accessing the mask
    maskc = hc; // + nc;

    convolve_rows_gauss(image, maskr, nr.intValue(), h, width, height);
    convolve_cols_gauss(h, maskc, nc.intValue(), k, width, height);

}

From source file:com.addthis.hydra.data.tree.nonconcurrent.NonConcurrentTree.java

@Override
public void foregroundNodeDeletion(BooleanSupplier terminationCondition) {
    IPageDB.Range<DBKey, NonConcurrentTreeNode> range = fetchNodeRange(treeTrashNode.nodeDB());
    Map.Entry<DBKey, NonConcurrentTreeNode> entry;
    MutableLong totalCount = new MutableLong();
    MutableLong nodeCount = new MutableLong();
    try {
        while (range.hasNext() && !terminationCondition.getAsBoolean()) {
            entry = range.next();
            if (entry != null) {
                NonConcurrentTreeNode node = entry.getValue();
                NonConcurrentTreeNode prev = source.remove(entry.getKey());
                if (prev != null) {
                    deleteSubTree(node, totalCount, nodeCount, terminationCondition);
                    nodeCount.increment();
                    treeTrashNode.incrementCounter();
                }
            }
        }
    } finally {
        range.close();
    }
}