Example usage for org.apache.commons.lang3.mutable MutableLong increment

Introduction

On this page you can find example usages for org.apache.commons.lang3.mutable MutableLong increment.

Prototype

public void increment() 

Document

Increments the value.
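
Before the project examples below, here is a minimal, self-contained sketch of increment() in isolation (the class and variable names are illustrative only):

import org.apache.commons.lang3.mutable.MutableLong;

public class MutableLongIncrementDemo {
    public static void main(String[] args) {
        // MutableLong wraps a long that can be updated in place, which is
        // handy inside lambdas and anonymous classes where captured local
        // variables must be effectively final.
        MutableLong counter = new MutableLong(); // initial value is 0
        for (int i = 0; i < 5; i++) {
            counter.increment(); // adds one to the wrapped value
        }
        System.out.println(counter.longValue()); // prints 5
    }
}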

Usage

From source file:com.hazelcast.jet.benchmark.trademonitor.FlinkTradeMonitor.java

public static void main(String[] args) throws Exception {
    if (args.length != 13) {
        System.err.println("Usage:");
        System.err.println("  " + FlinkTradeMonitor.class.getSimpleName()
                + " <bootstrap.servers> <topic> <offset-reset> <maxLagMs> <windowSizeMs> <slideByMs> <outputPath> <checkpointInterval> <checkpointUri> <doAsyncSnapshot> <stateBackend> <kafkaParallelism> <windowParallelism>");
        System.err.println("<stateBackend> - fs | rocksDb");
        System.exit(1);
    }
    String brokerUri = args[0];
    String topic = args[1];
    String offsetReset = args[2];
    int lagMs = Integer.parseInt(args[3]);
    int windowSize = Integer.parseInt(args[4]);
    int slideBy = Integer.parseInt(args[5]);
    String outputPath = args[6];
    int checkpointInt = Integer.parseInt(args[7]);
    String checkpointUri = args[8];
    boolean doAsyncSnapshot = Boolean.parseBoolean(args[9]);
    String stateBackend = args[10];
    int kafkaParallelism = Integer.parseInt(args[11]);
    int windowParallelism = Integer.parseInt(args[12]);

    System.out.println("bootstrap.servers: " + brokerUri);
    System.out.println("topic: " + topic);
    System.out.println("offset-reset: " + offsetReset);
    System.out.println("lag: " + lagMs);
    System.out.println("windowSize: " + windowSize);
    System.out.println("slideBy: " + slideBy);
    System.out.println("outputPath: " + outputPath);
    System.out.println("checkpointInt: " + checkpointInt);
    System.out.println("checkpointUri: " + checkpointUri);
    System.out.println("doAsyncSnapshot: " + doAsyncSnapshot);
    System.out.println("stateBackend: " + stateBackend);
    System.out.println("kafkaParallelism: " + kafkaParallelism);
    System.out.println("windowParallelism: " + windowParallelism);

    // set up the execution environment
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
    if (checkpointInt > 0) {
        env.enableCheckpointing(checkpointInt);
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(checkpointInt);
    }
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 5000));
    if ("fs".equalsIgnoreCase(stateBackend)) {
        env.setStateBackend(new FsStateBackend(checkpointUri, doAsyncSnapshot));
    } else if ("rocksDb".equalsIgnoreCase(stateBackend)) {
        env.setStateBackend(new RocksDBStateBackend(checkpointUri));
    } else {
        System.err.println("Bad value for stateBackend: " + stateBackend);
        System.exit(1);
    }

    DeserializationSchema<Trade> schema = new AbstractDeserializationSchema<Trade>() {
        TradeDeserializer deserializer = new TradeDeserializer();

        @Override
        public Trade deserialize(byte[] message) throws IOException {
            return deserializer.deserialize(null, message);
        }
    };

    DataStreamSource<Trade> trades = env
            .addSource(new FlinkKafkaConsumer010<>(topic, schema, getKafkaProperties(brokerUri, offsetReset)))
            .setParallelism(kafkaParallelism);
    AssignerWithPeriodicWatermarks<Trade> timestampExtractor = new BoundedOutOfOrdernessTimestampExtractor<Trade>(
            Time.milliseconds(lagMs)) {
        @Override
        public long extractTimestamp(Trade element) {
            return element.getTime();
        }
    };

    WindowAssigner<Object, TimeWindow> window = windowSize == slideBy
            ? TumblingEventTimeWindows.of(Time.milliseconds(windowSize))
            : SlidingEventTimeWindows.of(Time.milliseconds(windowSize), Time.milliseconds(slideBy));

    trades.assignTimestampsAndWatermarks(timestampExtractor).keyBy((Trade t) -> t.getTicker()).window(window)
            .aggregate(new AggregateFunction<Trade, MutableLong, Long>() {

                @Override
                public MutableLong createAccumulator() {
                    return new MutableLong();
                }

                @Override
                public MutableLong add(Trade value, MutableLong accumulator) {
                    accumulator.increment();
                    return accumulator;
                }

                @Override
                public MutableLong merge(MutableLong a, MutableLong b) {
                    a.setValue(Math.addExact(a.longValue(), b.longValue()));
                    return a;
                }

                @Override
                public Long getResult(MutableLong accumulator) {
                    return accumulator.longValue();
                }
            }, new WindowFunction<Long, Tuple5<String, String, Long, Long, Long>, String, TimeWindow>() {
                @Override
                public void apply(String key, TimeWindow window, Iterable<Long> input,
                        Collector<Tuple5<String, String, Long, Long, Long>> out) throws Exception {
                    long timeMs = System.currentTimeMillis();
                    long count = input.iterator().next();
                    long latencyMs = timeMs - window.getEnd() - lagMs;
                    out.collect(
                            new Tuple5<>(Instant.ofEpochMilli(window.getEnd()).atZone(ZoneId.systemDefault())
                                    .toLocalTime().toString(), key, count, timeMs, latencyMs));
                }
            }).setParallelism(windowParallelism).writeAsCsv(outputPath, WriteMode.OVERWRITE);

    env.execute("Trade Monitor Example");
}

From source file:enumj.Reversible.java

/**
 * Applies a {@code Enumerator.map(BiFunction)} operation
 * upon {@code source}, in reverse if necessary.
 *
 * @param <E> type of unmapped enumerated elements.
 * @param <R> type of mapped enumerated elements.
 * @param source {@link Enumerator} to apply the operation on.
 * @param mapper {@link BiFunction} to apply.
 * @param reversed true if the operation is applied in reverse,
 * false otherwise.
 * @return mapped {@code Enumerator}.
 */
static <E, R> Enumerator<R> map(Enumerator<E> source, BiFunction<? super E, ? super Long, ? extends R> mapper,
        boolean reversed) {
    Checks.ensureNotNull(mapper, Messages.NULL_ENUMERATOR_MAPPER);
    final MutableLong index = new MutableLong(0);
    final Function<E, R> fun = e -> {
        final R result = mapper.apply(e, index.toLong());
        index.increment();
        return result;
    };
    if (reversed) {
        final PipeEnumerator pipe = (PipeEnumerator) source;
        return pipe.reversedMap(fun);
    }
    return source.map(fun);
}

From source file:lineage2.commons.threading.SteppingRunnableQueueManager.java

/**
 * Builds a summary of the queued tasks: a per-class count of pending
 * runnables plus totals of scheduled and done/cancelled tasks.
 * @return CharSequence the formatted statistics
 */
public CharSequence getStats() {
    StringBuilder list = new StringBuilder();
    Map<String, MutableLong> stats = new TreeMap<>();
    int total = 0;
    int done = 0;
    for (SteppingScheduledFuture<?> sr : queue) {
        if (sr.isDone()) {
            done++;
            continue;
        }
        total++;
        MutableLong count = stats.get(sr.r.getClass().getName());
        if (count == null) {
            stats.put(sr.r.getClass().getName(), count = new MutableLong(1L));
        } else {
            count.increment();
        }
    }
    for (Map.Entry<String, MutableLong> e : stats.entrySet()) {
        list.append('\t').append(e.getKey()).append(" : ").append(e.getValue().longValue()).append('\n');
    }
    list.append("Scheduled: ....... ").append(total).append('\n');
    list.append("Done/Cancelled: .. ").append(done).append('\n');
    return list;
}
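
The null-check-and-put pattern in getStats() above is the classic pre-Java-8 idiom for counting with MutableLong map values. For comparison, a minimal sketch of the same tally using Map.computeIfAbsent (names here are illustrative, not from the project above):

import java.util.Map;
import java.util.TreeMap;

import org.apache.commons.lang3.mutable.MutableLong;

public class CountByKeyDemo {
    public static void main(String[] args) {
        Map<String, MutableLong> stats = new TreeMap<>();
        for (String name : new String[] { "a", "b", "a" }) {
            // On the first occurrence of a key, install a zero-valued
            // counter, then increment it; this replaces the explicit
            // null check used in getStats() above.
            stats.computeIfAbsent(name, k -> new MutableLong()).increment();
        }
        System.out.println(stats); // prints {a=2, b=1}
    }
}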

From source file:com.addthis.hydra.data.tree.concurrent.ConcurrentTreeDeletionTask.java

@Override
public void run() {
    try {
        Map.Entry<DBKey, ConcurrentTreeNode> entry;
        MutableLong totalCount = new MutableLong();
        MutableLong nodeCount = new MutableLong();
        do {
            entry = dataTreeNodes.nextTrashNode();
            if (entry != null) {
                ConcurrentTreeNode node = entry.getValue();
                ConcurrentTreeNode prev = dataTreeNodes.source.remove(entry.getKey());
                if (prev != null) {
                    dataTreeNodes.deleteSubTree(node, totalCount, nodeCount, terminationCondition,
                            deletionLogger);
                    nodeCount.increment();
                    dataTreeNodes.treeTrashNode.incrementCounter();
                }
            }
        } while ((entry != null) && !terminationCondition.getAsBoolean());
    } catch (Exception ex) {
        ConcurrentTree.log.warn("Uncaught exception in concurrent tree background deletion thread", ex);
    }
}

From source file:com.norconex.committer.core.AbstractFileQueueCommitter.java

@Override
protected long getInitialQueueDocCount() {
    final MutableLong fileCount = new MutableLong();

    // --- Additions and Deletions ---
    FileUtil.visitAllFiles(new File(queue.getDirectory()), new IFileVisitor() {
        @Override
        public void visit(File file) {
            fileCount.increment();
        }
    }, REF_FILTER);
    return fileCount.longValue();
}

From source file:com.addthis.hydra.data.tree.nonconcurrent.NonConcurrentTree.java

/**
 * Recursively delete all the children of the input node,
 * tallying deletions in the supplied counters and logging
 * progress every {@code deletionLogInterval} deleted nodes.
 *
 * @param rootNode root of the subtree to delete
 * @param totalCount running tally of deleted nodes
 * @param nodeCount running tally of deleted trash nodes
 * @param terminationCondition supplier that aborts the deletion early when it returns true
 */
void deleteSubTree(NonConcurrentTreeNode rootNode, MutableLong totalCount, MutableLong nodeCount,
        BooleanSupplier terminationCondition) {
    long nodeDB = rootNode.nodeDB();
    IPageDB.Range<DBKey, NonConcurrentTreeNode> range = fetchNodeRange(nodeDB);
    DBKey endRange;
    boolean reschedule;
    try {
        while (range.hasNext() && !terminationCondition.getAsBoolean()) {
            totalCount.increment();
            if ((totalCount.longValue() % deletionLogInterval) == 0) {
                log.info("Deleted {} total nodes in {} trash nodes from the trash.", totalCount.longValue(),
                        nodeCount.longValue());
            }
            Map.Entry<DBKey, NonConcurrentTreeNode> entry = range.next();
            NonConcurrentTreeNode next = entry.getValue();
            if (next.hasNodes() && !next.isAlias()) {
                deleteSubTree(next, totalCount, nodeCount, terminationCondition);
            }
        }
        if (range.hasNext()) {
            endRange = range.next().getKey();
            reschedule = true;
        } else {
            endRange = new DBKey(nodeDB + 1);
            reschedule = false;
        }
    } finally {
        range.close();
    }
    source.remove(new DBKey(nodeDB), endRange);
    if (reschedule) {
        markForChildDeletion(rootNode);
    }
}

From source file:com.addthis.hydra.data.tree.nonconcurrent.NonConcurrentTree.java

@Override
public void foregroundNodeDeletion(BooleanSupplier terminationCondition) {
    IPageDB.Range<DBKey, NonConcurrentTreeNode> range = fetchNodeRange(treeTrashNode.nodeDB());
    Map.Entry<DBKey, NonConcurrentTreeNode> entry;
    MutableLong totalCount = new MutableLong();
    MutableLong nodeCount = new MutableLong();
    try {
        while (range.hasNext() && !terminationCondition.getAsBoolean()) {
            entry = range.next();
            if (entry != null) {
                NonConcurrentTreeNode node = entry.getValue();
                NonConcurrentTreeNode prev = source.remove(entry.getKey());
                if (prev != null) {
                    deleteSubTree(node, totalCount, nodeCount, terminationCondition);
                    nodeCount.increment();
                    treeTrashNode.incrementCounter();
                }
            }
        }
    } finally {
        range.close();
    }
}

From source file:com.addthis.hydra.data.tree.concurrent.ConcurrentTree.java

/**
 * Recursively delete all the children of the input node,
 * tallying deletions in the supplied counters and logging
 * progress every {@code deletionLogInterval} deleted nodes.
 *
 * @param rootNode root of the subtree to delete
 * @param totalCount running tally of deleted nodes
 * @param nodeCount running tally of deleted trash nodes
 * @param terminationCondition supplier that aborts the deletion early when it returns true
 * @param deletionLogger logger for progress messages
 */
void deleteSubTree(ConcurrentTreeNode rootNode, MutableLong totalCount, MutableLong nodeCount,
        BooleanSupplier terminationCondition, Logger deletionLogger) {
    long nodeDB = rootNode.nodeDB();
    IPageDB.Range<DBKey, ConcurrentTreeNode> range = fetchNodeRange(nodeDB);
    DBKey endRange;
    boolean reschedule;
    try {
        while (range.hasNext() && !terminationCondition.getAsBoolean()) {
            totalCount.increment();
            if ((totalCount.longValue() % deletionLogInterval) == 0) {
                deletionLogger.info("Deleted {} total nodes in {} trash nodes from the trash.",
                        totalCount.longValue(), nodeCount.longValue());
            }
            Map.Entry<DBKey, ConcurrentTreeNode> entry = range.next();
            ConcurrentTreeNode next = entry.getValue();

            if (next.hasNodes() && !next.isAlias()) {
                deleteSubTree(next, totalCount, nodeCount, terminationCondition, deletionLogger);
            }
            String name = entry.getKey().rawKey().toString();
            CacheKey key = new CacheKey(nodeDB, name);
            ConcurrentTreeNode cacheNode = cache.remove(key);
            /* Mark the node as deleted so that it will not be
             * pushed to disk when removed from the eviction queue.
             */
            if (cacheNode != null) {
                cacheNode.markDeleted();
            }
        }
        if (range.hasNext()) {
            endRange = range.next().getKey();
            reschedule = true;
        } else {
            endRange = new DBKey(nodeDB + 1);
            reschedule = false;
        }
    } finally {
        range.close();
    }
    source.remove(new DBKey(nodeDB), endRange);
    if (reschedule) {
        markForChildDeletion(rootNode);
    }
}

From source file:org.apache.apex.malhar.lib.window.accumulation.Count.java

@Override
public MutableLong accumulate(MutableLong accumulatedValue, Object input) {
    accumulatedValue.increment();
    return accumulatedValue;
}

From source file:org.apache.hadoop.hbase.coprocessor.example.WriteHeavyIncrementObserver.java

private long getUniqueTimestamp(byte[] row) {
    // Pick a striped counter slot for this row.
    int slot = Bytes.hashCode(row) & mask;
    MutableLong lastTimestamp = lastTimestamps[slot];
    long now = System.currentTimeMillis();
    // MutableLong is not thread-safe, so updates are synchronized per slot.
    synchronized (lastTimestamp) {
        // The stored value packs a millisecond timestamp in the high bits
        // and a 10-bit sequence number in the low bits.
        long pt = lastTimestamp.longValue() >> 10;
        if (now > pt) {
            // The clock moved forward: restart at the new time, sequence 0.
            lastTimestamp.setValue(now << 10);
        } else {
            // Same millisecond (or clock skew): bump the sequence number so
            // the returned timestamp is still unique within this slot.
            lastTimestamp.increment();
        }
        return lastTimestamp.longValue();
    }
}