Example usage for java.util.concurrent.atomic AtomicLong AtomicLong

List of usage examples for java.util.concurrent.atomic AtomicLong AtomicLong

Introduction

On this page you can find example usage for the java.util.concurrent.atomic.AtomicLong(long initialValue) constructor.

Prototype

public AtomicLong(long initialValue) 

Document

Creates a new AtomicLong with the given initial value.
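
Before the project examples below, here is a minimal, self-contained sketch (not taken from any of the listed projects; the class name and values are illustrative only) showing the AtomicLong(long initialValue) constructor seeding a thread-safe counter:

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongConstructorExample {
    public static void main(String[] args) throws InterruptedException {
        // Start the counter at 100 using the AtomicLong(long initialValue) constructor.
        final AtomicLong counter = new AtomicLong(100L);

        // Increment from two threads; AtomicLong makes each update atomic.
        Runnable task = () -> {
            for (int i = 0; i < 1000; i++) {
                counter.incrementAndGet();
            }
        };
        Thread t1 = new Thread(task);
        Thread t2 = new Thread(task);
        t1.start();
        t2.start();
        t1.join();
        t2.join();

        System.out.println(counter.get()); // prints 2100: 100 initial + 2 * 1000 increments
    }
}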

Usage

From source file:org.apache.usergrid.services.notifications.QueueListener.java

private void execute(int threadNumber) {

    if (Thread.currentThread().isDaemon()) {
        Thread.currentThread().setDaemon(true);
    }

    Thread.currentThread().setName(getClass().getSimpleName() + "_Push-"
            + RandomStringUtils.randomAlphanumeric(4) + "-" + threadNumber);

    final AtomicInteger consecutiveExceptions = new AtomicInteger();

    if (logger.isTraceEnabled()) {
        logger.trace("QueueListener: Starting execute process.");
    }

    Meter meter = metricsService.getMeter(QueueListener.class, "execute.commit");
    com.codahale.metrics.Timer timer = metricsService.getTimer(QueueListener.class, "execute.dequeue");

    if (logger.isTraceEnabled()) {
        logger.trace("getting from queue {} ", queueName);
    }

    LegacyQueueScope queueScope = new LegacyQueueScopeImpl(queueName,
            LegacyQueueScope.RegionImplementation.LOCAL);
    LegacyQueueManager legacyQueueManager = queueManagerFactory.getQueueManager(queueScope);

    // run until there are no more active jobs
    final AtomicLong runCount = new AtomicLong(0);

    while (true) {

        if (sleepBetweenRuns > 0) {
            if (logger.isTraceEnabled()) {
                logger.trace("sleep between rounds...sleep...{}", sleepBetweenRuns);
            }
            try {
                Thread.sleep(sleepBetweenRuns);
            } catch (InterruptedException ignored) {
            }
        }

        Timer.Context timerContext = timer.time();
        rx.Observable.from(legacyQueueManager.getMessages(MAX_TAKE, ApplicationQueueMessage.class))
                .buffer(MAX_TAKE).doOnNext(messages -> {

                    try {
                        if (logger.isTraceEnabled()) {
                            logger.trace("retrieved batch of {} messages from queue {}", messages.size(),
                                    queueName);
                        }

                        if (messages.size() > 0) {
                            HashMap<UUID, List<LegacyQueueMessage>> messageMap = new HashMap<>(messages.size());

                            //group messages into hash map by app id
                            for (LegacyQueueMessage message : messages) {
                                //TODO: stop copying around this area as it gets notification specific.
                                ApplicationQueueMessage queueMessage = (ApplicationQueueMessage) message
                                        .getBody();
                                UUID applicationId = queueMessage.getApplicationId();

                                // Groups queue messages by application Id,
                                // (they are all probably going to the same place)
                                if (!messageMap.containsKey(applicationId)) {
                                    //For each app id it sends the set.
                                    List<LegacyQueueMessage> lqms = new ArrayList<LegacyQueueMessage>();
                                    lqms.add(message);
                                    messageMap.put(applicationId, lqms);
                                } else {
                                    messageMap.get(applicationId).add(message);
                                }
                            }

                            long now = System.currentTimeMillis();
                            Observable merge = null;

                            //send each set of app ids together
                            for (Map.Entry<UUID, List<LegacyQueueMessage>> entry : messageMap.entrySet()) {
                                UUID applicationId = entry.getKey();

                                ApplicationQueueManager manager = applicationQueueManagerCache
                                        .getApplicationQueueManager(emf.getEntityManager(applicationId),
                                                legacyQueueManager,
                                                new JobScheduler(smf.getServiceManager(applicationId),
                                                        emf.getEntityManager(applicationId)),
                                                metricsService, properties);

                                if (logger.isTraceEnabled()) {
                                    logger.trace("send batch for app {} of {} messages", entry.getKey(),
                                            entry.getValue().size());
                                }
                                Observable current = manager.sendBatchToProviders(entry.getValue(), queueName);

                                if (merge == null)
                                    merge = current;
                                else {
                                    merge = Observable.merge(merge, current);
                                }
                            }

                            if (merge != null) {
                                merge.toBlocking().lastOrDefault(null);
                            }
                            legacyQueueManager.commitMessages(messages);

                            meter.mark(messages.size());
                            if (logger.isTraceEnabled()) {
                                logger.trace("sent batch {} messages duration {} ms", messages.size(),
                                        System.currentTimeMillis() - now);
                            }

                            if (runCount.incrementAndGet() % consecutiveCallsToRemoveDevices == 0) {
                                for (ApplicationQueueManager aqm : applicationQueueManagerCache.asMap()
                                        .values()) {
                                    try {
                                        aqm.asyncCheckForInactiveDevices();
                                    } catch (Exception inactiveDeviceException) {
                                        logger.error("Inactive Device Get failed", inactiveDeviceException);
                                    }
                                }
                                //clear everything
                                runCount.set(0);
                            }
                        } else {
                            if (logger.isTraceEnabled()) {
                                logger.trace("no messages...sleep...{}", sleepWhenNoneFound);
                            }
                            try {
                                Thread.sleep(sleepWhenNoneFound);
                            } catch (InterruptedException e) {
                                // noop
                            }
                        }
                        timerContext.stop();
                        //send to the providers
                        consecutiveExceptions.set(0);
                    } catch (Exception ex) {
                        logger.error("failed to dequeue", ex);

                        // clear the queue name cache b/c tests might have wiped the keyspace
                        legacyQueueManager.clearQueueNameCache();
                        try {
                            long sleeptime = sleepWhenNoneFound * consecutiveExceptions.incrementAndGet();
                            long maxSleep = 15000;
                            sleeptime = sleeptime > maxSleep ? maxSleep : sleeptime;
                            logger.info("sleeping due to failures {} ms", sleeptime);
                            Thread.sleep(sleeptime);
                        } catch (InterruptedException ie) {
                            if (logger.isTraceEnabled()) {
                                logger.trace("sleep interrupted");
                            }
                        }
                    }
                }).toBlocking().lastOrDefault(null);

    }
}

From source file:com.amaze.filemanager.utils.files.FileUtils.java

/**
 * Helper method to get size of an otg folder
 */
public static long otgFolderSize(String path, final Context context) {
    final AtomicLong totalBytes = new AtomicLong(0);
    OTGUtil.getDocumentFiles(path, context, file -> totalBytes.addAndGet(getBaseFileSize(file, context)));
    return totalBytes.longValue();
}

From source file:io.druid.client.cache.MemcachedCache.java

public static MemcachedCache create(final MemcachedCacheConfig config) {
    final ConcurrentMap<String, AtomicLong> counters = new ConcurrentHashMap<>();
    final ConcurrentMap<String, AtomicLong> meters = new ConcurrentHashMap<>();
    final AbstractMonitor monitor = new AbstractMonitor() {
        final AtomicReference<Map<String, Long>> priorValues = new AtomicReference<Map<String, Long>>(
                new HashMap<String, Long>());

        @Override
        public boolean doMonitor(ServiceEmitter emitter) {
            final Map<String, Long> priorValues = this.priorValues.get();
            final Map<String, Long> currentValues = getCurrentValues();
            final ServiceMetricEvent.Builder builder = ServiceMetricEvent.builder();
            for (Map.Entry<String, Long> entry : currentValues.entrySet()) {
                emitter.emit(builder.setDimension("memcached metric", entry.getKey())
                        .build("query/cache/memcached/total", entry.getValue()));
                final Long prior = priorValues.get(entry.getKey());
                if (prior != null) {
                    emitter.emit(builder.setDimension("memcached metric", entry.getKey())
                            .build("query/cache/memcached/delta", entry.getValue() - prior));
                }
            }

            if (!this.priorValues.compareAndSet(priorValues, currentValues)) {
                log.error("Prior value changed while I was reporting! updating anyways");
                this.priorValues.set(currentValues);
            }
            return true;
        }

        private Map<String, Long> getCurrentValues() {
            final ImmutableMap.Builder<String, Long> builder = ImmutableMap.builder();
            for (Map.Entry<String, AtomicLong> entry : counters.entrySet()) {
                builder.put(entry.getKey(), entry.getValue().get());
            }
            for (Map.Entry<String, AtomicLong> entry : meters.entrySet()) {
                builder.put(entry.getKey(), entry.getValue().get());
            }
            return builder.build();
        }
    };
    try {
        LZ4Transcoder transcoder = new LZ4Transcoder(config.getMaxObjectSize());

        // always use compression
        transcoder.setCompressionThreshold(0);

        OperationQueueFactory opQueueFactory;
        long maxQueueBytes = config.getMaxOperationQueueSize();
        if (maxQueueBytes > 0) {
            opQueueFactory = new MemcachedOperationQueueFactory(maxQueueBytes);
        } else {
            opQueueFactory = new LinkedOperationQueueFactory();
        }

        final Predicate<String> interesting = new Predicate<String>() {
            // See net.spy.memcached.MemcachedConnection.registerMetrics()
            private final Set<String> interestingMetrics = ImmutableSet.of(
                    "[MEM] Reconnecting Nodes (ReconnectQueue)",
                    //"[MEM] Shutting Down Nodes (NodesToShutdown)", // Busted
                    "[MEM] Request Rate: All", "[MEM] Average Bytes written to OS per write",
                    "[MEM] Average Bytes read from OS per read",
                    "[MEM] Average Time on wire for operations (s)",
                    "[MEM] Response Rate: All (Failure + Success + Retry)", "[MEM] Response Rate: Retry",
                    "[MEM] Response Rate: Failure", "[MEM] Response Rate: Success");

            @Override
            public boolean apply(@Nullable String input) {
                return input != null && interestingMetrics.contains(input);
            }
        };

        final MetricCollector metricCollector = new MetricCollector() {
            @Override
            public void addCounter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                counters.putIfAbsent(name, new AtomicLong(0L));

                if (log.isDebugEnabled()) {
                    log.debug("Add Counter [%s]", name);
                }
            }

            @Override
            public void removeCounter(String name) {
                if (log.isDebugEnabled()) {
                    log.debug("Ignoring request to remove [%s]", name);
                }
            }

            @Override
            public void incrementCounter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong counter = counters.get(name);
                if (counter == null) {
                    counters.putIfAbsent(name, new AtomicLong(0));
                    counter = counters.get(name);
                }
                counter.incrementAndGet();

                if (log.isDebugEnabled()) {
                    log.debug("Increment [%s]", name);
                }
            }

            @Override
            public void incrementCounter(String name, int amount) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong counter = counters.get(name);
                if (counter == null) {
                    counters.putIfAbsent(name, new AtomicLong(0));
                    counter = counters.get(name);
                }
                counter.addAndGet(amount);

                if (log.isDebugEnabled()) {
                    log.debug("Increment [%s] %d", name, amount);
                }
            }

            @Override
            public void decrementCounter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong counter = counters.get(name);
                if (counter == null) {
                    counters.putIfAbsent(name, new AtomicLong(0));
                    counter = counters.get(name);
                }
                counter.decrementAndGet();

                if (log.isDebugEnabled()) {
                    log.debug("Decrement [%s]", name);
                }
            }

            @Override
            public void decrementCounter(String name, int amount) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong counter = counters.get(name);
                if (counter == null) {
                    counters.putIfAbsent(name, new AtomicLong(0L));
                    counter = counters.get(name);
                }
                counter.addAndGet(-amount);

                if (log.isDebugEnabled()) {
                    log.debug("Decrement [%s] %d", name, amount);
                }
            }

            @Override
            public void addMeter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                meters.putIfAbsent(name, new AtomicLong(0L));
                if (log.isDebugEnabled()) {
                    log.debug("Adding meter [%s]", name);
                }
            }

            @Override
            public void removeMeter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                if (log.isDebugEnabled()) {
                    log.debug("Ignoring request to remove meter [%s]", name);
                }
            }

            @Override
            public void markMeter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong meter = meters.get(name);
                if (meter == null) {
                    meters.putIfAbsent(name, new AtomicLong(0L));
                    meter = meters.get(name);
                }
                meter.incrementAndGet();

                if (log.isDebugEnabled()) {
                    log.debug("Increment counter [%s]", name);
                }
            }

            @Override
            public void addHistogram(String name) {
                log.debug("Ignoring add histogram [%s]", name);
            }

            @Override
            public void removeHistogram(String name) {
                log.debug("Ignoring remove histogram [%s]", name);
            }

            @Override
            public void updateHistogram(String name, int amount) {
                log.debug("Ignoring update histogram [%s]: %d", name, amount);
            }
        };

        final ConnectionFactory connectionFactory = new MemcachedCustomConnectionFactoryBuilder()
                // 1000 repetitions gives us good distribution with murmur3_128
                // (approx < 5% difference in counts across nodes, with 5 cache nodes)
                .setKetamaNodeRepetitions(1000).setHashAlg(MURMUR3_128)
                .setProtocol(ConnectionFactoryBuilder.Protocol.BINARY)
                .setLocatorType(ConnectionFactoryBuilder.Locator.CONSISTENT).setDaemon(true)
                .setFailureMode(FailureMode.Cancel).setTranscoder(transcoder).setShouldOptimize(true)
                .setOpQueueMaxBlockTime(config.getTimeout()).setOpTimeout(config.getTimeout())
                .setReadBufferSize(config.getReadBufferSize()).setOpQueueFactory(opQueueFactory)
                .setMetricCollector(metricCollector).setEnableMetrics(MetricType.DEBUG) // Not as scary as it sounds
                .build();

        final List<InetSocketAddress> hosts = AddrUtil.getAddresses(config.getHosts());

        final Supplier<ResourceHolder<MemcachedClientIF>> clientSupplier;

        if (config.getNumConnections() > 1) {
            clientSupplier = new LoadBalancingPool<MemcachedClientIF>(config.getNumConnections(),
                    new Supplier<MemcachedClientIF>() {
                        @Override
                        public MemcachedClientIF get() {
                            try {
                                return new MemcachedClient(connectionFactory, hosts);
                            } catch (IOException e) {
                                log.error(e, "Unable to create memcached client");
                                throw Throwables.propagate(e);
                            }
                        }
                    });
        } else {
            clientSupplier = Suppliers.<ResourceHolder<MemcachedClientIF>>ofInstance(StupidResourceHolder
                    .<MemcachedClientIF>create(new MemcachedClient(connectionFactory, hosts)));
        }

        return new MemcachedCache(clientSupplier, config, monitor);
    } catch (IOException e) {
        throw Throwables.propagate(e);
    }
}

From source file:esg.node.connection.ESGConnectionManager.java

public void init() {
    log.info("Initializing ESGFConnectionManager...");
    lastDispatchTime = new AtomicLong(-1L);

    //NOTE:
    //Just to make sure we have these guys if we decide to re-register,
    //since we did such a good job cleaning things out when we unregister.
    //One could imagine wanting to re-establish the connection manager.
    if (peers == null)
        peers = Collections.synchronizedMap(new HashMap<String, ESGPeer>());
    if (unavailablePeers == null)
        unavailablePeers = Collections.synchronizedMap(new HashMap<String, ESGPeer>());

    try {
        props = new ESGFProperties();
        periodicallyPingToPeers();
        periodicallyRegisterToPeers();
    } catch (java.io.IOException e) {
        System.out.println("Damn, ESGConnectionManager, can't fire up... :-(");
        log.error(e);
    }

    Runtime.getRuntime().addShutdownHook(new Thread() {
        public void run() {
            if (ESGConnectionManager.this.shutdownHookLatch) {
                System.out.println("Running Connection Manager Shutdown Hook");
                ESGConnectionManager.this.dispatchUnRegisterToPeers();
                System.out.println("Bye!");
            }
            ESGConnectionManager.this.shutdownHookLatch = true;
        }
    });
}

From source file:org.apache.hadoop.hbase.mapreduce.TestHLogRecordReader.java

/**
 * Test basic functionality
 * @throws Exception
 */
@Test
public void testHLogRecordReader() throws Exception {
    HLog log = HLogFactory.createHLog(fs, hbaseDir, logName, conf);
    byte[] value = Bytes.toBytes("value");
    final AtomicLong sequenceId = new AtomicLong(0);
    WALEdit edit = new WALEdit();
    edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"), System.currentTimeMillis(), value));
    log.append(info, tableName, edit, System.currentTimeMillis(), htd, sequenceId);

    Thread.sleep(1); // make sure 2nd log gets a later timestamp
    long secondTs = System.currentTimeMillis();
    log.rollWriter();

    edit = new WALEdit();
    edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), System.currentTimeMillis(), value));
    log.append(info, tableName, edit, System.currentTimeMillis(), htd, sequenceId);
    log.close();
    long thirdTs = System.currentTimeMillis();

    // should have 2 log files now
    HLogInputFormat input = new HLogInputFormat();
    Configuration jobConf = new Configuration(conf);
    jobConf.set("mapreduce.input.fileinputformat.inputdir", logDir.toString());

    // make sure both logs are found
    List<InputSplit> splits = input.getSplits(MapreduceTestingShim.createJobContext(jobConf));
    assertEquals(2, splits.size());

    // should return exactly one KV
    testSplit(splits.get(0), Bytes.toBytes("1"));
    // same for the 2nd split
    testSplit(splits.get(1), Bytes.toBytes("2"));

    // now test basic time ranges:

    // set an endtime, the 2nd log file can be ignored completely.
    jobConf.setLong(HLogInputFormat.END_TIME_KEY, secondTs - 1);
    splits = input.getSplits(MapreduceTestingShim.createJobContext(jobConf));
    assertEquals(1, splits.size());
    testSplit(splits.get(0), Bytes.toBytes("1"));

    // now set a start time
    jobConf.setLong(HLogInputFormat.END_TIME_KEY, Long.MAX_VALUE);
    jobConf.setLong(HLogInputFormat.START_TIME_KEY, thirdTs);
    splits = input.getSplits(MapreduceTestingShim.createJobContext(jobConf));
    // both logs need to be considered
    assertEquals(2, splits.size());
    // but both readers skip all edits
    testSplit(splits.get(0));
    testSplit(splits.get(1));
}

From source file:com.joyent.manta.benchmark.Benchmark.java

/**
 * Method used to run a multi-threaded benchmark.
 *
 * @param method to measure
 * @param path path to store benchmarking test data
 * @param iterations number of iterations to run
 * @param concurrency number of threads to run
 * @throws IOException thrown when we can't communicate with the server
 */
private static void multithreadedBenchmark(final String method, final String path, final int iterations,
        final int concurrency) throws IOException {
    final AtomicLong fullAggregation = new AtomicLong(0L);
    final AtomicLong serverAggregation = new AtomicLong(0L);
    final AtomicLong count = new AtomicLong(0L);
    final long perThreadCount = perThreadCount(iterations, concurrency);

    System.out.printf("Running %d iterations per thread\n", perThreadCount);

    final long testStart = System.nanoTime();

    Runtime.getRuntime().addShutdownHook(new Thread(Benchmark::cleanUp));

    final Callable<Void> worker = () -> {
        for (int i = 0; i < perThreadCount; i++) {
            Duration[] durations;

            if (method.equals("put")) {
                durations = measurePut(sizeInBytesOrNoOfDirs);
            } else if (method.equals("putDir")) {
                durations = measurePutDir(sizeInBytesOrNoOfDirs);
            } else {
                durations = measureGet(path);
            }

            long fullLatency = durations[0].toMillis();
            long serverLatency = durations[1].toMillis();
            fullAggregation.addAndGet(fullLatency);
            serverAggregation.addAndGet(serverLatency);

            System.out.printf("%s %d full=%dms, server=%dms, thread=%s\n", method, count.getAndIncrement(),
                    fullLatency, serverLatency, Thread.currentThread().getName());
        }

        return null;
    };

    final Thread.UncaughtExceptionHandler handler = (t, e) -> LOG.error("Error when executing benchmark", e);

    final AtomicInteger threadCounter = new AtomicInteger(0);
    ThreadFactory threadFactory = r -> {
        Thread t = new Thread(r);
        t.setDaemon(true);
        t.setUncaughtExceptionHandler(handler);
        t.setName(String.format("benchmark-%d", threadCounter.incrementAndGet()));

        return t;
    };

    ExecutorService executor = Executors.newFixedThreadPool(concurrency, threadFactory);

    List<Callable<Void>> workers = new ArrayList<>(concurrency);
    for (int i = 0; i < concurrency; i++) {
        workers.add(worker);
    }

    try {
        List<Future<Void>> futures = executor.invokeAll(workers);

        boolean completed = false;
        while (!completed) {
            try (Stream<Future<Void>> stream = futures.stream()) {
                completed = stream.allMatch((f) -> f.isDone() || f.isCancelled());

                if (!completed) {
                    Thread.sleep(CHECK_INTERVAL);
                }
            }
        }

    } catch (InterruptedException e) {
        return;
    } finally {
        System.err.println("Shutting down the thread pool");
        executor.shutdown();
    }

    final long testEnd = System.nanoTime();

    final long fullAverage = Math.round(fullAggregation.get() / iterations);
    final long serverAverage = Math.round(serverAggregation.get() / iterations);
    final long totalTime = Duration.ofNanos(testEnd - testStart).toMillis();

    System.out.printf("Average full latency: %d ms\n", fullAverage);
    System.out.printf("Average server latency: %d ms\n", serverAverage);
    System.out.printf("Total test time: %d ms\n", totalTime);
    System.out.printf("Total invocations: %d\n", count.get());
}

From source file:esg.node.components.registry.ESGFRegistry.java

public void init() {
    log.info("Initializing ESGFRegistry...");
    try {
        //props = getDataNodeManager().getMatchingProperties("*"); //TODO: figure the right regex for only what is needed
        props = new ESGFProperties();
        gleaner = new RegistrationGleaner(props);
        nodecomp = new NodeHostnameComparator();
        processedMap = new HashMap<String, String>();
        removedMap = new HashMap<String, Long>();
        peerFilter = new PeerNetworkFilter(props);
        lastDispatchTime = new AtomicLong(-1L);
        if (ExclusionListReader.getInstance().loadExclusionList()) {
            exList = ExclusionListReader.getInstance().getExclusionList().useType(PRIVATE_BIT);
        }
    } catch (java.io.IOException e) {
        System.out.println("Damn ESGFRegistry can't fire up... :-(");
        log.error(e);
    }
}

From source file:org.apache.hadoop.hbase.replication.regionserver.TestReplicationSourceManager.java

@Test
public void testLogRoll() throws Exception {
    long seq = 0;
    long baseline = 1000;
    long time = baseline;
    KeyValue kv = new KeyValue(r1, f1, r1);
    WALEdit edit = new WALEdit();
    edit.add(kv);

    List<WALActionsListener> listeners = new ArrayList<WALActionsListener>();
    listeners.add(replication);
    HLog hlog = HLogFactory.createHLog(fs, utility.getDataTestDir(), logName, conf, listeners,
            URLEncoder.encode("regionserver:60020", "UTF8"));
    final AtomicLong sequenceId = new AtomicLong(1);
    manager.init();
    HTableDescriptor htd = new HTableDescriptor();
    htd.addFamily(new HColumnDescriptor(f1));
    // Testing normal log rolling every 20
    for (long i = 1; i < 101; i++) {
        if (i > 1 && i % 20 == 0) {
            hlog.rollWriter();
        }
        LOG.info(i);
        HLogKey key = new HLogKey(hri.getRegionName(), test, seq++, System.currentTimeMillis(),
                HConstants.DEFAULT_CLUSTER_ID);
        hlog.append(hri, test, edit, System.currentTimeMillis(), htd, sequenceId);
    }

    // Simulate a rapid insert that's followed
    // by a report that's still not totally complete (missing last one)
    LOG.info(baseline + " and " + time);
    baseline += 101;
    time = baseline;
    LOG.info(baseline + " and " + time);

    for (int i = 0; i < 3; i++) {
        hlog.append(hri, test, edit, System.currentTimeMillis(), htd, sequenceId);
    }

    assertEquals(6, manager.getHLogs().get(slaveId).size());

    hlog.rollWriter();

    manager.logPositionAndCleanOldLogs(manager.getSources().get(0).getCurrentPath(), "1", 0, false, false);

    hlog.append(hri, test, edit, System.currentTimeMillis(), htd, sequenceId);

    assertEquals(1, manager.getHLogs().size());

    // TODO Need a case with only 2 HLogs and we only want to delete the first one
}

From source file:com.streamsets.pipeline.stage.bigquery.destination.BigQueryTarget.java

@Override
public void write(Batch batch) throws StageException {
    Map<TableId, List<Record>> tableIdToRecords = new LinkedHashMap<>();
    Map<Long, Record> requestIndexToRecords = new LinkedHashMap<>();

    if (batch.getRecords().hasNext()) {
        ELVars elVars = getContext().createELVars();
        batch.getRecords().forEachRemaining(record -> {
            RecordEL.setRecordInContext(elVars, record);
            try {
                String datasetName = dataSetEval.eval(elVars, conf.datasetEL, String.class);
                String tableName = tableNameELEval.eval(elVars, conf.tableNameEL, String.class);
                TableId tableId = TableId.of(datasetName, tableName);
                if (tableIdExistsCache.get(tableId)) {
                    List<Record> tableIdRecords = tableIdToRecords.computeIfAbsent(tableId,
                            t -> new ArrayList<>());
                    tableIdRecords.add(record);
                } else {
                    getContext().toError(record, Errors.BIGQUERY_17, datasetName, tableName,
                            conf.credentials.projectId);
                }
            } catch (ELEvalException e) {
                LOG.error("Error evaluating DataSet/TableName EL", e);
                getContext().toError(record, Errors.BIGQUERY_10, e);
            } catch (ExecutionException e) {
                LOG.error("Error when checking exists for tableId, Reason : {}", e);
                Throwable rootCause = Throwables.getRootCause(e);
                getContext().toError(record, Errors.BIGQUERY_13, rootCause);
            }
        });

        tableIdToRecords.forEach((tableId, records) -> {
            final AtomicLong index = new AtomicLong(0);
            final AtomicBoolean areThereRecordsToWrite = new AtomicBoolean(false);
            InsertAllRequest.Builder insertAllRequestBuilder = InsertAllRequest.newBuilder(tableId);
            records.forEach(record -> {
                try {
                    String insertId = getInsertIdForRecord(elVars, record);
                    Map<String, ?> rowContent = convertToRowObjectFromRecord(record);
                    if (rowContent.isEmpty()) {
                        throw new OnRecordErrorException(record, Errors.BIGQUERY_14);
                    }
                    insertAllRequestBuilder.addRow(insertId, rowContent);
                    areThereRecordsToWrite.set(true);
                    requestIndexToRecords.put(index.getAndIncrement(), record);
                } catch (OnRecordErrorException e) {
                    LOG.error("Error when converting record {} to row, Reason : {} ",
                            record.getHeader().getSourceId(), e.getMessage());
                    getContext().toError(record, e.getErrorCode(), e.getParams());
                }
            });

            if (areThereRecordsToWrite.get()) {
                insertAllRequestBuilder.setIgnoreUnknownValues(conf.ignoreInvalidColumn);
                insertAllRequestBuilder.setSkipInvalidRows(false);

                InsertAllRequest request = insertAllRequestBuilder.build();

                if (!request.getRows().isEmpty()) {
                    try {
                        InsertAllResponse response = bigQuery.insertAll(request);
                        if (response.hasErrors()) {
                            response.getInsertErrors().forEach((requestIdx, errors) -> {
                                Record record = requestIndexToRecords.get(requestIdx);
                                String messages = COMMA_JOINER.join(errors.stream()
                                        .map(BigQueryError::getMessage).collect(Collectors.toList()));
                                String reasons = COMMA_JOINER.join(errors.stream().map(BigQueryError::getReason)
                                        .collect(Collectors.toList()));
                                LOG.error("Error when inserting record {}, Reasons : {}, Messages : {}",
                                        record.getHeader().getSourceId(), reasons, messages);
                                getContext().toError(record, Errors.BIGQUERY_11, reasons, messages);
                            });
                        }
                    } catch (BigQueryException e) {
                        LOG.error(Errors.BIGQUERY_13.getMessage(), e);
                        //Put all records to error.
                        for (long i = 0; i < request.getRows().size(); i++) {
                            Record record = requestIndexToRecords.get(i);
                            getContext().toError(record, Errors.BIGQUERY_13, e);
                        }
                    }
                }
            }
        });
    }
}

From source file:nl.salp.warcraft4j.dev.casc.dbc.DbcAnalyser.java

public void analyse() {
    /*
    Set<String> knownDbcFileNames = getKnownDbcFilesByName();
    Set<String> invalidDbcFileNames = getInvalidDbcFilesByName();
    Set<String> noDataDbcFileNames = getDbcFilesWithNoDataByName();
    System.out.println(format("------------------------[  KNOWN DBC FILES (%d)  ]------------------------", knownDbcFileNames.size()));
    System.out.println(format("------------------------[ INVALID DBC FILES (%d) ]------------------------", invalidDbcFileNames.size()));
    invalidDbcFileNames.stream().forEach(System.out::println);
    System.out.println(format("------------------------[ NO DATA DBC FILES (%d) ]------------------------", noDataDbcFileNames.size()));
    noDataDbcFileNames.stream().forEach(System.out::println);
    */
    // Force CASC loading.
    final Set<Long> knownHashes = cascContext.getHashes();
    final int maxChars = 20;
    System.out.println(format("Brute forcing names with up to %d characters", maxChars));
    final AtomicLong count = new AtomicLong(0);
    final Map<String, Long> resolvedNames = new HashMap<>();
    new DbcFilenameGenerator(maxChars, () -> (filename) -> {
        long hash = CdnCascContext.hashFilename(filename);
        if (knownHashes.contains(hash)) {
            resolvedNames.put(filename, hash);
            LOGGER.debug("Resolved filename {} to CASC known hash {}", filename, hash);
        }
        count.incrementAndGet();
    }).execute();
    LOGGER.info(
            "Attempted hashing resolution on {} filenames against {} known hashes, resulting in {} resolved CASC hashes.",
            count.get(), knownHashes.size(), resolvedNames.size());

    /*
    getDbcFiles().stream()
        .forEach(f -> System.out.println(format("DbcFile [hash: %d, filename: %s, header: %s]",
                f.getFilenameHash(),
                f.getFilename()
                        .orElse("<unknown>"),
                f.getHeader()
                        .map(FileHeader::getHeader)
                        .map(String::new)
                        .orElse(""))));
    */
}