List of usage examples for java.util.concurrent.atomic AtomicLong get
public final long get()
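get() returns the current value with volatile read semantics: a thread calling get() observes the most recent write made by any thread through set(), addAndGet(), or the other atomic update methods. A minimal sketch of the basic contract (demo class name is hypothetical):

import java.util.concurrent.atomic.AtomicLong;

public class GetDemo {
    public static void main(String[] args) {
        AtomicLong counter = new AtomicLong(5);

        long snapshot = counter.get();       // volatile read of the current value
        counter.incrementAndGet();           // atomic read-modify-write

        System.out.println(snapshot);        // 5
        System.out.println(counter.get());   // 6
    }
}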
From source file:org.apache.activemq.usecases.NetworkBridgeProducerFlowControlTest.java
/**
 * This test is parameterized by {@link #persistentTestMessages}, which
 * determines whether the producer on broker0 sends persistent or
 * non-persistent messages, and {@link #networkIsAlwaysSendSync}, which
 * determines how the bridge will forward both persistent and non-persistent
 * messages to broker1.
 *
 * @see #initCombosForTestFastAndSlowRemoteConsumers()
 */
public void testFastAndSlowRemoteConsumers() throws Exception {
    final int NUM_MESSAGES = 100;
    final long TEST_MESSAGE_SIZE = 1024;
    final long SLOW_CONSUMER_DELAY_MILLIS = 100;

    // Consumer prefetch is disabled for broker1's consumers.
    final ActiveMQQueue SLOW_SHARED_QUEUE = new ActiveMQQueue(
            NetworkBridgeProducerFlowControlTest.class.getSimpleName() + ".slow.shared?consumer.prefetchSize=1");
    final ActiveMQQueue FAST_SHARED_QUEUE = new ActiveMQQueue(
            NetworkBridgeProducerFlowControlTest.class.getSimpleName() + ".fast.shared?consumer.prefetchSize=1");

    // Start a local and a remote broker.
    createBroker(new URI("broker:(tcp://localhost:0" + ")?brokerName=broker0&persistent=false&useJmx=true"));
    BrokerService remoteBroker = createBroker(
            new URI("broker:(tcp://localhost:0" + ")?brokerName=broker1&persistent=false&useJmx=true"));

    // Set a policy on the remote broker that limits the maximum size of the
    // slow shared queue.
    PolicyEntry policyEntry = new PolicyEntry();
    policyEntry.setMemoryLimit(5 * TEST_MESSAGE_SIZE);
    PolicyMap policyMap = new PolicyMap();
    policyMap.put(SLOW_SHARED_QUEUE, policyEntry);
    remoteBroker.setDestinationPolicy(policyMap);

    // Create an outbound bridge from the local broker to the remote broker.
    // The bridge is configured with the remoteDispatchType enhancement.
    NetworkConnector nc = bridgeBrokers("broker0", "broker1");
    nc.setAlwaysSyncSend(networkIsAlwaysSendSync);
    nc.setPrefetchSize(1);

    startAllBrokers();
    waitForBridgeFormation();

    // Send the test messages to the local broker's shared queues. The
    // messages are either persistent or non-persistent to demonstrate the
    // difference between synchronous and asynchronous dispatch.
    persistentDelivery = persistentTestMessages;
    sendMessages("broker0", FAST_SHARED_QUEUE, NUM_MESSAGES);
    sendMessages("broker0", SLOW_SHARED_QUEUE, NUM_MESSAGES);

    // Start two asynchronous consumers on the remote broker, one for each
    // of the two shared queues, and keep track of how long it takes for
    // each of the consumers to receive all the messages.
    final CountDownLatch fastConsumerLatch = new CountDownLatch(NUM_MESSAGES);
    final CountDownLatch slowConsumerLatch = new CountDownLatch(NUM_MESSAGES);

    final long startTimeMillis = System.currentTimeMillis();
    final AtomicLong fastConsumerTime = new AtomicLong();
    final AtomicLong slowConsumerTime = new AtomicLong();

    Thread fastWaitThread = new Thread() {
        @Override
        public void run() {
            try {
                fastConsumerLatch.await();
                fastConsumerTime.set(System.currentTimeMillis() - startTimeMillis);
            } catch (InterruptedException ex) {
                exceptions.add(ex);
                Assert.fail(ex.getMessage());
            }
        }
    };

    Thread slowWaitThread = new Thread() {
        @Override
        public void run() {
            try {
                slowConsumerLatch.await();
                slowConsumerTime.set(System.currentTimeMillis() - startTimeMillis);
            } catch (InterruptedException ex) {
                exceptions.add(ex);
                Assert.fail(ex.getMessage());
            }
        }
    };

    fastWaitThread.start();
    slowWaitThread.start();

    createConsumer("broker1", FAST_SHARED_QUEUE, fastConsumerLatch);
    MessageConsumer slowConsumer = createConsumer("broker1", SLOW_SHARED_QUEUE, slowConsumerLatch);
    MessageIdList messageIdList = brokers.get("broker1").consumers.get(slowConsumer);
    messageIdList.setProcessingDelay(SLOW_CONSUMER_DELAY_MILLIS);

    fastWaitThread.join();
    slowWaitThread.join();

    assertTrue("no exceptions on the wait threads:" + exceptions, exceptions.isEmpty());

    LOG.info("Fast consumer duration (ms): " + fastConsumerTime.get());
    LOG.info("Slow consumer duration (ms): " + slowConsumerTime.get());

    // Verify the behaviour as described in the description of this class.
    if (networkIsAlwaysSendSync) {
        Assert.assertTrue(fastConsumerTime.get() < slowConsumerTime.get() / 20);
    } else {
        Assert.assertEquals(persistentTestMessages, fastConsumerTime.get() < slowConsumerTime.get() / 10);
    }
}
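The timing pattern above is worth calling out: each wait thread publishes an elapsed time with set(), and the test thread reads it with get() only after join(). Both AtomicLong's volatile semantics and the happens-before edge from Thread.join() guarantee the reader sees the writer's value. A minimal distillation of the idiom (class and variable names are hypothetical):

import java.util.concurrent.atomic.AtomicLong;

public class ElapsedTimeDemo {
    public static void main(String[] args) throws InterruptedException {
        final long start = System.currentTimeMillis();
        final AtomicLong elapsed = new AtomicLong();   // written by worker, read by main

        Thread worker = new Thread(() -> {
            // ... do some work ...
            elapsed.set(System.currentTimeMillis() - start);
        });
        worker.start();
        worker.join();                                  // happens-before edge to the read below

        System.out.println("elapsed ms: " + elapsed.get());
    }
}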
From source file:org.zanata.sync.jobs.cache.RepoCacheImpl.java
private long copyDir(Path source, Path target) throws IOException {
    Files.createDirectories(target);
    AtomicLong totalSize = new AtomicLong(0);
    Files.walkFileTree(source, EnumSet.of(FileVisitOption.FOLLOW_LINKS), Integer.MAX_VALUE,
            new SimpleFileVisitor<Path>() {
                @Override
                public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs)
                        throws IOException {
                    Path targetdir = target.resolve(source.relativize(dir));
                    try {
                        if (Files.isDirectory(targetdir) && Files.exists(targetdir)) {
                            return CONTINUE;
                        }
                        Files.copy(dir, targetdir, StandardCopyOption.REPLACE_EXISTING,
                                StandardCopyOption.COPY_ATTRIBUTES);
                    } catch (FileAlreadyExistsException e) {
                        if (!Files.isDirectory(targetdir)) {
                            throw e;
                        }
                    }
                    return CONTINUE;
                }

                @Override
                public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                    if (Files.isRegularFile(file)) {
                        totalSize.accumulateAndGet(Files.size(file), (l, r) -> l + r);
                    }
                    Path targetFile = target.resolve(source.relativize(file));
                    // Only copy to target if it doesn't exist, or it exists but the content differs.
                    if (!Files.exists(targetFile)
                            || !com.google.common.io.Files.equal(file.toFile(), targetFile.toFile())) {
                        Files.copy(file, targetFile, StandardCopyOption.REPLACE_EXISTING,
                                StandardCopyOption.COPY_ATTRIBUTES);
                    }
                    return CONTINUE;
                }
            });
    return totalSize.get();
}
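A side note on the accumulation above: accumulateAndGet(x, (l, r) -> l + r) is equivalent to the more direct addAndGet(x); both update atomically and return the updated value. A minimal sketch (demo class name is hypothetical):

import java.util.concurrent.atomic.AtomicLong;

public class AccumulateDemo {
    public static void main(String[] args) {
        AtomicLong total = new AtomicLong(0);

        // These two calls are equivalent ways to add atomically:
        total.accumulateAndGet(1024, (l, r) -> l + r);
        total.addAndGet(1024);

        System.out.println(total.get()); // 2048
    }
}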
From source file:org.neo4j.consistency.checking.full.FullCheckIntegrationTest.java
@Test
public void shouldManageUnusedRecordsWithWeirdDataIn() throws Exception {
    // Given
    final AtomicLong id = new AtomicLong();
    fixture.apply(new GraphStoreFixture.Transaction() {
        @Override
        protected void transactionData(TransactionDataBuilder tx, IdGenerator next) {
            id.set(next.relationship());
            RelationshipRecord relationship = new RelationshipRecord(id.get());
            relationship.setFirstNode(-1);
            relationship.setSecondNode(-1);
            relationship.setInUse(true);
            tx.create(relationship);
        }
    });
    fixture.apply(new GraphStoreFixture.Transaction() {
        @Override
        protected void transactionData(TransactionDataBuilder tx, IdGenerator next) {
            RelationshipRecord relationship = new RelationshipRecord(id.get());
            tx.delete(relationship);
        }
    });

    // When
    ConsistencySummaryStatistics stats = check();

    // Then
    assertTrue(stats.isConsistent());
}
From source file:org.voltdb.TableHelper.java
/**
 * Load random data into a partitioned table in VoltDB that has a bigint pkey.
 *
 * If the VoltTable indicates which column is its pkey, then it will use it, but otherwise it will
 * assume the first column is the bigint pkey. Note, this works with other integer keys, but
 * your keyspace is pretty small.
 *
 * If mb == 0, then maxRows is used. If maxRows == 0, then mb is used.
 *
 * @param table Table with or without schema metadata.
 * @param mb Target RSS (approximate)
 * @param maxRows Target maximum rows
 * @param client To load with.
 * @param offset Generated pkey values start here.
 * @param jump Generated pkey values increment by this value.
 * @throws Exception
 */
public void fillTableWithBigintPkey(VoltTable table, int mb, long maxRows, final Client client, long offset,
        long jump) throws Exception {
    // make sure some kind of limit is set
    assert ((maxRows > 0) || (mb > 0));
    assert (maxRows >= 0);
    assert (mb >= 0);

    final int mbTarget = mb > 0 ? mb : Integer.MAX_VALUE;
    if (maxRows == 0) {
        maxRows = Long.MAX_VALUE;
    }

    System.out.printf(
            "Filling table %s with rows starting with pkey id %d (every %d rows) until either RSS=%dmb or rowcount=%d\n",
            table.m_extraMetadata.name, offset, jump, mbTarget, maxRows);

    // find the primary key, assume first col if not found
    int pkeyColIndex = getBigintPrimaryKeyIndexIfExists(table);
    if (pkeyColIndex == -1) {
        pkeyColIndex = 0;
        assert (table.getColumnType(0).isInteger());
    }

    final AtomicLong rss = new AtomicLong(0);

    ProcedureCallback insertCallback = new ProcedureCallback() {
        @Override
        public void clientCallback(ClientResponse clientResponse) throws Exception {
            if (clientResponse.getStatus() != ClientResponse.SUCCESS) {
                System.out.println("Error in loader callback:");
                System.out.println(((ClientResponseImpl) clientResponse).toJSONString());
                assert (false);
            }
        }
    };

    // update the rss value asynchronously
    final AtomicBoolean rssThreadShouldStop = new AtomicBoolean(false);
    Thread rssThread = new Thread() {
        @Override
        public void run() {
            long tempRss = rss.get();
            long rssPrev = tempRss;
            while (!rssThreadShouldStop.get()) {
                tempRss = MiscUtils.getMBRss(client);
                if (tempRss != rssPrev) {
                    rssPrev = tempRss;
                    rss.set(tempRss);
                    System.out.printf("RSS=%dmb\n", tempRss);
                    // bail when done
                    if (tempRss > mbTarget) {
                        return;
                    }
                }
                try {
                    Thread.sleep(2000);
                } catch (Exception e) {
                }
            }
        }
    };

    // load rows until RSS goal is met (status print every 100k)
    long i = offset;
    long rows = 0;
    rssThread.start();
    final String insertProcName = table.m_extraMetadata.name.toUpperCase() + ".insert";

    RandomRowMaker filler = createRandomRowMaker(table, Integer.MAX_VALUE, false, false);

    while (rss.get() < mbTarget) {
        Object[] row = filler.randomRow();
        row[pkeyColIndex] = i;
        client.callProcedure(insertCallback, insertProcName, row);
        rows++;
        if ((rows % 100000) == 0) {
            System.out.printf("Loading 100000 rows. %d inserts sent (%d max id).\n", rows, i);
        }
        // if row limit is set, break if it's hit
        if (rows >= maxRows) {
            break;
        }
        i += jump;
    }
    rssThreadShouldStop.set(true);

    client.drain();
    rssThread.join();

    System.out.printf("Filled table %s with %d rows and now RSS=%dmb\n", table.m_extraMetadata.name, rows,
            rss.get());
}
From source file:com.joyent.manta.benchmark.Benchmark.java
/**
 * Method used to run a multi-threaded benchmark.
 *
 * @param method to measure
 * @param path path to store benchmarking test data
 * @param iterations number of iterations to run
 * @param concurrency number of threads to run
 * @throws IOException thrown when we can't communicate with the server
 */
private static void multithreadedBenchmark(final String method, final String path, final int iterations,
        final int concurrency) throws IOException {
    final AtomicLong fullAggregation = new AtomicLong(0L);
    final AtomicLong serverAggregation = new AtomicLong(0L);
    final AtomicLong count = new AtomicLong(0L);
    final long perThreadCount = perThreadCount(iterations, concurrency);

    System.out.printf("Running %d iterations per thread\n", perThreadCount);

    final long testStart = System.nanoTime();

    Runtime.getRuntime().addShutdownHook(new Thread(Benchmark::cleanUp));

    final Callable<Void> worker = () -> {
        for (int i = 0; i < perThreadCount; i++) {
            Duration[] durations;

            if (method.equals("put")) {
                durations = measurePut(sizeInBytesOrNoOfDirs);
            } else if (method.equals("putDir")) {
                durations = measurePutDir(sizeInBytesOrNoOfDirs);
            } else {
                durations = measureGet(path);
            }

            long fullLatency = durations[0].toMillis();
            long serverLatency = durations[1].toMillis();
            fullAggregation.addAndGet(fullLatency);
            serverAggregation.addAndGet(serverLatency);

            System.out.printf("%s %d full=%dms, server=%dms, thread=%s\n", method, count.getAndIncrement(),
                    fullLatency, serverLatency, Thread.currentThread().getName());
        }

        return null;
    };

    final Thread.UncaughtExceptionHandler handler = (t, e) -> LOG.error("Error when executing benchmark", e);

    final AtomicInteger threadCounter = new AtomicInteger(0);
    ThreadFactory threadFactory = r -> {
        Thread t = new Thread(r);
        t.setDaemon(true);
        t.setUncaughtExceptionHandler(handler);
        t.setName(String.format("benchmark-%d", threadCounter.incrementAndGet()));
        return t;
    };

    ExecutorService executor = Executors.newFixedThreadPool(concurrency, threadFactory);

    List<Callable<Void>> workers = new ArrayList<>(concurrency);
    for (int i = 0; i < concurrency; i++) {
        workers.add(worker);
    }

    try {
        List<Future<Void>> futures = executor.invokeAll(workers);

        boolean completed = false;
        while (!completed) {
            try (Stream<Future<Void>> stream = futures.stream()) {
                completed = stream.allMatch((f) -> f.isDone() || f.isCancelled());

                if (!completed) {
                    Thread.sleep(CHECK_INTERVAL);
                }
            }
        }
    } catch (InterruptedException e) {
        return;
    } finally {
        System.err.println("Shutting down the thread pool");
        executor.shutdown();
    }

    final long testEnd = System.nanoTime();

    // cast to double avoids integer division, which would truncate before rounding
    final long fullAverage = Math.round((double) fullAggregation.get() / iterations);
    final long serverAverage = Math.round((double) serverAggregation.get() / iterations);
    final long totalTime = Duration.ofNanos(testEnd - testStart).toMillis();

    System.out.printf("Average full latency: %d ms\n", fullAverage);
    System.out.printf("Average server latency: %d ms\n", serverAverage);
    System.out.printf("Total test time: %d ms\n", totalTime);
    System.out.printf("Total invocations: %d\n", count.get());
}
From source file:com.alibaba.druid.benckmark.pool.Case2.java
private void p0(final DataSource dataSource, String name, int threadCount) throws Exception {
    final CountDownLatch startLatch = new CountDownLatch(1);
    final CountDownLatch endLatch = new CountDownLatch(threadCount);
    final AtomicLong blockedStat = new AtomicLong();
    final AtomicLong waitedStat = new AtomicLong();

    for (int i = 0; i < threadCount; ++i) {
        Thread thread = new Thread() {
            public void run() {
                try {
                    startLatch.await();

                    long threadId = Thread.currentThread().getId();

                    long startBlockedCount, startWaitedCount;
                    {
                        ThreadInfo threadInfo = ManagementFactory.getThreadMXBean().getThreadInfo(threadId);
                        startBlockedCount = threadInfo.getBlockedCount();
                        startWaitedCount = threadInfo.getWaitedCount();
                    }

                    for (int i = 0; i < LOOP_COUNT; ++i) {
                        Connection conn = dataSource.getConnection();
                        conn.close();
                    }

                    ThreadInfo threadInfo = ManagementFactory.getThreadMXBean().getThreadInfo(threadId);
                    long blockedCount = threadInfo.getBlockedCount() - startBlockedCount;
                    long waitedCount = threadInfo.getWaitedCount() - startWaitedCount;
                    blockedStat.addAndGet(blockedCount);
                    waitedStat.addAndGet(waitedCount);
                } catch (Exception ex) {
                    ex.printStackTrace();
                }
                endLatch.countDown();
            }
        };
        thread.start();
    }

    long startMillis = System.currentTimeMillis();
    long startYGC = TestUtil.getYoungGC();
    long startFullGC = TestUtil.getFullGC();

    startLatch.countDown();
    endLatch.await();

    long millis = System.currentTimeMillis() - startMillis;
    long ygc = TestUtil.getYoungGC() - startYGC;
    long fullGC = TestUtil.getFullGC() - startFullGC;

    System.out.println("thread " + threadCount + " " + name + " millis : "
            + NumberFormat.getInstance().format(millis) + ", YGC " + ygc + " FGC " + fullGC
            + " blockedCount " + blockedStat.get() + " waitedCount " + waitedStat.get());
}
From source file:org.elasticsearch.test.ElasticsearchIntegrationTest.java
/**
 * Waits until at least a given number of documents is visible to searchers.
 *
 * @param numDocs number of documents to wait for
 * @param maxWaitTime if no progress has been made during this time, fail the test
 * @param maxWaitTimeUnit the unit in which maxWaitTime is specified
 * @param indexer a {@link org.elasticsearch.test.BackgroundIndexer}. If supplied, it will first be checked
 * for indexed documents. This saves on unneeded searches.
 * @return the actual number of docs seen.
 * @throws InterruptedException
 */
public long waitForDocs(final long numDocs, int maxWaitTime, TimeUnit maxWaitTimeUnit,
        final @Nullable BackgroundIndexer indexer) throws InterruptedException {
    final AtomicLong lastKnownCount = new AtomicLong(-1);
    long lastStartCount = -1;
    Predicate<Object> testDocs = new Predicate<Object>() {
        @Override
        public boolean apply(Object o) {
            if (indexer != null) {
                lastKnownCount.set(indexer.totalIndexedDocs());
            }
            if (lastKnownCount.get() >= numDocs) {
                try {
                    long count = client().prepareCount().setQuery(matchAllQuery()).execute().actionGet()
                            .getCount();
                    if (count == lastKnownCount.get()) {
                        // no progress - try to refresh for the next time
                        client().admin().indices().prepareRefresh().get();
                    }
                    lastKnownCount.set(count);
                } catch (Throwable e) {
                    // count now acts like search and barfs if all shards failed...
                    logger.debug("failed to execute count", e);
                    return false;
                }
                logger.debug("[{}] docs visible for search. waiting for [{}]", lastKnownCount.get(), numDocs);
            } else {
                logger.debug("[{}] docs indexed. waiting for [{}]", lastKnownCount.get(), numDocs);
            }
            return lastKnownCount.get() >= numDocs;
        }
    };
    while (!awaitBusy(testDocs, maxWaitTime, maxWaitTimeUnit)) {
        if (lastStartCount == lastKnownCount.get()) {
            // we didn't make any progress
            fail("failed to reach " + numDocs + " docs");
        }
        lastStartCount = lastKnownCount.get();
    }
    return lastKnownCount.get();
}
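This example also shows why AtomicLong turns up so often inside anonymous classes and lambdas: Java only lets them capture (effectively) final locals, so a plain long counter cannot be reassigned from inside the predicate, while a final AtomicLong reference can have its value mutated freely. A minimal sketch of the idiom (names and the fake progress step are hypothetical):

import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Supplier;

public class CapturedCounterDemo {
    public static void main(String[] args) {
        final AtomicLong lastSeen = new AtomicLong(-1);   // effectively final reference, mutable value

        // The lambda can't assign to a captured local long, but it can call set() on the AtomicLong.
        Supplier<Boolean> progressed = () -> {
            long previous = lastSeen.get();
            lastSeen.set(previous + 10);                  // pretend we observed 10 more docs
            return lastSeen.get() > previous;
        };

        while (lastSeen.get() < 100) {
            if (!progressed.get()) {
                throw new IllegalStateException("no progress");
            }
        }
        System.out.println("final count: " + lastSeen.get());
    }
}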
From source file:org.nanoframework.orm.jedis.sharded.RedisClientImpl.java
@Override
public boolean smove(final String source, final String destination, final String member) {
    Assert.hasText(source);
    Assert.hasText(destination);
    Assert.hasText(member);

    ShardedJedis jedis = null;
    try {
        jedis = POOL.getJedis(config.getRedisType());
        final Collection<Jedis> allShards;
        if ((allShards = jedis.getAllShards()).size() == 1) {
            return isSuccess(allShards.iterator().next().smove(source, destination, member));
        } else if (allShards.size() > 1) {
            final AtomicLong val = new AtomicLong();
            allShards.parallelStream().forEach(shard -> {
                Pipeline pipeline = shard.pipelined();
                pipeline.sismember(source, member);
                Response<Long> response = pipeline.smove(source, destination, member);
                pipeline.sync();
                val.addAndGet(response.get());
            });

            if (val.get() > 0) {
                return true;
            }
        }

        return false;
    } catch (final Throwable e) {
        throw new RedisClientException(e.getMessage(), e);
    } finally {
        POOL.close(jedis);
    }
}
From source file:org.deeplearning4j.models.embeddings.inmemory.InMemoryLookupTable.java
/**
 * Iterate on the given 2 vocab words
 *
 * @param w1 the first word to iterate on
 * @param w2 the second word to iterate on
 * @param nextRandom next random for sampling
 */
@Override
@Deprecated
public void iterateSample(T w1, T w2, AtomicLong nextRandom, double alpha) {
    if (w2 == null || w2.getIndex() < 0 || w1.getIndex() == w2.getIndex() || w1.getLabel().equals("STOP")
            || w2.getLabel().equals("STOP") || w1.getLabel().equals("UNK") || w2.getLabel().equals("UNK"))
        return;

    // current word vector
    INDArray l1 = this.syn0.slice(w2.getIndex());

    // error for current word and context
    INDArray neu1e = Nd4j.create(vectorLength);

    for (int i = 0; i < w1.getCodeLength(); i++) {
        int code = w1.getCodes().get(i);
        int point = w1.getPoints().get(i);
        if (point >= syn0.rows() || point < 0)
            throw new IllegalStateException("Illegal point " + point);

        // other word vector
        INDArray syn1 = this.syn1.slice(point);

        double dot = Nd4j.getBlasWrapper().dot(l1, syn1);

        if (dot < -MAX_EXP || dot >= MAX_EXP)
            continue;

        int idx = (int) ((dot + MAX_EXP) * ((double) expTable.length / MAX_EXP / 2.0));
        if (idx >= expTable.length)
            continue;

        // score
        double f = expTable[idx];
        // gradient
        double g = useAdaGrad ? w1.getGradient(i, (1 - code - f), lr.get()) : (1 - code - f) * alpha;

        Nd4j.getBlasWrapper().level1().axpy(syn1.length(), g, syn1, neu1e);
        Nd4j.getBlasWrapper().level1().axpy(syn1.length(), g, l1, syn1);
    }

    int target = w1.getIndex();
    int label;

    // negative sampling
    if (negative > 0)
        for (int d = 0; d < negative + 1; d++) {
            if (d == 0)
                label = 1;
            else {
                nextRandom.set(nextRandom.get() * 25214903917L + 11);
                int idx = Math.abs((int) (nextRandom.get() >> 16) % table.length());

                target = table.getInt(idx);
                if (target <= 0)
                    target = (int) nextRandom.get() % (vocab.numWords() - 1) + 1;

                if (target == w1.getIndex())
                    continue;
                label = 0;
            }

            if (target >= syn1Neg.rows() || target < 0)
                continue;

            double f = Nd4j.getBlasWrapper().dot(l1, syn1Neg.slice(target));
            double g;
            if (f > MAX_EXP)
                g = useAdaGrad ? w1.getGradient(target, (label - 1), alpha) : (label - 1) * alpha;
            else if (f < -MAX_EXP)
                g = label * (useAdaGrad ? w1.getGradient(target, alpha, alpha) : alpha);
            else
                g = useAdaGrad
                        ? w1.getGradient(target,
                                label - expTable[(int) ((f + MAX_EXP) * (expTable.length / MAX_EXP / 2))], alpha)
                        : (label - expTable[(int) ((f + MAX_EXP) * (expTable.length / MAX_EXP / 2))]) * alpha;

            if (syn0.data().dataType() == DataBuffer.Type.DOUBLE)
                Nd4j.getBlasWrapper().axpy(g, syn1Neg.slice(target), neu1e);
            else
                Nd4j.getBlasWrapper().axpy((float) g, syn1Neg.slice(target), neu1e);

            if (syn0.data().dataType() == DataBuffer.Type.DOUBLE)
                Nd4j.getBlasWrapper().axpy(g, l1, syn1Neg.slice(target));
            else
                Nd4j.getBlasWrapper().axpy((float) g, l1, syn1Neg.slice(target));
        }

    if (syn0.data().dataType() == DataBuffer.Type.DOUBLE)
        Nd4j.getBlasWrapper().axpy(1.0, neu1e, l1);
    else
        Nd4j.getBlasWrapper().axpy(1.0f, neu1e, l1);
}
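One detail worth noting in the sampling above: nextRandom.set(nextRandom.get() * 25214903917L + 11) advances a linear congruential generator (the same constants java.util.Random uses), but the separate get()/set() pair is not a single atomic step, so if the same AtomicLong were shared across threads, updates could be lost. Since Java 8, updateAndGet performs the whole read-modify-write atomically. A minimal sketch (demo class name is hypothetical):

import java.util.concurrent.atomic.AtomicLong;

public class LcgDemo {
    public static void main(String[] args) {
        AtomicLong nextRandom = new AtomicLong(42);

        // Non-atomic: another thread could interleave between get() and set().
        nextRandom.set(nextRandom.get() * 25214903917L + 11);

        // Atomic: the whole read-modify-write retries via CAS under contention.
        long seed = nextRandom.updateAndGet(s -> s * 25214903917L + 11);

        System.out.println(seed);
    }
}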
From source file:io.warp10.continuum.egress.EgressFetchHandler.java
private void packedDump(PrintWriter pw, GTSDecoderIterator iter, long now, long timespan, boolean dedup,
        boolean signed, AtomicReference<Metadata> lastMeta, AtomicLong lastCount, int maxDecoderLen,
        String classSuffix, long chunksize, boolean sortMeta) throws IOException {

    String name = null;
    Map<String, String> labels = null;

    StringBuilder sb = new StringBuilder();

    Metadata lastMetadata = lastMeta.get();
    long currentCount = lastCount.get();

    List<GTSEncoder> encoders = new ArrayList<GTSEncoder>();

    while (iter.hasNext()) {
        GTSDecoder decoder = iter.next();

        if (dedup) {
            decoder = decoder.dedup();
        }

        if (!decoder.next()) {
            continue;
        }

        long toDecodeCount = Long.MAX_VALUE;

        if (timespan < 0) {
            Metadata meta = decoder.getMetadata();
            if (!meta.equals(lastMetadata)) {
                lastMetadata = meta;
                currentCount = 0;
            }
            toDecodeCount = Math.max(0, -timespan - currentCount);
        }

        GTSEncoder encoder = decoder.getEncoder(true);

        //
        // Only display the class + labels if they have changed since the previous GTS
        //

        Map<String, String> lbls = decoder.getLabels();

        //
        // Compute the name
        //

        name = decoder.getName();
        labels = lbls;
        sb.setLength(0);

        GTSHelper.encodeName(sb, name + classSuffix);
        sb.append("{");
        boolean first = true;

        if (sortMeta) {
            lbls = new TreeMap<String, String>(lbls);
        }

        for (Entry<String, String> entry : lbls.entrySet()) {
            //
            // Skip owner/producer labels and any other 'private' labels
            //
            if (!signed) {
                if (Constants.PRODUCER_LABEL.equals(entry.getKey())) {
                    continue;
                }
                if (Constants.OWNER_LABEL.equals(entry.getKey())) {
                    continue;
                }
            }

            if (!first) {
                sb.append(",");
            }
            GTSHelper.encodeName(sb, entry.getKey());
            sb.append("=");
            GTSHelper.encodeName(sb, entry.getValue());
            first = false;
        }
        sb.append("}");

        // We treat the case where encoder.getCount() is 0 in a special way
        // as this may be because the encoder was generated from a partly
        // consumed decoder and thus its count was reset to 0
        if (0 == encoder.getCount() || encoder.getCount() > toDecodeCount) {
            // We have too much data, shrink the encoder
            GTSEncoder enc = new GTSEncoder();
            enc.safeSetMetadata(decoder.getMetadata());
            while (decoder.next() && toDecodeCount > 0) {
                enc.addValue(decoder.getTimestamp(), decoder.getLocation(), decoder.getElevation(),
                        decoder.getValue());
                toDecodeCount--;
            }
            encoder = enc;
        }

        if (timespan < 0) {
            currentCount += encoder.getCount();
        }

        encoders.clear();

        //
        // Add encoders per chunk
        //

        GTSDecoder chunkdec = encoder.getDecoder(true);
        GTSEncoder chunkenc = null;

        Long lastchunk = null;

        if (Long.MAX_VALUE == chunksize) {
            encoders.add(encoder);
        } else {
            while (chunkdec.next()) {
                long ts = chunkdec.getTimestamp();
                long chunk = ts >= 0 ? ts / chunksize : ((ts + 1) / chunksize) - 1;

                //
                // If it is the first chunk or we changed chunk, create a new encoder
                //
                if (null == chunkenc || (null != lastchunk && chunk != lastchunk)) {
                    chunkenc = new GTSEncoder(0L);
                    chunkenc.setMetadata(encoder.getMetadata());
                    encoders.add(chunkenc);
                }
                lastchunk = chunk;

                chunkenc.addValue(ts, chunkdec.getLocation(), chunkdec.getElevation(), chunkdec.getValue());
            }
        }

        while (!encoders.isEmpty()) {
            encoder = encoders.remove(0);

            if (encoder.size() > 0) {
                //
                // Determine most recent timestamp
                //
                GTSDecoder dec = encoder.getDecoder(true);
                dec.next();
                long timestamp = dec.getTimestamp();

                //
                // Build GTSWrapper
                //
                encoder.setMetadata(new Metadata());
                // Clear labels
                encoder.setName("");
                encoder.setLabels(new HashMap<String, String>());
                encoder.getMetadata().setAttributes(new HashMap<String, String>());

                GTSWrapper wrapper = GTSWrapperHelper.fromGTSEncoderToGTSWrapper(encoder, true);

                TSerializer ser = new TSerializer(new TCompactProtocol.Factory());
                byte[] serialized;

                try {
                    serialized = ser.serialize(wrapper);
                } catch (TException te) {
                    throw new IOException(te);
                }

                //
                // Check the size of the generated wrapper. If it is over 75% of maxDecoderLen,
                // split the original encoder in two
                //
                if (serialized.length >= Math.floor(0.75D * maxDecoderLen) && encoder.getCount() > 2) {
                    GTSEncoder split = new GTSEncoder(0L);
                    split.setMetadata(encoder.getMetadata());

                    List<GTSEncoder> splits = new ArrayList<GTSEncoder>();
                    splits.add(split);

                    int threshold = encoder.size() / 2;

                    GTSDecoder deco = encoder.getDecoder(true);

                    while (deco.next()) {
                        split.addValue(deco.getTimestamp(), deco.getLocation(), deco.getElevation(),
                                deco.getValue());
                        if (split.size() > threshold) {
                            split = new GTSEncoder(0L);
                            splits.add(split);
                        }
                    }

                    //
                    // Now insert the splits at the beginning of 'encoders'
                    //
                    for (int i = splits.size() - 1; i >= 0; i--) {
                        encoders.add(0, splits.get(i));
                    }
                    continue;
                }

                if (serialized.length > Math.ceil(0.75D * maxDecoderLen)) {
                    throw new IOException("Encountered a value whose length is above the configured threshold of "
                            + maxDecoderLen);
                }

                pw.print(timestamp);
                pw.print("//");
                pw.print(encoder.getCount());
                pw.print(" ");
                pw.print(sb.toString());
                pw.print(" '");

                OrderPreservingBase64.encodeToWriter(serialized, pw);

                pw.print("'");
                pw.write('\r');
                pw.write('\n');
            }
        }
    }

    lastMeta.set(lastMetadata);
    lastCount.set(currentCount);
}