List of usage examples for the java.util.concurrent.atomic.AtomicLong constructor AtomicLong(long initialValue)
public AtomicLong(long initialValue)
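The one-argument constructor creates an AtomicLong whose initial value is the given long (the no-argument form starts at zero). A minimal, self-contained sketch of constructing and updating such a counter, not taken from any of the projects below:

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongConstructorDemo {
    public static void main(String[] args) {
        AtomicLong counter = new AtomicLong(100L); // start counting from 100
        counter.incrementAndGet();                 // 101
        counter.addAndGet(9L);                     // 110
        System.out.println(counter.get());         // prints 110
    }
}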
From source file: org.apache.hadoop.hbase.io.hfile.LruBlockCache.java
/**
 * Configurable constructor. Use this constructor if not using defaults.
 * @param maxSize maximum size of this cache, in bytes
 * @param blockSize expected average size of blocks, in bytes
 * @param evictionThread whether to run evictions in a bg thread or not
 * @param mapInitialSize initial size of backing ConcurrentHashMap
 * @param mapLoadFactor initial load factor of backing ConcurrentHashMap
 * @param mapConcurrencyLevel initial concurrency factor for backing CHM
 * @param minFactor percentage of total size that eviction will evict until
 * @param acceptableFactor percentage of total size that triggers eviction
 * @param singleFactor percentage of total size for single-access blocks
 * @param multiFactor percentage of total size for multiple-access blocks
 * @param memoryFactor percentage of total size for in-memory blocks
 */
public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, int mapInitialSize,
        float mapLoadFactor, int mapConcurrencyLevel, float minFactor, float acceptableFactor,
        float singleFactor, float multiFactor, float memoryFactor, boolean forceInMemory) {
    if (singleFactor + multiFactor + memoryFactor != 1 || singleFactor < 0 || multiFactor < 0
            || memoryFactor < 0) {
        throw new IllegalArgumentException(
                "Single, multi, and memory factors " + " should be non-negative and total 1.0");
    }
    if (minFactor >= acceptableFactor) {
        throw new IllegalArgumentException("minFactor must be smaller than acceptableFactor");
    }
    if (minFactor >= 1.0f || acceptableFactor >= 1.0f) {
        throw new IllegalArgumentException("all factors must be < 1");
    }
    this.maxSize = maxSize;
    this.blockSize = blockSize;
    this.forceInMemory = forceInMemory;
    map = new ConcurrentHashMap<BlockCacheKey, CachedBlock>(mapInitialSize, mapLoadFactor, mapConcurrencyLevel);
    this.minFactor = minFactor;
    this.acceptableFactor = acceptableFactor;
    this.singleFactor = singleFactor;
    this.multiFactor = multiFactor;
    this.memoryFactor = memoryFactor;
    this.stats = new CacheStats();
    this.count = new AtomicLong(0);
    this.elements = new AtomicLong(0);
    this.overhead = calculateOverhead(maxSize, blockSize, mapConcurrencyLevel);
    this.size = new AtomicLong(this.overhead);
    if (evictionThread) {
        this.evictionThread = new EvictionThread(this);
        this.evictionThread.start(); // FindBugs SC_START_IN_CTOR
    } else {
        this.evictionThread = null;
    }
    this.scheduleThreadPool.scheduleAtFixedRate(new StatisticsThread(this), statThreadPeriod,
            statThreadPeriod, TimeUnit.SECONDS);
}
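Note how the example above seeds one of its AtomicLong fields with a precomputed value (the map overhead) rather than zero, so later size accounting already includes that fixed cost. A hypothetical, stripped-down sketch of the same bookkeeping idea (class and field names are illustrative, not HBase API):

import java.util.concurrent.atomic.AtomicLong;

class SimpleSizeTracker {
    private final AtomicLong size;                          // bytes currently used
    private final AtomicLong elements = new AtomicLong(0);  // number of cached entries

    SimpleSizeTracker(long fixedOverheadBytes) {
        // start from the fixed overhead instead of zero
        this.size = new AtomicLong(fixedOverheadBytes);
    }

    long add(long entryBytes) {
        elements.incrementAndGet();
        return size.addAndGet(entryBytes);
    }
}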
From source file: de.hybris.platform.jdbcwrapper.ConnectionPoolTest.java
@Test
public void testReturnWhenClosed() {
    HybrisDataSource dataSource = null;
    TestThreadsHolder threadsHolder = null;
    try {
        final Collection<TestConnectionImpl> allConnections = new ConcurrentLinkedQueue<TestConnectionImpl>();
        final AtomicLong connectionCounter = new AtomicLong(0);
        dataSource = createDataSource(Registry.getCurrentTenantNoFallback(), allConnections, connectionCounter,
                false, false);
        final int maxConnections = dataSource.getMaxAllowedPhysicalOpen();
        final CountDownLatch allFetched = new CountDownLatch(maxConnections);
        final CountDownLatch release = new CountDownLatch(1);
        final Runnable runnable = new GetOneConnectionRunnable(dataSource, allFetched, release);
        threadsHolder = new TestThreadsHolder(maxConnections, runnable);
        threadsHolder.startAll();
        assertTrue(allFetched.await(10, TimeUnit.SECONDS));
        LOG.info("all connection fetched");
        assertEquals(maxConnections, dataSource.getNumInUse());
        assertEquals(maxConnections, dataSource.getNumPhysicalOpen());
        assertEquals(maxConnections, dataSource.getMaxInUse());
        assertEquals(maxConnections, dataSource.getMaxPhysicalOpen());
        // kill data source
        dataSource.destroy();
        assertTrue(dataSource.getConnectionPool().isPoolClosed());
        LOG.info("data source destroyed");
        // test get error
        try {
            dataSource.getConnection();
            fail("SQLExcpetion expected after destroy()");
        } catch (final SQLException e) {
            // fine
            LOG.info("no new connection allowed");
        }
        // check stats again -> should not have changed
        assertEquals(maxConnections, dataSource.getNumInUse());
        assertEquals(maxConnections, dataSource.getNumPhysicalOpen());
        assertEquals(maxConnections, dataSource.getMaxInUse());
        assertEquals(maxConnections, dataSource.getMaxPhysicalOpen());
        // now let all threads return their connections
        release.countDown();
        LOG.info("all threads close connections now...");
        threadsHolder.waitForAll(10, TimeUnit.SECONDS);
        LOG.info("all threads died");
        assertTrue(waitForAllInactive(dataSource.getConnectionPool(), 10, TimeUnit.SECONDS));
        // check final stats
        assertEquals(0, dataSource.getNumInUse());
        assertEquals(0, dataSource.getNumPhysicalOpen());
        assertEquals(maxConnections, dataSource.getMaxInUse());
        assertEquals(maxConnections, dataSource.getMaxPhysicalOpen());
        final Stats stats = getStats(allConnections);
        // make sure all connections have been finally closed
        assertEquals(0, stats.open);
    } catch (final InterruptedException e) {
        // ok
    } finally {
        stopThreads(threadsHolder);
        destroyDataSource(dataSource);
    }
}
From source file: com.facebook.presto.accumulo.index.Indexer.java
private void addIndexMutation(ByteBuffer row, ByteBuffer family, ColumnVisibility visibility, byte[] qualifier) {
    // Create the mutation and add it to the batch writer
    Mutation indexMutation = new Mutation(row.array());
    indexMutation.put(family.array(), qualifier, visibility, EMPTY_BYTES);
    try {
        indexWriter.addMutation(indexMutation);
    } catch (MutationsRejectedException e) {
        throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Index mutation rejected by server", e);
    }

    // Increment the cardinality metrics for this value of index
    // metrics is a mapping of row ID to column family
    MetricsKey key = new MetricsKey(row, family, visibility);
    AtomicLong count = metrics.get(key);
    if (count == null) {
        count = new AtomicLong(0);
        metrics.put(key, count);
    }

    count.incrementAndGet();
}
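The get / null-check / put sequence above lazily creates a new AtomicLong(0) per key and is fine when the map has a single writer; with a shared ConcurrentHashMap the same per-key counting can instead be done atomically with computeIfAbsent (Java 8+). A small sketch of that alternative, with a hypothetical String key rather than the Presto MetricsKey:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

class PerKeyCounter {
    private final ConcurrentMap<String, AtomicLong> metrics = new ConcurrentHashMap<>();

    void increment(String key) {
        // computeIfAbsent creates the AtomicLong(0) at most once per key
        metrics.computeIfAbsent(key, k -> new AtomicLong(0)).incrementAndGet();
    }

    long get(String key) {
        AtomicLong count = metrics.get(key);
        return count == null ? 0L : count.get();
    }
}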
From source file: org.apache.hadoop.hbase.regionserver.wal.TestHLog.java
/**
 * Just write multiple logs then split. Before fix for HADOOP-2283, this
 * would fail.
 * @throws IOException
 */
@Test
public void testSplit() throws IOException {
    final TableName tableName = TableName.valueOf(getName());
    final byte[] rowName = tableName.getName();
    Path logdir = new Path(hbaseDir, HConstants.HREGION_LOGDIR_NAME);
    HLog log = HLogFactory.createHLog(fs, hbaseDir, HConstants.HREGION_LOGDIR_NAME, conf);
    final int howmany = 3;
    HRegionInfo[] infos = new HRegionInfo[3];
    Path tabledir = FSUtils.getTableDir(hbaseDir, tableName);
    fs.mkdirs(tabledir);
    for (int i = 0; i < howmany; i++) {
        infos[i] = new HRegionInfo(tableName, Bytes.toBytes("" + i), Bytes.toBytes("" + (i + 1)), false);
        fs.mkdirs(new Path(tabledir, infos[i].getEncodedName()));
        LOG.info("allo " + new Path(tabledir, infos[i].getEncodedName()).toString());
    }
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor("column"));

    // Add edits for three regions.
    final AtomicLong sequenceId = new AtomicLong(1);
    try {
        for (int ii = 0; ii < howmany; ii++) {
            for (int i = 0; i < howmany; i++) {
                for (int j = 0; j < howmany; j++) {
                    WALEdit edit = new WALEdit();
                    byte[] family = Bytes.toBytes("column");
                    byte[] qualifier = Bytes.toBytes(Integer.toString(j));
                    byte[] column = Bytes.toBytes("column:" + Integer.toString(j));
                    edit.add(new KeyValue(rowName, family, qualifier, System.currentTimeMillis(), column));
                    LOG.info("Region " + i + ": " + edit);
                    log.append(infos[i], tableName, edit, System.currentTimeMillis(), htd, sequenceId);
                }
            }
            log.rollWriter();
        }
        log.close();
        List<Path> splits = HLogSplitter.split(hbaseDir, logdir, oldLogDir, fs, conf);
        verifySplits(splits, howmany);
        log = null;
    } finally {
        if (log != null) {
            log.closeAndDelete();
        }
    }
}
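Here the AtomicLong(1) acts as a shared sequence-id generator handed to every append call. A minimal sketch of that idiom, independent of the HBase WAL classes (class and method names are illustrative):

import java.util.concurrent.atomic.AtomicLong;

class SequenceIdDemo {
    // sequence ids start at 1; getAndIncrement hands out 1, 2, 3, ... atomically
    private final AtomicLong sequenceId = new AtomicLong(1);

    long append(String edit) {
        long id = sequenceId.getAndIncrement();
        System.out.println("appended edit '" + edit + "' with sequence id " + id);
        return id;
    }
}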
From source file: org.apache.hadoop.hbase.client.coprocessor.AggregationClient.java
/**
 * It gives the row count, by summing up the individual results obtained from
 * regions. In case the qualifier is null, FirstKeyValueFilter is used to
 * optimised the operation. In case qualifier is provided, I can't use the
 * filter as it may set the flag to skip to next row, but the value read is
 * not of the given filter: in this case, this particular row will not be
 * counted ==> an error.
 * @param table
 * @param ci
 * @param scan
 * @return <R, S>
 * @throws Throwable
 */
public <R, S, P extends Message, Q extends Message, T extends Message> long rowCount(final HTable table,
        final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan) throws Throwable {
    final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, true);
    class RowNumCallback implements Batch.Callback<Long> {
        private final AtomicLong rowCountL = new AtomicLong(0);

        public long getRowNumCount() {
            return rowCountL.get();
        }

        @Override
        public void update(byte[] region, byte[] row, Long result) {
            rowCountL.addAndGet(result.longValue());
        }
    }
    RowNumCallback rowNum = new RowNumCallback();
    table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(),
            new Batch.Call<AggregateService, Long>() {
                @Override
                public Long call(AggregateService instance) throws IOException {
                    ServerRpcController controller = new ServerRpcController();
                    BlockingRpcCallback<AggregateResponse> rpcCallback = new BlockingRpcCallback<AggregateResponse>();
                    instance.getRowNum(controller, requestArg, rpcCallback);
                    AggregateResponse response = rpcCallback.get();
                    if (controller.failedOnException()) {
                        throw controller.getFailedOn();
                    }
                    byte[] bytes = getBytesFromResponse(response.getFirstPart(0));
                    ByteBuffer bb = ByteBuffer.allocate(8).put(bytes);
                    bb.rewind();
                    return bb.getLong();
                }
            }, rowNum);
    return rowNum.getRowNumCount();
}
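The callback above accumulates per-region partial counts into one AtomicLong with addAndGet, which stays correct even when callbacks arrive on different threads. A self-contained sketch of the same accumulation using plain threads instead of HBase coprocessor callbacks:

import java.util.concurrent.atomic.AtomicLong;

public class PartialCountAccumulator {
    public static void main(String[] args) throws InterruptedException {
        final AtomicLong total = new AtomicLong(0);
        long[] partialCounts = { 10L, 25L, 7L }; // results reported by separate "regions"

        Thread[] workers = new Thread[partialCounts.length];
        for (int i = 0; i < workers.length; i++) {
            final long partial = partialCounts[i];
            workers[i] = new Thread(new Runnable() {
                @Override
                public void run() {
                    total.addAndGet(partial); // safe from any thread
                }
            });
            workers[i].start();
        }
        for (Thread worker : workers) {
            worker.join();
        }
        System.out.println(total.get()); // 42
    }
}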
From source file: org.apache.nifi.processors.kite.ConvertAvroSchema.java
@Override
public void onTrigger(ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile incomingAvro = session.get();
    if (incomingAvro == null) {
        return;
    }
    String inputSchemaProperty = context.getProperty(INPUT_SCHEMA).evaluateAttributeExpressions(incomingAvro)
            .getValue();
    final Schema inputSchema;
    try {
        inputSchema = getSchema(inputSchemaProperty, DefaultConfiguration.get());
    } catch (SchemaNotFoundException e) {
        getLogger().error("Cannot find schema: " + inputSchemaProperty);
        session.transfer(incomingAvro, FAILURE);
        return;
    }
    String outputSchemaProperty = context.getProperty(OUTPUT_SCHEMA).evaluateAttributeExpressions(incomingAvro)
            .getValue();
    final Schema outputSchema;
    try {
        outputSchema = getSchema(outputSchemaProperty, DefaultConfiguration.get());
    } catch (SchemaNotFoundException e) {
        getLogger().error("Cannot find schema: " + outputSchemaProperty);
        session.transfer(incomingAvro, FAILURE);
        return;
    }
    final Map<String, String> fieldMapping = new HashMap<>();
    for (final Map.Entry<PropertyDescriptor, String> entry : context.getProperties().entrySet()) {
        if (entry.getKey().isDynamic()) {
            fieldMapping.put(entry.getKey().getName(), entry.getValue());
        }
    }
    // Set locale
    final String localeProperty = context.getProperty(LOCALE).getValue();
    final Locale locale = localeProperty.equals(DEFAULT_LOCALE_VALUE) ? Locale.getDefault()
            : LocaleUtils.toLocale(localeProperty);
    final AvroRecordConverter converter = new AvroRecordConverter(inputSchema, outputSchema, fieldMapping,
            locale);

    final DataFileWriter<Record> writer = new DataFileWriter<>(
            AvroUtil.newDatumWriter(outputSchema, Record.class));
    writer.setCodec(getCodecFactory(context.getProperty(COMPRESSION_TYPE).getValue()));

    final DataFileWriter<Record> failureWriter = new DataFileWriter<>(
            AvroUtil.newDatumWriter(outputSchema, Record.class));
    failureWriter.setCodec(getCodecFactory(context.getProperty(COMPRESSION_TYPE).getValue()));

    try {
        final AtomicLong written = new AtomicLong(0L);
        final FailureTracker failures = new FailureTracker();

        final List<Record> badRecords = Lists.newLinkedList();
        FlowFile incomingAvroCopy = session.clone(incomingAvro);
        FlowFile outgoingAvro = session.write(incomingAvro, new StreamCallback() {
            @Override
            public void process(InputStream in, OutputStream out) throws IOException {
                try (DataFileStream<Record> stream = new DataFileStream<Record>(in,
                        new GenericDatumReader<Record>(converter.getInputSchema()))) {
                    try (DataFileWriter<Record> w = writer.create(outputSchema, out)) {
                        for (Record record : stream) {
                            try {
                                Record converted = converter.convert(record);
                                w.append(converted);
                                written.incrementAndGet();
                            } catch (AvroConversionException e) {
                                failures.add(e);
                                getLogger().error("Error converting data: " + e.getMessage());
                                badRecords.add(record);
                            }
                        }
                    }
                }
            }
        });

        FlowFile badOutput = session.write(incomingAvroCopy, new StreamCallback() {
            @Override
            public void process(InputStream in, OutputStream out) throws IOException {
                try (DataFileWriter<Record> w = failureWriter.create(inputSchema, out)) {
                    for (Record record : badRecords) {
                        w.append(record);
                    }
                }
            }
        });

        long errors = failures.count();

        // update only if file transfer is successful
        session.adjustCounter("Converted records", written.get(), false);
        // update only if file transfer is successful
        session.adjustCounter("Conversion errors", errors, false);

        if (written.get() > 0L) {
            session.transfer(outgoingAvro, SUCCESS);
        } else {
            session.remove(outgoingAvro);

            if (errors == 0L) {
                badOutput = session.putAttribute(badOutput, "errors", "No incoming records");
                session.transfer(badOutput, FAILURE);
            }
        }

        if (errors > 0L) {
            getLogger().warn("Failed to convert {}/{} records between Avro Schemas",
                    new Object[] { errors, errors + written.get() });
            badOutput = session.putAttribute(badOutput, "errors", failures.summary());
            session.transfer(badOutput, FAILURE);
        } else {
            session.remove(badOutput);
        }
    } catch (ProcessException | DatasetIOException e) {
        getLogger().error("Failed reading or writing", e);
        session.transfer(incomingAvro, FAILURE);
    } catch (DatasetException e) {
        getLogger().error("Failed to read FlowFile", e);
        session.transfer(incomingAvro, FAILURE);
    } finally {
        try {
            writer.close();
        } catch (IOException e) {
            getLogger().warn("Unable to close writer ressource", e);
        }
        try {
            failureWriter.close();
        } catch (IOException e) {
            getLogger().warn("Unable to close writer ressource", e);
        }
    }
}
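In this example the written counter is an AtomicLong largely because a plain local long cannot be mutated from inside the anonymous StreamCallback; the atomic holder also stays correct if the callback were ever invoked concurrently. A tiny sketch of that capture pattern with a hypothetical callback interface (not the NiFi API):

import java.util.concurrent.atomic.AtomicLong;

public class CaptureCounterDemo {
    interface RecordCallback {
        void process(String record);
    }

    static void forEachRecord(String[] records, RecordCallback callback) {
        for (String record : records) {
            callback.process(record);
        }
    }

    public static void main(String[] args) {
        final AtomicLong written = new AtomicLong(0L);
        forEachRecord(new String[] { "a", "b", "c" }, new RecordCallback() {
            @Override
            public void process(String record) {
                written.incrementAndGet(); // a local 'long' could not be modified here
            }
        });
        System.out.println(written.get()); // 3
    }
}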
From source file: org.apache.bookkeeper.bookie.InterleavedLedgerStorageTest.java
@Test
public void testShellCommands() throws Exception {
    interleavedStorage.flush();
    interleavedStorage.shutdown();
    final Pattern entryPattern = Pattern
            .compile("entry (?<entry>\\d+)\t:\t((?<na>N/A)|\\(log:(?<logid>\\d+), pos: (?<pos>\\d+)\\))");

    class Metadata {
        final Pattern keyPattern = Pattern.compile("master key +: ([0-9a-f])");
        final Pattern sizePattern = Pattern.compile("size +: (\\d+)");
        final Pattern entriesPattern = Pattern.compile("entries +: (\\d+)");
        final Pattern isFencedPattern = Pattern.compile("isFenced +: (\\w+)");

        public String masterKey;
        public long size = -1;
        public long entries = -1;
        public boolean foundFenced = false;

        void check(String s) {
            Matcher keyMatcher = keyPattern.matcher(s);
            if (keyMatcher.matches()) {
                masterKey = keyMatcher.group(1);
                return;
            }

            Matcher sizeMatcher = sizePattern.matcher(s);
            if (sizeMatcher.matches()) {
                size = Long.valueOf(sizeMatcher.group(1));
                return;
            }

            Matcher entriesMatcher = entriesPattern.matcher(s);
            if (entriesMatcher.matches()) {
                entries = Long.valueOf(entriesMatcher.group(1));
                return;
            }

            Matcher isFencedMatcher = isFencedPattern.matcher(s);
            if (isFencedMatcher.matches()) {
                Assert.assertEquals("true", isFencedMatcher.group(1));
                foundFenced = true;
                return;
            }
        }

        void validate(long foundEntries) {
            Assert.assertTrue(entries >= numWrites * entriesPerWrite);
            Assert.assertEquals(entries, foundEntries);
            Assert.assertTrue(foundFenced);
            Assert.assertNotEquals(-1, size);
        }
    }
    final Metadata foundMetadata = new Metadata();

    AtomicLong curEntry = new AtomicLong(0);
    AtomicLong someEntryLogger = new AtomicLong(-1);
    BookieShell shell = new BookieShell(LedgerIdFormatter.LONG_LEDGERID_FORMATTER,
            EntryFormatter.STRING_FORMATTER) {
        @Override
        void printInfoLine(String s) {
            Matcher matcher = entryPattern.matcher(s);
            System.out.println(s);
            if (matcher.matches()) {
                assertEquals(Long.toString(curEntry.get()), matcher.group("entry"));

                if (matcher.group("na") == null) {
                    String logId = matcher.group("logid");
                    Assert.assertNotEquals(matcher.group("logid"), null);
                    Assert.assertNotEquals(matcher.group("pos"), null);
                    Assert.assertTrue((curEntry.get() % entriesPerWrite) == 0);
                    Assert.assertTrue(curEntry.get() <= numWrites * entriesPerWrite);

                    if (someEntryLogger.get() == -1) {
                        someEntryLogger.set(Long.valueOf(logId));
                    }
                } else {
                    Assert.assertEquals(matcher.group("logid"), null);
                    Assert.assertEquals(matcher.group("pos"), null);
                    Assert.assertTrue(((curEntry.get() % entriesPerWrite) != 0)
                            || ((curEntry.get() >= (entriesPerWrite * numWrites))));
                }
                curEntry.incrementAndGet();
            } else {
                foundMetadata.check(s);
            }
        }
    };
    shell.setConf(conf);

    int res = shell.run(new String[] { "ledger", "-m", "0" });
    Assert.assertEquals(0, res);
    Assert.assertTrue(curEntry.get() >= numWrites * entriesPerWrite);
    foundMetadata.validate(curEntry.get());

    // Should pass consistency checker
    res = shell.run(new String[] { "localconsistencycheck" });
    Assert.assertEquals(0, res);

    // Remove a logger
    EntryLogger entryLogger = new EntryLogger(conf);
    entryLogger.removeEntryLog(someEntryLogger.get());

    // Should fail consistency checker
    res = shell.run(new String[] { "localconsistencycheck" });
    Assert.assertEquals(1, res);
}
From source file: com.indeed.lsmtree.recordcache.PersistentRecordCache.java
/**
 * Performs lookup for multiple keys and returns a streaming iterator to results.
 * Each element in the iterator is one of
 * (1) an exception associated with a single lookup
 * (2) a key value tuple
 *
 * @param keys lookup keys
 * @param progress (optional) an AtomicInteger for tracking progress
 * @param skipped (optional) an AtomicInteger for tracking missing keys
 * @return iterator of lookup results
 */
public Iterator<Either<Exception, P2<K, V>>> getStreaming(final @Nonnull Iterator<K> keys,
        final @Nullable AtomicInteger progress, final @Nullable AtomicInteger skipped) {
    log.info("starting store lookups");
    LongArrayList addressList = new LongArrayList();
    int notFound = 0;
    while (keys.hasNext()) {
        final K key = keys.next();
        final Long address;
        try {
            address = index.get(key);
        } catch (IOException e) {
            log.error("error", e);
            return Iterators.singletonIterator(Left.<Exception, P2<K, V>>of(new IndexReadException(e)));
        }
        if (address != null) {
            addressList.add(address);
        } else {
            notFound++;
        }
    }
    if (progress != null)
        progress.addAndGet(notFound);
    if (skipped != null)
        skipped.addAndGet(notFound);
    log.info("store lookups complete, sorting addresses");

    final long[] addresses = addressList.elements();
    Arrays.sort(addresses, 0, addressList.size());

    log.info("initializing store lookup iterator");

    final BlockingQueue<Runnable> taskQueue = new ArrayBlockingQueue<Runnable>(100);
    final Iterator<List<Long>> iterable = Iterators.partition(addressList.iterator(), 1000);
    final ExecutorService primerThreads = new ThreadPoolExecutor(10, 10, 0L, TimeUnit.MILLISECONDS, taskQueue,
            new NamedThreadFactory("store priming thread", true, log), new RejectedExecutionHandler() {
                @Override
                public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
                    try {
                        taskQueue.put(r);
                    } catch (InterruptedException e) {
                        log.error("error", e);
                        throw new RuntimeException(e);
                    }
                }
            });
    final BlockingQueue<List<Either<Exception, P2<K, V>>>> completionQueue = new ArrayBlockingQueue<List<Either<Exception, P2<K, V>>>>(
            10);
    final AtomicLong runningTasks = new AtomicLong(0);
    final AtomicBoolean taskSubmitterRunning = new AtomicBoolean(true);

    new Thread(new Runnable() {
        @Override
        public void run() {
            while (iterable.hasNext()) {
                runningTasks.incrementAndGet();
                final List<Long> addressesSublist = iterable.next();
                primerThreads.submit(new FutureTask<List<Either<Exception, P2<K, V>>>>(
                        new RecordLookupTask(addressesSublist)) {
                    @Override
                    protected void done() {
                        try {
                            final List<Either<Exception, P2<K, V>>> results = get();
                            if (progress != null) {
                                progress.addAndGet(results.size());
                            }
                            completionQueue.put(results);
                        } catch (InterruptedException e) {
                            log.error("error", e);
                            throw new RuntimeException(e);
                        } catch (ExecutionException e) {
                            log.error("error", e);
                            throw new RuntimeException(e);
                        }
                    }
                });
            }
            taskSubmitterRunning.set(false);
        }
    }, "RecordLookupTaskSubmitterThread").start();

    return new Iterator<Either<Exception, P2<K, V>>>() {
        Iterator<Either<Exception, P2<K, V>>> currentIterator;

        @Override
        public boolean hasNext() {
            if (currentIterator != null && currentIterator.hasNext())
                return true;
            while (taskSubmitterRunning.get() || runningTasks.get() > 0) {
                try {
                    final List<Either<Exception, P2<K, V>>> list = completionQueue.poll(1, TimeUnit.SECONDS);
                    if (list != null) {
                        log.debug("remaining: " + runningTasks.decrementAndGet());
                        currentIterator = list.iterator();
                        if (currentIterator.hasNext())
                            return true;
                    }
                } catch (InterruptedException e) {
                    log.error("error", e);
                    throw new RuntimeException(e);
                }
            }
            primerThreads.shutdown();
            return false;
        }

        @Override
        public Either<Exception, P2<K, V>> next() {
            return currentIterator.next();
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException();
        }
    };
}
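Here runningTasks is an in-flight counter: the submitter thread increments it for every batch it schedules and the consuming iterator decrements it as batches are drained, so the consumer knows when it may stop polling. A condensed sketch of that handshake with generic queue work (not the lsmtree classes):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;

public class InFlightCounterDemo {
    public static void main(String[] args) throws InterruptedException {
        final BlockingQueue<String> completionQueue = new ArrayBlockingQueue<String>(10);
        final AtomicLong runningTasks = new AtomicLong(0);
        final AtomicBoolean submitterRunning = new AtomicBoolean(true);

        new Thread(new Runnable() {
            @Override
            public void run() {
                for (int i = 0; i < 5; i++) {
                    runningTasks.incrementAndGet();  // one more batch in flight
                    completionQueue.add("batch-" + i);
                }
                submitterRunning.set(false);         // nothing more will be submitted
            }
        }, "SubmitterThread").start();

        // Keep polling while the submitter is alive or batches are still outstanding
        while (submitterRunning.get() || runningTasks.get() > 0) {
            String batch = completionQueue.poll(1, TimeUnit.SECONDS);
            if (batch != null) {
                runningTasks.decrementAndGet();      // batch fully consumed
                System.out.println(batch);
            }
        }
    }
}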
From source file: com.facebook.presto.accumulo.index.Indexer.java
/**
 * Flushes all Mutations in the index writer. And all metric mutations to the metrics table.
 * Note that the metrics table is not updated until this method is explicitly called (or implicitly via close).
 */
public void flush() {
    try {
        // Flush index writer
        indexWriter.flush();

        // Write out metrics mutations
        BatchWriter metricsWriter = connector.createBatchWriter(table.getMetricsTableName(), writerConfig);
        metricsWriter.addMutations(getMetricsMutations());
        metricsWriter.close();

        // Re-initialize the metrics
        metrics.clear();
        metrics.put(METRICS_TABLE_ROW_COUNT, new AtomicLong(0));
    } catch (MutationsRejectedException e) {
        throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Index mutation was rejected by server on flush", e);
    } catch (TableNotFoundException e) {
        throw new PrestoException(ACCUMULO_TABLE_DNE, "Accumulo table does not exist", e);
    }
}
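After the metrics are written out, the map is cleared and the well-known row-count key is re-seeded with a fresh AtomicLong(0) so the next batch starts counting from zero. A small sketch of that reset step, with a hypothetical key and map rather than the Presto internals:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;

class MetricsReset {
    static final String ROW_COUNT_KEY = "rowCount";
    private final Map<String, AtomicLong> metrics = new HashMap<>();

    MetricsReset() {
        metrics.put(ROW_COUNT_KEY, new AtomicLong(0));
    }

    void flush() {
        // ... write the current metrics somewhere durable ...
        metrics.clear();
        metrics.put(ROW_COUNT_KEY, new AtomicLong(0)); // start the next batch from zero
    }

    long rowCount() {
        return metrics.get(ROW_COUNT_KEY).get();
    }
}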
From source file: dk.dma.ais.store.FileExportRest.java
/** {@inheritDoc} */
@Override
protected void run(Injector injector) throws Exception {

    printAisStoreNL("AIS STORE COMMAND LINE TOOL INITIATED");
    printAisStoreLine();

    // Hardcoded values
    // interval = "2015-1-5T14:00:00Z/2015-1-5T14:10:00Z";

    // java -jar ais-store-cli-0.3-SNAPSHOT.jar export -area 15,-18,-10,14 -filter
    // "m.country=DNK & t.pos within bbox(15,-18,-10,14) & (t.lat<-0.3|t.lat>0.3) & (t.lon<-0.3|t.lon>0.3)" -fetchSize 30000
    // -interval

    // java -jar ais-store-cli-0.3-SNAPSHOT.jar export -area 15,-18,-10,14 -filter
    // "m.country=DNK & t.pos within bbox(15,-18,-10,14) & (t.lat<-0.3|t.lat>0.3) & (t.lon<-0.3|t.lon>0.3)" -fetchSize 30000
    // -interval 2015-1-5T14:00:00Z/2015-1-5T14:10:00Z

    // Create request
    String request = "";

    if (interval == null || interval.equals("")) {
        printAisStoreNL("No Interval provided, please check your request.");
        // return;
        terminateAndPrintHelp();
    }

    try {
        intervalVar = Interval.parse(interval);
    } catch (Exception e) {
        printAisStoreNL("Invalid Interval provided, please check your request.");
        terminateAndPrintHelp();
    }

    // intervalVar = DateTimeUtil.toInterval(intervalStr);
    intervalStartTime = intervalVar.getStartMillis();

    String intervalStr = interval.toString();
    request = request + "?interval=" + interval;

    // System.out.println("Interval parsed correct " + intervalStr);

    // Check if interval is valid, throw exception etc
    // Create task for exception throwing if args are required
    // If error, throw exception of error description then -help

    if (mmsis.size() > 0) {
        request = request + "&mmsi=";
        for (int i = 0; i < mmsis.size() - 1; i++) {
            request = request + Integer.toString(mmsis.get(i)) + ",";
        }
        request = request + mmsis.get(mmsis.size() - 1);
    }

    // filter
    // Google URL Encoder
    // "t.name like H* & t.name like HAMLET"
    // CHeck if url filter is valid, then url encode it and add to request
    // filter = "t.name like H* & t.name like HAMLET";
    // filter = "s.country in (DNK)";

    if (filter != null && !filter.equals("")) {
        String encodedFilter = URLEncoder.encode(filter, "UTF-8");
        try {
            AisPacketFilters.parseExpressionFilter(filter);
        } catch (Exception e) {
            printAisStoreNL("Invalid filter expression");
            terminateAndPrintHelp();
        }
        request = request + "&filter=" + encodedFilter;
    }

    // area
    // "&box=lat1,lon1,lat2,lon2"
    // blabla check if valid
    if (area != null && !area.equals("")) {
        request = request + "&box=" + area;
    }

    // If table, make sure column is added
    if ((columns == null || columns.equals(""))
            && (outputFormat.equals("table") || outputFormat.equals("jsonObject"))) {
        printAisStoreNL("When using outputFormat " + outputFormat + ", columns are required");
        terminateAndPrintHelp();
    }

    try {
        sink = AisPacketOutputSinks.getOutputSink(outputFormat, columns, separator);
        request = request + "&outputFormat=" + outputFormat;
    } catch (Exception e) {
        printAisStoreNL("Invalid output format provided, " + outputFormat + ", please check your request.");
        terminateAndPrintHelp();
    }

    // if table do shit
    // columns \/ REQUIRED
    // "columns=mmsi;time;timestamp"
    // Check if valid
    // Split on ";"
    // "columns=<listElement0>:list1"
    if (columns != null) {
        request = request + "&columns=" + columns;
    }

    // seperator
    // "seperator=\t"
    // Url encode and add
    if (separator != null || !separator.equals("")) {
        String encodedSeparator = URLEncoder.encode(separator, "UTF-8");
        request = request + "&seperator=" + encodedSeparator;
    }

    // fetchSize
    if (fetchSize != -1) {
        request = request + "&fetchsize=" + fetchSize;
    }

    // Get path from request, if none it will store in root of ais store client
    // filePath = "C:\\AisStoreData\\";
    if (filePath == null) {
        filePath = "";
    } else {
        filePath = filePath + "/";
    }

    // No filename provided, generate unique based on request parameters
    if (fileName == null || fileName.equals("")) {
        MessageDigest md5 = MessageDigest.getInstance("MD5");
        String hex = (new HexBinaryAdapter()).marshal(md5.digest(request.getBytes()));
        fileName = hex;
    }

    // Generate unique hashsum based on request
    metaFileName = fileName + ".aisstore";

    // boolean isTryResume = true;

    // If we are trying to resume, don't override previous file
    try {
        fileOutputStream = new FileOutputStream(filePath + fileName, !forceDownload);
    } catch (Exception e) {
        printAisStoreNL("Error occuring writing to disk, make sure the folder path exists ");
        terminateAndPrintHelp();
    }

    outputStream = new BufferedOutputStream(fileOutputStream);

    // Should we resume anything
    // We have read the file
    // If the file exists that means a previous transaction has been done
    // Do we resume, if we do, we need to find the resume point ie move the start interval

    /**
     * System.out.println("Test Compare"); System.out.println(intervalStr);
     *
     * DateTime time = new DateTime(interval.getStartMillis(), DateTimeZone.UTC); DateTime time2 = new
     * DateTime(interval.getEndMillis(), DateTimeZone.UTC);
     *
     * String newIntervalStr = dateTimeFormatter.withZoneUTC().print(time) + "/" + dateTimeFormatter.withZoneUTC().print(time2);
     * System.out.println(newIntervalStr); // DateTime dateTime =
     * dateTimeFormatter.parseDateTime("15-Oct-2013 11:34:26 AM").withZone(DateTimeZone.UTC);
     *
     * // System.out.println(dateTime);
     *
     * // Interval var = Interval.parse(intervalStr); // String dateStr = formatter.withZone(DateTimeZone.UTC).print(dateTime1);
     * //
     *
     * System.exit(0);
     **/

    printAisStoreNL("Request generation complete.");
    printAisStoreNL("AIS Data will be saved to " + filePath + fileName);
    // System.out.println("--------------------------------------------------------------------------------");

    // We are resuming, insert a Carriage Return Line Feed
    if (!forceDownload) {

        // Load the meta data in
        readMeta();

        // We have processed some packages already
        if (packageCount != 0) {
            String str = "\r\n";
            outputStream.write(str.getBytes());

            printAisStoreLine();
            printAisStoreNL("Resume detected - Updating Request");
            // System.out.println("From " + intervalStr);

            // Update intervalStr
            DateTime time = new DateTime(lastLoadedTimestamp, DateTimeZone.UTC);
            DateTime time2 = new DateTime(intervalVar.getEndMillis(), DateTimeZone.UTC);
            intervalStr = dateTimeFormatter.withZoneUTC().print(time) + "/"
                    + dateTimeFormatter.withZoneUTC().print(time2);
            // System.out.println("To " + intervalStr);
            printAisStoreLine();
            // System.out.println("The last stored timestamp was \n" + lastLoadedTimestamp);
            // Interval interval2 = DateTimeUtil.toInterval(intervalStr);
            // System.out.println(interval2.getStartMillis());
        } else {
            writeMetaInit(intervalVar.getStartMillis());
            lastLoadedTimestamp = intervalVar.getStartMillis();
        }

    } else {
        // We are starting a new request, create a new meta init
        writeMetaInit(intervalVar.getStartMillis());
        lastLoadedTimestamp = intervalVar.getStartMillis();
    }

    // System.out.println("Interval Str is " + intervalStr);
    // System.exit(0);

    // Initialize
    counter = new AtomicLong(packageCount);

    // Do we need to set a new interval start based on the meta data read?

    DefaultHttpClient httpClient = new DefaultHttpClient();
    HttpHost target = new HttpHost("ais2.e-navigation.net", 443, "https");

    request = "/aisview/rest/store/query" + request;
    HttpGet getRequest = new HttpGet(request);

    // HttpGet getRequest = new
    // HttpGet("/aisview/rest/store/query?interval=2015-1-1T10:00:00Z/2015-2-1T10:10:00Z&box=65.145,-5.373,34.450,76.893");
    // + "&mmsi=219230000"
    // + "&mmsi=219230000"

    printAisStoreNL("Executing request to " + target);
    printAisStoreNL("Request is: " + request);
    HttpResponse httpResponse = httpClient.execute(target, getRequest);
    HttpEntity entity = httpResponse.getEntity();

    // Check we have an OK from server etc.
    printAisStoreLine();

    boolean terminateFailure = false;

    StatusLine reply = httpResponse.getStatusLine();
    switch (reply.getStatusCode()) {
    case HttpStatus.SC_OK:
        printAisStoreNL("Server Accepted Connection, download will begin shortly");
        printAisStoreLine();
        break;
    default:
        printAisStoreNL("An error occured establishing connection to the server. ");
        printAisStoreNL(
                "Server returned Status Code " + reply.getStatusCode() + " with " + reply.getReasonPhrase());
        terminateFailure = true;
        break;
    }

    if (terminateFailure) {
        return;
    }

    // System.out.println("Got reply " + reply.getReasonPhrase() + " status code " + reply.getStatusCode());
    // String httpServerReply = httpResponse.getStatusLine();
    // System.out.println(httpResponse.getStatusLine());
    //
    // Header[] headers = httpResponse.getAllHeaders();
    // for (int i = 0; i < headers.length; i++) {
    // System.out.println(headers[i]);
    // }

    // Do we use the footer?
    AisReader aisReader;

    if (entity != null) {
        InputStream inputStream = entity.getContent();
        aisReader = aisReadWriter(inputStream);
        aisReader.start();
        aisReader.join();

        // Write the remainder still stored in buffer, update the final meta data with the finished data
        writeMetaUpdate(currentTimeStamp, counter.get());

        // Write the footer
        sink.footer(outputStream, counter.get());

        // Closer and flush the buffer
        outputStream.flush();
        outputStream.close();

        // Close and flush the file stream
        fileOutputStream.flush();
        fileOutputStream.close();
    }

    // print a new line to move on from previous /r
    printAisStoreNL(
            "Downloading AIS Data 100% Estimated Time Left: 00:00:00                                   ");
    printAisStoreLine();
    printAisStoreNL("DOWNLOAD SUCCESS");
    printAisStoreLine();

    // We know current time
    long currentTime = System.currentTimeMillis();

    // How long have we been running
    long millis = currentTime - timeStart;

    String timeLeftStr = String.format("%02d:%02d:%02d", TimeUnit.MILLISECONDS.toHours(millis),
            TimeUnit.MILLISECONDS.toMinutes(millis)
                    - TimeUnit.HOURS.toMinutes(TimeUnit.MILLISECONDS.toHours(millis)),
            TimeUnit.MILLISECONDS.toSeconds(millis)
                    - TimeUnit.MINUTES.toSeconds(TimeUnit.MILLISECONDS.toMinutes(millis)));

    printAisStoreNL("Total Time " + timeLeftStr);
    printAisStoreNL("Finished at: " + new Date());
    printAisStoreNL("Messages recieved " + counter);
    // printAisStore("Query took " + timeLeftStr);
}
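Because this tool supports resuming an interrupted download, the message counter is constructed from the package count read back from the .aisstore metadata file rather than from zero. A reduced sketch of that resume idea, with hypothetical field names:

import java.util.concurrent.atomic.AtomicLong;

class ResumableCounterDemo {
    private AtomicLong counter;

    void start(long previouslyDownloaded) {
        // continue counting where the last (interrupted) run stopped
        counter = new AtomicLong(previouslyDownloaded);
    }

    long recordMessage() {
        return counter.incrementAndGet();
    }
}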