List of usage examples for java.util.concurrent.atomic.AtomicLong.addAndGet

public final long addAndGet(long delta)

Atomically adds the given delta to the current value and returns the updated value.
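As a quick illustration of the semantics before the real-world examples below — addAndGet applies the delta and returns the updated value, whereas getAndAdd returns the previous one — here is a minimal standalone demo (not taken from any of the projects below):

import java.util.concurrent.atomic.AtomicLong;

public class AddAndGetDemo {
  public static void main(String[] args) {
    AtomicLong value = new AtomicLong(10);
    System.out.println(value.addAndGet(5));   // 15 -- the updated value is returned
    System.out.println(value.getAndAdd(5));   // 15 -- the previous value is returned
    System.out.println(value.get());          // 20
    System.out.println(value.addAndGet(-7));  // 13 -- a negative delta subtracts
  }
}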
From source file: io.druid.data.input.impl.PrefetchableTextFilesFirehoseFactory.java

private Closeable getFetchedFileCloser(final FetchedFile fetchedFile, final AtomicLong fetchedBytes)
{
  return () -> {
    final long fileSize = fetchedFile.length();
    fetchedFile.delete();
    fetchedBytes.addAndGet(-fileSize);
  };
}
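The idiom here is a byte budget: space is charged when a file is fetched and refunded with a negative delta when the file is deleted. A minimal, self-contained sketch of that pattern (class and method names are illustrative, not Druid's):

import java.io.Closeable;
import java.util.concurrent.atomic.AtomicLong;

public class ByteBudget {
  private final AtomicLong usedBytes = new AtomicLong(0);

  // Charge the budget and hand back a Closeable that refunds the charge.
  public Closeable reserve(long bytes) {
    usedBytes.addAndGet(bytes);
    return () -> usedBytes.addAndGet(-bytes); // negative delta releases the reservation
  }

  public long used() {
    return usedBytes.get();
  }
}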
From source file: io.druid.client.cache.MemcachedCache.java

public static MemcachedCache create(final MemcachedCacheConfig config)
{
  final ConcurrentMap<String, AtomicLong> counters = new ConcurrentHashMap<>();
  final ConcurrentMap<String, AtomicLong> meters = new ConcurrentHashMap<>();
  final AbstractMonitor monitor = new AbstractMonitor()
  {
    final AtomicReference<Map<String, Long>> priorValues = new AtomicReference<Map<String, Long>>(
        new HashMap<String, Long>());

    @Override
    public boolean doMonitor(ServiceEmitter emitter)
    {
      final Map<String, Long> priorValues = this.priorValues.get();
      final Map<String, Long> currentValues = getCurrentValues();
      final ServiceMetricEvent.Builder builder = ServiceMetricEvent.builder();
      for (Map.Entry<String, Long> entry : currentValues.entrySet()) {
        emitter.emit(builder.setDimension("memcached metric", entry.getKey())
                            .build("query/cache/memcached/total", entry.getValue()));
        final Long prior = priorValues.get(entry.getKey());
        if (prior != null) {
          emitter.emit(builder.setDimension("memcached metric", entry.getKey())
                              .build("query/cache/memcached/delta", entry.getValue() - prior));
        }
      }
      if (!this.priorValues.compareAndSet(priorValues, currentValues)) {
        log.error("Prior value changed while I was reporting! updating anyways");
        this.priorValues.set(currentValues);
      }
      return true;
    }

    private Map<String, Long> getCurrentValues()
    {
      final ImmutableMap.Builder<String, Long> builder = ImmutableMap.builder();
      for (Map.Entry<String, AtomicLong> entry : counters.entrySet()) {
        builder.put(entry.getKey(), entry.getValue().get());
      }
      for (Map.Entry<String, AtomicLong> entry : meters.entrySet()) {
        builder.put(entry.getKey(), entry.getValue().get());
      }
      return builder.build();
    }
  };

  try {
    LZ4Transcoder transcoder = new LZ4Transcoder(config.getMaxObjectSize());

    // always use compression
    transcoder.setCompressionThreshold(0);

    OperationQueueFactory opQueueFactory;
    long maxQueueBytes = config.getMaxOperationQueueSize();
    if (maxQueueBytes > 0) {
      opQueueFactory = new MemcachedOperationQueueFactory(maxQueueBytes);
    } else {
      opQueueFactory = new LinkedOperationQueueFactory();
    }

    final Predicate<String> interesting = new Predicate<String>()
    {
      // See net.spy.memcached.MemcachedConnection.registerMetrics()
      private final Set<String> interestingMetrics = ImmutableSet.of(
          "[MEM] Reconnecting Nodes (ReconnectQueue)",
          //"[MEM] Shutting Down Nodes (NodesToShutdown)", // Busted
          "[MEM] Request Rate: All",
          "[MEM] Average Bytes written to OS per write",
          "[MEM] Average Bytes read from OS per read",
          "[MEM] Average Time on wire for operations (s)",
          "[MEM] Response Rate: All (Failure + Success + Retry)",
          "[MEM] Response Rate: Retry",
          "[MEM] Response Rate: Failure",
          "[MEM] Response Rate: Success");

      @Override
      public boolean apply(@Nullable String input)
      {
        return input != null && interestingMetrics.contains(input);
      }
    };

    final MetricCollector metricCollector = new MetricCollector()
    {
      @Override
      public void addCounter(String name)
      {
        if (!interesting.apply(name)) {
          return;
        }
        counters.putIfAbsent(name, new AtomicLong(0L));
        if (log.isDebugEnabled()) {
          log.debug("Add Counter [%s]", name);
        }
      }

      @Override
      public void removeCounter(String name)
      {
        if (log.isDebugEnabled()) {
          log.debug("Ignoring request to remove [%s]", name);
        }
      }

      @Override
      public void incrementCounter(String name)
      {
        if (!interesting.apply(name)) {
          return;
        }
        AtomicLong counter = counters.get(name);
        if (counter == null) {
          counters.putIfAbsent(name, new AtomicLong(0));
          counter = counters.get(name);
        }
        counter.incrementAndGet();
        if (log.isDebugEnabled()) {
          log.debug("Increment [%s]", name);
        }
      }

      @Override
      public void incrementCounter(String name, int amount)
      {
        if (!interesting.apply(name)) {
          return;
        }
        AtomicLong counter = counters.get(name);
        if (counter == null) {
          counters.putIfAbsent(name, new AtomicLong(0));
          counter = counters.get(name);
        }
        counter.addAndGet(amount);
        if (log.isDebugEnabled()) {
          log.debug("Increment [%s] %d", name, amount);
        }
      }

      @Override
      public void decrementCounter(String name)
      {
        if (!interesting.apply(name)) {
          return;
        }
        AtomicLong counter = counters.get(name);
        if (counter == null) {
          counters.putIfAbsent(name, new AtomicLong(0));
          counter = counters.get(name);
        }
        counter.decrementAndGet();
        if (log.isDebugEnabled()) {
          log.debug("Decrement [%s]", name);
        }
      }

      @Override
      public void decrementCounter(String name, int amount)
      {
        if (!interesting.apply(name)) {
          return;
        }
        AtomicLong counter = counters.get(name);
        if (counter == null) {
          counters.putIfAbsent(name, new AtomicLong(0L));
          counter = counters.get(name);
        }
        counter.addAndGet(-amount);
        if (log.isDebugEnabled()) {
          log.debug("Decrement [%s] %d", name, amount);
        }
      }

      @Override
      public void addMeter(String name)
      {
        if (!interesting.apply(name)) {
          return;
        }
        meters.putIfAbsent(name, new AtomicLong(0L));
        if (log.isDebugEnabled()) {
          log.debug("Adding meter [%s]", name);
        }
      }

      @Override
      public void removeMeter(String name)
      {
        if (!interesting.apply(name)) {
          return;
        }
        if (log.isDebugEnabled()) {
          log.debug("Ignoring request to remove meter [%s]", name);
        }
      }

      @Override
      public void markMeter(String name)
      {
        if (!interesting.apply(name)) {
          return;
        }
        AtomicLong meter = meters.get(name);
        if (meter == null) {
          meters.putIfAbsent(name, new AtomicLong(0L));
          meter = meters.get(name);
        }
        meter.incrementAndGet();
        if (log.isDebugEnabled()) {
          log.debug("Increment counter [%s]", name);
        }
      }

      @Override
      public void addHistogram(String name)
      {
        log.debug("Ignoring add histogram [%s]", name);
      }

      @Override
      public void removeHistogram(String name)
      {
        log.debug("Ignoring remove histogram [%s]", name);
      }

      @Override
      public void updateHistogram(String name, int amount)
      {
        log.debug("Ignoring update histogram [%s]: %d", name, amount);
      }
    };

    final ConnectionFactory connectionFactory = new MemcachedCustomConnectionFactoryBuilder()
        // 1000 repetitions gives us good distribution with murmur3_128
        // (approx < 5% difference in counts across nodes, with 5 cache nodes)
        .setKetamaNodeRepetitions(1000)
        .setHashAlg(MURMUR3_128)
        .setProtocol(ConnectionFactoryBuilder.Protocol.BINARY)
        .setLocatorType(ConnectionFactoryBuilder.Locator.CONSISTENT)
        .setDaemon(true)
        .setFailureMode(FailureMode.Cancel)
        .setTranscoder(transcoder)
        .setShouldOptimize(true)
        .setOpQueueMaxBlockTime(config.getTimeout())
        .setOpTimeout(config.getTimeout())
        .setReadBufferSize(config.getReadBufferSize())
        .setOpQueueFactory(opQueueFactory)
        .setMetricCollector(metricCollector)
        .setEnableMetrics(MetricType.DEBUG) // Not as scary as it sounds
        .build();

    final List<InetSocketAddress> hosts = AddrUtil.getAddresses(config.getHosts());

    final Supplier<ResourceHolder<MemcachedClientIF>> clientSupplier;

    if (config.getNumConnections() > 1) {
      clientSupplier = new LoadBalancingPool<MemcachedClientIF>(
          config.getNumConnections(),
          new Supplier<MemcachedClientIF>()
          {
            @Override
            public MemcachedClientIF get()
            {
              try {
                return new MemcachedClient(connectionFactory, hosts);
              }
              catch (IOException e) {
                log.error(e, "Unable to create memcached client");
                throw Throwables.propagate(e);
              }
            }
          });
    } else {
      clientSupplier = Suppliers.<ResourceHolder<MemcachedClientIF>>ofInstance(
          StupidResourceHolder.<MemcachedClientIF>create(new MemcachedClient(connectionFactory, hosts)));
    }

    return new MemcachedCache(clientSupplier, config, monitor);
  }
  catch (IOException e) {
    throw Throwables.propagate(e);
  }
}
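The metric collector above uses a pre-Java-8 get-or-create dance (get, putIfAbsent, get again) before calling addAndGet, and implements decrement as addAndGet(-amount). On Java 8+ the same counter logic can be written more compactly with computeIfAbsent; a minimal sketch (illustrative, not Druid code):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

public class MetricCounters {
  private final ConcurrentMap<String, AtomicLong> counters = new ConcurrentHashMap<>();

  // Create the counter on first use, then apply the delta atomically.
  // A negative delta implements decrement, exactly as in the code above.
  public long add(String name, long delta) {
    return counters.computeIfAbsent(name, k -> new AtomicLong()).addAndGet(delta);
  }
}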
From source file: ubicrypt.core.Utils.java

public static InputStream readIs(final Path path) {
  final PipedInputStream pis = new PipedInputStream();
  final AtomicLong pos = new AtomicLong(0);
  try {
    final PipedOutputStream ostream = new PipedOutputStream(pis);
    final AsynchronousFileChannel channel = AsynchronousFileChannel.open(path, StandardOpenOption.READ);
    final ByteBuffer buffer = ByteBuffer.allocate(1 << 16);
    channel.read(buffer, pos.get(), buffer, new CompletionHandler<Integer, ByteBuffer>() {
      @Override
      public void completed(final Integer result, final ByteBuffer buf) {
        try {
          if (result == -1) {
            ostream.close();
            return;
          }
          final byte[] bytes = new byte[result];
          System.arraycopy(buf.array(), 0, bytes, 0, result);
          ostream.write(bytes);
          ostream.flush();
          if (result < 1 << 16) {
            ostream.close();
            return;
          }
          pos.addAndGet(result);
          final ByteBuffer buffer = ByteBuffer.allocate(1 << 16);
          channel.read(buffer, pos.get(), buffer, this);
        } catch (final IOException e) {
          Throwables.propagate(e);
        }
      }

      @Override
      public void failed(final Throwable exc, final ByteBuffer attachment) {
        log.error(exc.getMessage(), exc);
      }
    });
  } catch (final IOException e) {
    if (e instanceof NoSuchFileException) {
      throw new NotFoundException(path);
    }
    Throwables.propagate(e);
  }
  return pis;
}
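The AtomicLong here serves a second purpose besides the arithmetic: the anonymous CompletionHandler can only reference effectively final locals, so a plain long cursor would not compile. A stripped-down sketch of the offset bookkeeping (illustrative names, not ubicrypt code):

import java.util.concurrent.atomic.AtomicLong;

public class ReadOffsetDemo {
  public static void main(String[] args) {
    final AtomicLong pos = new AtomicLong(0);
    // Each completed read advances the cursor by the bytes delivered;
    // addAndGet both updates the cursor and yields the offset for the next read.
    int bytesRead = 1 << 16;
    long nextOffset = pos.addAndGet(bytesRead);
    System.out.println(nextOffset); // 65536
  }
}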
From source file: org.apache.hadoop.hbase.procedure2.store.wal.TestStressWALProcedureStore.java

@Test
public void testInsertUpdateDelete() throws Exception {
  final long LAST_PROC_ID = 19999;
  final Thread[] thread = new Thread[PROCEDURE_STORE_SLOTS];
  final AtomicLong procCounter = new AtomicLong((long) Math.round(Math.random() * 100));
  for (int i = 0; i < thread.length; ++i) {
    thread[i] = new Thread() {
      @Override
      public void run() {
        Random rand = new Random();
        TestProcedure proc;
        do {
          // After HBASE-15579 there may be gap in the procId sequence, trying to simulate that.
          long procId = procCounter.addAndGet(1 + rand.nextInt(3));
          proc = new TestProcedure(procId);
          // Insert
          procStore.insert(proc, null);
          // Update
          for (int i = 0, nupdates = rand.nextInt(10); i <= nupdates; ++i) {
            try {
              Thread.sleep(0, rand.nextInt(15));
            } catch (InterruptedException e) {
            }
            procStore.update(proc);
          }
          // Delete
          procStore.delete(proc.getProcId());
        } while (proc.getProcId() < LAST_PROC_ID);
      }
    };
    thread[i].start();
  }
  for (int i = 0; i < thread.length; ++i) {
    thread[i].join();
  }
  procStore.getStoreTracker().dump();
  assertTrue(procCounter.get() >= LAST_PROC_ID);
  assertTrue(procStore.getStoreTracker().isEmpty());
  assertEquals(1, procStore.getActiveLogs().size());
}
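The test deliberately produces gaps by adding a random delta of 1 to 3 instead of incrementing by one; because addAndGet is a single atomic step, concurrent threads still receive distinct, strictly increasing IDs. A minimal sketch of that technique (illustrative, not HBase code):

import java.util.Random;
import java.util.concurrent.atomic.AtomicLong;

public class GappedIdGenerator {
  private final AtomicLong lastId = new AtomicLong(0);
  private final Random rand = new Random();

  // Atomically skip ahead by 1-3, so IDs are unique and increasing but not contiguous.
  public long nextId() {
    return lastId.addAndGet(1 + rand.nextInt(3));
  }

  public static void main(String[] args) {
    GappedIdGenerator gen = new GappedIdGenerator();
    for (int i = 0; i < 5; i++) {
      System.out.println(gen.nextId()); // e.g. 2, 3, 6, 8, 9
    }
  }
}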
From source file: org.apache.druid.client.cache.MemcachedCache.java

public static MemcachedCache create(final MemcachedCacheConfig config)
{
  final ConcurrentMap<String, AtomicLong> counters = new ConcurrentHashMap<>();
  final ConcurrentMap<String, AtomicLong> meters = new ConcurrentHashMap<>();
  final AbstractMonitor monitor = new AbstractMonitor()
  {
    final AtomicReference<Map<String, Long>> priorValues = new AtomicReference<Map<String, Long>>(
        new HashMap<String, Long>());

    @Override
    public boolean doMonitor(ServiceEmitter emitter)
    {
      final Map<String, Long> priorValues = this.priorValues.get();
      final Map<String, Long> currentValues = getCurrentValues();
      final ServiceMetricEvent.Builder builder = ServiceMetricEvent.builder();
      for (Map.Entry<String, Long> entry : currentValues.entrySet()) {
        emitter.emit(builder.setDimension("memcached metric", entry.getKey())
                            .build("query/cache/memcached/total", entry.getValue()));
        final Long prior = priorValues.get(entry.getKey());
        if (prior != null) {
          emitter.emit(builder.setDimension("memcached metric", entry.getKey())
                              .build("query/cache/memcached/delta", entry.getValue() - prior));
        }
      }
      if (!this.priorValues.compareAndSet(priorValues, currentValues)) {
        log.error("Prior value changed while I was reporting! updating anyways");
        this.priorValues.set(currentValues);
      }
      return true;
    }

    private Map<String, Long> getCurrentValues()
    {
      final ImmutableMap.Builder<String, Long> builder = ImmutableMap.builder();
      for (Map.Entry<String, AtomicLong> entry : counters.entrySet()) {
        builder.put(entry.getKey(), entry.getValue().get());
      }
      for (Map.Entry<String, AtomicLong> entry : meters.entrySet()) {
        builder.put(entry.getKey(), entry.getValue().get());
      }
      return builder.build();
    }
  };

  try {
    LZ4Transcoder transcoder = new LZ4Transcoder(config.getMaxObjectSize());

    // always use compression
    transcoder.setCompressionThreshold(0);

    OperationQueueFactory opQueueFactory;
    long maxQueueBytes = config.getMaxOperationQueueSize();
    if (maxQueueBytes > 0) {
      opQueueFactory = new MemcachedOperationQueueFactory(maxQueueBytes);
    } else {
      opQueueFactory = new LinkedOperationQueueFactory();
    }

    final Predicate<String> interesting = new Predicate<String>()
    {
      // See net.spy.memcached.MemcachedConnection.registerMetrics()
      private final Set<String> interestingMetrics = ImmutableSet.of(
          "[MEM] Reconnecting Nodes (ReconnectQueue)",
          //"[MEM] Shutting Down Nodes (NodesToShutdown)", // Busted
          "[MEM] Request Rate: All",
          "[MEM] Average Bytes written to OS per write",
          "[MEM] Average Bytes read from OS per read",
          "[MEM] Average Time on wire for operations (s)",
          "[MEM] Response Rate: All (Failure + Success + Retry)",
          "[MEM] Response Rate: Retry",
          "[MEM] Response Rate: Failure",
          "[MEM] Response Rate: Success");

      @Override
      public boolean apply(@Nullable String input)
      {
        return input != null && interestingMetrics.contains(input);
      }
    };

    final MetricCollector metricCollector = new MetricCollector()
    {
      @Override
      public void addCounter(String name)
      {
        if (!interesting.apply(name)) {
          return;
        }
        counters.putIfAbsent(name, new AtomicLong(0L));
        if (log.isDebugEnabled()) {
          log.debug("Add Counter [%s]", name);
        }
      }

      @Override
      public void removeCounter(String name)
      {
        if (log.isDebugEnabled()) {
          log.debug("Ignoring request to remove [%s]", name);
        }
      }

      @Override
      public void incrementCounter(String name)
      {
        if (!interesting.apply(name)) {
          return;
        }
        AtomicLong counter = counters.get(name);
        if (counter == null) {
          counters.putIfAbsent(name, new AtomicLong(0));
          counter = counters.get(name);
        }
        counter.incrementAndGet();
        if (log.isDebugEnabled()) {
          log.debug("Increment [%s]", name);
        }
      }

      @Override
      public void incrementCounter(String name, int amount)
      {
        if (!interesting.apply(name)) {
          return;
        }
        AtomicLong counter = counters.get(name);
        if (counter == null) {
          counters.putIfAbsent(name, new AtomicLong(0));
          counter = counters.get(name);
        }
        counter.addAndGet(amount);
        if (log.isDebugEnabled()) {
          log.debug("Increment [%s] %d", name, amount);
        }
      }

      @Override
      public void decrementCounter(String name)
      {
        if (!interesting.apply(name)) {
          return;
        }
        AtomicLong counter = counters.get(name);
        if (counter == null) {
          counters.putIfAbsent(name, new AtomicLong(0));
          counter = counters.get(name);
        }
        counter.decrementAndGet();
        if (log.isDebugEnabled()) {
          log.debug("Decrement [%s]", name);
        }
      }

      @Override
      public void decrementCounter(String name, int amount)
      {
        if (!interesting.apply(name)) {
          return;
        }
        AtomicLong counter = counters.get(name);
        if (counter == null) {
          counters.putIfAbsent(name, new AtomicLong(0L));
          counter = counters.get(name);
        }
        counter.addAndGet(-amount);
        if (log.isDebugEnabled()) {
          log.debug("Decrement [%s] %d", name, amount);
        }
      }

      @Override
      public void addMeter(String name)
      {
        if (!interesting.apply(name)) {
          return;
        }
        meters.putIfAbsent(name, new AtomicLong(0L));
        if (log.isDebugEnabled()) {
          log.debug("Adding meter [%s]", name);
        }
      }

      @Override
      public void removeMeter(String name)
      {
        if (!interesting.apply(name)) {
          return;
        }
        if (log.isDebugEnabled()) {
          log.debug("Ignoring request to remove meter [%s]", name);
        }
      }

      @Override
      public void markMeter(String name)
      {
        if (!interesting.apply(name)) {
          return;
        }
        AtomicLong meter = meters.get(name);
        if (meter == null) {
          meters.putIfAbsent(name, new AtomicLong(0L));
          meter = meters.get(name);
        }
        meter.incrementAndGet();
        if (log.isDebugEnabled()) {
          log.debug("Increment counter [%s]", name);
        }
      }

      @Override
      public void addHistogram(String name)
      {
        log.debug("Ignoring add histogram [%s]", name);
      }

      @Override
      public void removeHistogram(String name)
      {
        log.debug("Ignoring remove histogram [%s]", name);
      }

      @Override
      public void updateHistogram(String name, int amount)
      {
        log.debug("Ignoring update histogram [%s]: %d", name, amount);
      }
    };

    final ConnectionFactory connectionFactory = new MemcachedCustomConnectionFactoryBuilder()
        // 1000 repetitions gives us good distribution with murmur3_128
        // (approx < 5% difference in counts across nodes, with 5 cache nodes)
        .setKetamaNodeRepetitions(1000)
        .setHashAlg(MURMUR3_128)
        .setProtocol(ConnectionFactoryBuilder.Protocol.valueOf(StringUtils.toUpperCase(config.getProtocol())))
        .setLocatorType(ConnectionFactoryBuilder.Locator.valueOf(StringUtils.toUpperCase(config.getLocator())))
        .setDaemon(true)
        .setFailureMode(FailureMode.Cancel)
        .setTranscoder(transcoder)
        .setShouldOptimize(true)
        .setOpQueueMaxBlockTime(config.getTimeout())
        .setOpTimeout(config.getTimeout())
        .setReadBufferSize(config.getReadBufferSize())
        .setOpQueueFactory(opQueueFactory)
        .setMetricCollector(metricCollector)
        .setEnableMetrics(MetricType.DEBUG) // Not as scary as it sounds
        .build();

    final List<InetSocketAddress> hosts = AddrUtil.getAddresses(config.getHosts());

    final Supplier<ResourceHolder<MemcachedClientIF>> clientSupplier;

    if (config.getNumConnections() > 1) {
      clientSupplier = new MemcacheClientPool(
          config.getNumConnections(),
          new Supplier<MemcachedClientIF>()
          {
            @Override
            public MemcachedClientIF get()
            {
              try {
                return new MemcachedClient(connectionFactory, hosts);
              }
              catch (IOException e) {
                log.error(e, "Unable to create memcached client");
                throw Throwables.propagate(e);
              }
            }
          });
    } else {
      clientSupplier = Suppliers.ofInstance(
          StupidResourceHolder.create(new MemcachedClient(connectionFactory, hosts)));
    }

    return new MemcachedCache(clientSupplier, config, monitor);
  }
  catch (IOException e) {
    throw Throwables.propagate(e);
  }
}
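These counters are write-heavy and only read when the monitor snapshots them, a workload where java.util.concurrent.atomic.LongAdder typically outperforms AtomicLong under contention. A sketch of that alternative, assuming an exact point-in-time read is not required (illustrative, not Druid code):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.LongAdder;

public class AdderCounters {
  private final ConcurrentMap<String, LongAdder> counters = new ConcurrentHashMap<>();

  public void add(String name, long delta) {
    counters.computeIfAbsent(name, k -> new LongAdder()).add(delta);
  }

  // sum() is not an atomic snapshot, which is fine for periodic reporting.
  public long get(String name) {
    LongAdder adder = counters.get(name);
    return adder == null ? 0L : adder.sum();
  }
}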
From source file: org.apache.pulsar.compaction.CompactedTopicTest.java

/**
 * Build a compacted ledger, and return the id of the ledger, the position of the different
 * entries in the ledger, and a list of gaps, and the entry which should be returned after the gap.
 */
private Triple<Long, List<Pair<MessageIdData, Long>>, List<Pair<MessageIdData, Long>>> buildCompactedLedger(
    BookKeeper bk, int count) throws Exception {
  LedgerHandle lh = bk.createLedger(1, 1, Compactor.COMPACTED_TOPIC_LEDGER_DIGEST_TYPE,
      Compactor.COMPACTED_TOPIC_LEDGER_PASSWORD);
  List<Pair<MessageIdData, Long>> positions = new ArrayList<>();
  List<Pair<MessageIdData, Long>> idsInGaps = new ArrayList<>();

  AtomicLong ledgerIds = new AtomicLong(10L);
  AtomicLong entryIds = new AtomicLong(0L);
  CompletableFuture.allOf(IntStream.range(0, count).mapToObj((i) -> {
    List<MessageIdData> idsInGap = new ArrayList<MessageIdData>();
    if (r.nextInt(10) == 1) {
      long delta = r.nextInt(10) + 1;
      idsInGap.add(MessageIdData.newBuilder()
          .setLedgerId(ledgerIds.get())
          .setEntryId(entryIds.get() + 1)
          .build());
      ledgerIds.addAndGet(delta);
      entryIds.set(0);
    }
    long delta = r.nextInt(5);
    if (delta != 0) {
      idsInGap.add(MessageIdData.newBuilder()
          .setLedgerId(ledgerIds.get())
          .setEntryId(entryIds.get() + 1)
          .build());
    }
    MessageIdData id = MessageIdData.newBuilder()
        .setLedgerId(ledgerIds.get())
        .setEntryId(entryIds.addAndGet(delta + 1))
        .build();

    @Cleanup
    RawMessage m = new RawMessageImpl(id, Unpooled.EMPTY_BUFFER);

    CompletableFuture<Void> f = new CompletableFuture<>();
    ByteBuf buffer = m.serialize();

    lh.asyncAddEntry(buffer, (rc, ledger, eid, ctx) -> {
      if (rc != BKException.Code.OK) {
        f.completeExceptionally(BKException.create(rc));
      } else {
        positions.add(Pair.of(id, eid));
        idsInGap.forEach((gid) -> idsInGaps.add(Pair.of(gid, eid)));
        f.complete(null);
      }
    }, null);
    return f;
  }).toArray(CompletableFuture[]::new)).get();
  lh.close();
  return Triple.of(lh.getId(), positions, idsInGaps);
}
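As in the ubicrypt example, the AtomicLongs here also act as mutable longs that a lambda is allowed to capture: locals referenced from a lambda must be effectively final, so ledgerIds and entryIds could not be plain long variables inside the mapToObj callback. A minimal illustration of that idiom (not Pulsar code):

import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.IntStream;

public class LambdaCounterDemo {
  public static void main(String[] args) {
    // A plain 'long entryId' could not be reassigned inside the lambda below;
    // AtomicLong gives the lambda a mutable cell it can advance.
    AtomicLong entryId = new AtomicLong(0);
    IntStream.range(0, 3).forEach(i -> System.out.println(entryId.addAndGet(2))); // 2, 4, 6
  }
}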
From source file: io.druid.data.input.impl.PrefetchableTextFilesFirehoseFactory.java

private Closeable cacheIfPossibleAndGetCloser(FetchedFile fetchedFile, AtomicLong fetchedBytes)
{
  final Closeable closeable;
  if (cacheIfPossible(fetchedFile)) {
    closeable = getNoopCloser();
    // If the fetchedFile is cached, make a room for fetching more data immediately.
    // This is because cache space and fetch space are separated.
    fetchedBytes.addAndGet(-fetchedFile.length());
  } else {
    closeable = getFetchedFileCloser(fetchedFile, fetchedBytes);
  }
  return closeable;
}
From source file: org.apache.usergrid.services.assets.data.GoogleBinaryStore.java

@Override
public void write(UUID appId, Entity entity, InputStream inputStream) throws Exception {
  getService();

  final AtomicLong writtenSize = new AtomicLong();
  final int chunkSize = 1024; // one KB

  // determine max size file allowed, default to 50mb
  long maxSizeBytes = 50 * FileUtils.ONE_MB;
  String maxSizeMbString = properties.getProperty("usergrid.binary.max-size-mb", "50");
  if (StringUtils.isNumeric(maxSizeMbString)) {
    maxSizeBytes = Long.parseLong(maxSizeMbString) * FileUtils.ONE_MB;
  }

  byte[] firstData = new byte[chunkSize];
  int firstSize = inputStream.read(firstData);
  writtenSize.addAndGet(firstSize);

  // from the first sample chunk, determine the file size
  final String contentType = AssetMimeHandler.get().getMimeType(entity, firstData);

  // Convert to the Google Cloud Storage Blob
  final BlobId blobId = BlobId.of(bucketName, AssetUtils.buildAssetKey(appId, entity));
  final BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setContentType(contentType).build();

  // always allow files up to 5mb
  if (maxSizeBytes < 5 * FileUtils.ONE_MB) {
    maxSizeBytes = 5 * FileUtils.ONE_MB;
  }

  EntityManager em = entityManagerFactory.getEntityManager(appId);
  Map<String, Object> fileMetadata = AssetUtils.getFileMetadata(entity);

  // directly upload files that are smaller than the chunk size
  if (writtenSize.get() < chunkSize) {
    // Upload to Google cloud Storage
    instance.create(blobInfo, firstData);
  } else {
    WriteChannel writer = instance.writer(blobInfo);

    // write the initial sample data used to determine file type
    writer.write(ByteBuffer.wrap(firstData, 0, firstData.length));

    // start writing remaining chunks from the stream
    byte[] buffer = new byte[chunkSize];
    int limit;
    while ((limit = inputStream.read(buffer)) >= 0) {
      writtenSize.addAndGet(limit);
      if (writtenSize.get() > maxSizeBytes) {
        try {
          fileMetadata.put("error", "Asset size is larger than max size of " + maxSizeBytes);
          em.update(entity);
        } catch (Exception e) {
          logger.error("Error updating entity with error message", e);
        }
        return;
      }
      try {
        writer.write(ByteBuffer.wrap(buffer, 0, limit));
      } catch (Exception ex) {
        logger.error("Error writing chunk to Google Cloud Storage for asset ");
      }
    }
    writer.close();
  }

  fileMetadata.put(AssetUtils.CONTENT_LENGTH, writtenSize.get());
  fileMetadata.put(AssetUtils.LAST_MODIFIED, System.currentTimeMillis());
  fileMetadata.put(AssetUtils.E_TAG, RandomStringUtils.randomAlphanumeric(10));
  fileMetadata.put(AssetUtils.CONTENT_TYPE, contentType);

  try {
    em.update(entity);
  } catch (Exception e) {
    throw new IOException("Unable to update entity filedata", e);
  }
}
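The upload loop's size policing — add each chunk to a running total with addAndGet and bail out once a cap is exceeded — is separable from the storage details. A self-contained sketch of just that part (names are illustrative, not Usergrid's):

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.concurrent.atomic.AtomicLong;

public class CappedStreamReader {
  // Drain a stream chunk by chunk, tracking the total with addAndGet and
  // failing as soon as the running total passes the cap.
  static long drain(InputStream in, long maxBytes) throws IOException {
    final AtomicLong written = new AtomicLong(0);
    byte[] buffer = new byte[1024];
    int n;
    while ((n = in.read(buffer)) >= 0) {
      if (written.addAndGet(n) > maxBytes) {
        throw new IOException("asset larger than max size of " + maxBytes);
      }
    }
    return written.get();
  }

  public static void main(String[] args) throws IOException {
    System.out.println(drain(new ByteArrayInputStream(new byte[2048]), 4096)); // 2048
  }
}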
From source file: ubicrypt.core.Utils.java

public static Observable<Long> write(final Path fullPath, final InputStream inputStream) {
  return Observable.create(subscriber -> {
    try {
      final AtomicLong offset = new AtomicLong(0);
      final AsynchronousFileChannel afc = AsynchronousFileChannel.open(fullPath, StandardOpenOption.WRITE,
          StandardOpenOption.CREATE);
      afc.lock(new Object(), new CompletionHandler<FileLock, Object>() {
        @Override
        public void completed(final FileLock lock, final Object attachment) {
          // acquired lock
          final byte[] buf = new byte[1 << 16];
          try {
            final int len = inputStream.read(buf);
            if (len == -1) {
              unsubscribe(subscriber, inputStream, lock);
              return;
            }
            afc.write(ByteBuffer.wrap(Arrays.copyOfRange(buf, 0, len)), offset.get(), null,
                new CompletionHandler<Integer, Object>() {
                  @Override
                  public void completed(final Integer result, final Object attachment) {
                    // written chunk of bytes
                    subscriber.onNext(offset.addAndGet(result));
                    final byte[] buf = new byte[1 << 16];
                    int len;
                    try {
                      len = inputStream.read(buf);
                      if (len == -1) {
                        unsubscribe(subscriber, inputStream, lock);
                        log.debug("written:{}", fullPath);
                        return;
                      }
                    } catch (final IOException e) {
                      subscriber.onError(e);
                      return;
                    }
                    if (len == -1) {
                      unsubscribe(subscriber, inputStream, lock);
                      log.debug("written:{}", fullPath);
                      return;
                    }
                    afc.write(ByteBuffer.wrap(Arrays.copyOfRange(buf, 0, len)), offset.get(), null, this);
                  }

                  @Override
                  public void failed(final Throwable exc, final Object attachment) {
                    subscriber.onError(exc);
                  }
                });
          } catch (final Exception e) {
            close(inputStream, lock);
            subscriber.onError(e);
          }
        }

        @Override
        public void failed(final Throwable exc, final Object attachment) {
          log.error("error on getting lock for:{}, error:{}", fullPath, exc.getMessage());
          try {
            inputStream.close();
          } catch (final IOException e) {
          }
          subscriber.onError(exc);
        }
      });
    } catch (final Exception e) {
      log.error("error on file:{}", fullPath);
      subscriber.onError(e);
    }
  });
}
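Note how subscriber.onNext(offset.addAndGet(result)) exploits the return value: one call both advances the write offset and emits the new running total as a progress event. The same trick in isolation (illustrative, not ubicrypt code):

import java.util.concurrent.atomic.AtomicLong;
import java.util.function.LongConsumer;

public class ProgressDemo {
  public static void main(String[] args) {
    AtomicLong total = new AtomicLong(0);
    LongConsumer onProgress = bytes -> System.out.println("written so far: " + bytes);
    // addAndGet returns the updated total, so updating and reporting is one expression.
    onProgress.accept(total.addAndGet(65536)); // written so far: 65536
    onProgress.accept(total.addAndGet(16384)); // written so far: 81920
  }
}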
From source file: io.warp10.script.functions.URLFETCH.java

@Override
public Object apply(WarpScriptStack stack) throws WarpScriptException {
  if (!stack.isAuthenticated()) {
    throw new WarpScriptException(getName() + " requires the stack to be authenticated.");
  }

  Object o = stack.pop();

  if (!(o instanceof String) && !(o instanceof List)) {
    throw new WarpScriptException(getName() + " expects a URL or list thereof on top of the stack.");
  }

  List<URL> urls = new ArrayList<URL>();

  try {
    if (o instanceof String) {
      urls.add(new URL(o.toString()));
    } else {
      for (Object oo : (List) o) {
        urls.add(new URL(oo.toString()));
      }
    }
  } catch (MalformedURLException mue) {
    throw new WarpScriptException(getName() + " encountered an invalid URL.");
  }

  //
  // Check URLs
  //

  for (URL url : urls) {
    if (!StandaloneWebCallService.checkURL(url)) {
      throw new WarpScriptException(getName() + " encountered an invalid URL '" + url + "'");
    }
  }

  //
  // Check that we do not exceed the maxurlfetch limit
  //

  AtomicLong urlfetchCount = (AtomicLong) stack.getAttribute(WarpScriptStack.ATTRIBUTE_URLFETCH_COUNT);
  AtomicLong urlfetchSize = (AtomicLong) stack.getAttribute(WarpScriptStack.ATTRIBUTE_URLFETCH_SIZE);

  if (urlfetchCount.get() + urls.size() > (long) stack.getAttribute(WarpScriptStack.ATTRIBUTE_URLFETCH_LIMIT)) {
    throw new WarpScriptException(getName() + " is limited to "
        + stack.getAttribute(WarpScriptStack.ATTRIBUTE_URLFETCH_LIMIT) + " calls.");
  }

  List<Object> results = new ArrayList<Object>();

  for (URL url : urls) {
    urlfetchCount.addAndGet(1);

    HttpURLConnection conn = null;

    try {
      conn = (HttpURLConnection) url.openConnection();
      conn.setDoInput(true);
      conn.setDoOutput(false);
      conn.setRequestMethod("GET");

      byte[] buf = new byte[8192];
      ByteArrayOutputStream baos = new ByteArrayOutputStream();

      InputStream in = conn.getInputStream();

      while (true) {
        int len = in.read(buf);

        if (len < 0) {
          break;
        }

        if (urlfetchSize.get() + baos.size() + len
            > (long) stack.getAttribute(WarpScriptStack.ATTRIBUTE_URLFETCH_MAXSIZE)) {
          throw new WarpScriptException(getName()
              + " would exceed maximum size of content which can be retrieved via URLFETCH ("
              + stack.getAttribute(WarpScriptStack.ATTRIBUTE_URLFETCH_MAXSIZE) + " bytes)");
        }

        baos.write(buf, 0, len);
      }

      urlfetchSize.addAndGet(baos.size());

      List<Object> res = new ArrayList<Object>();

      res.add(conn.getResponseCode());

      Map<String, List<String>> hdrs = conn.getHeaderFields();

      if (hdrs.containsKey(null)) {
        List<String> statusMsg = hdrs.get(null);
        if (statusMsg.size() > 0) {
          res.add(statusMsg.get(0));
        } else {
          res.add("");
        }
      } else {
        res.add("");
      }

      hdrs.remove(null);
      res.add(hdrs);

      res.add(Base64.encodeBase64String(baos.toByteArray()));

      results.add(res);
    } catch (IOException ioe) {
      throw new WarpScriptException(getName() + " encountered an error while fetching '" + url + "'");
    } finally {
      if (null != conn) {
        conn.disconnect();
      }
    }
  }

  stack.push(results);

  return stack;
}
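The two stack attributes act as per-session quotas: urlfetchCount meters calls and urlfetchSize meters bytes, both accumulated with addAndGet. A minimal sketch of that quota pattern (illustrative names, not the WarpScript API):

import java.util.concurrent.atomic.AtomicLong;

public class FetchQuota {
  private final AtomicLong used = new AtomicLong(0);
  private final long limit;

  FetchQuota(long limit) {
    this.limit = limit;
  }

  // Charge the quota; the updated total returned by addAndGet tells the
  // caller whether this consumption stayed within the limit.
  boolean tryCharge(long amount) {
    return used.addAndGet(amount) <= limit;
  }

  public static void main(String[] args) {
    FetchQuota quota = new FetchQuota(100);
    System.out.println(quota.tryCharge(60)); // true
    System.out.println(quota.tryCharge(60)); // false -- over the 100-unit limit
  }
}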