Example usage for com.google.common.util.concurrent ListenableFuture get

Introduction

This page collects example usages of the get method of com.google.common.util.concurrent.ListenableFuture, drawn from open-source projects.

Prototype

V get() throws InterruptedException, ExecutionException;

Document

Waits if necessary for the computation to complete, and then retrieves its result.
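
For orientation, here is a minimal, self-contained sketch of blocking on a ListenableFuture with get(); the class and variable names are illustrative only and do not come from the projects listed under Usage.

import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;

public class ListenableFutureGetSketch {
    public static void main(String[] args) {
        // Decorate a plain executor so submitted tasks return ListenableFuture instead of Future.
        ListeningExecutorService executor =
                MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
        ListenableFuture<String> future = executor.submit(() -> "hello");
        try {
            // get() blocks the calling thread until the computation completes, then returns its result.
            System.out.println(future.get());
        } catch (InterruptedException e) {
            // Restore the interrupt flag so code further up the stack can observe it.
            Thread.currentThread().interrupt();
        } catch (ExecutionException e) {
            // The computation itself failed; the original exception is available as the cause.
            e.getCause().printStackTrace();
        } finally {
            executor.shutdown();
        }
    }
}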

Usage

From source file:org.voltdb.DeprecatedDefaultSnapshotDataTarget.java

public DeprecatedDefaultSnapshotDataTarget(final File file, final int hostId, final String clusterName,
        final String databaseName, final String tableName, final int numPartitions, final boolean isReplicated,
        final int partitionIds[], final VoltTable schemaTable, final long txnId, int version[])
        throws IOException {
    String hostname = CoreUtils.getHostnameOrAddress();
    m_file = file;
    m_tableName = tableName;
    m_fos = new FileOutputStream(file);
    m_channel = m_fos.getChannel();
    final FastSerializer fs = new FastSerializer();
    fs.writeInt(0);//CRC
    fs.writeInt(0);//Header length placeholder
    fs.writeByte(1);//Indicate the snapshot was not completed, set to true for the CRC calculation, false later
    for (int ii = 0; ii < 4; ii++) {
        fs.writeInt(version[ii]);//version
    }
    fs.writeLong(txnId);
    fs.writeInt(hostId);
    fs.writeString(hostname);
    fs.writeString(clusterName);
    fs.writeString(databaseName);
    fs.writeString(tableName.toUpperCase());
    fs.writeBoolean(isReplicated);
    if (!isReplicated) {
        fs.writeArray(partitionIds);
        fs.writeInt(numPartitions);
    }
    final BBContainer container = fs.getBBContainer();
    container.b.position(4);
    container.b.putInt(container.b.remaining() - 4);
    container.b.position(0);

    FastSerializer schemaSerializer = new FastSerializer();
    schemaTable.writeExternal(schemaSerializer);
    final BBContainer schemaContainer = schemaSerializer.getBBContainer();
    schemaContainer.b.limit(schemaContainer.b.limit() - 4);//Don't want the row count
    schemaContainer.b.position(schemaContainer.b.position() + 4);//Don't want total table length

    final CRC32 crc = new CRC32();
    ByteBuffer aggregateBuffer = ByteBuffer.allocate(container.b.remaining() + schemaContainer.b.remaining());
    aggregateBuffer.put(container.b);
    aggregateBuffer.put(schemaContainer.b);
    aggregateBuffer.flip();
    crc.update(aggregateBuffer.array(), 4, aggregateBuffer.capacity() - 4);

    final int crcValue = (int) crc.getValue();
    aggregateBuffer.putInt(crcValue).position(8);
    aggregateBuffer.put((byte) 0).position(0);//Haven't actually finished writing file

    if (m_simulateFullDiskWritingHeader) {
        m_writeException = new IOException("Disk full");
        m_writeFailed = true;
        m_fos.close();
        throw m_writeException;
    }

    /*
     * Be completely sure the write succeeded. If it didn't
     * the disk is probably full or the path is bunk etc.
     */
    m_acceptOneWrite = true;
    ListenableFuture<?> writeFuture = write(Callables.returning((BBContainer) DBBPool.wrapBB(aggregateBuffer)),
            false);
    try {
        writeFuture.get();
    } catch (InterruptedException e) {
        m_fos.close();
        throw new java.io.InterruptedIOException();
    } catch (ExecutionException e) {
        m_fos.close();
        throw m_writeException;
    }
    if (m_writeFailed) {
        m_fos.close();
        throw m_writeException;
    }

    ScheduledFuture<?> syncTask = null;
    syncTask = m_syncService.scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
            int bytesSinceLastSync = 0;
            while ((bytesSinceLastSync = m_bytesWrittenSinceLastSync.getAndSet(0)) > 0) {
                try {
                    m_channel.force(false);
                } catch (IOException e) {
                    hostLog.error("Error syncing snapshot", e);
                }
                m_bytesAllowedBeforeSync.release(bytesSinceLastSync);
            }
        }
    }, 1, 1, TimeUnit.SECONDS);
    m_syncTask = syncTask;
}
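
The constructor above blocks on the header write and converts the future's failure modes into checked I/O exceptions. A stripped-down sketch of just that translation step, with a hypothetical method name, might look like this:

import com.google.common.util.concurrent.ListenableFuture;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.concurrent.ExecutionException;

class BlockingWriteSketch {
    // Waits for an asynchronous write to finish and maps the future's failure modes onto
    // IOException, mirroring the snapshot target constructor above (names are hypothetical).
    static void awaitWrite(ListenableFuture<?> writeFuture) throws IOException {
        try {
            writeFuture.get();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new InterruptedIOException();
        } catch (ExecutionException e) {
            throw new IOException("Write failed", e.getCause());
        }
    }
}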

From source file:c5db.client.FakeHTable.java

public ResultScanner getScanner(final Scan scan) throws IOException {
    if (scan.getStartRow() != null && scan.getStartRow().length > 0 && scan.getStopRow() != null
            && scan.getStopRow().length > 0 && Bytes.compareTo(scan.getStartRow(), scan.getStopRow()) > 0) {
        throw new IOException("StopRow needs to be greater than StartRow");
    }

    final ScanRequest scanRequest = new ScanRequest(regionSpecifier, ProtobufUtil.toScan(scan), 0L,
            C5Constants.DEFAULT_INIT_SCAN, false, 0L);
    ListenableFuture<ClientScanner> scanner;
    try {
        Long scanResult = c5AsyncDatabase.scan(scanRequest).get();
        scanner = clientScannerManager.get(scanResult);
        if (scanner == null) {
            throw new IOException("Unable to find scanner");
        }
        return scanner.get();
    } catch (ExecutionException | InterruptedException e) {
        throw new IOException(e);
    }
}

From source file:com.google.devtools.build.remote.worker.WatcherServer.java

@Override
public void watch(Request wr, StreamObserver<ChangeBatch> responseObserver) {
    final String opName = wr.getTarget();
    ListenableFuture<ActionResult> future = operationsCache.get(opName);
    if (future == null) {
        responseObserver.onError(StatusProto.toStatusRuntimeException(Status.newBuilder()
                .setCode(Code.NOT_FOUND.getNumber()).setMessage("Operation not found: " + opName).build()));
        return;
    }

    future.addListener(() -> {
        try {
            try {
                ActionResult result = future.get();
                responseObserver.onNext(packExists(Operation.newBuilder().setName(opName).setDone(true)
                        .setResponse(Any.pack(ExecuteResponse.newBuilder().setResult(result).build()))));
                responseObserver.onCompleted();
            } catch (ExecutionException e) {
                Throwables.throwIfUnchecked(e.getCause());
                throw (Exception) e.getCause();
            }
        } catch (Exception e) {
            ExecuteResponse resp;
            if (e instanceof ExecutionStatusException) {
                resp = ((ExecutionStatusException) e).getResponse();
            } else {
                logger.log(Level.SEVERE, "Work failed: " + opName, e);
                resp = ExecuteResponse.newBuilder().setStatus(StatusUtils.internalErrorStatus(e)).build();
            }
            responseObserver.onNext(ChangeBatch.newBuilder()
                    .addChanges(Change
                            .newBuilder().setState(Change.State.EXISTS).setData(Any.pack(Operation.newBuilder()
                                    .setName(opName).setDone(true).setResponse(Any.pack(resp)).build()))
                            .build())
                    .build());
            responseObserver.onCompleted();
            if (e instanceof InterruptedException) {
                Thread.currentThread().interrupt();
            }
        } finally {
            operationsCache.remove(opName);
        }
    }, MoreExecutors.directExecutor());
}
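
Here get() is called from inside a listener registered with addListener, so by the time the listener runs the future is already complete and get() returns without blocking. A minimal sketch of that pattern, with hypothetical names:

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.ExecutionException;

class ListenerGetSketch {
    // Registers a listener that runs once the future completes; inside the listener,
    // get() returns immediately because the computation is already done.
    static void printWhenDone(ListenableFuture<String> future) {
        future.addListener(() -> {
            try {
                System.out.println(future.get());
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            } catch (ExecutionException e) {
                System.err.println("Computation failed: " + e.getCause());
            }
        }, MoreExecutors.directExecutor());
    }

    public static void main(String[] args) {
        printWhenDone(Futures.immediateFuture("done"));
    }
}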

From source file:org.apache.druid.indexing.common.task.IndexTask.java

private static SegmentsAndMetadata awaitPublish(ListenableFuture<SegmentsAndMetadata> publishFuture,
        long publishTimeout) throws ExecutionException, InterruptedException, TimeoutException {
    if (publishTimeout == 0) {
        return publishFuture.get();
    } else {
        return publishFuture.get(publishTimeout, TimeUnit.MILLISECONDS);
    }
}
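
This example also uses the timed overload get(long, TimeUnit), which throws TimeoutException if the result does not arrive in time. A sketch of the same choice between an unbounded and a bounded wait, with a hypothetical method name:

import com.google.common.util.concurrent.ListenableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

class TimedGetSketch {
    // Blocks indefinitely when no timeout is configured; otherwise bounds the wait and
    // lets TimeoutException propagate to the caller, as in awaitPublish above.
    static <T> T awaitWithOptionalTimeout(ListenableFuture<T> future, long timeoutMillis)
            throws InterruptedException, ExecutionException, TimeoutException {
        if (timeoutMillis <= 0) {
            return future.get();
        }
        return future.get(timeoutMillis, TimeUnit.MILLISECONDS);
    }
}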

From source file:org.waveprotocol.box.server.waveserver.DeltaWaveletStateMap.java

@Inject
public DeltaWaveletStateMap(final WaveletStateFactory waveletStateFactory, final Injector injector,
        @Named(CoreSettings.DELTA_STATE_CACHE_SIZE) final int cacheSize,
        @Named(CoreSettings.DELTA_STATE_CACHE_EXPIRE) final int cacheExpire,
        @ExecutorAnnotations.WaveletLoadingExecutor final Executor waveletLoadingExecutor) {

    states = CacheBuilder.newBuilder().maximumSize(cacheSize).expireAfterAccess(cacheExpire, TimeUnit.MINUTES)
            .removalListener(new RemovalListener<WaveletName, ListenableFuture<DeltaWaveletState>>() {

                @Override
                public void onRemoval(
                        final RemovalNotification<WaveletName, ListenableFuture<DeltaWaveletState>> rn) {
                    final ListenableFuture<DeltaWaveletState> future = rn.getValue();
                    future.addListener(new Runnable() {

                        @Override
                        public void run() {
                            DeltaWaveletState state;
                            try {
                                state = future.get();
                            } catch (InterruptedException | ExecutionException ex) {
                                LOG.log(Level.WARNING, "Opening delta state error", ex);
                                return;
                            }
                            final ListenableFuture closeFuture = state.close();
                            closingStates.put(rn.getKey(), closeFuture);
                            closeFuture.addListener(new Runnable() {

                                @Override
                                public void run() {
                                    try {
                                        closeFuture.get();
                                        closingStates.remove(rn.getKey());
                                    } catch (InterruptedException | ExecutionException ex) {
                                        LOG.log(Level.WARNING, "Closing delta state error", ex);
                                    }
                                }
                            }, waveletLoadingExecutor);
                        }
                    }, MoreExecutors.sameThreadExecutor());
                }
            }).build(new CacheLoader<WaveletName, ListenableFuture<DeltaWaveletState>>() {

                @Override
                public ListenableFuture<DeltaWaveletState> load(WaveletName waveletName) throws Exception {
                    ListenableFuture closingFuture = closingStates.get(waveletName);
                    if (closingFuture != null) {
                        try {
                            closingFuture.get();
                        } catch (InterruptedException | ExecutionException ex) {
                            throw new WaveletStateException("Closing earlier opened delta state exception", ex);
                        }
                    }
                    final DeltaWaveletState state = waveletStateFactory.createDeltaWaveletState(waveletName);
                    ListenableFutureTask<DeltaWaveletState> future = ListenableFutureTask
                            .create(new Callable<DeltaWaveletState>() {

                                @Override
                                public DeltaWaveletState call() throws Exception {
                                    state.open();
                                    return state;
                                }
                            });
                    waveletLoadingExecutor.execute(future);
                    return future;
                }
            });
}
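
The cache loader above wraps the open operation in a ListenableFutureTask, hands it to a plain Executor, and later waits on the result with get(). A minimal sketch of that combination, with hypothetical names:

import com.google.common.util.concurrent.ListenableFutureTask;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

class FutureTaskSketch {
    public static void main(String[] args) throws InterruptedException, ExecutionException {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        // ListenableFutureTask is both a Runnable and a ListenableFuture, so it can be handed
        // to a plain Executor and then waited on with get().
        ListenableFutureTask<Integer> task = ListenableFutureTask.create(() -> 6 * 7);
        executor.execute(task);
        System.out.println(task.get()); // blocks until the callable has run
        executor.shutdown();
    }
}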

From source file:org.apache.hadoop.hive.druid.security.KerberosHttpClient.java

private <Intermediate, Final> void innerGo(final Request request,
        final HttpResponseHandler<Intermediate, Final> httpResponseHandler, final Duration duration,
        final SettableFuture<Final> future) {
    try {
        final String host = request.getUrl().getHost();
        final URI uri = request.getUrl().toURI();

        /* The cookie manager is used to cache cookies returned by the service.
           The goal is to avoid doing KDC requests for every request. */

        Map<String, List<String>> cookieMap = cookieManager.get(uri, Collections.emptyMap());
        for (Map.Entry<String, List<String>> entry : cookieMap.entrySet()) {
            request.addHeaderValues(entry.getKey(), entry.getValue());
        }
        final boolean shouldRetryOnUnauthorizedResponse;

        if (DruidKerberosUtil.needToSendCredentials(cookieManager.getCookieStore(), uri)) {
            // No Cookies for requested URI, authenticate user and add authentication header
            LOG.debug("No Auth Cookie found for URI{}. Existing Cookies{} Authenticating... ", uri,
                    cookieManager.getCookieStore().getCookies());
            // Assuming that a valid UGI with kerberos cred is created by HS2 or LLAP
            UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
            currentUser.checkTGTAndReloginFromKeytab();
            LOG.debug("The user credential is {}", currentUser);
            String challenge = currentUser
                    .doAs((PrivilegedExceptionAction<String>) () -> DruidKerberosUtil.kerberosChallenge(host));
            request.setHeader(HttpHeaders.Names.AUTHORIZATION, "Negotiate " + challenge);
            /* no reason to retry if the challenge ticket is not valid. */
            shouldRetryOnUnauthorizedResponse = false;
        } else {
            /* In this branch we already had a cookie that expired,
               therefore we need to resend a valid Kerberos challenge. */
            LOG.debug("Found Auth Cookie found for URI {} cookie {}", uri,
                    DruidKerberosUtil.getAuthCookie(cookieManager.getCookieStore(), uri).toString());
            shouldRetryOnUnauthorizedResponse = true;
        }

        @SuppressWarnings("unchecked")
        ListenableFuture<RetryResponseHolder<Final>> internalFuture = delegate.go(request,
                new RetryIfUnauthorizedResponseHandler<Intermediate, Final>(new ResponseCookieHandler(
                        request.getUrl().toURI(), cookieManager, httpResponseHandler)),
                duration);

        RetryResponseHolder<Final> responseHolder = internalFuture.get();

        if (shouldRetryOnUnauthorizedResponse && responseHolder.shouldRetry()) {
            LOG.debug("Preparing for Retry boolean {} and result {}, object{} ", true,
                    responseHolder.shouldRetry(), responseHolder.getObj());
            // remove Auth cookie
            DruidKerberosUtil.removeAuthCookie(cookieManager.getCookieStore(), uri);
            // clear existing cookie
            request.setHeader("Cookie", "");
            innerGo(request.copy(), httpResponseHandler, duration, future);

        } else {
            future.set(responseHolder.getObj());
        }
    } catch (Throwable e) {
        throw Throwables.propagate(e);
    }
}

From source file:org.jclouds.openstack.swift.blobstore.strategy.internal.ParallelMultipartUploadStrategy.java

protected void prepareUploadPart(final String container, final Blob blob, final String key, final Integer part,
        final Payload payload, final long offset, final long size, final SortedMap<Integer, String> etags,
        final BlockingQueue<Integer> activeParts, final Map<Integer, ListenableFuture<String>> futureParts,
        final AtomicInteger errors, final int maxRetries, final Map<Integer, Exception> errorMap,
        final Queue<Part> toRetry, final CountDownLatch latch, BlobToObject blob2Object) {
    if (errors.get() > maxRetries) {
        activeParts.remove(part); // remove part from the bounded-queue without blocking
        latch.countDown();
        return;
    }
    final CommonSwiftAsyncClient client = ablobstore.getContext().unwrap(SwiftApiMetadata.CONTEXT_TOKEN)
            .getAsyncApi();
    Payload chunkedPart = slicer.slice(payload, offset, size);
    logger.debug(String.format("async uploading part %s of %s to container %s", part, key, container));
    final long start = System.currentTimeMillis();
    String blobPartName = blob.getMetadata().getName() + PART_SEPARATOR + String.valueOf(part);

    Blob blobPart = ablobstore.blobBuilder(blobPartName).payload(chunkedPart).contentDisposition(blobPartName)
            .build();
    final ListenableFuture<String> futureETag = client.putObject(container, blob2Object.apply(blobPart));
    futureETag.addListener(new Runnable() {
        @Override
        public void run() {
            try {
                etags.put(part, futureETag.get());
                logger.debug(String.format("async uploaded part %s of %s to container %s in %sms", part, key,
                        container, System.currentTimeMillis() - start));
            } catch (CancellationException e) {
                errorMap.put(part, e);
                String message = String.format(
                        "%s while uploading part %s - [%s,%s] to container %s with running since %dms",
                        e.getMessage(), part, offset, size, container, System.currentTimeMillis() - start);
                logger.debug(message);
            } catch (Exception e) {
                errorMap.put(part, e);
                String message = String.format(
                        "%s while uploading part %s - [%s,%s] to container %s running since %dms",
                        e.getMessage(), part, offset, size, container, System.currentTimeMillis() - start);
                logger.error(message, e);
                if (errors.incrementAndGet() <= maxRetries)
                    toRetry.add(new Part(part, offset, size));
            } finally {
                activeParts.remove(part); // remove part from the bounded-queue without blocking
                futureParts.remove(part);
                latch.countDown();
            }
        }
    }, ioExecutor);
    futureParts.put(part, futureETag);
}

From source file:org.opendaylight.laas.impl.CentinelLaasStreamImpl.java

/**
 * @param change
 * @param tx
 *            updates data in operational data store.
 */
private void updateOperational(AsyncDataChangeEvent<InstanceIdentifier<?>, DataObject> change,
        final ReadWriteTransaction tx) {
    DataObject configUpdatedData = change.getUpdatedSubtree();
    DataObject configOriginalData = change.getOriginalSubtree();
    if (configUpdatedData instanceof StreamRecord && configOriginalData instanceof StreamRecord) {
        StreamRecord configUpdatedStreamRecord = (StreamRecord) configUpdatedData;
        StreamRecord configOriginalStreamRecord = (StreamRecord) configOriginalData;
        List<StreamList> configUpdatedStreamList = configUpdatedStreamRecord.getStreamList();
        List<StreamList> configOriginalStreamList = configOriginalStreamRecord.getStreamList();
        StreamList streamList = null;
        Iterator<StreamList> streamListIterator = configUpdatedStreamList.iterator();
        while (streamListIterator.hasNext()) {
            StreamList configObj = null;
            configObj = streamListIterator.next();
            if (!configOriginalStreamList.contains(configObj)) {
                streamList = configObj;
                break;
            }
        }

        ListenableFuture<Optional<StreamRecord>> readFutureFromOperational = tx
                .read(LogicalDatastoreType.OPERATIONAL, streamRecordId);

        try {
            Optional<StreamRecord> streamRecord = readFutureFromOperational.get();
            List<StreamList> streamListOperational = new ArrayList<StreamList>();

            if (streamRecord.isPresent()) {
                streamListOperational = streamRecord.get().getStreamList();
            }

            java.util.Iterator<StreamList> iteratorStreamList = streamListOperational.iterator();

            while (iteratorStreamList.hasNext()) {
                StreamList streamOperationalObj = iteratorStreamList.next();
                LOG.info("Operational Data store .getConfigID()" + streamOperationalObj.getConfigID());
                LOG.info("Config data store .getConfigID() " + streamList.getConfigID());
                if (streamList.getConfigID().equalsIgnoreCase(streamOperationalObj.getConfigID())) {
                    if (streamList.getStreamRules().size() == streamOperationalObj.getStreamRules().size()
                            && streamOperationalObj.getDisabled().equalsIgnoreCase(streamList.getDisabled())
                            && restService.updateToOperationalStream(streamList)) {

                        tx.merge(LogicalDatastoreType.OPERATIONAL,
                                streamRecordId.child(StreamList.class, streamOperationalObj.getKey()),
                                streamList);
                        tx.submit();
                    } else {
                        if (!streamOperationalObj.getDisabled().equalsIgnoreCase(streamList.getDisabled())
                                && restService.updateToOperationalStreamEnabler(streamList,
                                        streamOperationalObj.getStreamID())) {
                            tx.merge(LogicalDatastoreType.OPERATIONAL,
                                    streamRecordId.child(StreamList.class, streamOperationalObj.getKey()),
                                    streamList);
                            tx.submit();
                        }

                        if (streamList.getStreamRules().size() != streamOperationalObj.getStreamRules()
                                .size()) {
                            List<StreamRules> opStreamRuleList = streamOperationalObj.getStreamRules();
                            List<StreamRules> configStreamRuleList = streamList.getStreamRules();
                            StreamRule streamRule = null;
                            Iterator<StreamRules> iteratorStreamRule = configStreamRuleList.iterator();
                            while (iteratorStreamRule.hasNext()) {
                                StreamRule streamRuleToBeUpdated = null;
                                streamRuleToBeUpdated = iteratorStreamRule.next();
                                if (!opStreamRuleList.contains(streamRuleToBeUpdated)) {
                                    streamRule = streamRuleToBeUpdated;
                                    LOG.debug("STREAM RULE" + streamRule);
                                    break;
                                }
                            }

                            if (restService.updateToOperationalStreamRule(streamRule,
                                    streamOperationalObj.getStreamID()))
                                tx.merge(LogicalDatastoreType.OPERATIONAL,
                                        streamRecordId.child(StreamList.class, streamOperationalObj.getKey()),
                                        streamList);
                            tx.submit();
                            LOG.info("Stream rule commited sucessfully to operational datastore");
                        }
                    }

                }

            }
        }

        catch (Exception e) {
            LOG.error("Exception occured while getting stream rule record from operational data store", e);
        }

    }
}

From source file:org.jclouds.aws.s3.blobstore.strategy.internal.ParallelMultipartUploadStrategy.java

protected void prepareUploadPart(final String container, final String key, final String uploadId,
        final Integer part, final Payload payload, final long offset, final long size,
        final SortedMap<Integer, String> etags, final BlockingQueue<Integer> activeParts,
        final Map<Integer, ListenableFuture<String>> futureParts, final AtomicInteger errors,
        final int maxRetries, final Map<Integer, Exception> errorMap, final Queue<Part> toRetry,
        final CountDownLatch latch) {
    if (errors.get() > maxRetries) {
        activeParts.remove(part); // remove part from the bounded-queue without blocking
        latch.countDown();
        return;
    }
    final AWSS3AsyncClient client = ablobstore.getContext().unwrap(AWSS3ApiMetadata.CONTEXT_TOKEN)
            .getAsyncApi();
    Payload chunkedPart = slicer.slice(payload, offset, size);
    logger.debug(String.format("async uploading part %s of %s to container %s with uploadId %s", part, key,
            container, uploadId));
    final long start = System.currentTimeMillis();
    final ListenableFuture<String> futureETag = client.uploadPart(container, key, part, uploadId, chunkedPart);
    futureETag.addListener(new Runnable() {
        @Override
        public void run() {
            try {
                etags.put(part, futureETag.get());
                logger.debug(
                        String.format("async uploaded part %s of %s to container %s in %sms with uploadId %s",
                                part, key, container, System.currentTimeMillis() - start, uploadId));
            } catch (CancellationException e) {
                errorMap.put(part, e);
                String message = String.format(
                        "%s while uploading part %s - [%s,%s] to container %s with uploadId: %s running since %dms",
                        e.getMessage(), part, offset, size, container, uploadId,
                        System.currentTimeMillis() - start);
                logger.debug(message);
            } catch (Exception e) {
                errorMap.put(part, e);
                String message = String.format(
                        "%s while uploading part %s - [%s,%s] to container %s with uploadId: %s running since %dms",
                        e.getMessage(), part, offset, size, container, uploadId,
                        System.currentTimeMillis() - start);
                logger.error(message, e);
                if (errors.incrementAndGet() <= maxRetries)
                    toRetry.add(new Part(part, offset, size));
            } finally {
                activeParts.remove(part); // remove part from the bounded-queue without blocking
                futureParts.remove(part);
                latch.countDown();
            }
        }
    }, ioExecutor);
    futureParts.put(part, futureETag);
}

From source file:org.rhq.server.metrics.MetricsServer.java

private List<AggregateNumericMetric> loadMetrics(List<Integer> scheduleIds, long begin, long end,
        Bucket bucket) {
    List<StorageResultSetFuture> futures = new ArrayList<StorageResultSetFuture>(scheduleIds.size());
    for (Integer scheduleId : scheduleIds) {
        futures.add(dao.findAggregateMetricsAsync(scheduleId, bucket, begin, end));
    }
    ListenableFuture<List<ResultSet>> resultSetsFuture = Futures.successfulAsList(futures);
    try {
        List<ResultSet> resultSets = resultSetsFuture.get();
        AggregateNumericMetricMapper mapper = new AggregateNumericMetricMapper();
        List<AggregateNumericMetric> metrics = new ArrayList<AggregateNumericMetric>();
        for (ResultSet resultSet : resultSets) {
            metrics.addAll(mapper.mapAll(resultSet));
        }
        return metrics;
    } catch (Exception e) {
        log.warn("There was an error while fetching " + bucket + " data for {scheduleIds: " + scheduleIds
                + ", beginTime: " + begin + ", endTime: " + end + "}", e);
        return Collections.emptyList();
    }
}
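
The pattern here is a fan-out of per-schedule queries combined with Futures.successfulAsList, so a single get() call waits for all of them. A minimal sketch of that combination, with hypothetical names:

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ExecutionException;

class FanInSketch {
    // Combines many futures into one; a single get() then yields the list of results,
    // with null entries standing in for inputs that failed or were cancelled.
    static List<String> awaitAll(List<ListenableFuture<String>> futures) {
        ListenableFuture<List<String>> combined = Futures.successfulAsList(futures);
        try {
            return combined.get();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return Collections.emptyList();
        } catch (ExecutionException e) {
            return Collections.emptyList();
        }
    }
}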