Example usage for com.google.common.util.concurrent ListenableFuture get

Introduction

On this page you can find example usages of the com.google.common.util.concurrent ListenableFuture get method.

Prototype

V get() throws InterruptedException, ExecutionException;

Document

Waits if necessary for the computation to complete, and then retrieves its result.
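
Before the usage examples, here is a minimal, self-contained sketch of the call itself. It is a sketch only: Futures.immediateFuture stands in for a real asynchronous computation, which in practice would come from a ListeningExecutorService or an async API. The handling of the two checked exceptions mirrors what the examples below do.

import java.util.concurrent.ExecutionException;

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;

public class GetExample {
    public static void main(String[] args) {
        // An already-completed future; a stand-in for a real async computation.
        ListenableFuture<String> future = Futures.immediateFuture("result");
        try {
            // get() blocks until the computation completes, then returns its value.
            String value = future.get();
            System.out.println(value);
        } catch (InterruptedException e) {
            // Restore the interrupt flag rather than swallowing it.
            Thread.currentThread().interrupt();
        } catch (ExecutionException e) {
            // The computation itself failed; the original failure is the cause.
            throw new RuntimeException(e.getCause());
        }
    }
}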

Usage

From source file:org.opendaylight.centinel.impl.CentinelImpl.java

/**
 * Returns the list of alert rules matching the stream id entered by the
 * user.
 */
@Override
public Future<RpcResult<GetAllAlertRuleOutput>> getAllAlertRule(final GetAllAlertRuleInput input) {

    final ReadWriteTransaction tx = dataProvider.newReadWriteTransaction();
    final SettableFuture<RpcResult<GetAllAlertRuleOutput>> futureResult = SettableFuture.create();
    LOG.info("GetAllAlertRuleInput: " + input);
    final GetAllAlertRuleOutputBuilder allAlertRuleOutputBuilder = new GetAllAlertRuleOutputBuilder();

    ListenableFuture<Optional<AlertMessageCountRuleRecord>> alertMessageCountReadFuture = tx
            .read(LogicalDatastoreType.OPERATIONAL, alertMessageCountRuleRecordId);
    ListenableFuture<Optional<AlertFieldContentRuleRecord>> alertFieldContentReadFuture = tx
            .read(LogicalDatastoreType.OPERATIONAL, alertFeildContentRuleRecordId);
    ListenableFuture<Optional<AlertFieldValueRuleRecord>> alertFieldValueReadFuture = tx
            .read(LogicalDatastoreType.OPERATIONAL, alertFieldValueRuleRecordId);
    try {
        Optional<AlertMessageCountRuleRecord> alertMessageCountRuleRecord = alertMessageCountReadFuture.get();
        List<StreamAlertMessageCountRuleList> streamAlertRuleList = new ArrayList<StreamAlertMessageCountRuleList>();

        if (alertMessageCountRuleRecord.isPresent()) {
            streamAlertRuleList = alertMessageCountRuleRecord.get().getStreamAlertMessageCountRuleList();

        }
        java.util.Iterator<StreamAlertMessageCountRuleList> iterator = streamAlertRuleList.iterator();
        List<StreamAlertMessageCountRuleListSorted> streamAlertMessageCountRuleListSortedList = new ArrayList<StreamAlertMessageCountRuleListSorted>();
        StreamAlertMessageCountRuleListSortedBuilder streamAlertMessageCountRuleListSortedBuilder = new StreamAlertMessageCountRuleListSortedBuilder();

        while (iterator.hasNext()) {
            StreamAlertMessageCountRuleList streamAlertMessageObj = iterator.next();
            if (streamAlertMessageObj.getStreamID().equals(input.getStreamID())) {

                streamAlertMessageCountRuleListSortedBuilder.setStreamID(streamAlertMessageObj.getStreamID());
                streamAlertMessageCountRuleListSortedBuilder.setConfigID(streamAlertMessageObj.getConfigID());
                streamAlertMessageCountRuleListSortedBuilder.setRuleID(streamAlertMessageObj.getRuleID());
                streamAlertMessageCountRuleListSortedBuilder
                        .setAlertTypeClassifier(streamAlertMessageObj.getAlertTypeClassifier());
                streamAlertMessageCountRuleListSortedBuilder
                        .setMessageCountCount(streamAlertMessageObj.getMessageCountCount());
                streamAlertMessageCountRuleListSortedBuilder
                        .setMessageCountGrace(streamAlertMessageObj.getMessageCountGrace());
                streamAlertMessageCountRuleListSortedBuilder.setTimeStamp(streamAlertMessageObj.getTimeStamp());
                streamAlertMessageCountRuleListSortedBuilder
                        .setMessageCountBacklog(streamAlertMessageObj.getMessageCountBacklog());

                streamAlertMessageCountRuleListSortedList.add(
                        (StreamAlertMessageCountRuleListSorted) streamAlertMessageCountRuleListSortedBuilder
                                .build());
                allAlertRuleOutputBuilder
                        .setStreamAlertMessageCountRuleListSorted(streamAlertMessageCountRuleListSortedList);

            }
        }

    } catch (InterruptedException | ExecutionException ex) {
        LOG.error("Failed to read alert message count rule records", ex);
        futureResult.setException(ex);
        return futureResult;
    }
    try {
        Optional<AlertFieldContentRuleRecord> alertFieldContentRuleRecord = alertFieldContentReadFuture.get();
        List<StreamAlertFieldContentRuleList> streamAlertRuleList = new ArrayList<StreamAlertFieldContentRuleList>();

        if (alertFieldContentRuleRecord.isPresent()) {
            streamAlertRuleList = alertFieldContentRuleRecord.get().getStreamAlertFieldContentRuleList();
        }
        java.util.Iterator<StreamAlertFieldContentRuleList> iterator = streamAlertRuleList.iterator();
        List<StreamAlertFieldContentRuleListSorted> streamAlertFieldContentRuleListSorted = new ArrayList<StreamAlertFieldContentRuleListSorted>();
        StreamAlertFieldContentRuleListSortedBuilder streamAlertFieldContentRuleListSortedBuilder = new StreamAlertFieldContentRuleListSortedBuilder();

        while (iterator.hasNext()) {
            StreamAlertFieldContentRuleList streamAlertFieldContentObj = iterator.next();

            if (streamAlertFieldContentObj.getStreamID().equals(input.getStreamID())) {
                streamAlertFieldContentRuleListSortedBuilder
                        .setConfigID(streamAlertFieldContentObj.getConfigID());
                streamAlertFieldContentRuleListSortedBuilder.setRuleID(streamAlertFieldContentObj.getRuleID());
                streamAlertFieldContentRuleListSortedBuilder
                        .setStreamID(streamAlertFieldContentObj.getStreamID());
                streamAlertFieldContentRuleListSortedBuilder
                        .setFieldContentBacklog(streamAlertFieldContentObj.getFieldContentBacklog());
                streamAlertFieldContentRuleListSortedBuilder.setFieldContentCompareToValue(
                        streamAlertFieldContentObj.getFieldContentCompareToValue());
                streamAlertFieldContentRuleListSortedBuilder
                        .setFieldContentGrace(streamAlertFieldContentObj.getFieldContentGrace());
                streamAlertFieldContentRuleListSortedBuilder
                        .setFieldContentField(streamAlertFieldContentObj.getFieldContentField());
                streamAlertFieldContentRuleListSorted.add(
                        (StreamAlertFieldContentRuleListSorted) streamAlertFieldContentRuleListSortedBuilder
                                .build());
                allAlertRuleOutputBuilder
                        .setStreamAlertFieldContentRuleListSorted(streamAlertFieldContentRuleListSorted);

            }
        }
    } catch (InterruptedException | ExecutionException ex) {
        LOG.error("Failed to read alert field content rule records", ex);
        futureResult.setException(ex);
        return futureResult;
    }
    try {
        Optional<AlertFieldValueRuleRecord> alertFieldValueRuleRecord = alertFieldValueReadFuture.get();
        List<StreamAlertFieldValueRuleList> streamAlertRuleList = new ArrayList<StreamAlertFieldValueRuleList>();

        if (alertFieldValueRuleRecord.isPresent()) {
            streamAlertRuleList = alertFieldValueRuleRecord.get().getStreamAlertFieldValueRuleList();

        }

        java.util.Iterator<StreamAlertFieldValueRuleList> iterator = streamAlertRuleList.iterator();
        List<StreamAlertFieldValueRuleListSorted> streamAlertFieldValueRuleListSorted = new ArrayList<StreamAlertFieldValueRuleListSorted>();
        StreamAlertFieldValueRuleListSortedBuilder streamAlertFieldValueRuleListSortedBuilder = new StreamAlertFieldValueRuleListSortedBuilder();

        while (iterator.hasNext()) {
            StreamAlertFieldValueRuleList streamAlertFieldValueRuleObj = iterator.next();
            if (streamAlertFieldValueRuleObj.getStreamID().equals(input.getStreamID())) {
                streamAlertFieldValueRuleListSortedBuilder
                        .setConfigID(streamAlertFieldValueRuleObj.getConfigID());
                streamAlertFieldValueRuleListSortedBuilder.setRuleID(streamAlertFieldValueRuleObj.getRuleID());
                streamAlertFieldValueRuleListSortedBuilder
                        .setStreamID(streamAlertFieldValueRuleObj.getStreamID());
                streamAlertFieldValueRuleListSortedBuilder
                        .setFieldValueBacklog(streamAlertFieldValueRuleObj.getFieldValueBacklog());
                streamAlertFieldValueRuleListSortedBuilder
                        .setFieldValueField(streamAlertFieldValueRuleObj.getFieldValueField());
                streamAlertFieldValueRuleListSortedBuilder.setFieldValueThreshholdType(
                        streamAlertFieldValueRuleObj.getFieldValueThreshholdType());
                streamAlertFieldValueRuleListSortedBuilder
                        .setFieldValueThreshhold(streamAlertFieldValueRuleObj.getFieldValueThreshhold());
                streamAlertFieldValueRuleListSortedBuilder
                        .setFieldValueType(streamAlertFieldValueRuleObj.getFieldValueType());
                streamAlertFieldValueRuleListSortedBuilder
                        .setFieldValueGrace(streamAlertFieldValueRuleObj.getFieldValueGrace());
                streamAlertFieldValueRuleListSorted
                        .add((StreamAlertFieldValueRuleListSorted) streamAlertFieldValueRuleListSortedBuilder
                                .build());
                allAlertRuleOutputBuilder
                        .setStreamAlertFieldValueRuleListSorted(streamAlertFieldValueRuleListSorted);
            }
        }
        futureResult.set(
                RpcResultBuilder.<GetAllAlertRuleOutput>success(allAlertRuleOutputBuilder.build()).build());

    } catch (InterruptedException | ExecutionException ex) {
        LOG.error("Failed to read alert field value rule records", ex);
        futureResult.setException(ex);
        return futureResult;
    }

    return futureResult;

}
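
The example above blocks on each of the three datastore reads in turn with get(). When the reads are independent, as they appear to be here, one alternative is to combine the futures with Futures.allAsList and block once. The sketch below shows the pattern on plain strings rather than the OpenDaylight record types, so it assumes nothing about that API beyond what Guava provides.

import java.util.List;
import java.util.concurrent.ExecutionException;

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;

public class AllAsListExample {
    public static void main(String[] args) throws InterruptedException, ExecutionException {
        // Three independent "reads", modeled here as already-completed futures.
        ListenableFuture<String> read1 = Futures.immediateFuture("message-count rules");
        ListenableFuture<String> read2 = Futures.immediateFuture("field-content rules");
        ListenableFuture<String> read3 = Futures.immediateFuture("field-value rules");

        // allAsList completes when every input completes (and fails fast if any
        // input fails), so a single get() replaces three separate blocking calls.
        ListenableFuture<List<String>> all = Futures.allAsList(read1, read2, read3);
        for (String result : all.get()) {
            System.out.println(result);
        }
    }
}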

From source file:com.afewmoreamps.JitCaskImpl.java

@Override
public ListenableFuture<PutResult> put(final byte[] key, final byte[] value, final boolean waitForSync) {
    if (key == null || value == null) {
        throw new IllegalArgumentException();
    }

    final int uncompressedSize = key.length + value.length + MiniCask.HEADER_SIZE + 8; // 8 = length prefixes for the key and value in the compressed entry
    /*
     * Record when the put started
     */
    final long start = System.currentTimeMillis();

    /*
     * This is the return value that will be set with the result
     * or any exceptions thrown during the put
     */
    final SettableFuture<PutResult> retval = SettableFuture.create();

    /*
     * If compression is requested, attempt to compress the value
     * and generate the CRC in a separate thread pool before submitting
     * to the single write thread. This allows parallelism for what is
     * potentially the most CPU-intensive part of a write. There can be
     * only one write thread, so it is best to offload as much work as
     * possible before handing off to it.
     */
    final ListenableFuture<Object[]> assembledEntryFuture = m_compressionThreads
            .submit(new Callable<Object[]>() {
                @Override
                public Object[] call() throws Exception {
                    return MiniCask.constructEntry(key, value);
                }
            });

    /*
     * Limit the maximum number of outstanding writes
     * to avoid OOM in naive benchmarks/applications
     */
    try {
        m_maxOutstandingWrites.acquire();
    } catch (InterruptedException e) {
        // Restore the interrupt status before failing the put
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
    }
    retval.addListener(new Runnable() {
        @Override
        public void run() {
            m_maxOutstandingWrites.release();
        }
    }, MoreExecutors.sameThreadExecutor());

    /*
     * Submit the write to the single write thread
     * which will write the kv pair to the current file and
     * upsert it into the keydir. If sync was not requested
     * then the retval future will be set as soon as the write
     * thread writes the new kv pair to the memory mapped file (page cache).
     * Otherwise it adds a listener to the next sync task that will set the value
     * once sync has been performed.
     */
    m_writeThread.execute(new Runnable() {
        @Override
        public void run() {
            /*
             * Retrieve the compression results, forwarding any exceptions
             * to the retval future.
             */
            Object[] assembledEntry;
            try {
                assembledEntry = assembledEntryFuture.get();
            } catch (Throwable t) {
                retval.setException(t);
                return;
            }

            final byte[] entryBytes = (byte[]) assembledEntry[0];
            final byte[] keyHash = (byte[]) assembledEntry[1];

            try {
                putImpl(entryBytes, keyHash, false);
            } catch (Throwable t) {
                retval.setException(t);
                return;
            }

            /*
             * If the put requested waiting for sync then don't set the retval future
             * immediately. Add a listener for the next sync task that will do it
             * once the data is really durable.
             *
             * Otherwise set it immediately and use the current time to reflect the latency
             * of the put
             */
            if (waitForSync) {
                final ListenableFuture<Long> syncTask = m_nextSyncTask;
                syncTask.addListener(new Runnable() {
                    @Override
                    public void run() {
                        try {
                            retval.set(new PutResult(uncompressedSize, entryBytes.length,
                                    (int) (syncTask.get() - start)));
                        } catch (Throwable t) {
                            retval.setException(t);
                            return;
                        }
                    }
                }, MoreExecutors.sameThreadExecutor());
            } else {
                retval.set(new PutResult(uncompressedSize, entryBytes.length,
                        (int) (System.currentTimeMillis() - start)));
            }
        }
    });

    return retval;
}
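
The write-thread body above illustrates a recurring pattern with get(): block for a prerequisite future inside a worker, and forward any failure into the result future via setException instead of letting it escape the executor. Below is a stripped-down sketch of just that pattern, with made-up payloads.

import java.util.concurrent.ExecutionException;

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;

public class ForwardFailureExample {
    public static void main(String[] args) throws InterruptedException, ExecutionException {
        ListenableFuture<byte[]> prerequisite = Futures.immediateFuture(new byte[] { 1, 2, 3 });
        SettableFuture<Integer> result = SettableFuture.create();
        try {
            // Block for the prerequisite; on failure, fail the result future
            // rather than throwing out of the worker.
            byte[] entry = prerequisite.get();
            result.set(entry.length);
        } catch (Throwable t) {
            result.setException(t);
        }
        System.out.println(result.get());
    }
}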

From source file:org.opendaylight.ovsdb.plugin.ConfigurationService.java

/**
 * Creates a port attached to a bridge.
 * Ex. ovs-vsctl add-port br0 vif0
 * @param node Node serving this configuration service
 * @param bridgeIdentifier String representation of a bridge name
 * @param portIdentifier String representation of a user-defined port name
 */
@Override
public Status addPort(Node node, String bridgeIdentifier, String portIdentifier,
        Map<ConfigConstants, Object> configs) {
    try {
        if (connectionService == null) {
            logger.error("Couldn't refer to the ConnectionService");
            return new Status(StatusCode.NOSERVICE);
        }
        Connection connection = this.getConnection(node);
        if (connection == null) {
            return new Status(StatusCode.NOSERVICE, "Connection to ovsdb-server not available");
        }
        if (connection != null) {
            Map<String, Table<?>> brTable = inventoryServiceInternal.getTableCache(node, Bridge.NAME.getName());
            String newBridge = "new_bridge";
            String newInterface = "new_interface";
            String newPort = "new_port";

            if (brTable != null) {
                Operation addBrMutRequest = null;
                String brUuid = null;
                for (String uuid : brTable.keySet()) {
                    Bridge bridge = (Bridge) brTable.get(uuid);
                    if (bridge.getName().contains(bridgeIdentifier)) {
                        brUuid = uuid;
                    }
                }
                if (brUuid == null) {
                    return new Status(StatusCode.NOTFOUND, "Bridge " + bridgeIdentifier + " not found");
                }

                UUID brUuidPair = new UUID(newPort);
                Mutation bm = new Mutation("ports", Mutator.INSERT, brUuidPair);
                List<Mutation> mutations = new ArrayList<Mutation>();
                mutations.add(bm);

                UUID uuid = new UUID(brUuid);
                Condition condition = new Condition("_uuid", Function.EQUALS, uuid);
                List<Condition> where = new ArrayList<Condition>();
                where.add(condition);
                addBrMutRequest = new MutateOperation(Bridge.NAME.getName(), where, mutations);

                OvsDBMap<String, String> options = null;
                String type = null;
                OvsDBSet<BigInteger> tags = null;
                if (configs != null) {
                    type = (String) configs.get(ConfigConstants.TYPE);
                    Map<String, String> customConfigs = (Map<String, String>) configs
                            .get(ConfigConstants.CUSTOM);
                    if (customConfigs != null) {
                        options = new OvsDBMap<String, String>();
                        for (String customConfig : customConfigs.keySet()) {
                            options.put(customConfig, customConfigs.get(customConfig));
                        }
                    }
                }

                Interface interfaceRow = new Interface();
                interfaceRow.setName(portIdentifier);

                if (type != null) {
                    if (type.equalsIgnoreCase(OvsdbType.PortType.TUNNEL.name())) {
                        interfaceRow.setType((String) configs.get(ConfigConstants.TUNNEL_TYPE));
                        if (options == null)
                            options = new OvsDBMap<String, String>();
                        options.put("remote_ip", (String) configs.get(ConfigConstants.DEST_IP));
                    } else if (type.equalsIgnoreCase(OvsdbType.PortType.VLAN.name())) {
                        tags = new OvsDBSet<BigInteger>();
                        tags.add(BigInteger
                                .valueOf(Integer.parseInt((String) configs.get(ConfigConstants.VLAN))));
                    } else if (type.equalsIgnoreCase(OvsdbType.PortType.PATCH.name())) {
                        interfaceRow.setType(type.toLowerCase());
                    } else if (type.equalsIgnoreCase(OvsdbType.PortType.INTERNAL.name())) {
                        interfaceRow.setType(type.toLowerCase());
                    }
                }
                if (options != null) {
                    interfaceRow.setOptions(options);
                }

                InsertOperation addIntfRequest = new InsertOperation(Interface.NAME.getName(), newInterface,
                        interfaceRow);

                Port portRow = new Port();
                portRow.setName(portIdentifier);
                if (tags != null)
                    portRow.setTag(tags);
                OvsDBSet<UUID> interfaces = new OvsDBSet<UUID>();
                UUID interfaceid = new UUID(newInterface);
                interfaces.add(interfaceid);
                portRow.setInterfaces(interfaces);
                InsertOperation addPortRequest = new InsertOperation(Port.NAME.getName(), newPort, portRow);

                TransactBuilder transaction = new TransactBuilder();
                transaction.addOperations(new ArrayList<Operation>(
                        Arrays.asList(addBrMutRequest, addPortRequest, addIntfRequest)));

                ListenableFuture<List<OperationResult>> transResponse = connection.getRpc()
                        .transact(transaction);
                List<OperationResult> tr = transResponse.get();
                List<Operation> requests = transaction.getRequests();
                Status status = new Status(StatusCode.SUCCESS);
                for (int i = 0; i < tr.size(); i++) {
                    if (i < requests.size())
                        requests.get(i).setResult(tr.get(i));
                    if (tr.get(i).getError() != null && tr.get(i).getError().trim().length() > 0) {
                        OperationResult result = tr.get(i);
                        status = new Status(StatusCode.BADREQUEST,
                                result.getError() + " : " + result.getDetails());
                    }
                }

                if (tr.size() > requests.size()) {
                    OperationResult result = tr.get(tr.size() - 1);
                    logger.error("Error creating Bridge : {}\n Error : {}\n Details : {}", bridgeIdentifier,
                            result.getError(), result.getDetails());
                    status = new Status(StatusCode.BADREQUEST, result.getError() + " : " + result.getDetails());
                }
                return status;
            }
            return new Status(StatusCode.INTERNALERROR);
        }
    } catch (Exception e) {
        logger.error("Error in addPort()", e);
    }
    return new Status(StatusCode.INTERNALERROR);
}

From source file:eu.eubrazilcc.lvl.core.entrez.EntrezHelper.java

private void efetchGBSeqXMLFiles(final List<String> ids, final int retstart, final int retmax,
        final File directory) throws Exception {
    // save the bulk of files to a temporary file
    final File tmpFile = createTempFile("gb-", ".tmp", directory);
    final String idsParam = Joiner.on(",").skipNulls().join(ids);
    LOGGER.trace("Fetching " + ids.size() + " files from GenBank, retstart=" + retstart + ", retmax=" + retmax
            + ", file=" + tmpFile.getPath());
    httpClient.request(EFETCH_BASE_URI).post()
            .bodyForm(efetchForm(NUCLEOTIDE_DB, idsParam, retstart, retmax, "xml").build())
            .saveContent(tmpFile, true);
    final ListenableFuture<String[]> future = TASK_RUNNER.submit(new Callable<String[]>() {
        @Override
        public String[] call() throws Exception {
            final Set<String> files = newHashSet();
            final GBSet gbSet = GBSEQ_XMLB.typeFromFile(tmpFile);
            checkState(gbSet != null,
                    "Expected GBSeqXML, but no content read from temporary file downloaded with efetch");
            if (gbSet.getGBSeq() != null) {
                final List<GBSeq> gbSeqs = gbSet.getGBSeq();
                for (final GBSeq gbSeq : gbSeqs) {
                    final Integer gi = getGenInfoIdentifier(gbSeq);
                    if (gi != null) {
                        final File file = new File(directory, gi.toString() + ".xml");
                        GBSEQ_XMLB.typeToFile(gbSeq, file);
                        files.add(file.getCanonicalPath());
                    } else {
                        LOGGER.warn("Ingoring malformed sequence (gi not found) in efetch response");
                    }
                }
            } else {
                LOGGER.warn("Ingoring malformed sequence (GBSeq not found) in efetch response");
            }
            return files.toArray(new String[files.size()]);
        }
    });
    addCallback(future, new FutureCallback<String[]>() {
        @Override
        public void onSuccess(final String[] result) {
            LOGGER.info("One bulk sequence file was processed successfully: " + tmpFile.getName()
                    + ", number of created files: " + result.length);
            deleteQuietly(tmpFile);
        }

        @Override
        public void onFailure(final Throwable error) {
            LOGGER.error("Failed to process bulk sequence file " + tmpFile.getName(), error);
        }
    });
    // wait for files to be processed
    future.get();
}

From source file:org.voltdb.export.ExportGeneration.java

private void createAndRegisterAckMailboxes(final Set<Integer> localPartitions, HostMessenger messenger) {
    m_zk = messenger.getZK();
    m_mailboxesZKPath = VoltZK.exportGenerations + "/" + m_timestamp + "/" + "mailboxes";

    m_mbox = new LocalMailbox(messenger) {
        @Override
        public void deliver(VoltMessage message) {
            if (message instanceof BinaryPayloadMessage) {
                BinaryPayloadMessage bpm = (BinaryPayloadMessage) message;
                ByteBuffer buf = ByteBuffer.wrap(bpm.m_payload);
                final int partition = buf.getInt();
                final int length = buf.getInt();
                byte[] stringBytes = new byte[length];
                buf.get(stringBytes);
                String signature = new String(stringBytes, Constants.UTF8ENCODING);
                final long ackUSO = buf.getLong();

                final HashMap<String, ExportDataSource> partitionSources = m_dataSourcesByPartition
                        .get(partition);
                if (partitionSources == null) {
                    exportLog.error("Received an export ack for partition " + partition
                            + " which does not exist on this node");
                    return;
                }

                final ExportDataSource eds = partitionSources.get(signature);
                if (eds == null) {
                    exportLog.error("Received an export ack for partition " + partition + " source signature "
                            + signature + " which does not exist on this node");
                    return;
                }

                try {
                    eds.ack(ackUSO);
                } catch (RejectedExecutionException ignoreIt) {
                    // ignore it: as it is already shutdown
                }
            } else {
                exportLog.error("Receive unexpected message " + message + " in export subsystem");
            }
        }
    };
    messenger.createMailbox(null, m_mbox);

    for (Integer partition : localPartitions) {
        final String partitionDN = m_mailboxesZKPath + "/" + partition;
        ZKUtil.asyncMkdirs(m_zk, partitionDN);

        ZKUtil.StringCallback cb = new ZKUtil.StringCallback();
        m_zk.create(partitionDN + "/" + m_mbox.getHSId(), null, Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL, cb,
                null);
    }

    ListenableFuture<?> fut = m_childUpdatingThread.submit(new Runnable() {
        @Override
        public void run() {
            List<Pair<Integer, ZKUtil.ChildrenCallback>> callbacks = new ArrayList<Pair<Integer, ZKUtil.ChildrenCallback>>();
            for (Integer partition : localPartitions) {
                ZKUtil.ChildrenCallback callback = new ZKUtil.ChildrenCallback();
                m_zk.getChildren(m_mailboxesZKPath + "/" + partition, constructMailboxChildWatcher(), callback,
                        null);
                callbacks.add(Pair.of(partition, callback));
            }
            for (Pair<Integer, ZKUtil.ChildrenCallback> p : callbacks) {
                final Integer partition = p.getFirst();
                List<String> children = null;
                try {
                    children = p.getSecond().getChildren();
                } catch (InterruptedException | KeeperException e) {
                    Throwables.propagate(e);
                }
                ImmutableList.Builder<Long> mailboxes = ImmutableList.builder();

                for (String child : children) {
                    if (child.equals(Long.toString(m_mbox.getHSId())))
                        continue;
                    mailboxes.add(Long.valueOf(child));
                }
                ImmutableList<Long> mailboxHsids = mailboxes.build();

                for (ExportDataSource eds : m_dataSourcesByPartition.get(partition).values()) {
                    eds.updateAckMailboxes(Pair.of(m_mbox, mailboxHsids));
                }
            }
        }
    });
    try {
        fut.get();
    } catch (Throwable t) {
        Throwables.propagate(t);
    }

}

From source file:io.druid.query.ChainedExecutionQueryRunner.java

@Override
public Sequence<T> run(final Query<T> query, final Map<String, Object> responseContext) {
    final int priority = query.getContextPriority(0);

    return new BaseSequence<T, Iterator<T>>(new BaseSequence.IteratorMaker<T, Iterator<T>>() {
        @Override
        public Iterator<T> make() {
            // Make it a List<> to materialize all of the values (so that it will submit everything to the executor)
            ListenableFuture<List<Iterable<T>>> futures = Futures.allAsList(Lists.newArrayList(Iterables
                    .transform(queryables, new Function<QueryRunner<T>, ListenableFuture<Iterable<T>>>() {
                        @Override
                        public ListenableFuture<Iterable<T>> apply(final QueryRunner<T> input) {
                            if (input == null) {
                                throw new ISE(
                                        "Null queryRunner! Looks to be some segment unmapping action happening");
                            }

                            return exec.submit(new AbstractPrioritizedCallable<Iterable<T>>(priority) {
                                @Override
                                public Iterable<T> call() throws Exception {
                                    try {
                                        Sequence<T> result = input.run(query, responseContext);
                                        if (result == null) {
                                            throw new ISE("Got a null result! Segments are missing!");
                                        }

                                        List<T> retVal = Sequences.toList(result, Lists.<T>newArrayList());
                                        if (retVal == null) {
                                            throw new ISE("Got a null list of results! WTF?!");
                                        }

                                        return retVal;
                                    } catch (QueryInterruptedException e) {
                                        throw Throwables.propagate(e);
                                    } catch (Exception e) {
                                        log.error(e, "Exception with one of the sequences!");
                                        throw Throwables.propagate(e);
                                    }
                                }
                            });
                        }
                    })));

            queryWatcher.registerQuery(query, futures);

            try {
                final Number timeout = query.getContextValue(QueryContextKeys.TIMEOUT, (Number) null);
                return new MergeIterable<>(ordering.nullsFirst(), timeout == null ? futures.get()
                        : futures.get(timeout.longValue(), TimeUnit.MILLISECONDS)).iterator();
            } catch (InterruptedException e) {
                log.warn(e, "Query interrupted, cancelling pending results, query id [%s]", query.getId());
                futures.cancel(true);
                throw new QueryInterruptedException("Query interrupted");
            } catch (CancellationException e) {
                throw new QueryInterruptedException("Query cancelled");
            } catch (TimeoutException e) {
                log.info("Query timeout, cancelling pending results for query id [%s]", query.getId());
                futures.cancel(true);
                throw new QueryInterruptedException("Query timeout");
            } catch (ExecutionException e) {
                throw Throwables.propagate(e.getCause());
            }
        }

        @Override
        public void cleanup(Iterator<T> tIterator) {

        }
    });
}
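
This is the only example on the page that uses the timed overload, futures.get(timeout, TimeUnit.MILLISECONDS), falling back to the untimed get() when no timeout is configured. A minimal sketch of that pattern follows; the 5000 ms timeout is a made-up value, and cancel(true) mirrors the cleanup the Druid code performs on timeout.

import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;

public class TimedGetExample {
    public static void main(String[] args) throws InterruptedException, ExecutionException {
        ListenableFuture<String> future = Futures.immediateFuture("rows");
        Long timeoutMs = 5000L; // hypothetical; null would mean "no timeout configured"
        try {
            // With a timeout, get() throws TimeoutException instead of blocking forever.
            String result = timeoutMs == null ? future.get()
                    : future.get(timeoutMs, TimeUnit.MILLISECONDS);
            System.out.println(result);
        } catch (TimeoutException e) {
            // Mirror the example above: cancel outstanding work on timeout.
            future.cancel(true);
        }
    }
}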

From source file:eu.eubrazilcc.lvl.core.entrez.EntrezHelper.java

private void efetchPubmedXMLFiles(final List<String> ids, final int retstart, final int retmax,
        final File directory) throws Exception {
    // save the bulk of files to a temporary file
    final File tmpFile = createTempFile("pm-", ".tmp", directory);
    final String idsParam = Joiner.on(",").skipNulls().join(ids);
    LOGGER.trace("Fetching " + ids.size() + " files from PubMed, retstart=" + retstart + ", retmax=" + retmax
            + ", file=" + tmpFile.getPath());
    httpClient.request(EFETCH_BASE_URI).post()
            .bodyForm(efetchForm(PUBMED_DB, idsParam, retstart, retmax, "xml").build())
            .saveContent(tmpFile, true);
    final ListenableFuture<String[]> future = TASK_RUNNER.submit(new Callable<String[]>() {
        @Override
        public String[] call() throws Exception {
            final Set<String> files = newHashSet();
            final PubmedArticleSet articleSet = PUBMED_XMLB.typeFromFile(tmpFile);
            checkState(articleSet != null,
                    "Expected PubMed article XML, but no content read from temporary file downloaded with efetch");
            if (articleSet.getPubmedArticle() != null) {
                final List<PubmedArticle> articles = articleSet.getPubmedArticle();
                for (final PubmedArticle article : articles) {
                    final String pmid = getPubMedId(article);
                    if (pmid != null) {
                        final File file = new File(directory, pmid + ".xml");
                        PUBMED_XMLB.typeToFile(article, file);
                        files.add(file.getCanonicalPath());
                    } else {
                        LOGGER.warn("Ingoring malformed article (pmid not found) in efetch response");
                    }
                }
            } else {
                LOGGER.warn("Ingoring malformed article (PubmedArticle not found) in efetch response");
            }
            return files.toArray(new String[files.size()]);
        }
    });
    addCallback(future, new FutureCallback<String[]>() {
        @Override
        public void onSuccess(final String[] result) {
            LOGGER.info("One bulk publication file was processed successfully: " + tmpFile.getName()
                    + ", number of created files: " + result.length);
            deleteQuietly(tmpFile);
        }

        @Override
        public void onFailure(final Throwable error) {
            LOGGER.error("Failed to process bulk publication file " + tmpFile.getName(), error);
        }
    });
    // wait for files to be processed
    future.get();
}

From source file:org.apache.qpid.server.model.AbstractConfiguredObject.java

protected final <R> R doSync(ListenableFuture<R> async) {
    try {
        return async.get();
    } catch (InterruptedException e) {
        throw new ServerScopedRuntimeException(e);
    } catch (ExecutionException e) {
        Throwable cause = e.getCause();
        if (cause instanceof RuntimeException) {
            throw (RuntimeException) cause;
        } else if (cause instanceof Error) {
            throw (Error) cause;
        } else if (cause != null) {
            throw new ServerScopedRuntimeException(cause);
        } else {
            throw new ServerScopedRuntimeException(e);
        }

    }
}
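
The helper above converts the two checked exceptions of get() into unchecked ones, rethrowing the original cause when it is already a RuntimeException or Error. Here is a sketch of the same unwrapping pattern outside Qpid; RuntimeException stands in for ServerScopedRuntimeException, which is Qpid-specific, and restoring the interrupt flag is an addition, not part of the original.

import java.util.concurrent.ExecutionException;

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;

public class DoSyncExample {
    // Blocks on the future, rethrowing failures unchecked so callers need no try/catch.
    static <R> R doSync(ListenableFuture<R> async) {
        try {
            return async.get();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        } catch (ExecutionException e) {
            Throwable cause = e.getCause();
            if (cause instanceof RuntimeException) {
                throw (RuntimeException) cause;
            } else if (cause instanceof Error) {
                throw (Error) cause;
            }
            throw new RuntimeException(cause != null ? cause : e);
        }
    }

    public static void main(String[] args) {
        System.out.println(doSync(Futures.immediateFuture("done")));
    }
}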

From source file:eu.eubrazilcc.lvl.core.entrez.EntrezHelper.java

private void efetchFlatFiles(final List<String> ids, final int retstart, final int retmax, final File directory)
        throws Exception {
    // save the bulk of files to a temporary file
    final File tmpFile = createTempFile("gb-", ".tmp", directory);
    final String idsParam = Joiner.on(",").skipNulls().join(ids);
    LOGGER.trace("Fetching " + ids.size() + " files from GenBank, retstart=" + retstart + ", retmax=" + retmax
            + ", file=" + tmpFile.getPath());
    httpClient.request(EFETCH_BASE_URI).post()
            .bodyForm(efetchForm(NUCLEOTIDE_DB, idsParam, retstart, retmax, "text").build())
            .saveContent(tmpFile, true);
    // go over the file extracting the sequences
    final ListenableFuture<String[]> future = TASK_RUNNER.submit(new Callable<String[]>() {
        @Override
        public String[] call() throws Exception {
            final Set<String> files = newHashSet();
            final BufferedReader reader = newBufferedReader(tmpFile.toPath(), DEFAULT_CHARSET);
            int i = -1;
            File file = null;
            ByteSink sink = null;
            String line = null;
            while ((line = reader.readLine()) != null) {
                // start parsing a new fragment
                if (file == null && i < ids.size() - 1) {
                    file = new File(directory, ids.get(++i) + ".gb");
                    sink = asByteSink(file, FileWriteMode.APPEND);
                    LOGGER.info("Processing file: " + file.getCanonicalPath());
                }
                if (i < ids.size()) {
                    // write non-empty lines to the file
                    if (isNotBlank(line)) {
                        if (sink != null) {
                            sink.write((line + "\n").getBytes(DEFAULT_CHARSET));
                        } else {
                            LOGGER.warn("Ingoring line when all files were closed: " + line);
                        }
                    }
                    // process line
                    if (line.startsWith("VERSION    ")) {
                        checkState(line.contains(ids.get(i)), "Id not found in the VERSION section");
                    } else if (line.startsWith("//")) {
                        files.add(file.getCanonicalPath());
                        file = null;
                    }
                } else {
                    if (isNotBlank(line)) {
                        LOGGER.warn("Ingoring line after all sequences were processed: " + line);
                    }
                }
            }
            return files.toArray(new String[files.size()]);
        }
    });
    addCallback(future, new FutureCallback<String[]>() {
        @Override
        public void onSuccess(final String[] result) {
            LOGGER.info("One bulk sequence file was processed successfully: " + tmpFile.getName()
                    + ", number of created files: " + result.length);
            deleteQuietly(tmpFile);
        }

        @Override
        public void onFailure(final Throwable error) {
            LOGGER.error("Failed to process bulk sequence file " + tmpFile.getName(), error);
        }
    });
    // wait for files to be processed
    future.get();
}

From source file:io.druid.query.lookup.KafkaLookupExtractorFactory.java

@Override
public boolean start() {
    synchronized (started) {
        if (started.get()) {
            LOG.warn("Already started, not starting again");
            return started.get();
        }
        if (executorService.isShutdown()) {
            LOG.warn("Already shut down, not starting again");
            return false;
        }
        final Properties kafkaProperties = new Properties();
        kafkaProperties.putAll(getKafkaProperties());
        if (kafkaProperties.containsKey("group.id")) {
            throw new IAE(
                    "Cannot set kafka property [group.id]. Property is randomly generated for you. Found [%s]",
                    kafkaProperties.getProperty("group.id"));
        }
        if (kafkaProperties.containsKey("auto.offset.reset")) {
            throw new IAE(
                    "Cannot set kafka property [auto.offset.reset]. Property will be forced to [smallest]. Found [%s]",
                    kafkaProperties.getProperty("auto.offset.reset"));
        }
        Preconditions.checkNotNull(kafkaProperties.getProperty("zookeeper.connect"),
                "zookeeper.connect required property");

        kafkaProperties.setProperty("group.id", factoryId);
        final String topic = getKafkaTopic();
        LOG.debug("About to listen to topic [%s] with group.id [%s]", topic, factoryId);
        final Map<String, String> map = cacheManager.getCacheMap(factoryId);
        mapRef.set(map);
        // Enable publish-subscribe
        kafkaProperties.setProperty("auto.offset.reset", "smallest");

        final CountDownLatch startingReads = new CountDownLatch(1);

        final ListenableFuture<?> future = executorService.submit(new Runnable() {
            @Override
            public void run() {
                while (!executorService.isShutdown()) {
                    consumerConnector = buildConnector(kafkaProperties);
                    try {
                        if (executorService.isShutdown()) {
                            break;
                        }

                        final List<KafkaStream<String, String>> streams = consumerConnector
                                .createMessageStreamsByFilter(new Whitelist(Pattern.quote(topic)), 1,
                                        DEFAULT_STRING_DECODER, DEFAULT_STRING_DECODER);

                        if (streams == null || streams.isEmpty()) {
                            throw new IAE("Topic [%s] had no streams", topic);
                        }
                        if (streams.size() > 1) {
                            throw new ISE("Topic [%s] has %d streams! expected 1", topic, streams.size());
                        }
                        final KafkaStream<String, String> kafkaStream = streams.get(0);

                        startingReads.countDown();

                        for (final MessageAndMetadata<String, String> messageAndMetadata : kafkaStream) {
                            final String key = messageAndMetadata.key();
                            final String message = messageAndMetadata.message();
                            if (key == null || message == null) {
                                LOG.error("Bad key/message from topic [%s]: [%s]", topic, messageAndMetadata);
                                continue;
                            }
                            doubleEventCount.incrementAndGet();
                            map.put(key, message);
                            doubleEventCount.incrementAndGet();
                            LOG.trace("Placed key[%s] val[%s]", key, message);
                        }
                    } catch (Exception e) {
                        LOG.error(e, "Error reading stream for topic [%s]", topic);
                    } finally {
                        consumerConnector.shutdown();
                    }
                }
            }
        });
        Futures.addCallback(future, new FutureCallback<Object>() {
            @Override
            public void onSuccess(Object result) {
                LOG.debug("Success listening to [%s]", topic);
            }

            @Override
            public void onFailure(Throwable t) {
                if (t instanceof CancellationException) {
                    LOG.debug("Topic [%s] cancelled", topic);
                } else {
                    LOG.error(t, "Error in listening to [%s]", topic);
                }
            }
        }, MoreExecutors.sameThreadExecutor());
        this.future = future;
        final Stopwatch stopwatch = Stopwatch.createStarted();
        try {
            while (!startingReads.await(100, TimeUnit.MILLISECONDS) && connectTimeout > 0L) {
                // Don't return until we have actually connected
                if (future.isDone()) {
                    future.get();
                } else {
                    if (stopwatch.elapsed(TimeUnit.MILLISECONDS) > connectTimeout) {
                        throw new TimeoutException("Failed to connect to kafka in sufficient time");
                    }
                }
            }
        } catch (InterruptedException | ExecutionException | TimeoutException e) {
            executorService.shutdown();
            if (!future.isDone() && !future.cancel(false)) {
                LOG.warn("Could not cancel kafka listening thread");
            }
            LOG.error(e, "Failed to start kafka extraction factory");
            cacheManager.delete(factoryId);
            return false;
        }

        started.set(true);
        return true;
    }
}