Example usage for com.google.common.util.concurrent Futures allAsList

List of usage examples for com.google.common.util.concurrent Futures allAsList

Introduction

On this page you can find example usages of com.google.common.util.concurrent Futures allAsList.

Prototype

@Beta
@CheckReturnValue
public static <V> ListenableFuture<List<V>> allAsList(
        Iterable<? extends ListenableFuture<? extends V>> futures) 

Document

Creates a new ListenableFuture whose value is a list containing the values of all its input futures, if all succeed.
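
For orientation before the project examples below, here is a minimal, self-contained sketch (class and variable names are illustrative and not taken from any of the examples). Two tasks are submitted to a ListeningExecutorService, their futures are combined with allAsList, and the combined result is read with get(); the combined future preserves the input order and fails if any input future fails.

import com.google.common.collect.ImmutableList;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.List;
import java.util.concurrent.Executors;

public class AllAsListSketch {
    public static void main(String[] args) throws Exception {
        ListeningExecutorService executor = MoreExecutors
                .listeningDecorator(Executors.newFixedThreadPool(2));
        try {
            // Submit two independent tasks; each returns a ListenableFuture.
            ListenableFuture<String> first = executor.submit(() -> "first");
            ListenableFuture<String> second = executor.submit(() -> "second");

            // Combine them: the returned future completes once both inputs succeed,
            // and its value keeps the input order.
            ListenableFuture<List<String>> combined = Futures.allAsList(ImmutableList.of(first, second));

            // Blocking here for brevity; the examples below also show non-blocking
            // composition with Futures.transform and Futures.addCallback.
            List<String> results = combined.get();
            System.out.println(results); // prints [first, second]
        } finally {
            executor.shutdownNow();
        }
    }
}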

Usage

From source file:com.google.devtools.build.android.AndroidResourceProcessor.java

@Nullable
public SymbolLoader loadResourceSymbolTable(List<SymbolFileProvider> libraries, String appPackageName,
        Path primaryRTxt, Multimap<String, SymbolLoader> libMap) throws IOException {
    // The reported availableProcessors may be higher than the actual resources
    // (on a shared system). On the other hand, a lot of the work is I/O, so it's not completely
    // CPU bound. As a compromise, divide by 2 the reported availableProcessors.
    int numThreads = Math.max(1, Runtime.getRuntime().availableProcessors() / 2);
    ListeningExecutorService executorService = MoreExecutors
            .listeningDecorator(Executors.newFixedThreadPool(numThreads));
    try (Closeable closeable = ExecutorServiceCloser.createWith(executorService)) {
        // Load the package names from the manifest files.
        Map<SymbolFileProvider, ListenableFuture<String>> packageJobs = new HashMap<>();
        for (final SymbolFileProvider lib : libraries) {
            packageJobs.put(lib, executorService.submit(new PackageParsingTask(lib.getManifest())));
        }
        Map<SymbolFileProvider, String> packageNames = new HashMap<>();
        try {
            for (Map.Entry<SymbolFileProvider, ListenableFuture<String>> entry : packageJobs.entrySet()) {
                packageNames.put(entry.getKey(), entry.getValue().get());
            }
        } catch (InterruptedException | ExecutionException e) {
            throw new IOException("Failed to load package name: ", e);
        }
        // Associate the packages with symbol files.
        for (SymbolFileProvider lib : libraries) {
            String packageName = packageNames.get(lib);
            // If the library package matches the app package skip -- the final app resource IDs are
            // stored in the primaryRTxt file.
            if (appPackageName.equals(packageName)) {
                continue;
            }
            File rFile = lib.getSymbolFile();
            // If the library has no resource, this file won't exist.
            if (rFile.isFile()) {
                SymbolLoader libSymbols = new SymbolLoader(rFile, stdLogger);
                libMap.put(packageName, libSymbols);
            }
        }
        // Even if there are no libraries, load fullSymbolValues, in case we only have resources
        // defined for the binary.
        File primaryRTxtFile = primaryRTxt.toFile();
        SymbolLoader fullSymbolValues = null;
        if (primaryRTxtFile.isFile()) {
            fullSymbolValues = new SymbolLoader(primaryRTxtFile, stdLogger);
        }
        // Now load the symbol files in parallel.
        List<ListenableFuture<?>> loadJobs = new ArrayList<>();
        Iterable<SymbolLoader> toLoad = fullSymbolValues != null
                ? Iterables.concat(libMap.values(), ImmutableList.of(fullSymbolValues))
                : libMap.values();
        for (final SymbolLoader loader : toLoad) {
            loadJobs.add(executorService.submit(new SymbolLoadingTask(loader)));
        }
        try {
            Futures.allAsList(loadJobs).get();
        } catch (InterruptedException | ExecutionException e) {
            throw new IOException("Failed to load SymbolFile: ", e);
        }
        return fullSymbolValues;
    }
}

From source file:com.facebook.buck.distributed.build_client.BuildPhase.java

@VisibleForTesting
ListenableFuture<List<BuildSlaveStatus>> fetchBuildSlaveStatusesAsync(BuildJob job,
        ListeningExecutorService networkExecutorService) {
    if (!job.isSetBuildSlaves()) {
        return Futures.immediateFuture(ImmutableList.of());
    }

    StampedeId stampedeId = job.getStampedeId();
    List<ListenableFuture<Optional<BuildSlaveStatus>>> slaveStatusFutures = new LinkedList<>();

    // TODO(shivanker, alisdair): Replace this with a multiFetch request.
    for (BuildSlaveInfo info : job.getBuildSlaves()) {
        BuildSlaveRunId runId = info.getBuildSlaveRunId();
        slaveStatusFutures.add(
                networkExecutorService.submit(() -> distBuildService.fetchBuildSlaveStatus(stampedeId, runId)));
    }

    return Futures.transform(
            Futures.allAsList(slaveStatusFutures), slaveStatusList -> slaveStatusList.stream()
                    .filter(Optional::isPresent).map(Optional::get).collect(Collectors.toList()),
            MoreExecutors.directExecutor());
}
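
The Buck example above composes rather than blocks: Futures.transform over Futures.allAsList runs the post-processing on directExecutor() once every status future has completed. A stripped-down, hypothetical version of that pattern (the method and parameter names are illustrative, not Buck APIs) could look like this:

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;

final class TransformOverAllAsList {
    /**
     * Combines per-item lookups and drops empty results without blocking;
     * the transformation runs via directExecutor() when the combined future completes.
     */
    static ListenableFuture<List<String>> presentValues(
            List<ListenableFuture<Optional<String>>> lookups) {
        return Futures.transform(
                Futures.allAsList(lookups),
                results -> results.stream()
                        .filter(Optional::isPresent)
                        .map(Optional::get)
                        .collect(Collectors.toList()),
                MoreExecutors.directExecutor());
    }
}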

From source file:com.microsoft.intellij.helpers.o365.Office365ManagerImpl.java

@Override
    @NotNull
public ListenableFuture<List<ServicePrincipal>> addServicePrincipals(
        @NotNull final List<ServicePrincipal> servicePrincipals) {

    return requestFutureWithToken(new RequestCallback<ListenableFuture<List<ServicePrincipal>>>() {
        @Override
        public ListenableFuture<List<ServicePrincipal>> execute() throws Throwable {
            List<ListenableFuture<ServicePrincipal>> futures = Lists.transform(servicePrincipals,
                    new Function<ServicePrincipal, ListenableFuture<ServicePrincipal>>() {
                        @Override
                        public ListenableFuture<ServicePrincipal> apply(ServicePrincipal servicePrincipal) {
                            return getDirectoryClient().getservicePrincipals().add(servicePrincipal);
                        }
                    });

            return Futures.allAsList(futures);
        }
    });
}

From source file:org.apache.druid.indexing.kafka.IncrementalPublishingKafkaIndexTaskRunner.java

private TaskStatus runInternal(TaskToolbox toolbox) throws Exception {
    log.info("Starting up!");

    startTime = DateTimes.nowUtc();
    status = Status.STARTING;
    this.toolbox = toolbox;

    if (!restoreSequences()) {
        final TreeMap<Integer, Map<Integer, Long>> checkpoints = getCheckPointsFromContext(toolbox, task);
        if (checkpoints != null) {
            Iterator<Entry<Integer, Map<Integer, Long>>> sequenceOffsets = checkpoints.entrySet().iterator();
            Map.Entry<Integer, Map<Integer, Long>> previous = sequenceOffsets.next();
            while (sequenceOffsets.hasNext()) {
                Map.Entry<Integer, Map<Integer, Long>> current = sequenceOffsets.next();
                sequences.add(new SequenceMetadata(previous.getKey(),
                        StringUtils.format("%s_%s", ioConfig.getBaseSequenceName(), previous.getKey()),
                        previous.getValue(), current.getValue(), true));
                previous = current;
            }
            sequences.add(new SequenceMetadata(previous.getKey(),
                    StringUtils.format("%s_%s", ioConfig.getBaseSequenceName(), previous.getKey()),
                    previous.getValue(), endOffsets, false));
        } else {
            sequences
                    .add(new SequenceMetadata(0, StringUtils.format("%s_%s", ioConfig.getBaseSequenceName(), 0),
                            ioConfig.getStartPartitions().getPartitionOffsetMap(), endOffsets, false));
        }
    }
    log.info("Starting with sequences:  %s", sequences);

    if (chatHandlerProvider.isPresent()) {
        log.info("Found chat handler of class[%s]", chatHandlerProvider.get().getClass().getName());
        chatHandlerProvider.get().register(task.getId(), this, false);
    } else {
        log.warn("No chat handler detected");
    }

    runThread = Thread.currentThread();

    // Set up FireDepartmentMetrics
    final FireDepartment fireDepartmentForMetrics = new FireDepartment(task.getDataSchema(),
            new RealtimeIOConfig(null, null, null), null);
    fireDepartmentMetrics = fireDepartmentForMetrics.getMetrics();
    toolbox.getMonitorScheduler().addMonitor(
            TaskRealtimeMetricsMonitorBuilder.build(task, fireDepartmentForMetrics, rowIngestionMeters));

    final String lookupTier = task.getContextValue(RealtimeIndexTask.CTX_KEY_LOOKUP_TIER);
    LookupNodeService lookupNodeService = lookupTier == null ? toolbox.getLookupNodeService()
            : new LookupNodeService(lookupTier);
    DiscoveryDruidNode discoveryDruidNode = new DiscoveryDruidNode(toolbox.getDruidNode(),
            DruidNodeDiscoveryProvider.NODE_TYPE_PEON, ImmutableMap.of(toolbox.getDataNodeService().getName(),
                    toolbox.getDataNodeService(), lookupNodeService.getName(), lookupNodeService));

    Throwable caughtExceptionOuter = null;
    try (final KafkaConsumer<byte[], byte[]> consumer = task.newConsumer()) {
        toolbox.getDataSegmentServerAnnouncer().announce();
        toolbox.getDruidNodeAnnouncer().announce(discoveryDruidNode);

        appenderator = task.newAppenderator(fireDepartmentMetrics, toolbox);
        driver = task.newDriver(appenderator, toolbox, fireDepartmentMetrics);

        final String topic = ioConfig.getStartPartitions().getTopic();

        // Start up, set up initial offsets.
        final Object restoredMetadata = driver.startJob();
        if (restoredMetadata == null) {
            // no persist has happened so far
            // so either this is a brand new task or replacement of a failed task
            Preconditions.checkState(
                    sequences.get(0).startOffsets.entrySet().stream()
                            .allMatch(partitionOffsetEntry -> Longs.compare(partitionOffsetEntry.getValue(),
                                    ioConfig.getStartPartitions().getPartitionOffsetMap()
                                            .get(partitionOffsetEntry.getKey())) >= 0),
                    "Sequence offsets are not compatible with start offsets of task");
            nextOffsets.putAll(sequences.get(0).startOffsets);
        } else {
            final Map<String, Object> restoredMetadataMap = (Map) restoredMetadata;
            final KafkaPartitions restoredNextPartitions = toolbox.getObjectMapper()
                    .convertValue(restoredMetadataMap.get(METADATA_NEXT_PARTITIONS), KafkaPartitions.class);
            nextOffsets.putAll(restoredNextPartitions.getPartitionOffsetMap());

            // Sanity checks.
            if (!restoredNextPartitions.getTopic().equals(ioConfig.getStartPartitions().getTopic())) {
                throw new ISE("WTF?! Restored topic[%s] but expected topic[%s]",
                        restoredNextPartitions.getTopic(), ioConfig.getStartPartitions().getTopic());
            }

            if (!nextOffsets.keySet().equals(ioConfig.getStartPartitions().getPartitionOffsetMap().keySet())) {
                throw new ISE("WTF?! Restored partitions[%s] but expected partitions[%s]", nextOffsets.keySet(),
                        ioConfig.getStartPartitions().getPartitionOffsetMap().keySet());
            }
            // sequences size can be 0 only when all sequences got published and task stopped before it could finish
            // which is super rare
            if (sequences.size() == 0 || sequences.get(sequences.size() - 1).isCheckpointed()) {
                this.endOffsets.putAll(sequences.size() == 0 ? nextOffsets
                        : sequences.get(sequences.size() - 1).getEndOffsets());
                log.info("End offsets changed to [%s]", endOffsets);
            }
        }

        // Set up committer.
        final Supplier<Committer> committerSupplier = () -> {
            final Map<Integer, Long> snapshot = ImmutableMap.copyOf(nextOffsets);
            lastPersistedOffsets.clear();
            lastPersistedOffsets.putAll(snapshot);

            return new Committer() {
                @Override
                public Object getMetadata() {
                    return ImmutableMap.of(METADATA_NEXT_PARTITIONS,
                            new KafkaPartitions(ioConfig.getStartPartitions().getTopic(), snapshot));
                }

                @Override
                public void run() {
                    // Do nothing.
                }
            };
        };

        // restart publishing of sequences (if any)
        maybePersistAndPublishSequences(committerSupplier);

        Set<Integer> assignment = assignPartitionsAndSeekToNext(consumer, topic);

        ingestionState = IngestionState.BUILD_SEGMENTS;

        // Main loop.
        // Could eventually support leader/follower mode (for keeping replicas more in sync)
        boolean stillReading = !assignment.isEmpty();
        status = Status.READING;
        Throwable caughtExceptionInner = null;
        try {
            while (stillReading) {
                if (possiblyPause()) {
                    // The partition assignments may have changed while paused by a call to setEndOffsets() so reassign
                    // partitions upon resuming. This is safe even if the end offsets have not been modified.
                    assignment = assignPartitionsAndSeekToNext(consumer, topic);

                    if (assignment.isEmpty()) {
                        log.info("All partitions have been fully read");
                        publishOnStop.set(true);
                        stopRequested.set(true);
                    }
                }

                // if stop is requested or task's end offset is set by call to setEndOffsets method with finish set to true
                if (stopRequested.get() || sequences.get(sequences.size() - 1).isCheckpointed()) {
                    status = Status.PUBLISHING;
                    break;
                }

                if (backgroundThreadException != null) {
                    throw new RuntimeException(backgroundThreadException);
                }

                checkPublishAndHandoffFailure();

                maybePersistAndPublishSequences(committerSupplier);

                // The retrying business is because the KafkaConsumer throws OffsetOutOfRangeException if the seeked-to
                // offset is not present in the topic-partition. This can happen if we're asking a task to read from data
                // that has not been written yet (which is totally legitimate). So let's wait for it to show up.
                ConsumerRecords<byte[], byte[]> records = ConsumerRecords.empty();
                try {
                    records = consumer.poll(KafkaIndexTask.POLL_TIMEOUT_MILLIS);
                } catch (OffsetOutOfRangeException e) {
                    log.warn("OffsetOutOfRangeException with message [%s]", e.getMessage());
                    possiblyResetOffsetsOrWait(e.offsetOutOfRangePartitions(), consumer, toolbox);
                    stillReading = !assignment.isEmpty();
                }

                SequenceMetadata sequenceToCheckpoint = null;
                for (ConsumerRecord<byte[], byte[]> record : records) {
                    log.trace("Got topic[%s] partition[%d] offset[%,d].", record.topic(), record.partition(),
                            record.offset());

                    if (record.offset() < endOffsets.get(record.partition())) {
                        if (record.offset() != nextOffsets.get(record.partition())) {
                            if (ioConfig.isSkipOffsetGaps()) {
                                log.warn("Skipped to offset[%,d] after offset[%,d] in partition[%d].",
                                        record.offset(), nextOffsets.get(record.partition()),
                                        record.partition());
                            } else {
                                throw new ISE("WTF?! Got offset[%,d] after offset[%,d] in partition[%d].",
                                        record.offset(), nextOffsets.get(record.partition()),
                                        record.partition());
                            }
                        }

                        try {
                            final byte[] valueBytes = record.value();
                            final List<InputRow> rows = valueBytes == null
                                    ? Utils.nullableListOf((InputRow) null)
                                    : parser.parseBatch(ByteBuffer.wrap(valueBytes));
                            boolean isPersistRequired = false;

                            final SequenceMetadata sequenceToUse = sequences.stream()
                                    .filter(sequenceMetadata -> sequenceMetadata.canHandle(record)).findFirst()
                                    .orElse(null);

                            if (sequenceToUse == null) {
                                throw new ISE(
                                        "WTH?! cannot find any valid sequence for record with partition [%d] and offset [%d]. Current sequences: %s",
                                        record.partition(), record.offset(), sequences);
                            }

                            for (InputRow row : rows) {
                                if (row != null && task.withinMinMaxRecordTime(row)) {
                                    final AppenderatorDriverAddResult addResult = driver.add(row,
                                            sequenceToUse.getSequenceName(), committerSupplier,
                                            // skip segment lineage check as there will always be one segment
                                            // for combination of sequence and segment granularity.
                                            // It is necessary to skip it as the task puts messages polled from all the
                                            // assigned Kafka partitions into a single Druid segment, thus ordering of
                                            // messages among replica tasks across assigned partitions is not guaranteed
                                            // which may cause replica tasks to ask for segments with different interval
                                            // in different order which might cause SegmentAllocateAction to fail.
                                            true,
                                            // do not allow incremental persists to happen until all the rows from this batch
                                            // of rows are indexed
                                            false);

                                    if (addResult.isOk()) {
                                        // If the number of rows in the segment exceeds the threshold after adding a row,
                                        // move the segment out from the active segments of BaseAppenderatorDriver to make a new segment.
                                        if (addResult.isPushRequired(tuningConfig)
                                                && !sequenceToUse.isCheckpointed()) {
                                            sequenceToCheckpoint = sequenceToUse;
                                        }
                                        isPersistRequired |= addResult.isPersistRequired();
                                    } else {
                                        // Failure to allocate segment puts determinism at risk, bail out to be safe.
                                        // May want configurable behavior here at some point.
                                        // If we allow continuing, then consider blacklisting the interval for a while to avoid constant checks.
                                        throw new ISE("Could not allocate segment for row with timestamp[%s]",
                                                row.getTimestamp());
                                    }

                                    if (addResult.getParseException() != null) {
                                        handleParseException(addResult.getParseException(), record);
                                    } else {
                                        rowIngestionMeters.incrementProcessed();
                                    }
                                } else {
                                    rowIngestionMeters.incrementThrownAway();
                                }
                            }
                            if (isPersistRequired) {
                                Futures.addCallback(driver.persistAsync(committerSupplier.get()),
                                        new FutureCallback<Object>() {
                                            @Override
                                            public void onSuccess(@Nullable Object result) {
                                                log.info("Persist completed with metadata [%s]", result);
                                            }

                                            @Override
                                            public void onFailure(Throwable t) {
                                                log.error("Persist failed, dying");
                                                backgroundThreadException = t;
                                            }
                                        });
                            }
                        } catch (ParseException e) {
                            handleParseException(e, record);
                        }

                        nextOffsets.put(record.partition(), record.offset() + 1);
                    }

                    if (nextOffsets.get(record.partition()).equals(endOffsets.get(record.partition()))
                            && assignment.remove(record.partition())) {
                        log.info("Finished reading topic[%s], partition[%,d].", record.topic(),
                                record.partition());
                        KafkaIndexTask.assignPartitions(consumer, topic, assignment);
                        stillReading = !assignment.isEmpty();
                    }
                }

                if (System.currentTimeMillis() > nextCheckpointTime) {
                    sequenceToCheckpoint = sequences.get(sequences.size() - 1);
                }

                if (sequenceToCheckpoint != null && stillReading) {
                    Preconditions.checkArgument(
                            sequences.get(sequences.size() - 1).getSequenceName()
                                    .equals(sequenceToCheckpoint.getSequenceName()),
                            "Cannot checkpoint a sequence [%s] which is not the latest one, sequences %s",
                            sequenceToCheckpoint, sequences);
                    requestPause();
                    final CheckPointDataSourceMetadataAction checkpointAction = new CheckPointDataSourceMetadataAction(
                            task.getDataSource(), ioConfig.getTaskGroupId(),
                            task.getIOConfig().getBaseSequenceName(),
                            new KafkaDataSourceMetadata(
                                    new KafkaPartitions(topic, sequenceToCheckpoint.getStartOffsets())),
                            new KafkaDataSourceMetadata(new KafkaPartitions(topic, nextOffsets)));
                    if (!toolbox.getTaskActionClient().submit(checkpointAction)) {
                        throw new ISE("Checkpoint request with offsets [%s] failed, dying", nextOffsets);
                    }
                }
            }
            ingestionState = IngestionState.COMPLETED;
        } catch (Exception e) {
            // (1) catch all exceptions while reading from kafka
            caughtExceptionInner = e;
            log.error(e, "Encountered exception in run() before persisting.");
            throw e;
        } finally {
            log.info("Persisting all pending data");
            try {
                driver.persist(committerSupplier.get()); // persist pending data
            } catch (Exception e) {
                if (caughtExceptionInner != null) {
                    caughtExceptionInner.addSuppressed(e);
                } else {
                    throw e;
                }
            }
        }

        synchronized (statusLock) {
            if (stopRequested.get() && !publishOnStop.get()) {
                throw new InterruptedException("Stopping without publishing");
            }

            status = Status.PUBLISHING;
        }

        for (SequenceMetadata sequenceMetadata : sequences) {
            if (!publishingSequences.contains(sequenceMetadata.getSequenceName())) {
                // this is done to prevent checks in sequence specific commit supplier from failing
                sequenceMetadata.setEndOffsets(nextOffsets);
                sequenceMetadata.updateAssignments(nextOffsets);
                publishingSequences.add(sequenceMetadata.getSequenceName());
                // persist already done in finally, so directly add to publishQueue
                publishAndRegisterHandoff(sequenceMetadata);
            }
        }

        if (backgroundThreadException != null) {
            throw new RuntimeException(backgroundThreadException);
        }

        // Wait for publish futures to complete.
        Futures.allAsList(publishWaitList).get();

        // Wait for handoff futures to complete.
        // Note that every publishing task (created by calling AppenderatorDriver.publish()) has a corresponding
        // handoffFuture. handoffFuture can throw an exception if 1) the corresponding publishFuture failed or 2) it
        // failed to persist sequences. It might also return null if handoff failed, but was recoverable.
        // See publishAndRegisterHandoff() for details.
        List<SegmentsAndMetadata> handedOffList = Collections.emptyList();
        if (tuningConfig.getHandoffConditionTimeout() == 0) {
            handedOffList = Futures.allAsList(handOffWaitList).get();
        } else {
            try {
                handedOffList = Futures.allAsList(handOffWaitList)
                        .get(tuningConfig.getHandoffConditionTimeout(), TimeUnit.MILLISECONDS);
            } catch (TimeoutException e) {
                // Handoff timeout is not an indexing failure, but coordination failure. We simply ignore timeout exception
                // here.
                log.makeAlert("Timed out after [%d] millis waiting for handoffs",
                        tuningConfig.getHandoffConditionTimeout()).addData("TaskId", task.getId()).emit();
            }
        }

        for (SegmentsAndMetadata handedOff : handedOffList) {
            log.info("Handoff completed for segments[%s] with metadata[%s].",
                    Joiner.on(", ")
                            .join(handedOff.getSegments().stream().map(DataSegment::getIdentifier)
                                    .collect(Collectors.toList())),
                    Preconditions.checkNotNull(handedOff.getCommitMetadata(), "commitMetadata"));
        }

        appenderator.close();
    } catch (InterruptedException | RejectedExecutionException e) {
        // (2) catch InterruptedException and RejectedExecutionException thrown for the whole ingestion steps including
        // the final publishing.
        caughtExceptionOuter = e;
        try {
            Futures.allAsList(publishWaitList).cancel(true);
            Futures.allAsList(handOffWaitList).cancel(true);
            if (appenderator != null) {
                appenderator.closeNow();
            }
        } catch (Exception e2) {
            e.addSuppressed(e2);
        }

        // handle the InterruptedException that gets wrapped in a RejectedExecutionException
        if (e instanceof RejectedExecutionException
                && (e.getCause() == null || !(e.getCause() instanceof InterruptedException))) {
            throw e;
        }

        // if we were interrupted because we were asked to stop, handle the exception and return success, else rethrow
        if (!stopRequested.get()) {
            Thread.currentThread().interrupt();
            throw e;
        }

        log.info("The task was asked to stop before completing");
    } catch (Exception e) {
        // (3) catch all other exceptions thrown for the whole ingestion steps including the final publishing.
        caughtExceptionOuter = e;
        try {
            Futures.allAsList(publishWaitList).cancel(true);
            Futures.allAsList(handOffWaitList).cancel(true);
            if (appenderator != null) {
                appenderator.closeNow();
            }
        } catch (Exception e2) {
            e.addSuppressed(e2);
        }
        throw e;
    } finally {
        try {
            if (driver != null) {
                driver.close();
            }
            if (chatHandlerProvider.isPresent()) {
                chatHandlerProvider.get().unregister(task.getId());
            }

            toolbox.getDruidNodeAnnouncer().unannounce(discoveryDruidNode);
            toolbox.getDataSegmentServerAnnouncer().unannounce();
        } catch (Exception e) {
            if (caughtExceptionOuter != null) {
                caughtExceptionOuter.addSuppressed(e);
            } else {
                throw e;
            }
        }
    }

    toolbox.getTaskReportFileWriter().write(getTaskCompletionReports(null));
    return TaskStatus.success(task.getId());
}

From source file:org.hawkular.alerts.engine.impl.CassActionsServiceImpl.java

@Override
public int deleteActions(String tenantId, ActionsCriteria criteria) throws Exception {
    if (isEmpty(tenantId)) {
        throw new IllegalArgumentException("TenantId must be not null");
    }
    if (null == criteria) {
        throw new IllegalArgumentException("Criteria must be not null");
    }

    List<Action> actionsToDelete = getActions(tenantId, criteria, null);
    if (actionsToDelete == null || actionsToDelete.isEmpty()) {
        return 0;
    }

    PreparedStatement deleteActionHistory = CassStatement.get(session, CassStatement.DELETE_ACTION_HISTORY);
    PreparedStatement deleteActionHistoryAction = CassStatement.get(session,
            CassStatement.DELETE_ACTION_HISTORY_ACTION);
    PreparedStatement deleteActionHistoryAlert = CassStatement.get(session,
            CassStatement.DELETE_ACTION_HISTORY_ALERT);
    PreparedStatement deleteActionHistoryCtime = CassStatement.get(session,
            CassStatement.DELETE_ACTION_HISTORY_CTIME);
    PreparedStatement deleteActionHistoryResult = CassStatement.get(session,
            CassStatement.DELETE_ACTION_HISTORY_RESULT);

    for (Action action : actionsToDelete) {
        List<ResultSetFuture> futures = new ArrayList<>();
        futures.add(session.executeAsync(deleteActionHistory.bind(action.getTenantId(),
                action.getActionPlugin(), action.getActionId(), action.getEvent().getId(), action.getCtime())));
        futures.add(session.executeAsync(deleteActionHistoryAction.bind(action.getTenantId(),
                action.getActionId(), action.getActionPlugin(), action.getEvent().getId(), action.getCtime())));
        futures.add(session.executeAsync(deleteActionHistoryAlert.bind(action.getTenantId(),
                action.getEvent().getId(), action.getActionPlugin(), action.getActionId(), action.getCtime())));
        futures.add(session.executeAsync(deleteActionHistoryCtime.bind(action.getTenantId(), action.getCtime(),
                action.getActionPlugin(), action.getActionId(), action.getEvent().getId())));
        futures.add(session.executeAsync(deleteActionHistoryResult.bind(action.getTenantId(),
                action.getResult(), action.getActionPlugin(), action.getActionId(), action.getEvent().getId(),
                action.getCtime())));
        Futures.allAsList(futures).get();
    }

    return actionsToDelete.size();
}

From source file:org.apache.brooklyn.core.entity.Entities.java

/**
 * Stops, destroys, and unmanages all apps in the given context, and then terminates the management context.
 *
 * Apps will be stopped+destroyed+unmanaged concurrently, waiting for all to complete.
 */
public static void destroyAll(final ManagementContext mgmt) {
    if (mgmt instanceof NonDeploymentManagementContext) {
        // log here because it is easy for tests to destroyAll(app.getMgmtContext())
        // which will *not* destroy the mgmt context if the app has been stopped!
        log.warn("Entities.destroyAll invoked on non-deployment " + mgmt + " - not likely to have much effect! "
                + "(This usually means the mgmt context has been taken from an entity that has been destroyed. "
                + "To destroy other things on the management context ensure you keep a handle to the context "
                + "before the entity is destroyed, such as by creating the management context first.)");
    }
    if (!mgmt.isRunning())
        return;

    ListeningExecutorService executor = MoreExecutors.listeningDecorator(Executors.newCachedThreadPool());
    List<ListenableFuture<?>> futures = Lists.newArrayList();
    final AtomicReference<Exception> error = Atomics.newReference();
    try {
        log.debug("destroying all apps in " + mgmt + ": " + mgmt.getApplications());
        for (final Application app : mgmt.getApplications()) {
            futures.add(executor.submit(new Runnable() {
                public void run() {
                    log.debug("destroying app " + app + " (managed? " + isManaged(app) + "; mgmt is " + mgmt
                            + ")");
                    try {
                        destroy(app);
                        log.debug("destroyed app " + app + "; mgmt now " + mgmt);
                    } catch (Exception e) {
                        log.warn("problems destroying app " + app + " (mgmt now " + mgmt
                                + ", will rethrow at least one exception): " + e);
                        error.compareAndSet(null, e);
                    }
                }
            }));
        }
        Futures.allAsList(futures).get();

        for (Location loc : mgmt.getLocationManager().getLocations()) {
            destroyCatching(loc);
        }
        if (mgmt instanceof ManagementContextInternal) {
            ((ManagementContextInternal) mgmt).terminate();
        }
        if (error.get() != null)
            throw Exceptions.propagate(error.get());
    } catch (Exception e) {
        if (!mgmt.isRunning()) {
            // we've checked this above so it would only happen if a different thread stopped it;
            // this does happen sometimes e.g. in CliTest where the server shutdown occurs concurrently
            log.debug(
                    "Destroying apps gave an error, but mgmt context was concurrently stopped so not really a problem; swallowing (unless fatal): "
                            + e);
            Exceptions.propagateIfFatal(e);
        } else {
            throw Exceptions.propagate(e);
        }
    } finally {
        executor.shutdownNow();
    }
}

From source file:com.microsoftopentechnologies.intellij.helpers.o365.Office365RestAPIManager.java

@Override
public ListenableFuture<List<ServicePrincipal>> getO365ServicePrincipalsForApp(
        @NotNull final Application application) throws ParseException {
    return requestWithToken(new RequestCallback<List<ServicePrincipal>>() {
        @Override
        public ListenableFuture<List<ServicePrincipal>> execute() throws ParseException {
            @SuppressWarnings("unchecked")
            ListenableFuture<List<ServicePrincipal>>[] futures = new ListenableFuture[] {
                    getServicePrincipalsForApp(application), getServicePrincipalsForO365() };

            final String[] filterAppIds = new String[] { ServiceAppIds.SHARE_POINT, ServiceAppIds.EXCHANGE,
                    ServiceAppIds.AZURE_ACTIVE_DIRECTORY };

            return Futures.transform(Futures.allAsList(futures),
                    new AsyncFunction<List<List<ServicePrincipal>>, List<ServicePrincipal>>() {
                        @Override
                        public ListenableFuture<List<ServicePrincipal>> apply(
                                List<List<ServicePrincipal>> lists) throws Exception {
                            // According to Guava documentation for allAsList, the list of results is in the
                            // same order as the input list. So first we get the service principals for the app
                            // filtered for O365 and Graph service principals.
                            final List<ServicePrincipal> servicePrincipalsForApp = Lists.newArrayList(
                                    Iterables.filter(lists.get(0), new Predicate<ServicePrincipal>() {
                                        @Override
                                        public boolean apply(final ServicePrincipal servicePrincipal) {
                                            // we are only interested in O365 and Graph service principals
                                            return Iterators.any(Iterators.forArray(filterAppIds),
                                                    new Predicate<String>() {
                                                        @Override
                                                        public boolean apply(String appId) {
                                                            return appId.equals(servicePrincipal.getappId());
                                                        }
                                                    });
                                        }
                                    }));

                            // next we get the O365/graph service principals
                            final List<ServicePrincipal> servicePrincipalsForO365 = lists.get(1);

                            // then we add service principals from servicePrincipalsForO365 to servicePrincipalsForApp
                            // where the service principal is not available in the latter
                            Iterable<ServicePrincipal> servicePrincipalsToBeAdded = Iterables
                                    .filter(servicePrincipalsForO365, new Predicate<ServicePrincipal>() {
                                        @Override
                                        public boolean apply(ServicePrincipal servicePrincipal) {
                                            return !servicePrincipalsForApp.contains(servicePrincipal);
                                        }
                                    });
                            Iterables.addAll(servicePrincipalsForApp, servicePrincipalsToBeAdded);

                            // assign the appid to the service principal and reset permissions on new service principals;
                            // we do Lists.newArrayList calls below to create a copy of the service lists because Lists.transform
                            // invokes the transformation function lazily and this causes problems for us; we force immediate
                             // evaluation of our transformer by copying the elements to a new list
                            List<ServicePrincipal> servicePrincipals = Lists
                                    .newArrayList(Lists.transform(servicePrincipalsForApp,
                                            new Function<ServicePrincipal, ServicePrincipal>() {
                                                @Override
                                                public ServicePrincipal apply(
                                                        ServicePrincipal servicePrincipal) {
                                                    if (!servicePrincipal.getappId()
                                                            .equals(application.getappId())) {
                                                        servicePrincipal.setappId(application.getappId());
                                                        servicePrincipal.setoauth2Permissions(
                                                                Lists.newArrayList(Lists.transform(
                                                                        servicePrincipal.getoauth2Permissions(),
                                                                        new Function<OAuth2Permission, OAuth2Permission>() {
                                                                            @Override
                                                                            public OAuth2Permission apply(
                                                                                    OAuth2Permission oAuth2Permission) {
                                                                                oAuth2Permission
                                                                                        .setisEnabled(false);
                                                                                return oAuth2Permission;
                                                                            }
                                                                        })));
                                                    }

                                                    return servicePrincipal;
                                                }
                                            }));

                            return Futures.immediateFuture(servicePrincipals);
                        }
                    });
        }
    });
}

From source file:com.b2international.snowowl.snomed.api.impl.SnomedMergeReviewServiceImpl.java

private Set<ISnomedBrowserMergeReviewDetail> getConceptDetails(final MergeReview mergeReview,
        final List<ExtendedLocale> extendedLocales) throws InterruptedException, ExecutionException {
    final String sourcePath = mergeReview.sourcePath();
    final String targetPath = mergeReview.targetPath();

    final Set<String> filteredConceptIds = getFilteredMergeReviewIntersection(mergeReview);

    final List<ListenableFuture<ISnomedBrowserMergeReviewDetail>> changeFutures = Lists.newArrayList();
    final MergeReviewParameters parameters = new MergeReviewParameters(sourcePath, targetPath, extendedLocales,
            mergeReview.id());

    for (final String conceptId : filteredConceptIds) {
        changeFutures.add(executorService.submit(new ComputeMergeReviewCallable(conceptId, parameters)));
    }

    // Filter out all irrelevant detail objects
    final List<ISnomedBrowserMergeReviewDetail> changes = Futures.allAsList(changeFutures).get();
    final Set<ISnomedBrowserMergeReviewDetail> relevantChanges = changes.stream()
            .filter(change -> change != SKIP_DETAIL).collect(toSet());

    LOG.debug("Merge review {} count: {} initial, {} filtered", mergeReview.id(), changes.size(),
            relevantChanges.size());

    return relevantChanges;
}

From source file:org.opendaylight.vbd.impl.VbdBridgeDomain.java

private ListenableFuture<Void> addVxlanTunnel(final NodeId sourceNode) {
    final KeyedInstanceIdentifier<Node, NodeKey> iiToSrcVpp = nodesToVpps.get(sourceNode).iterator().next();
    List<ListenableFuture<Void>> cumulativeTask = new ArrayList<>();

    LOG.debug("adding tunnel to vpp node {} (vbd node is {})", PPrint.node(iiToSrcVpp), sourceNode.getValue());
    for (final NodeId dstNode : getNodePeers(sourceNode)) {
        List<ListenableFuture<Void>> perPeerTask = new ArrayList<>();
        final KeyedInstanceIdentifier<Node, NodeKey> iiToDstVpp = nodesToVpps.get(dstNode).iterator().next();
        final Integer srcVxlanTunnelId = tunnelIdAllocator.nextIdFor(iiToSrcVpp);
        final Integer dstVxlanTunnelId = tunnelIdAllocator.nextIdFor(iiToDstVpp);
        final List<Ipv4AddressNoZone> endpoints = getTunnelEndpoints(iiToSrcVpp, iiToDstVpp);

        Preconditions.checkState(endpoints.size() == 2,
                "Got IP address list with wrong size (should be 2, actual size is " + endpoints.size() + ")");

        final Ipv4AddressNoZone ipAddressSrcVpp = endpoints.get(SOURCE_VPP_INDEX);
        final Ipv4AddressNoZone ipAddressDstVpp = endpoints.get(DESTINATION_VPP_INDEX);
        LOG.debug(
                "All required IP addresses for creating tunnel were obtained. (src: {} (node {}), dst: {} (node {}))",
                ipAddressSrcVpp.getValue(), sourceNode.getValue(), ipAddressDstVpp.getValue(),
                dstNode.getValue());

        String distinguisher = VbdUtil.deriveDistinguisher(config);
        // Termination Points
        LOG.debug("Adding term point to dst node {}", dstNode.getValue());
        perPeerTask
                .add(addTerminationPoint(topology.child(Node.class, new NodeKey(dstNode)), dstVxlanTunnelId));
        LOG.debug("Adding term point to src node {}", sourceNode.getValue());
        perPeerTask.add(
                addTerminationPoint(topology.child(Node.class, new NodeKey(sourceNode)), srcVxlanTunnelId));

        // Links between termination points
        perPeerTask.add(addLinkBetweenTerminationPoints(sourceNode, dstNode, srcVxlanTunnelId, dstVxlanTunnelId,
                distinguisher));
        perPeerTask.add(addLinkBetweenTerminationPoints(dstNode, sourceNode, srcVxlanTunnelId, dstVxlanTunnelId,
                distinguisher));

        // Virtual interfaces
        perPeerTask.add(vppModifier.createVirtualInterfaceOnVpp(ipAddressSrcVpp, ipAddressDstVpp, iiToSrcVpp,
                srcVxlanTunnelId));
        perPeerTask.add(vppModifier.createVirtualInterfaceOnVpp(ipAddressDstVpp, ipAddressSrcVpp, iiToDstVpp,
                dstVxlanTunnelId));

        final ListenableFuture<List<Void>> processedPerPeerTask = Futures.allAsList(perPeerTask);
        cumulativeTask.add(transform(processedPerPeerTask));
    }
    final ListenableFuture<List<Void>> processedCumulativeTask = Futures.allAsList(cumulativeTask);
    return transform(processedCumulativeTask);
}
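
The method above nests allAsList at two levels: each peer's tasks are rolled up into processedPerPeerTask, and the per-peer results are rolled up again into processedCumulativeTask. A generic sketch of that nesting, assuming only a ListeningExecutorService and plain Callable tasks (all names are illustrative), would be:

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;

final class NestedAllAsList {
    /**
     * Submits each group of tasks, combines each group with allAsList,
     * then combines the group futures into one future for the whole batch.
     */
    static ListenableFuture<List<List<Integer>>> runGroups(
            ListeningExecutorService executor, List<List<Callable<Integer>>> groups) {
        List<ListenableFuture<List<Integer>>> groupFutures = new ArrayList<>();
        for (List<Callable<Integer>> group : groups) {
            List<ListenableFuture<Integer>> perGroup = new ArrayList<>();
            for (Callable<Integer> task : group) {
                perGroup.add(executor.submit(task));
            }
            // One future per group, completing when every task in the group succeeds.
            groupFutures.add(Futures.allAsList(perGroup));
        }
        // One future for the whole batch, completing when every group succeeds.
        return Futures.allAsList(groupFutures);
    }
}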

From source file:org.hawkular.alerts.engine.impl.CassAlertsServiceImpl.java

private Set<String> filterByTriggers(String tenantId, AlertsCriteria criteria) throws Exception {
    Set<String> result = Collections.emptySet();
    Set<String> triggerIds = extractTriggerIds(tenantId, criteria);

    if (triggerIds.size() > 0) {
        PreparedStatement selectAlertsTriggers = CassStatement.get(session, CassStatement.SELECT_ALERT_TRIGGER);
        List<ResultSetFuture> futures = triggerIds.stream()
                .map(triggerId -> session.executeAsync(selectAlertsTriggers.bind(tenantId, triggerId)))
                .collect(Collectors.toList());
        List<ResultSet> rsAlertIdsByTriggerIds = Futures.allAsList(futures).get();

        Set<String> alertIds = new HashSet<>();
        rsAlertIdsByTriggerIds.stream().forEach(r -> {
            for (Row row : r) {
                String alertId = row.getString("alertId");
                alertIds.add(alertId);
            }
        });
        result = alertIds;
    }

    return result;
}