Example usage for org.apache.commons.lang3.tuple Pair getRight

List of usage examples for org.apache.commons.lang3.tuple Pair getRight

Introduction

On this page you can find example usages for org.apache.commons.lang3.tuple Pair getRight.

Prototype

public abstract R getRight();

Document

Gets the right element from this pair.

When treated as a key-value pair, this is the value.
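
A minimal, self-contained sketch (not taken from any of the source files below) showing how getRight pairs with getLeft, and how it relates to the Map.Entry view of a Pair:

import org.apache.commons.lang3.tuple.Pair;

public class PairGetRightExample {
    public static void main(String[] args) {
        // Pair.of creates an immutable pair; getLeft returns the first
        // element, getRight the second.
        Pair<String, Integer> entry = Pair.of("count", 42);
        System.out.println(entry.getLeft());   // count
        System.out.println(entry.getRight());  // 42

        // Pair implements java.util.Map.Entry, so when the pair is treated
        // as a key-value mapping, getValue() returns the same element as
        // getRight().
        System.out.println(entry.getValue());  // 42
    }
}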

Usage

From source file:io.prestosql.plugin.accumulo.index.ColumnCardinalityCache.java

/**
 * Gets the cardinality for each {@link AccumuloColumnConstraint}.
 * The given constraints are expected to be indexed; behavior is undefined otherwise.
 *
 * @param schema Schema name
 * @param table Table name
 * @param auths Scan authorizations
 * @param idxConstraintRangePairs Mapping of all ranges for a given constraint
 * @param earlyReturnThreshold Smallest acceptable cardinality to return early while other tasks complete
 * @param pollingDuration Duration for polling the cardinality completion service
 * @return An immutable multimap of cardinality to column constraint, sorted by cardinality from smallest to largest
 * @throws TableNotFoundException If the metrics table does not exist
 * @throws ExecutionException If another error occurs while fetching cardinalities
 */
public Multimap<Long, AccumuloColumnConstraint> getCardinalities(String schema, String table,
        Authorizations auths, Multimap<AccumuloColumnConstraint, Range> idxConstraintRangePairs,
        long earlyReturnThreshold, Duration pollingDuration) {
    // Submit tasks to the executor to fetch column cardinality, adding it to the Guava cache if necessary
    CompletionService<Pair<Long, AccumuloColumnConstraint>> executor = new ExecutorCompletionService<>(
            executorService);
    idxConstraintRangePairs.asMap().forEach((key, value) -> executor.submit(() -> {
        long cardinality = getColumnCardinality(schema, table, auths, key.getFamily(), key.getQualifier(),
                value);
        LOG.debug("Cardinality for column %s is %s", key.getName(), cardinality);
        return Pair.of(cardinality, key);
    }));

    // Create a multimap sorted by cardinality
    ListMultimap<Long, AccumuloColumnConstraint> cardinalityToConstraints = MultimapBuilder.treeKeys()
            .arrayListValues().build();
    try {
        boolean earlyReturn = false;
        int numTasks = idxConstraintRangePairs.asMap().entrySet().size();
        do {
            // Sleep for the polling duration to allow concurrent tasks to run for this time
            Thread.sleep(pollingDuration.toMillis());

            // Poll each task, retrieving the result if it is done
            for (int i = 0; i < numTasks; ++i) {
                Future<Pair<Long, AccumuloColumnConstraint>> futureCardinality = executor.poll();
                if (futureCardinality != null && futureCardinality.isDone()) {
                    Pair<Long, AccumuloColumnConstraint> columnCardinality = futureCardinality.get();
                    cardinalityToConstraints.put(columnCardinality.getLeft(), columnCardinality.getRight());
                }
            }

            // If the smallest cardinality is present and below the threshold, set the earlyReturn flag
            Optional<Entry<Long, AccumuloColumnConstraint>> smallestCardinality = cardinalityToConstraints
                    .entries().stream().findFirst();
            if (smallestCardinality.isPresent()) {
                if (smallestCardinality.get().getKey() <= earlyReturnThreshold) {
                LOG.info("Cardinality %s is below threshold. Returning early while other tasks finish",
                        smallestCardinality);
                    earlyReturn = true;
                }
            }
        } while (!earlyReturn && cardinalityToConstraints.entries().size() < numTasks);
    } catch (ExecutionException | InterruptedException e) {
        if (e instanceof InterruptedException) {
            Thread.currentThread().interrupt();
        }
        throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Exception when getting cardinality", e);
    }

    // Create a copy of the cardinalities
    return ImmutableMultimap.copyOf(cardinalityToConstraints);
}

From source file:com.facebook.presto.accumulo.index.ColumnCardinalityCache.java

/**
 * Gets the cardinality for each {@link AccumuloColumnConstraint}.
 * The given constraints are expected to be indexed; behavior is undefined otherwise.
 *
 * @param schema Schema name
 * @param table Table name
 * @param auths Scan authorizations
 * @param idxConstraintRangePairs Mapping of all ranges for a given constraint
 * @param earlyReturnThreshold Smallest acceptable cardinality to return early while other tasks complete
 * @param pollingDuration Duration for polling the cardinality completion service
 * @return An immutable multimap of cardinality to column constraint, sorted by cardinality from smallest to largest
 * @throws TableNotFoundException If the metrics table does not exist
 * @throws ExecutionException If another error occurs while fetching cardinalities
 */
public Multimap<Long, AccumuloColumnConstraint> getCardinalities(String schema, String table,
        Authorizations auths, Multimap<AccumuloColumnConstraint, Range> idxConstraintRangePairs,
        long earlyReturnThreshold, Duration pollingDuration) throws ExecutionException, TableNotFoundException {
    // Submit tasks to the executor to fetch column cardinality, adding it to the Guava cache if necessary
    CompletionService<Pair<Long, AccumuloColumnConstraint>> executor = new ExecutorCompletionService<>(
            executorService);
    idxConstraintRangePairs.asMap().forEach((key, value) -> executor.submit(() -> {
        long cardinality = getColumnCardinality(schema, table, auths, key.getFamily(), key.getQualifier(),
                value);
        LOG.debug("Cardinality for column %s is %s", key.getName(), cardinality);
        return Pair.of(cardinality, key);
    }));

    // Create a multimap sorted by cardinality
    ListMultimap<Long, AccumuloColumnConstraint> cardinalityToConstraints = MultimapBuilder.treeKeys()
            .arrayListValues().build();
    try {
        boolean earlyReturn = false;
        int numTasks = idxConstraintRangePairs.asMap().entrySet().size();
        do {
            // Sleep for the polling duration to allow concurrent tasks to run for this time
            Thread.sleep(pollingDuration.toMillis());

            // Poll each task, retrieving the result if it is done
            for (int i = 0; i < numTasks; ++i) {
                Future<Pair<Long, AccumuloColumnConstraint>> futureCardinality = executor.poll();
                if (futureCardinality != null && futureCardinality.isDone()) {
                    Pair<Long, AccumuloColumnConstraint> columnCardinality = futureCardinality.get();
                    cardinalityToConstraints.put(columnCardinality.getLeft(), columnCardinality.getRight());
                }
            }

            // If the smallest cardinality is present and below the threshold, set the earlyReturn flag
            Optional<Entry<Long, AccumuloColumnConstraint>> smallestCardinality = cardinalityToConstraints
                    .entries().stream().findFirst();
            if (smallestCardinality.isPresent()) {
                if (smallestCardinality.get().getKey() <= earlyReturnThreshold) {
                LOG.info("Cardinality %s is below threshold. Returning early while other tasks finish",
                        smallestCardinality);
                    earlyReturn = true;
                }
            }
        } while (!earlyReturn && cardinalityToConstraints.entries().size() < numTasks);
    } catch (ExecutionException | InterruptedException e) {
        if (e instanceof InterruptedException) {
            Thread.currentThread().interrupt();
        }
        throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Exception when getting cardinality", e);
    }

    // Create a copy of the cardinalities
    return ImmutableMultimap.copyOf(cardinalityToConstraints);
}

From source file:com.minlia.cloud.framework.test.common.client.template.AbstractTestRestTemplate.java

final RequestSpecification givenReadExtendedAuthenticated() {
    final Pair<String, String> credentials = getReadExtendedCredentials();
    return auth.givenBasicAuthenticated(credentials.getLeft(), credentials.getRight());
}

From source file:com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesV2OnDemandCachingAgent.java

@Override
public OnDemandAgent.OnDemandResult handle(ProviderCache providerCache, Map<String, ?> data) {
    String account = (String) data.get("account");
    String namespace = (String) data.get("location");
    String fullName = (String) data.get("name");
    String name;
    KubernetesKind kind;

    try {
        Pair<KubernetesKind, String> parsedName = KubernetesManifest.fromFullResourceName(fullName);
        kind = parsedName.getLeft();
        if (!primaryKinds().contains(kind)) {
            return null;
        }

        name = parsedName.getRight();
    } catch (Exception e) {
        // This is OK - the cache controller tries (without much info) to get every cache agent to handle each request
        return null;
    }

    reloadNamespaces();
    if (StringUtils.isEmpty(account) || StringUtils.isEmpty(name)
            || (!StringUtils.isEmpty(namespace) && !namespaces.contains(namespace))) {
        return null;
    }

    log.info("Accepted on demand refresh of '{}'", data);
    OnDemandAgent.OnDemandResult result;
    KubernetesManifest manifest = loadPrimaryResource(kind, namespace, name);
    String resourceKey = Keys.infrastructure(kind, account, namespace, name);
    try {
        result = manifest == null ? evictEntry(providerCache, kind, resourceKey)
                : addEntry(providerCache, resourceKey, manifest);
    } catch (Exception e) {
        log.error("Failed to process update of '{}'", resourceKey, e);
        return null;
    }

    log.info("On demand cache refresh of (data: {}) succeeded", data);
    return result;
}

From source file:com.epam.catgenome.manager.TrackHelper.java

/**
 * Sets fixed bounds for a track
 * @param track Track to fix bounds
 * @param bounds Pair of bounds to set
 */
public void setBounds(final Track track, final Pair<Integer, Integer> bounds) {
    if (bounds == null) {
        return;
    }

    if (track.getStartIndex() < bounds.getLeft()) {
        track.setStartIndex(bounds.getLeft());
    }
    if (track.getEndIndex() > bounds.getRight()) {
        track.setEndIndex(bounds.getRight());
    }
}

From source file:com.streamsets.pipeline.stage.origin.mongodb.oplog.MongoDBOplogSourceIT.java

@Test
public void testWithOnlyCmdFilter() throws Exception {
    MongoDBOplogSource source = new MongoDBOplogSourceBuilder()
            .connectionString("mongodb://" + mongoContainerIp + ":" + mongoContainerMappedPort)
            //Skip old oplogs and just start with whatever this test is producing
            .initialTs(getInitialTsFromCurrentTime()).initialOrdinal(0).collection(OPLOG_COLLECTION)
            //Just filter update oplogs
            .filterOlogOpTypeFilter(Collections.singletonList(OplogOpType.CMD)).initialTs(initialTs)
            .initialOrdinal(0).build();
    SourceRunner runner = new SourceRunner.Builder(MongoDBSource.class, source).addOutputLane(LANE).build();
    MongoCursor<Document> cursor = mongoCursorFindIterable
            .filter(Filters.and(Filters.gt(MongoDBOplogSource.TIMESTAMP_FIELD, new BsonTimestamp(initialTs, 0)),
                    Filters.eq(MongoDBOplogSource.OP_TYPE_FIELD, OplogOpType.CMD.getOp())))
            .iterator();
    runner.runInit();
    String offset = "";
    List<Record> records;
    try {
        //insert some testDocuments in collection1
        Document document1 = new Document("c", 1);
        Document document2 = new Document("c", 2);
        Document document3 = new Document("c", 3);
        Document document4 = new Document("c", 4);
        Document document5 = new Document("c", 5);
        Document document6 = new Document("c", 6);
        Document document7 = new Document("c", 7);

        testDocuments.insertMany(
                Arrays.asList(document1, document2, document3, document4, document5, document6, document7));

        //Delete two records
        DeleteResult deleteResult = testDocuments.deleteMany(Filters.gt("c", 5));
        Assert.assertEquals(2, deleteResult.getDeletedCount());

        //Update by Incrementing the field "c" by 1 for testDocuments 1, 2 and 3
        UpdateResult updateResult = testDocuments.updateMany(Filters.lt("c", 4),
                new Document("$inc", new Document("c", 1)));
        Assert.assertEquals(3, updateResult.getModifiedCount());

        //Now create bunch of collections, these are the only records we should see.
        int numberOfCollectionsToCreate = 5;
        for (int i = 0; i < numberOfCollectionsToCreate; i++) {
            mongoClient.getDatabase(DATABASE).createCollection(testCollectionName + "_" + i);
        }

        Pair<String, List<Record>> runOp = runSourceAndGetOp(runner, offset);
        records = runOp.getRight();
        //Only testDocuments with "CMD" op should be selected
        Assert.assertEquals(5, records.size());
        for (Record record : records) {
            checkRecord(record, cursor.tryNext());
            checkRecordForFields(record, OplogOpType.CMD.getOp(), DATABASE + ".$cmd");
        }
    } finally {
        runner.runDestroy();
        cursor.close();
    }
}

From source file:com.act.lcms.MS2.java

private double extractMZ(double mzWanted, List<Pair<Double, Double>> intensities) {
    double intensityFound = 0;
    int numWithinPrecision = 0;
    double mzLowRange = mzWanted - MS1_MZ_TOLERANCE;
    double mzHighRange = mzWanted + MS1_MZ_TOLERANCE;
    // We expect essentially one intensity value within this precision range,
    // but complain if many masses show up.
    for (Pair<Double, Double> mz_int : intensities) {
        double mz = mz_int.getLeft();
        double intensity = mz_int.getRight();

        if (mz >= mzLowRange && mz <= mzHighRange) {
            intensityFound += intensity;
            numWithinPrecision++;
        }
    }

    if (numWithinPrecision > MAX_MZ_IN_WINDOW) {
        System.out.format("Only expected %d, but found %d in the mz range [%f, %f]\n", MAX_MZ_IN_WINDOW,
                numWithinPrecision, mzLowRange, mzHighRange);
    }

    return intensityFound;
}

From source file:com.act.lcms.plotter.WriteAndPlotMS1Results.java

public void plotSpectra(Map<String, List<XZ>> ms1s, Double maxIntensity,
        Map<String, Double> individualMaxIntensities, Map<String, Double> metlinMzs, String outPrefix,
        String fmt, boolean makeHeatmap, boolean overlayPlots) throws IOException {

    String outImg = outPrefix + "." + fmt;
    String outData = outPrefix + ".data";

    // Write data output to outfile
    try (FileOutputStream out = new FileOutputStream(outData)) {
        List<Pair<String, String>> ionAndplotID = writeMS1Values(ms1s, maxIntensity, metlinMzs, out,
                makeHeatmap);

        // writeMS1Values picks an ordering of the plots. Create two new
        // lists, plotID and yMaxesInSameOrderAsPlots, that follow the same
        // ordering and contain the plot names and y ranges respectively.
        List<Double> yMaxesInSameOrderAsPlots = new ArrayList<>();
        List<String> plotID = new ArrayList<>();
        for (Pair<String, String> plot : ionAndplotID) {
            String ion = plot.getLeft();
            Double yMax = individualMaxIntensities.get(ion);
            yMaxesInSameOrderAsPlots.add(yMax);
            plotID.add(plot.getRight());
        }
        Double[] yMaxes = yMaxesInSameOrderAsPlots.toArray(new Double[yMaxesInSameOrderAsPlots.size()]);

        // render outData to outImg using gnuplot
        Gnuplotter gp = new Gnuplotter();
        String[] plotNames = plotID.toArray(new String[plotID.size()]);

        if (makeHeatmap) {
            gp.plotHeatmap(outData, outImg, plotNames, maxIntensity, fmt);
        } else {
            if (!overlayPlots) {
                gp.plot2D(outData, outImg, plotNames, "time", maxIntensity, "intensity", fmt, null, null,
                        yMaxes, outImg + ".gnuplot");
            } else {
                gp.plotOverlayed2D(outData, outImg, plotNames, "time", maxIntensity, "intensity", fmt,
                        outImg + ".gnuplot");
            }
        }
    }
}

From source file:com.microsoft.azure.management.compute.implementation.VirtualMachineScaleSetMsiHelper.java

/**
 * Creates RBAC role assignments for the virtual machine scale set MSI service principal.
 *
 * @param scaleSet the virtual machine scale set
 * @return an observable that emits the created role assignments.
 */
Observable<RoleAssignment> createMSIRbacRoleAssignmentsAsync(final VirtualMachineScaleSet scaleSet) {
    final Func0<Observable<RoleAssignment>> empty = new Func0<Observable<RoleAssignment>>() {
        @Override
        public Observable<RoleAssignment> call() {
            clear();
            return Observable.<RoleAssignment>empty();
        }
    };
    if (!requireSetup) {
        return empty.call();
    } else if (!scaleSet.isManagedServiceIdentityEnabled()) {
        // The principal id and tenant id needs to be set before performing role assignment
        return empty.call();
    } else if (this.rolesToAssign.isEmpty() && this.roleDefinitionsToAssign.isEmpty()) {
        return empty.call();
    } else {
        return rbacManager.servicePrincipals().getByIdAsync(scaleSet.inner().identity().principalId())
                .zipWith(resolveCurrentResourceGroupScopeAsync(scaleSet),
                        new Func2<ServicePrincipal, Boolean, ServicePrincipal>() {
                            @Override
                            public ServicePrincipal call(ServicePrincipal servicePrincipal,
                                    Boolean resolvedAny) {
                                return servicePrincipal;
                            }
                        })
                .flatMap(new Func1<ServicePrincipal, Observable<RoleAssignment>>() {
                    @Override
                    public Observable<RoleAssignment> call(final ServicePrincipal servicePrincipal) {
                        Observable<RoleAssignment> observable1 = Observable.from(rolesToAssign.values())
                                .flatMap(new Func1<Pair<String, BuiltInRole>, Observable<RoleAssignment>>() {
                                    @Override
                                    public Observable<RoleAssignment> call(
                                            Pair<String, BuiltInRole> scopeAndRole) {
                                        final BuiltInRole role = scopeAndRole.getRight();
                                        final String scope = scopeAndRole.getLeft();
                                        return createRbacRoleAssignmentIfNotExistsAsync(servicePrincipal,
                                                role.toString(), scope, true);
                                    }
                                });
                        Observable<RoleAssignment> observable2 = Observable
                                .from(roleDefinitionsToAssign.values())
                                .flatMap(new Func1<Pair<String, String>, Observable<RoleAssignment>>() {
                                    @Override
                                    public Observable<RoleAssignment> call(Pair<String, String> scopeAndRole) {
                                        final String roleDefinition = scopeAndRole.getRight();
                                        final String scope = scopeAndRole.getLeft();
                                        return createRbacRoleAssignmentIfNotExistsAsync(servicePrincipal,
                                                roleDefinition, scope, false);
                                    }
                                });
                        return Observable.mergeDelayError(observable1, observable2);
                    }
                }).doAfterTerminate(new Action0() {
                    @Override
                    public void call() {
                        clear();
                    }
                });
    }
}

From source file:act.installer.pubchem.PubchemTTLMergerTest.java

public List<String> getValForKey(
        Pair<RocksDB, Map<PubchemTTLMerger.COLUMN_FAMILIES, ColumnFamilyHandle>> dbAndHandles,
        PubchemTTLMerger.COLUMN_FAMILIES columnFamily, String key) throws Exception {
    RocksDB db = dbAndHandles.getLeft();
    String columnFamilyName = columnFamily.getName();
    ColumnFamilyHandle cfh = dbAndHandles.getRight().get(columnFamily);
    byte[] keyBytes = key.getBytes();
    byte[] valBytes = db.get(cfh, keyBytes);
    try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(valBytes))) {
        return (List<String>) ois.readObject();
    }
}