Example usage for the org.apache.commons.lang3.tuple.ImmutablePair constructor

Introduction

On this page you can find example usage of the org.apache.commons.lang3.tuple.ImmutablePair constructor, collected from several open-source projects.

Prototype

public ImmutablePair(final L left, final R right) 

Document

Create a new pair instance.
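
The constructor simply stores its two arguments as the left and right elements of the pair. Below is a minimal, self-contained sketch of creating and reading a pair (the class name and values are illustrative); the sections that follow show the constructor as used in real projects.

import org.apache.commons.lang3.tuple.ImmutablePair;

public class ImmutablePairExample {
    public static void main(String[] args) {
        // Construct a pair directly; the pair cannot be changed after creation.
        ImmutablePair<String, Integer> pair = new ImmutablePair<>("answer", 42);
        System.out.println(pair.getLeft() + " = " + pair.getRight()); // answer = 42

        // The static factory ImmutablePair.of(...) is an equivalent shorthand.
        ImmutablePair<String, Integer> same = ImmutablePair.of("answer", 42);
        System.out.println(pair.equals(same)); // true
    }
}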

Usage

From source file:io.pravega.controller.store.stream.PersistentStreamBase.java

private CompletableFuture<ImmutablePair<Integer, Integer>> scaleCreateNewSegments(
        final List<SimpleEntry<Double, Double>> newRanges, final long scaleTimestamp, Data<T> segmentTable,
        final int activeEpoch) {
    final int nextSegmentNumber = TableHelper.getSegmentCount(segmentTable.getData());
    final byte[] updated = TableHelper.updateSegmentTable(nextSegmentNumber, segmentTable.getData(), newRanges,
            scaleTimestamp);

    final Data<T> updatedData = new Data<>(updated, segmentTable.getVersion());

    return setSegmentTable(updatedData).thenApply(z -> new ImmutablePair<>(activeEpoch, nextSegmentNumber))
            .thenCompose(response -> updateState(State.SCALING).thenApply(x -> response));
}
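
Here the pair lets a single CompletableFuture carry two related results, the active epoch and the next segment number, back to the caller. A minimal, self-contained sketch of the same pattern (the class name and values are made up):

import java.util.concurrent.CompletableFuture;
import org.apache.commons.lang3.tuple.ImmutablePair;

public class PairFutureExample {
    // Return two related values from one async computation by wrapping them in a pair.
    static CompletableFuture<ImmutablePair<Integer, Integer>> compute() {
        return CompletableFuture.supplyAsync(() -> new ImmutablePair<>(7, 42));
    }

    public static void main(String[] args) {
        compute().thenAccept(pair ->
                System.out.println("left=" + pair.getLeft() + ", right=" + pair.getRight()))
                .join();
    }
}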

From source file:com.pinterest.terrapin.controller.HdfsManagerTest.java

public void testRebalanceLoop(boolean lockLatestFileSet) throws Exception {
    List<String> resources = Lists.newArrayList();
    // This data set is locked if @lockLatestFileSet is true, otherwise the dataset is
    // from an orphaned job.
    resources.add(getResource(FILESET, 100));
    // This data set is from an orphaned job. This will not be deleted (since it's new).
    resources.add(getResource(FILESET, 3600));
    // This is the current serving version. This will be rebalanced.
    resources.add(getResource(FILESET, 5400));
    // This is an older version, also in serving. This will be rebalanced.
    resources.add(getResource(FILESET, 6000));
    // This is an older version, in serving. This will be offlined.
    resources.add(getResource(FILESET, 7500));
    // This is an older version, not in serving/offlined. This will be deleted
    // but data on HDFS will remain.
    resources.add(getResource(FILESET, 8000));
    // No corresponding helix resource exists for this directory. Its HDFS data will be deleted.
    resources.add(getResource(FILESET, 8100));
    when(mockHelixAdmin.getResourcesInCluster(CLUSTER_NAME))
            .thenReturn(resources.subList(0, resources.size() - 1));

    FileSetInfo lockedFsInfo = null;
    if (lockLatestFileSet) {
        lockedFsInfo = new FileSetInfo(FILESET, TerrapinUtil.helixResourceToHdfsDir(resources.get(0)), 2,
                Lists.<FileSetInfo.ServingInfo>newArrayList(), new Options().setNumVersionsToKeep(2));
    }
    FileSetInfo.ServingInfo oldVersionFsServingInfo = new FileSetInfo.ServingInfo(
            TerrapinUtil.helixResourceToHdfsDir(resources.get(3)), resources.get(2), 2,
            PartitionerType.MODULUS);
    FileSetInfo fsInfo = new FileSetInfo(FILESET, TerrapinUtil.helixResourceToHdfsDir(resources.get(2)), 2,
            Lists.newArrayList(oldVersionFsServingInfo), new Options().setNumVersionsToKeep(2));
    when(mockZkManager.getCandidateHdfsDirMap())
            .thenReturn((Map) ImmutableMap.of(FILESET, new ImmutablePair<>(fsInfo, lockedFsInfo)));
    setupBaseDirListing(Lists.newArrayList(FILESET));
    setupListingForFileSet(FILESET, resources, 2);

    // For the second resource, set up the block mapping.
    String servingResource = resources.get(2);
    String oldVersionResource = resources.get(3);
    String toBeOfflinedResource = resources.get(4);
    Map<Integer, List<String>> servingHdfsMapping = (Map) ImmutableMap.of(0, ImmutableList.of("host1", "host2"),
            1, ImmutableList.of("host2", "host3"));
    Map<Integer, List<String>> oldVersionHdfsMapping = (Map) ImmutableMap.of(0,
            ImmutableList.of("host1", "host2"), 1, ImmutableList.of("host2", "host3"));
    setupBlockLocations(servingResource, servingHdfsMapping);
    setupBlockLocations(oldVersionResource, oldVersionHdfsMapping);
    testRoutingTableProvider.setOnlinePartitionMap((Map) ImmutableMap.of(servingResource, ImmutableMap.of(
            TerrapinUtil.getViewPartitionName(servingResource, 0), ImmutableList.of(hostConfig1, hostConfig2),
            TerrapinUtil.getViewPartitionName(servingResource, 1), ImmutableList.of(hostConfig1, hostConfig2)),
            oldVersionResource,
            ImmutableMap.of(TerrapinUtil.getViewPartitionName(oldVersionResource, 0),
                    ImmutableList.of(hostConfig1, hostConfig2),
                    TerrapinUtil.getViewPartitionName(oldVersionResource, 1),
                    ImmutableList.of(hostConfig2, hostConfig3)),
            toBeOfflinedResource,
            ImmutableMap.of(TerrapinUtil.getViewPartitionName(toBeOfflinedResource, 0),
                    ImmutableList.of(hostConfig1, hostConfig2),
                    TerrapinUtil.getViewPartitionName(toBeOfflinedResource, 1),
                    ImmutableList.of(hostConfig1, hostConfig2))));
    hdfsManager.createAndGetRebalancer().reconcileAndRebalance();

    checkIdealStateModified(servingResource, servingHdfsMapping, 2);
    checkIdealStateModified(toBeOfflinedResource, (Map) Maps.newHashMap(), 2);
    for (String resource : resources) {
        // For other resources, confirm that we do not modify the ideal state.
        if (!resource.equals(toBeOfflinedResource) && !resource.equals(servingResource)) {
            checkIdealStateNotModified(resource);
        }
        // Confirm that we only retrieve HDFS block locations for serving versions.
        if (!resource.equals(servingResource) && !resource.equals(oldVersionResource)) {
            checkHdfsBlocksNotRetrieved(resource, 2);
        } else {
            checkHdfsBlocksRetrieved(resource, 2);
        }
    }
    // This resource has already been offlined, so both the resource and its data are cleaned up.
    String toBeDeletedResource = resources.get(5);
    // The corresponding resource does not exist, but there is data on HDFS that must be deleted.
    String orphanedDataResource = resources.get(6);
    for (String resource : resources) {
        if (resource.equals(toBeDeletedResource)) {
            checkResourceDeleted(resource);
            checkHdfsDataDeleted(resource);
            checkViewInfoDeleted(resource);
        } else if (resource.equals(orphanedDataResource)) {
            checkHdfsDataDeleted(resource);
            checkResourceNotDeleted(resource);
            checkViewInfoNotDeleted(resource);
        } else {
            checkResourceNotDeleted(resource);
            checkHdfsDataNotDeleted(resource);
            checkViewInfoNotDeleted(resource);
        }
    }
}

From source file:com.linkedin.pinot.core.query.aggregation.groupby.AggregationGroupByOperatorService.java

/**
 * Given a group by result, returns the result trimmed to the provided size.
 * The sort order is determined by the aggregation function.
 *
 * @param aggregationFunction      aggregation function that produced the result
 * @param aggregationGroupByResult group by result to trim
 * @param trimSize                 maximum number of groups to keep
 * @return the trimmed group by result
 */
private Map<String, Serializable> trimToSize(AggregationFunction aggregationFunction,
        Map<String, Serializable> aggregationGroupByResult, int trimSize) {

    boolean reverseOrder = aggregationFunction.getFunctionName().startsWith(MIN_PREFIX);
    MinMaxPriorityQueue<ImmutablePair<Serializable, String>> minMaxPriorityQueue = getMinMaxPriorityQueue(
            aggregationGroupByResult.values().iterator().next(), trimSize, reverseOrder);

    if (minMaxPriorityQueue == null) {
        return aggregationGroupByResult;
    }

    // The MinMaxPriorityQueue will add only the TOP N elements.
    for (String groupedKey : aggregationGroupByResult.keySet()) {
        minMaxPriorityQueue.add(new ImmutablePair<>(aggregationGroupByResult.get(groupedKey), groupedKey));
    }

    Map<String, Serializable> trimmedResult = new HashMap<>();
    ImmutablePair<Serializable, String> pair;
    while ((pair = minMaxPriorityQueue.pollFirst()) != null) {
        trimmedResult.put(pair.getRight(), pair.getLeft());
    }
    return trimmedResult;
}
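
In this example the pair's left slot carries the aggregation value and the right slot the group key, so the queue orders entries purely by value. A stripped-down sketch of that ordering with a plain PriorityQueue (group names and values are made up):

import java.util.Comparator;
import java.util.PriorityQueue;
import org.apache.commons.lang3.tuple.ImmutablePair;

public class PairOrderingExample {
    public static void main(String[] args) {
        // Order (value, groupKey) pairs by the left element only.
        Comparator<ImmutablePair<Double, String>> byValue = Comparator.comparing(ImmutablePair::getLeft);
        PriorityQueue<ImmutablePair<Double, String>> queue = new PriorityQueue<>(byValue);

        queue.add(new ImmutablePair<>(3.0, "groupA"));
        queue.add(new ImmutablePair<>(1.0, "groupB"));
        queue.add(new ImmutablePair<>(2.0, "groupC"));

        ImmutablePair<Double, String> smallest = queue.poll();
        System.out.println(smallest.getRight() + " -> " + smallest.getLeft()); // groupB -> 1.0
    }
}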

From source file:com.hortonworks.streamline.streams.metrics.storm.topology.StormTopologyMetricsImpl.java

private Map<String, ?> getComponentInfo(String topologyId, String componentId, String asUser) {
    LOG.debug("[START] getComponentInfo - topology id: {}, component id: {}, asUser: {}", topologyId,
            componentId, asUser);
    Stopwatch stopwatch = Stopwatch.createStarted();

    try {
        Map<String, ?> responseMap;
        try {
            responseMap = componentRetrieveCache
                    .get(new ImmutablePair<>(new ImmutablePair<>(topologyId, componentId), asUser));
        } catch (ExecutionException e) {
            if (e.getCause() != null) {
                throw new RuntimeException(e.getCause());
            } else {
                throw new RuntimeException(e);
            }

        } catch (UncheckedExecutionException e) {
            if (e.getCause() != null) {
                throw (RuntimeException) e.getCause();
            } else {
                throw new RuntimeException(e);
            }
        }

        LOG.debug("[END] getComponentInfo - topology id: {}, component id: {}, elapsed: {} ms", topologyId,
                componentId, stopwatch.elapsed(TimeUnit.MILLISECONDS));

        return responseMap;
    } finally {
        stopwatch.stop();
    }
}

From source file:jgnash.report.pdf.Report.java

/**
 * Writes a table section to the report.
 *
 * @param reportModel   report model
 * @param group         report group
 * @param contentStream PDF content stream
 * @param startRow      starting row
 * @param columnWidths  column widths
 * @param yStart        start location from top of the page
 * @return returns the last reported row of the group and yDoc location
 * @throws IOException IO exception
 */
@SuppressWarnings("SuspiciousNameCombination")
private Pair<Integer, Float> addTableSection(final AbstractReportTableModel reportModel,
        @NotNull final String group, final PDPageContentStream contentStream, final int startRow,
        float[] columnWidths, float yStart) throws IOException {

    Objects.requireNonNull(group);

    int rowsWritten = 0; // number of rows written for this group

    // establish start location, use half the row height as the vertical margin between title and table
    final float yTop = (float) getPageFormat().getHeight() - getTableRowHeight() / 2 - yStart;

    float xPos = getLeftMargin() + getCellPadding();
    float yPos = yTop - getTableRowHeight() + getRowTextBaselineOffset();

    contentStream.setFont(getHeaderFont(), getBaseFontSize());

    // add the header
    contentStream.setNonStrokingColor(headerBackground);
    fillRect(contentStream, getLeftMargin(), yTop - getTableRowHeight(), getAvailableWidth(),
            getTableRowHeight());

    contentStream.setNonStrokingColor(headerTextColor);

    for (int i = 0; i < reportModel.getColumnCount(); i++) {
        if (reportModel.isColumnVisible(i)) {
            float shift = 0;
            float availWidth = columnWidths[i] - getCellPadding() * 2;

            final String text = truncateText(reportModel.getColumnName(i), availWidth, getHeaderFont(),
                    getBaseFontSize());

            if (rightAlign(i, reportModel)) {
                shift = availWidth - getStringWidth(text, getHeaderFont(), getBaseFontSize());
            }

            drawText(contentStream, xPos + shift, yPos, text);

            xPos += columnWidths[i];
        }
    }

    // add the rows
    contentStream.setFont(getTableFont(), getBaseFontSize());
    contentStream.setNonStrokingColor(Color.BLACK);

    int row = startRow;

    final float bottomMargin = getBottomMargin();

    while (yPos > bottomMargin + getTableRowHeight() && row < reportModel.getRowCount()) {

        final String rowGroup = reportModel.getGroup(row);

        if (group.equals(rowGroup)) {

            xPos = getLeftMargin() + getCellPadding();
            yPos -= getTableRowHeight();

            for (int i = 0; i < reportModel.getColumnCount(); i++) {

                if (reportModel.isColumnVisible(i)) {

                    final Object value = reportModel.getValueAt(row, i);

                    if (value != null) {
                        float shift = 0;
                        float availWidth = columnWidths[i] - getCellPadding() * 2;

                        final String text = truncateText(
                                formatValue(reportModel.getValueAt(row, i), i, reportModel), availWidth,
                                getTableFont(), getBaseFontSize());

                        if (rightAlign(i, reportModel)) {
                            shift = availWidth - getStringWidth(text, getTableFont(), getBaseFontSize());
                        }

                        drawText(contentStream, xPos + shift, yPos, text);
                    }

                    xPos += columnWidths[i];
                }
            }

            rowsWritten++;
        }
        row++;
    }

    // add row lines
    yPos = yTop;
    xPos = getLeftMargin();

    for (int r = 0; r <= rowsWritten + 1; r++) {
        drawLine(contentStream, xPos, yPos, getAvailableWidth() + getLeftMargin(), yPos);
        yPos -= getTableRowHeight();
    }

    // add column lines
    yPos = yTop;
    xPos = getLeftMargin();

    for (int i = 0; i < reportModel.getColumnCount(); i++) {
        if (reportModel.isColumnVisible(i)) {
            drawLine(contentStream, xPos, yPos, xPos, yPos - getTableRowHeight() * (rowsWritten + 1));
            xPos += columnWidths[i];
        }
    }

    // end of last column
    drawLine(contentStream, xPos, yPos, xPos, yPos - getTableRowHeight() * (rowsWritten + 1));

    float yDoc = (float) getPageFormat().getHeight() - (yPos - getTableRowHeight() * (rowsWritten + 1));

    // return the row and docY position
    return new ImmutablePair<>(row, yDoc);
}

From source file:io.pravega.controller.store.stream.PersistentStreamBase.java

private CompletableFuture<ImmutablePair<Integer, Integer>> isScaleRerun(final List<Integer> sealedSegments,
        final List<SimpleEntry<Double, Double>> newRanges, final Data<T> segmentTable,
        final Data<T> historyTable, final int activeEpoch) {
    int nextSegmentNumber;
    if (TableHelper.isRerunOf(sealedSegments, newRanges, historyTable.getData(), segmentTable.getData())) {
        // rerun means segment table is already updated. No need to do anything
        nextSegmentNumber = TableHelper.getSegmentCount(segmentTable.getData()) - newRanges.size();
        return CompletableFuture.completedFuture(new ImmutablePair<>(activeEpoch, nextSegmentNumber));
    } else {
        return FutureHelpers.failedFuture(new ScaleOperationExceptions.ScaleStartException());
    }
}

From source file:eu.bittrade.libs.steemj.configuration.SteemJConfig.java

/**
 * Configure the connection to the Steem Node by providing the endpoint URI
 * and the SSL verification settings.
 * 
 * @param endpointURI
 *            The URI of the node you want to connect to.
 * @param sslVerificationDisabled
 *            Define if SteemJ should verify the SSL certificate of the
 *            endpoint. This option will be ignored if the given
 *            <code>endpointURI</code> is using a non SSL protocol.
 * @throws URISyntaxException
 *             If the <code>endpointURI</code> is null.
 */
public void addEndpointURI(URI endpointURI, boolean sslVerificationDisabled) throws URISyntaxException {
    if (endpointURI == null) {
        throw new URISyntaxException("endpointURI", "The endpointURI can't be null.");
    }

    this.endpointURIs.add(new ImmutablePair<URI, Boolean>(endpointURI, sslVerificationDisabled));
}

From source file:com.knowprocess.bpm.bdd.BpmSpec.java

/**
 * Creates an immutable pair to specify a process variable.
 *
 * @param varName  name of the process variable
 * @param varValue value of the process variable
 * @return an immutable pair representing a scenario variable.
 */
public static ImmutablePair<String, Object> newPair(String varName, Object varValue) {
    return new ImmutablePair<String, Object>(varName, varValue);
}
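
A call site would then build scenario variables directly from name/value pairs, for example (assuming the BpmSpec class above is on the classpath; the variable names and values are illustrative):

ImmutablePair<String, Object> customer = BpmSpec.newPair("customerName", "ACME Corp");
ImmutablePair<String, Object> amount = BpmSpec.newPair("amount", 1000);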

From source file:eu.bittrade.libs.steemj.SteemJIT.java

/**
 * Tests the <code>false</code> case of the
 * {@link SteemJ#verifyAuthority(SignedTransaction)} method by using a
 * transaction signed with the wrong key.
 *
 * The positive case of this operation is tested by the operation tests.
 * 
 * @throws Exception
 *             If something went wrong.
 */
@Category({ IntegrationTest.class })
@Test(expected = SteemResponseException.class)
public void testVerifyAuthority() throws Exception {
    List<ImmutablePair<PrivateKeyType, String>> privateKeys = new ArrayList<>();

    privateKeys.add(
            new ImmutablePair<>(PrivateKeyType.ACTIVE, "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"));

    config.getPrivateKeyStorage().addAccount(new AccountName("dez1337"), privateKeys);

    Asset steemAmount = new Asset(1L, AssetSymbolType.STEEM);

    AccountName from = new AccountName("dez1337");
    AccountName to = new AccountName("steemj");
    String memo = "Test SteemJ";

    ArrayList<Operation> operations = new ArrayList<>();
    operations.add(new TransferOperation(from, to, steemAmount, memo));

    SignedTransaction signedTransaction = new SignedTransaction(REF_BLOCK_NUM, REF_BLOCK_PREFIX,
            new TimePointSec(EXPIRATION_DATE), operations, null);

    assertFalse(steemJ.verifyAuthority(signedTransaction));
}

From source file:io.pravega.controller.store.stream.tables.TableHelper.java

/**
 * Returns the active epoch together with the segments that belong to it.
 * @param historyTableData history table
 * @return pair of the active epoch number and its list of segment numbers
 */
public static Pair<Integer, List<Integer>> getActiveEpoch(byte[] historyTableData) {
    HistoryRecord historyRecord = HistoryRecord.readLatestRecord(historyTableData, true).get();
    return new ImmutablePair<>(historyRecord.getEpoch(), historyRecord.getSegments());
}
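
Because Pair implements java.util.Map.Entry, callers of getActiveEpoch can also read the result through getKey()/getValue() instead of getLeft()/getRight(); a minimal sketch with made-up epoch and segment numbers:

import java.util.Arrays;
import java.util.List;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;

public class ActiveEpochPairExample {
    public static void main(String[] args) {
        Pair<Integer, List<Integer>> activeEpoch = new ImmutablePair<>(3, Arrays.asList(7, 8, 9));

        // Pair implements Map.Entry, so getKey()/getValue() mirror getLeft()/getRight().
        System.out.println("epoch = " + activeEpoch.getKey());      // 3
        System.out.println("segments = " + activeEpoch.getValue()); // [7, 8, 9]
    }
}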