Example usage for org.apache.commons.lang3.tuple Pair getValue

List of usage examples for org.apache.commons.lang3.tuple Pair getValue

Introduction

On this page you can find example usage for org.apache.commons.lang3.tuple Pair getValue.

Prototype

@Override
public R getValue() 

Document

Gets the value from this pair.

This method implements the Map.Entry interface, returning the right element as the value.
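
As a quick orientation before the real-world examples, here is a minimal, self-contained sketch (the class name and pair contents are made up for illustration, not taken from the projects below) showing that getValue() returns the right element and that a Pair can be used wherever a Map.Entry is expected:

import java.util.Map;

import org.apache.commons.lang3.tuple.Pair;

public class PairGetValueExample {
    public static void main(String[] args) {
        // The left element acts as the key, the right element as the value.
        Pair<String, Integer> pair = Pair.of("containers", 3);

        // getValue() returns the right element, equivalent to getRight().
        Integer value = pair.getValue(); // 3
        Integer right = pair.getRight(); // also 3

        // Because Pair implements Map.Entry, it can be assigned to an entry reference.
        Map.Entry<String, Integer> entry = pair;
        System.out.println(entry.getKey() + " -> " + entry.getValue()); // prints: containers -> 3
    }
}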

Usage

From source file:org.apache.bookkeeper.stream.storage.impl.sc.DefaultStorageContainerController.java

@Override
public ClusterAssignmentData computeIdealState(ClusterMetadata clusterMetadata,
        ClusterAssignmentData currentState, Set<BookieSocketAddress> currentCluster) {

    if (currentCluster.isEmpty()) {
        log.info("Current cluster is empty. No alive server is found.");
        return currentState;
    }

    // 1. get current server assignments
    Map<BookieSocketAddress, Set<Long>> currentServerAssignments;
    try {
        currentServerAssignments = currentState.getServersMap().entrySet().stream()
                .collect(Collectors.toMap(e1 -> {
                    try {
                        return new BookieSocketAddress(e1.getKey());
                    } catch (UnknownHostException uhe) {
                        log.error("Invalid cluster ");
                        throw new UncheckedExecutionException(
                                "Invalid server found in current assignment map" + e1.getKey(), uhe);
                    }
                }, e2 -> e2.getValue().getContainersList().stream().collect(Collectors.toSet())));
    } catch (UncheckedExecutionException uee) {
        log.warn("Invalid cluster assignment data is found : {} - {}. Recompute assignment from empty state",
                currentState, uee.getCause().getMessage());
        currentServerAssignments = Maps.newHashMap();
    }
    Set<BookieSocketAddress> currentServersAssigned = currentServerAssignments.keySet();

    // 2. if no servers are assigned, initialize the ideal state
    if (currentServersAssigned.isEmpty()) {
        return initializeIdealState(clusterMetadata, currentCluster);
    }

    // 3. get the cluster diffs
    Set<BookieSocketAddress> serversAdded = Sets.difference(currentCluster, currentServersAssigned)
            .immutableCopy();
    Set<BookieSocketAddress> serversRemoved = Sets.difference(currentServersAssigned, currentCluster)
            .immutableCopy();

    if (serversAdded.isEmpty() && serversRemoved.isEmpty()) {
        // cluster is unchanged; assuming the current state is ideal, no re-assignment is required.
        return currentState;
    }

    log.info(
            "Storage container controller detects cluster changed:\n"
                    + "\t {} servers added: {}\n\t {} servers removed: {}",
            serversAdded.size(), serversAdded, serversRemoved.size(), serversRemoved);

    // 4. compute the containers owned by the removed servers. these containers need to be reassigned.
    Set<Long> containersToReassign = currentServerAssignments.entrySet().stream()
            .filter(serverEntry -> !currentCluster.contains(serverEntry.getKey()))
            .flatMap(serverEntry -> serverEntry.getValue().stream()).collect(Collectors.toSet());

    // 5. use an ordered set as a priority deque, sorting the servers by the number of assigned containers
    TreeSet<Pair<BookieSocketAddress, LinkedList<Long>>> assignmentQueue = new TreeSet<>(
            new ServerAssignmentDataComparator());
    for (Map.Entry<BookieSocketAddress, Set<Long>> entry : currentServerAssignments.entrySet()) {
        BookieSocketAddress host = entry.getKey();

        if (!currentCluster.contains(host)) {
            if (log.isTraceEnabled()) {
                log.trace("Host {} is not in current cluster anymore", host);
            }
            continue;
        } else {
            if (log.isTraceEnabled()) {
                log.trace("Adding host {} to assignment queue", host);
            }
            assignmentQueue.add(Pair.of(host, Lists.newLinkedList(entry.getValue())));
        }
    }

    // 6. add new servers
    for (BookieSocketAddress server : serversAdded) {
        assignmentQueue.add(Pair.of(server, Lists.newLinkedList()));
    }

    // 7. assign the containers that need to be reassigned.
    for (Long containerId : containersToReassign) {
        Pair<BookieSocketAddress, LinkedList<Long>> leastLoadedServer = assignmentQueue.pollFirst();
        leastLoadedServer.getValue().add(containerId);
        assignmentQueue.add(leastLoadedServer);
    }

    // 8. rebalance the containers if needed
    int diffAllowed;
    if (assignmentQueue.size() > clusterMetadata.getNumStorageContainers()) {
        diffAllowed = 1;
    } else {
        diffAllowed = clusterMetadata.getNumStorageContainers() % assignmentQueue.size() == 0 ? 0 : 1;
    }

    Pair<BookieSocketAddress, LinkedList<Long>> leastLoaded = assignmentQueue.first();
    Pair<BookieSocketAddress, LinkedList<Long>> mostLoaded = assignmentQueue.last();
    while (mostLoaded.getValue().size() - leastLoaded.getValue().size() > diffAllowed) {
        leastLoaded = assignmentQueue.pollFirst();
        mostLoaded = assignmentQueue.pollLast();

        // move container from mostLoaded to leastLoaded
        Long containerId = mostLoaded.getValue().removeFirst();
        // add the container to the end to avoid balancing this container again.
        leastLoaded.getValue().addLast(containerId);

        assignmentQueue.add(leastLoaded);
        assignmentQueue.add(mostLoaded);

        leastLoaded = assignmentQueue.first();
        mostLoaded = assignmentQueue.last();
    }

    // 9. the new ideal state is computed, finalize it
    Map<String, ServerAssignmentData> newAssignmentMap = Maps.newHashMap();
    assignmentQueue.forEach(assignment -> newAssignmentMap.put(assignment.getKey().toString(),
            ServerAssignmentData.newBuilder().addAllContainers(assignment.getValue()).build()));
    return ClusterAssignmentData.newBuilder().putAllServers(newAssignmentMap).build();
}

From source file:org.apache.drill.exec.compile.ClassTransformer.java

public Class<?> getImplementationClass(final QueryClassLoader classLoader,
        final TemplateClassDefinition<?> templateDefinition, final String entireClass,
        final String materializedClassName) throws ClassTransformationException {
    // unfortunately, this hasn't been set up at construction time, so we have to do it here
    final ScalarReplacementOption scalarReplacementOption = ScalarReplacementOption
            .fromString(optionManager.getOption(SCALAR_REPLACEMENT_VALIDATOR));

    try {
        final long t1 = System.nanoTime();
        final ClassSet set = new ClassSet(null, templateDefinition.getTemplateClassName(),
                materializedClassName);
        final byte[][] implementationClasses = classLoader.getClassByteCode(set.generated, entireClass);

        long totalBytecodeSize = 0;
        Map<String, Pair<byte[], ClassNode>> classesToMerge = Maps.newHashMap();
        for (byte[] clazz : implementationClasses) {
            totalBytecodeSize += clazz.length;
            final ClassNode node = AsmUtil.classFromBytes(clazz, ClassReader.EXPAND_FRAMES);
            if (!AsmUtil.isClassOk(logger, "implementationClasses", node)) {
                throw new IllegalStateException("Problem found with implementationClasses");
            }
            classesToMerge.put(node.name, Pair.of(clazz, node));
        }

        final LinkedList<ClassSet> names = Lists.newLinkedList();
        final Set<ClassSet> namesCompleted = Sets.newHashSet();
        names.add(set);

        while (!names.isEmpty()) {
            final ClassSet nextSet = names.removeFirst();
            if (namesCompleted.contains(nextSet)) {
                continue;
            }
            final ClassNames nextPrecompiled = nextSet.precompiled;
            final byte[] precompiledBytes = byteCodeLoader.getClassByteCodeFromPath(nextPrecompiled.clazz);
            final ClassNames nextGenerated = nextSet.generated;
            // keeps only classes that have not been merged
            Pair<byte[], ClassNode> classNodePair = classesToMerge.remove(nextGenerated.slash);
            final ClassNode generatedNode;
            if (classNodePair != null) {
                generatedNode = classNodePair.getValue();
            } else {
                generatedNode = null;
            }

            /*
             * TODO
             * We're having a problem with some cases of scalar replacement, but we want to get
             * the code in so it doesn't rot anymore.
             *
             *  Here, we use the specified replacement option. The loop will allow us to retry if
             *  we're using TRY.
             */
            MergedClassResult result = null;
            boolean scalarReplace = scalarReplacementOption != ScalarReplacementOption.OFF
                    && entireClass.length() < MAX_SCALAR_REPLACE_CODE_SIZE;
            while (true) {
                try {
                    result = MergeAdapter.getMergedClass(nextSet, precompiledBytes, generatedNode,
                            scalarReplace);
                    break;
                } catch (RuntimeException e) {
                    // if we had a problem without using scalar replacement, then rethrow
                    if (!scalarReplace) {
                        throw e;
                    }

                    // if we did try to use scalar replacement, decide if we need to retry or not
                    if (scalarReplacementOption == ScalarReplacementOption.ON) {
                        // option is forced on, so this is a hard error
                        throw e;
                    }

                    /*
                     * We tried to use scalar replacement, with the option to fall back to not using it.
                     * Log this failure before trying again without scalar replacement.
                     */
                    logger.info("scalar replacement failure (retrying)\n", e);
                    scalarReplace = false;
                }
            }

            for (String s : result.innerClasses) {
                s = s.replace(DrillFileUtils.SEPARATOR_CHAR, '.');
                names.add(nextSet.getChild(s));
            }
            classLoader.injectByteCode(nextGenerated.dot, result.bytes);
            namesCompleted.add(nextSet);
        }

        // adds the bytecode of the classes that have not been merged, to make them accessible to the outer class
        for (Map.Entry<String, Pair<byte[], ClassNode>> clazz : classesToMerge.entrySet()) {
            classLoader.injectByteCode(clazz.getKey().replace(DrillFileUtils.SEPARATOR_CHAR, '.'),
                    clazz.getValue().getKey());
        }
        Class<?> c = classLoader.findClass(set.generated.dot);
        if (templateDefinition.getExternalInterface().isAssignableFrom(c)) {
            logger.debug("Compiled and merged {}: bytecode size = {}, time = {} ms.", c.getSimpleName(),
                    DrillStringUtils.readable(totalBytecodeSize),
                    (System.nanoTime() - t1 + 500_000) / 1_000_000);
            return c;
        }

        throw new ClassTransformationException("The requested class did not implement the expected interface.");
    } catch (CompileException | IOException | ClassNotFoundException e) {
        throw new ClassTransformationException(
                String.format("Failure generating transformation classes for value: \n %s", entireClass), e);
    }
}

From source file:org.apache.drill.exec.store.ischema.InfoSchemaRecordGenerator.java

/**
 * Visit the tables in the given schema.
 * @param  schemaPath  the path to the given schema
 * @param  schema  the given schema
 */
public void visitTables(String schemaPath, SchemaPlus schema) {
    final AbstractSchema drillSchema = schema.unwrap(AbstractSchema.class);
    final List<String> tableNames = Lists.newArrayList(schema.getTableNames());
    for (Pair<String, ? extends Table> tableNameToTable : drillSchema.getTablesByNames(tableNames)) {
        final String tableName = tableNameToTable.getKey();
        final Table table = tableNameToTable.getValue();
        // Visit the table, and if requested ...
        if (shouldVisitTable(schemaPath, tableName) && visitTable(schemaPath, tableName, table)) {
            // ... do for each of the table's fields.
            final RelDataType tableRow = table.getRowType(new JavaTypeFactoryImpl());
            for (RelDataTypeField field : tableRow.getFieldList()) {
                if (shouldVisitColumn(schemaPath, tableName, field.getName())) {
                    visitField(schemaPath, tableName, field);
                }
            }
        }
    }
}

From source file:org.apache.drill.exec.store.ischema.RecordGenerator.java

/**
 * Visit the tables in the given schema.
 * @param  schemaPath  the path to the given schema
 * @param  schema  the given schema
 */
public void visitTables(String schemaPath, SchemaPlus schema) {
    final AbstractSchema drillSchema = schema.unwrap(AbstractSchema.class);
    final List<String> tableNames = Lists.newArrayList(schema.getTableNames());
    for (Pair<String, ? extends Table> tableNameToTable : drillSchema.getTablesByNames(tableNames)) {
        final String tableName = tableNameToTable.getKey();
        final Table table = tableNameToTable.getValue();
        // Visit the table, and if requested ...
        if (shouldVisitTable(schemaPath, tableName) && visitTable(schemaPath, tableName, table)) {
            // ... do for each of the table's fields.
            final RelDataType tableRow = table.getRowType(new JavaTypeFactoryImpl());
            for (RelDataTypeField field : tableRow.getFieldList()) {
                visitField(schemaPath, tableName, field);
            }
        }
    }
}

From source file:org.apache.giraph.ooc.data.DiskBackedDataStore.java

/**
 * The proxy method that does the actual operation for `loadPartitionData`,
 * but uses the data index given by the caller.
 *
 * @param partitionId id of the partition to load and assemble all data for
 * @param index data index chain for the data to load
 * @return number of bytes loaded from disk to memory
 * @throws IOException
 */
protected long loadPartitionDataProxy(int partitionId, DataIndex index) throws IOException {
    long numBytes = 0;
    ReadWriteLock rwLock = getPartitionLock(partitionId);
    rwLock.writeLock().lock();
    if (hasPartitionDataOnDisk.contains(partitionId)) {
        int ioThreadId = oocEngine.getMetaPartitionManager().getOwnerThreadId(partitionId);
        numBytes += loadInMemoryPartitionData(partitionId, ioThreadId,
                index.addIndex(NumericIndexEntry.createPartitionEntry(partitionId)));
        hasPartitionDataOnDisk.remove(partitionId);
        // Loading raw data buffers from disk if there are any and applying those
        // to already loaded in-memory data.
        Integer numBuffers = numDataBuffersOnDisk.remove(partitionId);
        if (numBuffers != null) {
            checkState(numBuffers > 0);
            index.addIndex(DataIndex.TypeIndexEntry.BUFFER);
            OutOfCoreDataAccessor.DataInputWrapper inputWrapper = oocEngine.getDataAccessor()
                    .prepareInput(ioThreadId, index.copy());
            DataInput dataInput = inputWrapper.getDataInput();
            for (int i = 0; i < numBuffers; ++i) {
                T entry = readNextEntry(dataInput);
                addEntryToInMemoryPartitionData(partitionId, entry);
            }
            numBytes += inputWrapper.finalizeInput(true);
            index.removeLastIndex();
        }
        index.removeLastIndex();
        // Applying in-memory raw data buffers to in-memory partition data.
        Pair<Integer, List<T>> pair = dataBuffers.remove(partitionId);
        if (pair != null) {
            for (T entry : pair.getValue()) {
                addEntryToInMemoryPartitionData(partitionId, entry);
            }
        }
    }
    rwLock.writeLock().unlock();
    return numBytes;
}

From source file:org.apache.giraph.ooc.data.OutOfCoreDataManager.java

/**
 * Loads and assembles all data for a given partition, and puts it into the
 * data store.
 *
 * @param partitionId id of the partition to load and assemble all data for
 * @param basePath path to load the data from
 * @throws IOException
 */
public void loadPartitionData(int partitionId, String basePath) throws IOException {
    ReadWriteLock rwLock = getPartitionLock(partitionId);
    rwLock.writeLock().lock();
    if (hasPartitionDataOnDisk.contains(partitionId)) {
        loadInMemoryPartitionData(partitionId, getPath(basePath, partitionId));
        hasPartitionDataOnDisk.remove(partitionId);
        // Loading raw data buffers from disk if there are any and applying those
        // to already loaded in-memory data.
        Integer numBuffers = numDataBuffersOnDisk.remove(partitionId);
        if (numBuffers != null) {
            checkState(numBuffers > 0);
            File file = new File(getBuffersPath(basePath, partitionId));
            checkState(file.exists());
            if (LOG.isDebugEnabled()) {
                LOG.debug("loadPartitionData: loading " + numBuffers + " buffers of" + " partition "
                        + partitionId + " from " + file.getAbsolutePath());
            }
            FileInputStream fis = new FileInputStream(file);
            BufferedInputStream bis = new BufferedInputStream(fis);
            DataInputStream dis = new DataInputStream(bis);
            for (int i = 0; i < numBuffers; ++i) {
                T entry = readNextEntry(dis);
                addEntryToImMemoryPartitionData(partitionId, entry);
            }
            dis.close();
            checkState(file.delete(), "loadPartitionData: failed to delete %s.", file.getAbsoluteFile());
        }
        // Applying in-memory raw data buffers to in-memory partition data.
        Pair<Integer, List<T>> pair = dataBuffers.remove(partitionId);
        if (pair != null) {
            for (T entry : pair.getValue()) {
                addEntryToImMemoryPartitionData(partitionId, entry);
            }
        }
    }
    rwLock.writeLock().unlock();
}

From source file:org.apache.gobblin.cluster.ScheduledJobConfigurationManager.java

/***
 * TODO: Change cluster code to handle Spec. Right now all job properties need to be in the config and the template is not honored
 * TODO: Materialize JobSpec and make use of ResolvedJobSpec
 * @throws ExecutionException
 * @throws InterruptedException
 */
private void fetchJobSpecs() throws ExecutionException, InterruptedException {
    List<Pair<SpecExecutor.Verb, Spec>> changesSpecs = (List<Pair<SpecExecutor.Verb, Spec>>) this._specConsumer
            .changedSpecs().get();

    for (Pair<SpecExecutor.Verb, Spec> entry : changesSpecs) {

        SpecExecutor.Verb verb = entry.getKey();
        if (verb.equals(SpecExecutor.Verb.ADD)) {

            // Handle addition
            JobSpec jobSpec = (JobSpec) entry.getValue();
            postNewJobConfigArrival(jobSpec.getUri().toString(), jobSpec.getConfigAsProperties());
            jobSpecs.put(entry.getValue().getUri(), (JobSpec) entry.getValue());
        } else if (verb.equals(SpecExecutor.Verb.UPDATE)) {

            // Handle update
            JobSpec jobSpec = (JobSpec) entry.getValue();
            postUpdateJobConfigArrival(jobSpec.getUri().toString(), jobSpec.getConfigAsProperties());
            jobSpecs.put(entry.getValue().getUri(), (JobSpec) entry.getValue());
        } else if (verb.equals(SpecExecutor.Verb.DELETE)) {

            // Handle delete
            Spec anonymousSpec = (Spec) entry.getValue();
            postDeleteJobConfigArrival(anonymousSpec.getUri().toString(), new Properties());
            jobSpecs.remove(entry.getValue().getUri());
        }
    }
}

From source file:org.apache.gobblin.cluster.StreamingJobConfigurationManager.java

private void fetchJobSpecs() throws ExecutionException, InterruptedException {
    List<Pair<SpecExecutor.Verb, Spec>> changesSpecs = (List<Pair<SpecExecutor.Verb, Spec>>) this.specConsumer
            .changedSpecs().get();

    // propagate thread interruption so that caller will exit from loop
    if (Thread.interrupted()) {
        throw new InterruptedException();
    }

    for (Pair<SpecExecutor.Verb, Spec> entry : changesSpecs) {
        SpecExecutor.Verb verb = entry.getKey();
        if (verb.equals(SpecExecutor.Verb.ADD)) {
            // Handle addition
            JobSpec jobSpec = (JobSpec) entry.getValue();
            postNewJobConfigArrival(jobSpec.getUri().toString(), jobSpec.getConfigAsProperties());
        } else if (verb.equals(SpecExecutor.Verb.UPDATE)) {
            // Handle update
            JobSpec jobSpec = (JobSpec) entry.getValue();
            postUpdateJobConfigArrival(jobSpec.getUri().toString(), jobSpec.getConfigAsProperties());
        } else if (verb.equals(SpecExecutor.Verb.DELETE)) {
            // Handle delete
            Spec anonymousSpec = (Spec) entry.getValue();
            postDeleteJobConfigArrival(anonymousSpec.getUri().toString(), new Properties());
        }
    }
}

From source file:org.apache.hadoop.hive.ql.metadata.TestMetadataColumnRestrictionPreEventListener.java

@SafeVarargs
private static Configuration conf(Pair<String, String>... properties) {
    Configuration conf = new Configuration(false); // Don't load defaults.
    // Settings for testing.
    conf.set(HiveConf.ConfVars.METADATA_RESTRICTIONS_BLOCK_DROP_TABLE_COLUMNS.varname, "true");
    conf.set(HiveConf.ConfVars.METADATA_RESTRICTIONS_BLOCK_ADD_TABLE_COLUMNS_IN_MIDDLE.varname, "true");
    conf.set(HiveConf.ConfVars.METADATA_RESTRICTIONS_BLOCK_DROP_STRUCT_COLUMNS.varname, "true");
    conf.set(HiveConf.ConfVars.METADATA_RESTRICTIONS_BLOCK_ADD_STRUCT_COLUMNS_IN_MIDDLE.varname, "true");

    // Overrides.
    for (Pair<String, String> setting : properties) {
        conf.set(setting.getKey(), setting.getValue());
    }
    return conf;
}

From source file:org.apache.hyracks.storage.am.lsm.common.impls.MemoryComponentMetadata.java

@Override
public ArrayBackedValueStorage get(IValueReference key) {
    for (Pair<IValueReference, ArrayBackedValueStorage> pair : store) {
        if (pair.getKey().equals(key)) {
            return pair.getValue();
        }
    }
    return null;
}