Example usage for org.apache.commons.lang3.tuple.ImmutablePair ImmutablePair(L, R)

Introduction

This page collects example usages of the org.apache.commons.lang3.tuple.ImmutablePair constructor, ImmutablePair(L, R).

Prototype

public ImmutablePair(final L left, final R right) 

Document

Create a new pair instance.
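
A minimal, self-contained sketch of the constructor (the class name ImmutablePairExample and the literal values are illustrative, not taken from the examples below):

import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;

public class ImmutablePairExample {
    public static void main(String[] args) {
        // Create a new pair instance; the left and right types are fixed by the type arguments.
        Pair<String, Integer> pair = new ImmutablePair<String, Integer>("answer", 42);

        // Read the components back; the pair itself cannot be modified after construction.
        System.out.println(pair.getLeft());   // answer
        System.out.println(pair.getRight());  // 42
    }
}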

Usage

From source file:org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroupAssignment.java

/**
 * NOTE: This method is not synchronized by itself, needs to be synchronized externally.
 * @return An allocated sub slot, or {@code null}, if no slot is available.
 */
private Pair<SharedSlot, Locality> getSlotForTaskInternal(AbstractID groupId, ExecutionVertex vertex,
        Iterable<Instance> preferredLocations, boolean localOnly) {
    if (allSlots.isEmpty()) {
        return null;
    }

    Map<Instance, List<SharedSlot>> slotsForGroup = availableSlotsPerJid.get(groupId);

    // get the available slots for the group
    if (slotsForGroup == null) {
        // no task is yet scheduled for that group, so all slots are available
        slotsForGroup = new LinkedHashMap<Instance, List<SharedSlot>>();
        availableSlotsPerJid.put(groupId, slotsForGroup);

        for (SharedSlot availableSlot : allSlots) {
            putIntoMultiMap(slotsForGroup, availableSlot.getAllocatedSlot().getInstance(), availableSlot);
        }
    } else if (slotsForGroup.isEmpty()) {
        return null;
    }

    // check whether we can schedule the task to a preferred location
    boolean didNotGetPreferred = false;

    if (preferredLocations != null) {
        for (Instance location : preferredLocations) {

            // set the flag that we failed a preferred location. If one will be found,
            // we return early anyways and skip the flag evaluation
            didNotGetPreferred = true;

            SharedSlot slot = removeFromMultiMap(slotsForGroup, location);
            if (slot != null && !slot.isDisposed()) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Local assignment in shared group : " + vertex + " --> " + slot);
                }

                return new ImmutablePair<SharedSlot, Locality>(slot, Locality.LOCAL);
            }
        }
    }

    // if we want only local assignments, exit now with a "not found" result
    if (didNotGetPreferred && localOnly) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("No local assignment in shared possible for " + vertex);
        }
        return null;
    }

    // schedule the task to any available location
    SharedSlot slot = pollFromMultiMap(slotsForGroup);
    if (slot != null && !slot.isDisposed()) {
        if (LOG.isDebugEnabled()) {
            LOG.debug((didNotGetPreferred ? "Non-local" : "Unconstrained") + " assignment in shared group : "
                    + vertex + " --> " + slot);
        }

        return new ImmutablePair<SharedSlot, Locality>(slot,
                didNotGetPreferred ? Locality.NON_LOCAL : Locality.UNCONSTRAINED);
    } else {
        return null;
    }
}

From source file:org.apache.gobblin.service.SimpleKafkaSpecConsumer.java

@Override
public Future<? extends List<Pair<SpecExecutor.Verb, Spec>>> changedSpecs() {
    List<Pair<SpecExecutor.Verb, Spec>> changesSpecs = new ArrayList<>();
    initializeWatermarks();
    this.currentPartitionIdx = -1;
    while (!allPartitionsFinished()) {
        if (currentPartitionFinished()) {
            moveToNextPartition();
            continue;
        }
        if (this.messageIterator == null || !this.messageIterator.hasNext()) {
            try {
                this.messageIterator = fetchNextMessageBuffer();
            } catch (Exception e) {
                log.error(String.format(
                        "Failed to fetch next message buffer for partition %s. Will skip this partition.",
                        getCurrentPartition()), e);
                moveToNextPartition();
                continue;
            }
            if (this.messageIterator == null || !this.messageIterator.hasNext()) {
                moveToNextPartition();
                continue;
            }
        }
        while (!currentPartitionFinished()) {
            if (!this.messageIterator.hasNext()) {
                break;
            }

            KafkaConsumerRecord nextValidMessage = this.messageIterator.next();

            // Even though we ask Kafka to give us a message buffer starting from offset x, it may
            // return a buffer that starts from offset smaller than x, so we need to skip messages
            // until we get to x.
            if (nextValidMessage.getOffset() < _nextWatermark.get(this.currentPartitionIdx)) {
                continue;
            }

            _nextWatermark.set(this.currentPartitionIdx, nextValidMessage.getNextOffset());
            try {
                final AvroJobSpec record;

                if (nextValidMessage instanceof ByteArrayBasedKafkaRecord) {
                    record = decodeRecord((ByteArrayBasedKafkaRecord) nextValidMessage);
                } else if (nextValidMessage instanceof DecodeableKafkaRecord) {
                    record = ((DecodeableKafkaRecord<?, AvroJobSpec>) nextValidMessage).getValue();
                } else {
                    throw new IllegalStateException(
                            "Unsupported KafkaConsumerRecord type. The returned record can either be ByteArrayBasedKafkaRecord"
                                    + " or DecodeableKafkaRecord");
                }

                JobSpec.Builder jobSpecBuilder = JobSpec.builder(record.getUri());

                Properties props = new Properties();
                props.putAll(record.getProperties());
                jobSpecBuilder.withJobCatalogURI(record.getUri()).withVersion(record.getVersion())
                        .withDescription(record.getDescription()).withConfigAsProperties(props);

                if (!record.getTemplateUri().isEmpty()) {
                    jobSpecBuilder.withTemplate(new URI(record.getTemplateUri()));
                }

                String verbName = record.getMetadata().get(VERB_KEY);
                SpecExecutor.Verb verb = SpecExecutor.Verb.valueOf(verbName);

                changesSpecs.add(new ImmutablePair<SpecExecutor.Verb, Spec>(verb, jobSpecBuilder.build()));
            } catch (Throwable t) {
                log.error("Could not decode record at partition " + this.currentPartitionIdx + " offset "
                        + nextValidMessage.getOffset());
            }
        }
    }

    return new CompletedFuture(changesSpecs, null);
}

From source file:org.apache.hadoop.hive.ql.exec.ExplainTask.java

@VisibleForTesting
ImmutablePair<Boolean, JSONObject> outputPlanVectorization(PrintStream out, boolean jsonOutput)
        throws Exception {

    if (out != null) {
        out.println("PLAN VECTORIZATION:");
    }

    JSONObject json = jsonOutput ? new JSONObject(new LinkedHashMap<>()) : null;

    HiveConf hiveConf = queryState.getConf();

    boolean isVectorizationEnabled = HiveConf.getBoolVar(hiveConf,
            HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED);
    String isVectorizationEnabledCondName = (isVectorizationEnabled ? trueCondNameVectorizationEnabled
            : falseCondNameVectorizationEnabled);
    List<String> isVectorizationEnabledCondList = Arrays.asList(isVectorizationEnabledCondName);

    if (out != null) {
        out.print(indentString(2));
        out.print("enabled: ");
        out.println(isVectorizationEnabled);
        out.print(indentString(2));
        if (!isVectorizationEnabled) {
            out.print("enabledConditionsNotMet: ");
        } else {
            out.print("enabledConditionsMet: ");
        }
        out.println(isVectorizationEnabledCondList);
    }
    if (jsonOutput) {
        json.put("enabled", isVectorizationEnabled);
        JSONArray jsonArray = new JSONArray(Arrays.asList(isVectorizationEnabledCondName));
        if (!isVectorizationEnabled) {
            json.put("enabledConditionsNotMet", jsonArray);
        } else {
            json.put("enabledConditionsMet", jsonArray);
        }
    }

    return new ImmutablePair<Boolean, JSONObject>(isVectorizationEnabled, jsonOutput ? json : null);
}

From source file:org.apache.hadoop.hive.ql.exec.MapJoinOperator.java

protected Pair<MapJoinTableContainer[], MapJoinTableContainerSerDe[]> loadHashTable(
        ExecMapperContext mapContext, MapredContext mrContext) throws HiveException {
    if (canSkipReload(mapContext)) {
        // no need to reload
        return new ImmutablePair<MapJoinTableContainer[], MapJoinTableContainerSerDe[]>(mapJoinTables,
                mapJoinTableSerdes);
    }

    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.LOAD_HASHTABLE);
    loader.init(mapContext, mrContext, hconf, this);
    try {
        loader.load(mapJoinTables, mapJoinTableSerdes);
    } catch (HiveException e) {
        if (LOG.isInfoEnabled()) {
            LOG.info("Exception loading hash tables. Clearing partially loaded hash table containers.");
        }

        // there could be some spilled partitions which needs to be cleaned up
        clearAllTableContainers();
        throw e;
    }

    hashTblInitedOnce = true;

    Pair<MapJoinTableContainer[], MapJoinTableContainerSerDe[]> pair = new ImmutablePair<MapJoinTableContainer[], MapJoinTableContainerSerDe[]>(
            mapJoinTables, mapJoinTableSerdes);

    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.LOAD_HASHTABLE);

    if (canSkipJoinProcessing(mapContext)) {
        LOG.info("Skipping big table join processing for " + this.toString());
        this.setDone(true);
    }

    return pair;
}

From source file:org.apache.hadoop.hive.ql.plan.SparkWork.java

/**
 * disconnect removes an edge between a and b. Both a and
 * b have to be in the graph. If there is no matching edge
 * no change happens.
 */
public void disconnect(BaseWork a, BaseWork b) {
    workGraph.get(a).remove(b);
    invertedWorkGraph.get(b).remove(a);
    if (getParents(b).isEmpty()) {
        roots.add(b);
    }
    if (getChildren(a).isEmpty()) {
        leaves.add(a);
    }
    edgeProperties.remove(new ImmutablePair<BaseWork, BaseWork>(a, b));
}

From source file:org.apache.hadoop.hive.ql.plan.SparkWork.java

/**
 * remove removes a node from the graph and removes all edges with
 * work as start or end point. No change to the graph if the node
 * doesn't exist.
 */
public void remove(BaseWork work) {
    if (!workGraph.containsKey(work)) {
        return;
    }

    List<BaseWork> children = getChildren(work);
    List<BaseWork> parents = getParents(work);

    for (BaseWork w : children) {
        edgeProperties.remove(new ImmutablePair<BaseWork, BaseWork>(work, w));
        invertedWorkGraph.get(w).remove(work);
        if (invertedWorkGraph.get(w).size() == 0) {
            roots.add(w);
        }
    }

    for (BaseWork w : parents) {
        edgeProperties.remove(new ImmutablePair<BaseWork, BaseWork>(w, work));
        workGraph.get(w).remove(work);
        if (workGraph.get(w).size() == 0) {
            leaves.add(w);
        }
    }

    roots.remove(work);
    leaves.remove(work);

    workGraph.remove(work);
    invertedWorkGraph.remove(work);
}

From source file:org.apache.hadoop.hive.ql.plan.SparkWork.java

/**
 * returns the edge property connecting work a and b
 */
public SparkEdgeProperty getEdgeProperty(BaseWork a, BaseWork b) {
    return edgeProperties.get(new ImmutablePair<BaseWork, BaseWork>(a, b));
}

From source file:org.apache.hadoop.hive.ql.plan.SparkWork.java

/**
 * connect adds an edge between a and b. Both nodes have
 * to be added prior to calling connect.
 */
public void connect(BaseWork a, BaseWork b, SparkEdgeProperty edgeProp) {
    workGraph.get(a).add(b);
    invertedWorkGraph.get(b).add(a);
    roots.remove(b);
    leaves.remove(a);
    ImmutablePair<BaseWork, BaseWork> workPair = new ImmutablePair<BaseWork, BaseWork>(a, b);
    edgeProperties.put(workPair, edgeProp);
}

From source file:org.apache.hadoop.hive.ql.plan.TezWork.java

public EdgeType getEdgeType(BaseWork a, BaseWork b) {
    return edgeProperties.get(new ImmutablePair(a, b)).getEdgeType();
}