Example usage for org.apache.commons.lang3.tuple Pair getLeft

Introduction

On this page you can find example usages of org.apache.commons.lang3.tuple.Pair#getLeft(), drawn from real-world source files.

Prototype

public abstract L getLeft();

Document

Gets the left element from this pair.

When treated as a key-value pair, this is the key.
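
As a quick illustration (a minimal sketch, not taken from the examples below, assuming only commons-lang3 on the classpath): getLeft() returns the first element, and because Pair implements Map.Entry, getKey() returns the same value.

import org.apache.commons.lang3.tuple.Pair;

Pair<String, Integer> pair = Pair.of("answer", 42);
String left = pair.getLeft();    // "answer"
String key = pair.getKey();      // same as getLeft(): Pair implements Map.Entry<L, R>
Integer right = pair.getRight(); // 42; getValue() would return the same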

Usage

From source file:edu.uci.ics.hyracks.api.client.impl.ActivityClusterGraphBuilder.java

public ActivityClusterGraph inferActivityClusters(JobId jobId, JobActivityGraph jag) {
    /*
     * Build the initial equivalence-set map: for each activity id t, create the mapping t -> { t }.
     */
    Map<ActivityId, Set<ActivityId>> stageMap = new HashMap<ActivityId, Set<ActivityId>>();
    Set<Set<ActivityId>> stages = new HashSet<Set<ActivityId>>();
    for (ActivityId taskId : jag.getActivityMap().keySet()) {
        Set<ActivityId> eqSet = new HashSet<ActivityId>();
        eqSet.add(taskId);
        stageMap.put(taskId, eqSet);
        stages.add(eqSet);
    }

    boolean changed = true;
    while (changed) {
        changed = false;
        Pair<ActivityId, ActivityId> pair = findMergePair(jag, stages);
        if (pair != null) {
            merge(stageMap, stages, pair.getLeft(), pair.getRight());
            changed = true;
        }
    }

    ActivityClusterGraph acg = new ActivityClusterGraph();
    Map<ActivityId, ActivityCluster> acMap = new HashMap<ActivityId, ActivityCluster>();
    int acCounter = 0;
    Map<ActivityId, IActivity> activityNodeMap = jag.getActivityMap();
    List<ActivityCluster> acList = new ArrayList<ActivityCluster>();
    for (Set<ActivityId> stage : stages) {
        ActivityCluster ac = new ActivityCluster(acg, new ActivityClusterId(jobId, acCounter++));
        acList.add(ac);
        for (ActivityId aid : stage) {
            IActivity activity = activityNodeMap.get(aid);
            ac.addActivity(activity);
            acMap.put(aid, ac);
        }
    }

    for (Set<ActivityId> stage : stages) {
        for (ActivityId aid : stage) {
            IActivity activity = activityNodeMap.get(aid);
            ActivityCluster ac = acMap.get(aid);
            List<IConnectorDescriptor> aOutputs = jag.getActivityOutputMap().get(aid);
            if (aOutputs == null || aOutputs.isEmpty()) {
                ac.addRoot(activity);
            } else {
                int nActivityOutputs = aOutputs.size();
                for (int i = 0; i < nActivityOutputs; ++i) {
                    IConnectorDescriptor conn = aOutputs.get(i);
                    ac.addConnector(conn);
                    Pair<Pair<IActivity, Integer>, Pair<IActivity, Integer>> pcPair = jag
                            .getConnectorActivityMap().get(conn.getConnectorId());
                    ac.connect(conn, activity, i, pcPair.getRight().getLeft(), pcPair.getRight().getRight(),
                            jag.getConnectorRecordDescriptorMap().get(conn.getConnectorId()));
                }
            }
        }
    }

    Map<ActivityId, Set<ActivityId>> blocked2BlockerMap = jag.getBlocked2BlockerMap();
    for (ActivityCluster s : acList) {
        Map<ActivityId, Set<ActivityId>> acBlocked2BlockerMap = s.getBlocked2BlockerMap();
        Set<ActivityCluster> blockerStages = new HashSet<ActivityCluster>();
        for (ActivityId t : s.getActivityMap().keySet()) {
            Set<ActivityId> blockerTasks = blocked2BlockerMap.get(t);
            acBlocked2BlockerMap.put(t, blockerTasks);
            if (blockerTasks != null) {
                for (ActivityId bt : blockerTasks) {
                    blockerStages.add(acMap.get(bt));
                }
            }
        }
        for (ActivityCluster bs : blockerStages) {
            s.getDependencies().add(bs);
        }
    }
    acg.addActivityClusters(acList);

    if (LOGGER.isLoggable(Level.FINE)) {
        try {
            LOGGER.fine(acg.toJSON().toString(2));
        } catch (JSONException e) {
            e.printStackTrace();
            throw new RuntimeException(e);
        }
    }
    return acg;
}
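
The merge helper called at the top of this method is not included in the snippet. As a rough, generic sketch of what it plausibly does with the pair's two elements (an assumption, not the actual Hyracks source), merging is a union of the two equivalence sets:

// Hypothetical sketch (assumption, not Hyracks code): union the sets that
// contain the pair's left and right elements, then re-point the absorbed
// set's members at the surviving set.
static <T> void merge(Map<T, Set<T>> setMap, Set<Set<T>> sets, T left, T right) {
    Set<T> target = setMap.get(left);
    Set<T> source = setMap.get(right);
    if (target != source) { // already merged if both map to the same set
        sets.remove(source);
        target.addAll(source);
        for (T member : source) {
            setMap.put(member, target);
        }
    }
}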

From source file:mase.mason.world.DistanceSensorArcs.java

/**
 * Very efficient implementation using an ordered TreeMap. Should ensure
 * scalability when large numbers of objects are present, as there is no
 * need to check angles with objects that are farther than the closest
 * object in the given cone. Potential limitation (unlikely): if there are
 * two objects at exactly the same distance but at different angles, only
 * one of them will be considered, as the distance is used as the key in
 * the TreeMap.
 */
@Override
public double[] readValues() {
    lastDistances = new double[valueCount()];
    Arrays.fill(lastDistances, Double.POSITIVE_INFINITY);
    Arrays.fill(closestObjects, null);
    if (range < 0.001) {
        return lastDistances;
    }
    double rangeNoiseAbs = Double.isInfinite(range) ? rangeNoise * fieldDiagonal : range * rangeNoise;

    WorldObject[] candidates = getCandidates();

    // TODO: replace treemap with collection-sort
    Pair<Double, WorldObject>[] distances = new Pair[candidates.length];
    int index = 0;
    for (WorldObject o : candidates) {
        if (!centerToCenter && o.isInside(ag.getLocation())) {
            Arrays.fill(lastDistances, 0);
            Arrays.fill(closestObjects, o);
            return lastDistances;
        }

        double dist = centerToCenter ? ag.getLocation().distance(o.getLocation())
                : Math.max(0, ag.distanceTo(o));
        if (rangeNoiseAbs > 0) {
            dist += rangeNoiseAbs
                    * (noiseType == UNIFORM ? state.random.nextDouble() * 2 - 1 : state.random.nextGaussian());
            dist = Math.max(dist, 0);
        }
        if (dist <= range) {
            distances[index++] = Pair.of(dist, o);
        }
    }
    if (index < distances.length) {
        distances = Arrays.copyOf(distances, index);
    }

    Arrays.sort(distances, new Comparator<Pair<Double, WorldObject>>() {
        @Override
        public int compare(Pair<Double, WorldObject> a, Pair<Double, WorldObject> b) {
            return Double.compare(a.getLeft(), b.getLeft());
        }
    });

    int filled = 0;
    for (Pair<Double, WorldObject> e : distances) {
        if (filled == arcStart.length) {
            break;
        }
        double angle = ag.angleTo(e.getRight().getLocation());
        if (orientationNoise > 0) {
            angle += orientationNoise
                    * (noiseType == UNIFORM ? state.random.nextDouble() * 2 - 1 : state.random.nextGaussian());
            angle = EmboddiedAgent.normalizeAngle(angle);
        }
        for (int a = 0; a < arcStart.length; a++) {
            if (Double.isInfinite(lastDistances[a]) && ((angle >= arcStart[a] && angle <= arcEnd[a])
                    || (arcStart[a] > arcEnd[a] && (angle >= arcStart[a] || angle <= arcEnd[a])))) {
                filled++;
                lastDistances[a] = e.getKey();    // Map.Entry view: getKey() is getLeft()
                closestObjects[a] = e.getValue(); // and getValue() is getRight()
            }
        }
    }
    return lastDistances;
}
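
Since the comparator above orders only by the left elements, on Java 8 and later an equivalent form (behavior unchanged, java.util.Comparator assumed imported) would be:

Arrays.sort(distances, Comparator.comparing(Pair::getLeft));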

From source file:com.dancorder.Archiverify.SynchingVisitor.java

@Override
public void postVisitDirectory(Path relativeDirectoryPath, FileExistence existence) {
    try {
        if (isNotInErrorPath(relativeDirectoryPath)) {
            Pair<FileHashStore, FileHashStore> hashStorePair = hashStoresByDirectory.get(relativeDirectoryPath);
            List<Path> visitedFiles = visitedFilesByDirectory.get(relativeDirectoryPath);

            syncLogic.removeUnvisitedHashes(hashStorePair.getLeft(), visitedFiles);
            syncLogic.removeUnvisitedHashes(hashStorePair.getRight(), visitedFiles);
            List<Action> newActions = syncLogic.checkHashStores(hashStorePair.getLeft(),
                    hashStorePair.getRight());
            actions.addAll(newActions);
        }

        hashStoresByDirectory.remove(relativeDirectoryPath);
        visitedFilesByDirectory.remove(relativeDirectoryPath);
    } catch (Exception e) {
        errorPath = relativeDirectoryPath;

        actions.add(new WarningAction(String.format(
                "Error caught ending visit of directory %s. Hashes in this directory will not be synched. %s",
                relativeDirectoryPath, e)));
    }
}

From source file:com.quancheng.plugin.common.PrintMessageFile.java

private String getMessageJavaType(String packageName, DescriptorProto sourceMessageDesc,
        FieldDescriptorProto field) {
    String fieldType = CommonUtils.findNotIncludePackageType(field.getTypeName());
    Map<String, Pair<DescriptorProto, List<FieldDescriptorProto>>> nestedFieldType = transform(
            sourceMessageDesc);
    // Map detection: protobuf represents a map<K, V> field as a nested *Entry message with exactly two fields (key, value)
    if (nestedFieldType.containsKey(fieldType)) {
        Pair<DescriptorProto, List<FieldDescriptorProto>> nestedFieldPair = nestedFieldType.get(fieldType);
        if (nestedFieldPair.getRight().size() == 2) {
            DescriptorProto mapSourceMessageDesc = nestedFieldPair.getLeft();
            List<FieldDescriptorProto> mapFieldList = nestedFieldPair.getRight();
            String nestedJavaType = "java.util.Map<"
                    + findJavaType(packageName, mapSourceMessageDesc, mapFieldList.get(0)) + ","
                    + findJavaType(packageName, mapSourceMessageDesc, mapFieldList.get(1)) + ">";
            return nestedJavaType;
        } else {
            return null;
        }
    } else {
        return CommonUtils.findPojoTypeFromCache(field.getTypeName(), pojoTypeCache);
    }
}

From source file:net.malisis.blocks.tileentity.SwapperTileEntity.java

private void applyState(BlockPos pos, Pair<IBlockState, NBTTagCompound> state) {
    if (getWorld().getBlockState(pos).getBlock() == Blocks.bedrock)
        return;

    clearWorldState(pos);
    getWorld().setBlockState(pos, state.getLeft() != null ? state.getLeft() : Blocks.air.getDefaultState(), 0);
    TileEntity te = getWorld().getTileEntity(pos);
    if (te != null && state.getRight() != null)
        te.readFromNBT(state.getRight());
}

From source file:com.github.helenusdriver.driver.tools.Tool.java

/**
 * Inserts objects from all object creators based on the provided collection
 * of classes and options.
 *
 * @author paouelle
 *
 * @param classes the collection of classes for object creators
 * @param suffixes the map of provided suffix values
 */
private static void insertObjectsFromClasses(Collection<Class<?>> classes, Map<String, String> suffixes) {
    for (final Class<?> clazz : classes) {
        final Pair<Method, Class<?>[]> initial = Tool.findInitial(clazz);

        if (initial == null) { // should not happen!
            System.out.println(Tool.class.getSimpleName() + ": no objects found using " + clazz.getName());
            continue;
        }
        final List<Object> ios = Tool.getInitialObjects(initial.getLeft(), suffixes);

        System.out.println(Tool.class.getSimpleName() + ": inserting " + ios.size() + " object"
                + (ios.size() == 1 ? "" : "s") + " using " + clazz.getName());
        final Batch b = StatementBuilder.batch();

        for (final Object io : ios) {
            b.add(StatementBuilder.insert(io));
        }
        if (b.isEmpty() || (b.getQueryString() == null)) {
            System.out.println(Tool.class.getSimpleName() + ": no objects to insert");
        } else {
            executeCQL(b);
        }
    }
}

From source file:com.norconex.importer.handler.transformer.impl.StripBetweenTransformer.java

@Override
protected void saveHandlerToXML(EnhancedXMLStreamWriter writer) throws XMLStreamException {
    writer.writeAttribute("caseSensitive", Boolean.toString(isCaseSensitive()));
    writer.writeAttribute("inclusive", Boolean.toString(isInclusive()));
    for (Pair<String, String> pair : stripPairs) {
        writer.writeStartElement("stripBetween");
        writer.writeStartElement("start");
        writer.writeCharacters(pair.getLeft());
        writer.writeEndElement();
        writer.writeStartElement("end");
        writer.writeCharacters(pair.getRight());
        writer.writeEndElement();
        writer.writeEndElement();
    }
}
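
The stripPairs field is populated elsewhere in the class. A hypothetical construction (names and markers assumed, shown only to illustrate the left/right roles written above):

// Assumption: stripPairs holds (start, end) marker pairs; getLeft() is the start marker.
List<Pair<String, String>> stripPairs = new ArrayList<>();
stripPairs.add(Pair.of("<!--", "-->")); // e.g. strip HTML comments between these markers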

From source file:com.jaspersoft.jasperserver.jrsh.operation.grammar.parser.PlainGrammarParser.java

protected void buildGraphEdges() {
    for (val entry : dependencies.entrySet()) {
        Pair<Token, String[]> tokenPair = entry.getValue();
        for (String dependencyName : tokenPair.getRight()) {
            Pair<Token, String[]> dependency = dependencies.get(dependencyName);
            graph.addEdge(dependency.getLeft(), tokenPair.getLeft());
        }
    }
}

From source file:it.polimi.diceH2020.SPACE4CloudWS.solvers.solversImpl.DagSimSolver.DagSimSolver.java

@Override
protected Pair<Double, Boolean> run(Pair<List<File>, List<File>> pFiles, String remoteName,
        String remoteDirectory) throws Exception {
    if (pFiles.getLeft() == null || pFiles.getLeft().size() != 1) {
        throw new Exception("Model file missing");
    }

    if (pFiles.getRight() == null || pFiles.getRight().isEmpty()) {
        throw new Exception("Replayer files missing");
    }

    double result = 0.;
    boolean success = false;
    List<String> remoteMsg = null;

    boolean stillNotOk = true;
    for (int i = 0; stillNotOk && i < MAX_ITERATIONS; ++i) {
        logger.info(remoteName + "-> Starting DagSim resolution on the server");

        File modelFile = pFiles.getLeft().get(0);
        String fileName = modelFile.getName();
        String remotePath = remoteDirectory + File.separator + fileName;

        List<File> workingFiles = pFiles.getRight();
        workingFiles.add(modelFile);
        cleanRemoteSubDirectory(remoteDirectory);
        sendFiles(remoteDirectory, workingFiles);
        logger.debug(remoteName + "-> Working files sent");

        logger.debug(remoteName + "-> Starting DagSim model...");
        String command = String.format("%s %s", connSettings.getSolverPath(), remotePath);
        remoteMsg = connector.exec(command, getClass());

        if (remoteMsg.contains("exit-status: 0")) {
            stillNotOk = false;
            logger.info(remoteName + "-> The remote simulation process completed correctly");
        } else {
            logger.debug(remoteName + "-> Remote exit status: " + remoteMsg);
        }
    }

    if (stillNotOk) {
        logger.info(remoteName + "-> Error in remote simulation on DagSim");
        throw new Exception("Error in the DagSim server");
    } else {
        final String stdout = remoteMsg.get(0);
        final String stderr = remoteMsg.get(1);

        try (Scanner scanner = new Scanner(stdout)) {
            while (!success && scanner.hasNextLine()) {
                Matcher matcher = RESULT_LINE.matcher(scanner.nextLine());
                if (matcher.find()) {
                    result = Double.valueOf(matcher.group("avg"));
                    success = true;
                }
            }
        }

        if (!success) {
            logger.error(String.format("%s -> Error in remote DagSim simulation", remoteName));
            logger.error(String.format("%s -> stdout:\n%s", remoteName, stdout));
            logger.error(String.format("%s -> stderr:\n%s", remoteName, stderr));
        }
    }

    return Pair.of(result, !success);
}

From source file:com.pinterest.terrapin.hadoop.BaseUploader.java

public void upload(String clusterName, String fileSet, Options options) throws Exception {
    List<Pair<Path, Long>> fileSizePairList = getFileList();

    int numShards = fileSizePairList.size();
    LOG.info("Got " + numShards + " files.");
    if (numShards == 0) {
        LOG.warn("No files found. Exiting.");
        System.exit(1);
    }

    List<Path> parts = Lists.transform(fileSizePairList, new Function<Pair<Path, Long>, Path>() {
        @Override
        public Path apply(Pair<Path, Long> pathLongPair) {
            return pathLongPair.getKey();
        }
    });
    PartitionerType partitionerType = options.getPartitioner();

    validate(parts, partitionerType, numShards);
    long maxSize = -1;
    for (Pair<Path, Long> fileSizePair : fileSizePairList) {
        long size = fileSizePair.getRight();
        if (maxSize < size) {
            maxSize = size;
        }
    }
    // Come up with a new timestamp epoch for the latest data.
    long timestampEpochMillis = System.currentTimeMillis();
    String hdfsDir = Constants.HDFS_DATA_DIR + "/" + fileSet + "/" + timestampEpochMillis;
    ZooKeeperManager zkManager = getZKManager(clusterName);
    FileSetInfo fileSetInfo = new FileSetInfo(fileSet, hdfsDir, numShards, (List) Lists.newArrayList(),
            options);

    int replicationFactor = Constants.DEFAULT_HDFS_REPLICATION;
    if (terrapinNamenode == null || terrapinNamenode.isEmpty()) {
        ClusterInfo info = zkManager.getClusterInfo();
        if (info == null) {
            LOG.error("Could not find the namenode for " + clusterName);
            System.exit(1);
        }
        if (info.hdfsNameNode == null || info.hdfsNameNode.isEmpty()) {
            LOG.error("Could not find the namenode for " + clusterName);
            System.exit(1);
        }
        this.terrapinNamenode = info.hdfsNameNode;
        replicationFactor = info.hdfsReplicationFactor;
    }
    // Connect to the zookeeper and establish a lock on the fileset.
    LOG.info("Locking fileset " + fileSet);
    zkManager.lockFileSet(fileSet, fileSetInfo);

    try {
        LOG.info("Uploading " + numShards + " files through distcp to " + hdfsDir);

        // TODO: Add check for cluster disk space.
        List<Path> sourceFiles = Lists.newArrayListWithCapacity(fileSizePairList.size());
        for (Pair<Path, Long> fileSize : fileSizePairList) {
            sourceFiles.add(fileSize.getLeft());
        }
        if (sourceFiles.size() == 1) {
            hdfsDir = hdfsDir + "/" + TerrapinUtil.formatPartitionName(0);
        }
        DistCpOptions distCpOptions = new DistCpOptions(sourceFiles,
                new Path("hdfs", terrapinNamenode, hdfsDir));
        distCpOptions.setSyncFolder(true);
        distCpOptions.setSkipCRC(true);

        if (maxSize > Constants.DEFAULT_MAX_SHARD_SIZE_BYTES) {
            LOG.warn("Largest shard is " + maxSize + " bytes. This is more than 4G. "
                    + "Increase the # of shards to reduce the size.");
            System.exit(1);
        }
        TerrapinUtil.setupConfiguration(conf, maxSize, replicationFactor);

        DistCp distCp = getDistCp(conf, distCpOptions);
        Job job = distCp.execute();
        if (!job.waitForCompletion(true)) {
            throw new RuntimeException("Distributed copy failed.");
        }

        LOG.info("Successfully copied data.");

        loadFileSetData(zkManager, fileSetInfo, options);

        // Wait for a while so that zookeeper watches have propagated before relinquishing the lock.
        try {
            LOG.info("Releasing file set lock.");
            Thread.sleep(5000);
        } catch (InterruptedException ie) {
            LOG.warn("Interrupted.");
        }
    } finally {
        zkManager.unlockFileSet(fileSet);
    }
}