Example usage for org.apache.commons.lang3.tuple Pair getKey

List of usage examples for org.apache.commons.lang3.tuple Pair getKey

Introduction

On this page you can find example usages of org.apache.commons.lang3.tuple.Pair#getKey().

Prototype

@Override
public final L getKey() 

Document

Gets the key from this pair.

This method implements the Map.Entry interface, returning the left element as the key.
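
For illustration, because Pair implements Map.Entry, getKey() and getValue() mirror getLeft() and getRight(). A minimal standalone sketch (the "answer" key and the value 42 are made up for this example):

import java.util.Map;
import org.apache.commons.lang3.tuple.Pair;

public class PairGetKeyExample {
    public static void main(String[] args) {
        // Hypothetical key/value, just to illustrate the calls
        Pair<String, Integer> pair = Pair.of("answer", 42);

        // getKey() returns the left element, equivalent to getLeft()
        String key = pair.getKey();       // "answer"
        Integer value = pair.getValue();  // 42

        // Since Pair implements Map.Entry, it can be passed wherever an entry is expected
        Map.Entry<String, Integer> entry = pair;
        System.out.println(entry.getKey() + " = " + entry.getValue());
    }
}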

Usage

From source file:org.kuali.student.poc.jsonparser.json.SimpleJsonMap.java

public BaseJsonObject get(String key) {
    for (Pair<String, BaseJsonObject> keyValue : keyValues) {
        if (keyValue.getKey().equals(key)) {
            return keyValue.getValue();
        }
    }
    return null;
}

From source file:org.languagetool.rules.spelling.morfologik.suggestions_ordering.SuggestionsOrderer.java

private float processRow(String sentence, String correctedSentence, String covered, String replacement,
        Integer contextLength) {

    Pair<String, String> context = Pair.of("", "");
    int errorStartIdx;

    int sentencesDifferenceCharIdx = ContextUtils.firstDifferencePosition(sentence, correctedSentence);
    if (sentencesDifferenceCharIdx != -1) {
        errorStartIdx = ContextUtils.startOfErrorString(sentence, covered, sentencesDifferenceCharIdx);
        if (errorStartIdx != -1) {
            context = ContextUtils.extractContext(sentence, covered, errorStartIdx, contextLength);
        }
    }

    String leftContextCovered = context.getKey();
    String rightContextCovered = context.getValue();

    String leftContextCorrection = leftContextCovered.isEmpty() ? ""
            : leftContextCovered.substring(0, leftContextCovered.length() - covered.length()) + replacement;
    String rightContextCorrection = rightContextCovered.isEmpty() ? ""
            : replacement + rightContextCovered.substring(covered.length());

    boolean firstLetterMatches = ContextUtils.longestCommonPrefix(new String[] { replacement, covered })
            .length() != 0;

    Integer editDistance = ContextUtils.editDistance(covered, replacement);

    List<String> leftContextCoveredTokenized = nGramUtil
            .tokenizeString(leftContextCovered.isEmpty() ? covered : leftContextCovered);
    double leftContextCoveredProba = nGramUtil.stringProbability(leftContextCoveredTokenized, 3);
    List<String> rightContextCoveredTokenized = nGramUtil
            .tokenizeString(rightContextCovered.isEmpty() ? covered : rightContextCovered);
    double rightContextCoveredProba = nGramUtil.stringProbability(rightContextCoveredTokenized, 3);

    List<String> leftContextCorrectionTokenized = nGramUtil
            .tokenizeString(leftContextCorrection.isEmpty() ? replacement : leftContextCorrection);
    double leftContextCorrectionProba = nGramUtil.stringProbability(leftContextCorrectionTokenized, 3);
    List<String> rightContextCorrectionTokenized = nGramUtil
            .tokenizeString(rightContextCorrection.isEmpty() ? replacement : rightContextCorrection);
    double rightContextCorrectionProba = nGramUtil.stringProbability(rightContextCorrectionTokenized, 3);

    float left_context_covered_length = leftContextCoveredTokenized.size();
    float left_context_covered_proba = (float) leftContextCoveredProba;
    float right_context_covered_length = rightContextCoveredTokenized.size();
    float right_context_covered_proba = (float) rightContextCoveredProba;
    float left_context_correction_length = leftContextCorrectionTokenized.size();
    float left_context_correction_proba = (float) leftContextCorrectionProba;
    float right_context_correction_length = rightContextCorrectionTokenized.size();
    float right_context_correction_proba = (float) rightContextCorrectionProba;
    float first_letter_matches = firstLetterMatches ? 1f : 0f;
    float edit_distance = editDistance;

    float[] data = { left_context_covered_length, left_context_covered_proba, right_context_covered_length,
            right_context_covered_proba, left_context_correction_length, left_context_correction_proba,
            right_context_correction_length, right_context_correction_proba, first_letter_matches,
            edit_distance };

    FVec featuresVector = FVec.Transformer.fromArray(data, false);

    double[] predictions = predictor.predict(featuresVector);
    double predictedScore = predictions.length == 0 ? 0 : predictions[0];

    return (float) predictedScore;
}

From source file:org.languagetool.rules.spelling.morfologik.suggestions_ordering.SuggestionsOrderer.java

public List<String> orderSuggestionsUsingModel(List<String> suggestions, String word, AnalyzedSentence sentence,
        int startPos, int wordLength) {
    if (!isMlAvailable()) {
        return suggestions;
    }
    List<Pair<String, Float>> suggestionsScores = new LinkedList<>();
    for (String suggestion : suggestions) {
        String text = sentence.getText();
        String correctedSentence = text.substring(0, startPos) + suggestion
                + sentence.getText().substring(startPos + wordLength);

        float score = processRow(text, correctedSentence, word, suggestion, DEFAULT_CONTEXT_LENGTH);
        suggestionsScores.add(Pair.of(suggestion, score));
    }
    Comparator<Pair<String, Float>> comparing = Comparator.comparing(Pair::getValue);
    suggestionsScores.sort(comparing.reversed());
    List<String> result = new LinkedList<>();
    suggestionsScores.iterator().forEachRemaining((Pair<String, Float> p) -> result.add(p.getKey()));
    return result;
}

From source file:org.languagetool.rules.spelling.suggestions.SuggestionsChanges.java

public void trackExperimentResult(Pair<SuggestionChangesExperiment, SuggestionChangesDataset> source,
        int position, int resultTextSize, long resultComputationTime) {
    numSamples.compute(source.getKey(), (ex, value) -> value == null ? 1 : value + 1);
    datasetNumSamples.compute(source, (ex, value) -> value == null ? 1 : value + 1);

    textSize.compute(source.getKey(), (ex, value) -> value == null ? resultTextSize : value + resultTextSize);
    datasetTextSize.compute(source, (ex, value) -> value == null ? resultTextSize : value + resultTextSize);

    computationTime.compute(source.getKey(),
            (ex, value) -> value == null ? resultComputationTime : value + resultComputationTime);
    datasetComputationTime.compute(source,
            (ex, value) -> value == null ? resultComputationTime : value + resultComputationTime);

    if (position == 0) {
        correctSuggestions.compute(source.getKey(), (ex, value) -> value == null ? 1 : value + 1);
        datasetCorrectSuggestions.compute(source, (ex, value) -> value == null ? 1 : value + 1);
    }
    if (position == -1) {
        notFoundSuggestions.compute(source.getKey(), (ex, value) -> value == null ? 1 : value + 1);
        datasetNotFoundSuggestions.compute(source, (ex, value) -> value == null ? 1 : value + 1);
    } else {
        suggestionPosSum.compute(source.getKey(), (ex, value) -> value == null ? position : value + position);
        datasetSuggestionPosSum.compute(source, (ex, value) -> value == null ? position : value + position);
    }
}

From source file:org.lenskit.cli.util.ScriptEnvironment.java

public ScriptEnvironment(Namespace ns) {
    properties = new Properties();
    List<Pair<String, String>> props = ns.getList("properties");
    if (props != null) {
        for (Pair<String, String> arg : props) {
            properties.setProperty(arg.getKey(), arg.getValue());
        }
    }

    List<String> cp = ns.getList("classpath");
    if (cp != null) {
        classpath = cp;
    } else {
        classpath = Collections.emptyList();
    }
}

From source file:org.matsim.contrib.drt.optimizer.rebalancing.mincostflow.TransportProblem.java

public List<Triple<P, C, Integer>> solve(List<Pair<P, Integer>> supply, List<Pair<C, Integer>> demand) {
    final int P = supply.size();
    final int C = demand.size();
    final int N = P + C + 2;

    // N nodes, which indices are:
    // 0 - source
    // 1..P - producers 1..P
    // P+1..P+C - consumers 1..C
    // P+C+1 - sink

    @SuppressWarnings("unchecked")
    List<Edge>[] graph = Stream.generate(ArrayList::new).limit(N).toArray(List[]::new);

    // source -> producers
    int totalSupply = 0;
    for (int i = 0; i < P; i++) {
        int supplyValue = supply.get(i).getValue();
        MinCostFlow.addEdge(graph, 0, 1 + i, supplyValue, 0);
        totalSupply += supplyValue;
    }

    // producers --> consumers
    for (int i = 0; i < P; i++) {
        Pair<P, Integer> producer = supply.get(i);
        for (int j = 0; j < C; j++) {
            Pair<C, Integer> consumer = demand.get(j);
            int capacity = Math.min(producer.getValue(), consumer.getValue());
            int cost = costFunction.applyAsInt(producer.getKey(), consumer.getKey());
            MinCostFlow.addEdge(graph, 1 + i, 1 + P + j, capacity, cost);
        }
    }

    // consumers -> sink
    int totalDemand = 0;
    for (int j = 0; j < C; j++) {
        int demandValue = demand.get(j).getValue();
        MinCostFlow.addEdge(graph, 1 + P + j, N - 1, demandValue, 0);
        totalDemand += demandValue;
    }

    // solve min cost flow problem
    int[] result = MinCostFlow.minCostFlow(graph, 0, N - 1, Math.min(totalSupply, totalDemand), false);
    if (result[0] == 0) {
        return Collections.emptyList();
    }

    // extract flows
    List<Triple<P, C, Integer>> flows = new ArrayList<>();
    for (int i = 0; i < P; i++) {
        P from = supply.get(i).getKey();
        for (Edge e : graph[1 + i]) {
            int flow = e.getFlow();
            if (flow > 0) {
                int j = e.getTo() - (1 + P);
                C to = demand.get(j).getKey();
                flows.add(Triple.of(from, to, flow));
            }
        }
    }
    return flows;
}

From source file:org.metaborg.intellij.idea.sdks.MetaborgSdkType.java

/**
 * Adds the Metaborg SDK paths.
 *
 * @param sdkModificator The SDK modificator.
 * @param sdkHomePath The SDK home path.
 */
private void addMetaborgSdkPaths(final SdkModificator sdkModificator, @Nullable final String sdkHomePath) {
    if (sdkHomePath == null) {
        // Anything else we need to do?
        return;
    }

    // The added SDK files must be in the jar:// file system.
    // Adding normal file:// files works when creating the SDK,
    // but they are lost when the SDK is reloaded (e.g. after restart).
    for (final Pair<String, VirtualFile> pair : getSdkJars(sdkHomePath)) {
        String filename = pair.getKey();
        @Nullable
        VirtualFile file = pair.getValue();
        if (file == null) {
            this.logger.error("SDK file not found: {}", filename);
        } else {
            if (!file.exists()) {
                this.logger.warn("SDK file may not exist: {}", filename);
            }
            sdkModificator.addRoot(file, OrderRootType.CLASSES);
        }
    }
}

From source file:org.omg.bpmn.miwg.util.xml.diff.AbstractXmlDifferenceListener.java

private void parseAttributes(List<String> attrs, Map<Node, Set<String>> map) {
    List<Node> tmpNodeList;
    for (String attrXpath : attrs) {
        // split attribute name and XPath for node
        Pair<String, String> nodeAndAttribute = XPathUtil.splitXPathIntoNodeAndAttribute(attrXpath);
        tmpNodeList = helper.getAllMatchingNodesFromBothDocuments(nodeAndAttribute.getKey());
        for (Node attrNode : tmpNodeList) {
            getAttributeSetForNode(attrNode, map).add(nodeAndAttribute.getValue());
        }
    }
}

From source file:org.opencb.opencga.storage.core.variant.VariantStoragePipeline.java

/**
 * Transform raw variant files into biodata model.
 *
 * @param inputUri Input file. Accepted formats: *.vcf, *.vcf.gz
 * @param pedigreeUri Pedigree input file. Accepted formats: *.ped
 * @param outputUri The destination folder
 * @throws StorageEngineException If any I/O problem occurs
 */
@Override
public URI transform(URI inputUri, URI pedigreeUri, URI outputUri) throws StorageEngineException {
    // input: VcfReader
    // output: JsonWriter

    Path input = Paths.get(inputUri.getPath());
    Path pedigree = pedigreeUri == null ? null : Paths.get(pedigreeUri.getPath());
    Path output = Paths.get(outputUri.getPath());

    //        boolean includeSamples = options.getBoolean(Options.INCLUDE_GENOTYPES.key(), false);
    boolean includeStats = options.getBoolean(Options.INCLUDE_STATS.key(), false);
    //        boolean includeSrc = options.getBoolean(Options.INCLUDE_SRC.key(), Options.INCLUDE_SRC.defaultValue());
    boolean includeSrc = false;
    boolean failOnError = options.getBoolean(Options.TRANSFORM_FAIL_ON_MALFORMED_VARIANT.key(),
            Options.TRANSFORM_FAIL_ON_MALFORMED_VARIANT.defaultValue());
    String format = options.getString(Options.TRANSFORM_FORMAT.key(), Options.TRANSFORM_FORMAT.defaultValue());
    String parser = options.getString("transform.parser", HTSJDK_PARSER);

    VariantSource source = buildVariantSource(input);
    String fileName = source.getFileName();
    boolean generateReferenceBlocks = options.getBoolean(Options.GVCF.key(), false);

    int batchSize = options.getInt(Options.TRANSFORM_BATCH_SIZE.key(),
            Options.TRANSFORM_BATCH_SIZE.defaultValue());

    String compression = options.getString(Options.COMPRESS_METHOD.key(),
            Options.COMPRESS_METHOD.defaultValue());
    String extension = "";
    int numTasks = options.getInt(Options.TRANSFORM_THREADS.key(), Options.TRANSFORM_THREADS.defaultValue());
    int capacity = options.getInt("blockingQueueCapacity", numTasks * 2);

    if ("gzip".equalsIgnoreCase(compression) || "gz".equalsIgnoreCase(compression)) {
        extension = ".gz";
    } else if ("snappy".equalsIgnoreCase(compression) || "snz".equalsIgnoreCase(compression)) {
        extension = ".snappy";
    } else if (!compression.isEmpty()) {
        throw new IllegalArgumentException("Unknown compression method " + compression);
    }

    Path outputMalformedVariants = output.resolve(fileName + "." + VariantReaderUtils.MALFORMED_FILE + ".txt");
    Path outputVariantsFile = output
            .resolve(fileName + "." + VariantReaderUtils.VARIANTS_FILE + "." + format + extension);
    Path outputMetaFile = VariantReaderUtils.getMetaFromTransformedFile(outputVariantsFile);

    // Close at the end!
    final MalformedVariantHandler malformedHandler;
    try {
        malformedHandler = new MalformedVariantHandler(outputMalformedVariants);
    } catch (IOException e) {
        throw new StorageEngineException(e.getMessage(), e);
    }

    ParallelTaskRunner.Config config = ParallelTaskRunner.Config.builder().setNumTasks(numTasks)
            .setBatchSize(batchSize).setCapacity(capacity).setSorted(true).build();

    logger.info("Transforming variants using {} into {} ...", parser, format);
    long start, end;
    if (numTasks == 1 && "json".equals(format)) { //Run transformation with a SingleThread runner. The legacy way
        if (!".gz".equals(extension)) { //FIXME: Add compatibility with snappy compression
            logger.warn("Force using gzip compression");
            extension = ".gz";
            outputVariantsFile = output.resolve(fileName + ".variants.json" + extension);
        }

        //Ped Reader
        PedigreeReader pedReader = null;
        if (pedigree != null && pedigree.toFile().exists()) { //FIXME Add "endsWith(".ped") ??
            pedReader = new PedigreePedReader(pedigree.toString());
        }

        //Reader
        VariantReader reader = new VariantVcfReader(source, input.toAbsolutePath().toString());

        //Writers
        VariantJsonWriter jsonWriter = new VariantJsonWriter(source, output);
        jsonWriter.includeStats(includeStats);

        List<VariantWriter> writers = Collections.<VariantWriter>singletonList(jsonWriter);

        //Runner
        VariantRunner vr = new VariantRunner(source, reader, pedReader, writers,
                Collections.<Task<Variant>>singletonList(new VariantGlobalStatsCalculator(source)), batchSize);

        logger.info("Single thread transform...");
        start = System.currentTimeMillis();
        try {
            vr.run();
        } catch (IOException e) {
            throw new StorageEngineException("Fail runner execution", e);
        }
        end = System.currentTimeMillis();

    } else if ("avro".equals(format)) {
        //Read VariantSource
        source = VariantReaderUtils.readVariantSource(input, source);

        //Reader
        StringDataReader dataReader = new StringDataReader(input);
        long fileSize = 0;
        try {
            fileSize = dataReader.getFileSize();
        } catch (IOException e) {
            throw new StorageEngineException("Error reading file " + input, e);
        }
        ProgressLogger progressLogger = new ProgressLogger("Transforming file:", fileSize, 200);
        dataReader.setReadBytesListener((totalRead, delta) -> progressLogger.increment(delta, "Bytes"));

        //Writer
        DataWriter<ByteBuffer> dataWriter;
        try {
            dataWriter = new AvroFileWriter<>(VariantAvro.getClassSchema(), compression,
                    new FileOutputStream(outputVariantsFile.toFile()));
        } catch (FileNotFoundException e) {
            throw new StorageEngineException("Fail init writer", e);
        }
        Supplier<VariantTransformTask<ByteBuffer>> taskSupplier;

        if (parser.equalsIgnoreCase(HTSJDK_PARSER)) {
            logger.info("Using HTSJDK to read variants.");
            FullVcfCodec codec = new FullVcfCodec();
            final VariantSource finalSource = source;
            Pair<VCFHeader, VCFHeaderVersion> header = readHtsHeader(input);
            VariantGlobalStatsCalculator statsCalculator = new VariantGlobalStatsCalculator(source);
            taskSupplier = () -> new VariantAvroTransformTask(header.getKey(), header.getValue(), finalSource,
                    outputMetaFile, statsCalculator, includeSrc, generateReferenceBlocks)
                            .setFailOnError(failOnError).addMalformedErrorHandler(malformedHandler);
        } else {
            // TODO Create a utility to determine which extensions are variants files
            final VariantVcfFactory factory = createVariantVcfFactory(source, fileName);
            logger.info("Using Biodata to read variants.");
            final VariantSource finalSource = source;
            VariantGlobalStatsCalculator statsCalculator = new VariantGlobalStatsCalculator(source);
            taskSupplier = () -> new VariantAvroTransformTask(factory, finalSource, outputMetaFile,
                    statsCalculator, includeSrc).setFailOnError(failOnError)
                            .addMalformedErrorHandler(malformedHandler);
        }

        logger.info("Generating output file {}", outputVariantsFile);

        ParallelTaskRunner<String, ByteBuffer> ptr;
        try {
            ptr = new ParallelTaskRunner<>(dataReader, taskSupplier, dataWriter, config);
        } catch (Exception e) {
            throw new StorageEngineException("Error while creating ParallelTaskRunner", e);
        }
        logger.info("Multi thread transform... [1 reading, {} transforming, 1 writing]", numTasks);
        start = System.currentTimeMillis();
        try {
            ptr.run();
        } catch (ExecutionException e) {
            throw new StorageEngineException("Error while executing TransformVariants in ParallelTaskRunner",
                    e);
        }
        end = System.currentTimeMillis();
    } else if ("json".equals(format)) {
        //Read VariantSource
        source = VariantReaderUtils.readVariantSource(input, source);

        //Reader
        StringDataReader dataReader = new StringDataReader(input);
        long fileSize = 0;
        try {
            fileSize = dataReader.getFileSize();
        } catch (IOException e) {
            throw new StorageEngineException("Error reading file " + input, e);
        }
        ProgressLogger progressLogger = new ProgressLogger("Transforming file:", fileSize, 200);
        dataReader.setReadBytesListener((totalRead, delta) -> progressLogger.increment(delta, "Bytes"));

        //Writers
        StringDataWriter dataWriter = new StringDataWriter(outputVariantsFile, true);

        final VariantSource finalSource = source;
        ParallelTaskRunner<String, String> ptr;

        Supplier<VariantTransformTask<String>> taskSupplier;
        if (parser.equalsIgnoreCase(HTSJDK_PARSER)) {
            logger.info("Using HTSJDK to read variants.");
            Pair<VCFHeader, VCFHeaderVersion> header = readHtsHeader(input);
            VariantGlobalStatsCalculator statsCalculator = new VariantGlobalStatsCalculator(finalSource);
            taskSupplier = () -> new VariantJsonTransformTask(header.getKey(), header.getValue(), finalSource,
                    outputMetaFile, statsCalculator, includeSrc, generateReferenceBlocks)
                            .setFailOnError(failOnError).addMalformedErrorHandler(malformedHandler);
        } else {
            // TODO Create a utility to determine which extensions are variants files
            final VariantVcfFactory factory = createVariantVcfFactory(source, fileName);
            logger.info("Using Biodata to read variants.");
            VariantGlobalStatsCalculator statsCalculator = new VariantGlobalStatsCalculator(source);
            taskSupplier = () -> new VariantJsonTransformTask(factory, finalSource, outputMetaFile,
                    statsCalculator, includeSrc).setFailOnError(failOnError)
                            .addMalformedErrorHandler(malformedHandler);
        }

        logger.info("Generating output file {}", outputVariantsFile);

        try {
            ptr = new ParallelTaskRunner<>(dataReader, taskSupplier, dataWriter, config);
        } catch (Exception e) {
            throw new StorageEngineException("Error while creating ParallelTaskRunner", e);
        }

        logger.info("Multi thread transform... [1 reading, {} transforming, 1 writing]", numTasks);
        start = System.currentTimeMillis();
        try {
            ptr.run();
        } catch (ExecutionException e) {
            throw new StorageEngineException("Error while executing TransformVariants in ParallelTaskRunner",
                    e);
        }
        end = System.currentTimeMillis();
    } else if ("proto".equals(format)) {
        //Read VariantSource
        source = VariantReaderUtils.readVariantSource(input, source);
        Pair<Long, Long> times = processProto(input, fileName, output, source, outputVariantsFile,
                outputMetaFile, includeSrc, parser, generateReferenceBlocks, batchSize, extension, compression,
                malformedHandler, failOnError);
        start = times.getKey();
        end = times.getValue();
    } else {
        throw new IllegalArgumentException("Unknown format " + format);
    }
    logger.info("end - start = " + (end - start) / 1000.0 + "s");
    logger.info("Variants transformed!");

    // Close the malformed variant handler
    malformedHandler.close();
    if (malformedHandler.getMalformedLines() > 0) {
        getTransformStats().put("malformed lines", malformedHandler.getMalformedLines());
    }

    return outputUri.resolve(outputVariantsFile.getFileName().toString());
}

From source file:org.opendaylight.netvirt.federation.plugin.FederationPluginIngress.java

private <T extends DataObject, S extends DataObject> void processModification(String listenerKey,
        S modification, ModificationType modificationType, WriteTransaction tx, int generationNumber)
        throws FederationCorruptedStateException {
    FederationPluginCounters.ingress_process_modification.inc();
    LogicalDatastoreType datastoreType = FederationPluginUtils.getListenerDatastoreType(listenerKey);
    if (datastoreType == null) {
        logger.error("Failed to get datastore type for {}", listenerKey);
        return;
    }
    if (!applyFilter(listenerKey, modification, modificationType)) {
        logger.trace("listener {} {} filtered out", listenerKey, modification);
        return;
    }

    Pair<InstanceIdentifier<T>, T> transformedModification = FederationPluginUtils.applyIngressTransformation(
            listenerKey, modification, modificationType, generationNumber, remoteIp);
    if (transformedModification == null) {
        logger.error("Failed to apply ingress transformation for {} {}", listenerKey, modification);
        return;
    }
    if (ModificationType.DELETE.equals(modificationType)) {
        logger.trace("Delete modification listener {} identifier {}", listenerKey,
                transformedModification.getKey());
        deleteModification(datastoreType, transformedModification.getKey(), MAX_TRANSACTION_SUBMIT_RETRIES);
        return;
    }

    logger.trace("Write modification type {} listener {} data {}", modificationType, listenerKey,
            transformedModification);
    if (tx == null) {
        writeModification(datastoreType, transformedModification.getKey(), transformedModification.getValue(),
                MAX_TRANSACTION_SUBMIT_RETRIES);
    } else {
        writeModification(listenerKey, datastoreType, transformedModification.getKey(),
                transformedModification.getValue(), tx);
    }
}