Example usage for org.apache.commons.lang3.tuple Pair getKey

List of usage examples for org.apache.commons.lang3.tuple Pair getKey

Introduction

On this page you can find example usage for org.apache.commons.lang3.tuple Pair getKey.

Prototype

@Override
public final L getKey() 

Document

Gets the key from this pair.

This method implements the Map.Entry interface, returning the left element as the key.
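
For a quick orientation before the full examples, here is a minimal, self-contained sketch (the class name and values are illustrative, not taken from the examples below):

import java.util.Map;

import org.apache.commons.lang3.tuple.Pair;

public class PairGetKeyDemo {
    public static void main(String[] args) {
        Pair<String, Integer> pair = Pair.of("answer", 42);

        // getKey() returns the left element and is equivalent to getLeft().
        String key = pair.getKey(); // "answer"

        // Because Pair implements Map.Entry, a Pair can be used wherever an entry is expected.
        Map.Entry<String, Integer> entry = pair;
        System.out.println(key + " = " + entry.getValue()); // prints: answer = 42
    }
}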

Usage

From source file:com.bluepowermod.part.tube.TubeLogic.java

public void update() {

    if (!Config.enableTubeCaching)
        clearNodeCache();
    Iterator<TubeStack> iterator = tubeStacks.iterator();
    while (iterator.hasNext()) {
        TubeStack tubeStack = iterator.next();
        if (tubeStack.update(tube.getWorld())) {
            if (!tube.isCrossOver) {
                for (ForgeDirection dir : ForgeDirection.VALID_DIRECTIONS) {
                    if (tube.connections[dir.ordinal()] && dir != tubeStack.heading.getOpposite()) {
                        tubeStack.heading = dir;
                        break;
                    }
                }
            } else {// when we are at an intersection
                if (!tube.getWorld().isRemote) {
                    Pair<ForgeDirection, TileEntity> heading = getHeadingForItem(tubeStack, false);
                    if (heading == null) {// if no valid destination
                        for (int i = 0; i < 6; i++) {
                            TubeEdge edge = getNode().edges[i];
                            if (edge != null) {
                                tubeStack.heading = ForgeDirection.getOrientation(i);// this will allow the item to ignore the color mask when
                                // there's really no option left.
                                if (canPassThroughMask(tubeStack.color, edge.colorMask)) {
                                    tubeStack.heading = ForgeDirection.getOrientation(i);// just a specific direction for now.
                                    break;
                                }
                            }
                        }
                    } else {
                        tubeStack.heading = heading.getKey();
                    }
                    BPNetworkHandler.INSTANCE.sendToAllAround(new MessageRedirectTubeStack(tube, tubeStack),
                            tube.getWorld());
                } else {
                    tubeStack.enabled = false;
                }
            }
        } else if (tubeStack.progress >= 1) {// when the item reached the end of the tube.
            TileEntity output = tube.getTileCache(tubeStack.heading);
            PneumaticTube tube = this.tube.getPartCache(tubeStack.heading);
            if (tube != null) {// we don't need to check connections, that's caught earlier.
                TubeLogic logic = tube.getLogic();
                tubeStack.progress = 0;
                tubeStack.oldProgress = -tubeStack.getSpeed() * TubeStack.tickTimeMultiplier;
                logic.tubeStacks.add(tubeStack);// transfer to another tube.
                iterator.remove();
            } else if (!this.tube.getWorld().isRemote) {
                ItemStack remainder = tubeStack.stack;
                if (output instanceof ITubeConnection
                        && ((ITubeConnection) output).isConnectedTo(tubeStack.heading.getOpposite())) {
                    TubeStack rem = ((ITubeConnection) output).acceptItemFromTube(tubeStack,
                            tubeStack.heading.getOpposite(), false);
                    if (rem != null)
                        remainder = rem.stack;
                    else
                        remainder = null;
                }
                if (remainder != null)
                    remainder = IOHelper.insert(output, remainder, tubeStack.heading.getOpposite(),
                            tubeStack.color, false);
                if (remainder != null) {
                    if (injectStack(remainder, tubeStack.heading.getOpposite(), tubeStack.color, true)) {
                        tubeStack.stack = remainder;
                        tubeStack.progress = 0;
                        tubeStack.oldProgress = 0;
                        tubeStack.heading = tubeStack.heading.getOpposite();
                        this.tube.sendUpdatePacket();
                    } else {
                        EntityItem entity = new EntityItem(this.tube.getWorld(),
                                this.tube.getX() + 0.5 + tubeStack.heading.offsetX * tubeStack.progress * 0.5,
                                this.tube.getY() + 0.5 + tubeStack.heading.offsetY * tubeStack.progress * 0.5,
                                this.tube.getZ() + 0.5 + tubeStack.heading.offsetZ * tubeStack.progress * 0.5,
                                remainder);
                        this.tube.getWorld().spawnEntityInWorld(entity);
                        iterator.remove();
                    }
                } else {
                    iterator.remove();
                }
            } else {
                iterator.remove();
            }

        } else if (tubeStack.idleCounter > 100) {
            iterator.remove();
        }
    }
}

From source file:edu.isi.karma.controller.history.CommandHistory.java

public UpdateContainer doCommand(Command command, Workspace workspace, boolean saveToHistory)
        throws CommandException {
    UpdateContainer effects = new UpdateContainer();
    Pair<ICommand, Object> consolidatedCommand = null;
    String consolidatorName = null;
    String worksheetId = worksheetCommandHistory.getWorksheetId(command);
    List<ICommand> potentialConsolidateCommands = worksheetCommandHistory
            .getCommandsFromWorksheetIdAndCommandTag(worksheetId, command.getTagFromPriority());
    for (CommandConsolidator consolidator : consolidators) {
        consolidatedCommand = consolidator.consolidateCommand(potentialConsolidateCommands, command, workspace);
        if (consolidatedCommand != null) {
            consolidatorName = consolidator.getConsolidatorName();
            break;
        }
    }

    if (consolidatedCommand != null) {
        worksheetCommandHistory.setStale(worksheetId, true);
        if (consolidatorName.equals("PyTransformConsolidator")) {
            effects.append(consolidatedCommand.getLeft().doIt(workspace));
        }
        if (consolidatorName.equals("UnassignSemanticTypesConsolidator")) {
            worksheetCommandHistory.removeCommandFromHistory(Arrays.asList(consolidatedCommand.getLeft()));
            effects.append(command.doIt(workspace));
        }
        if (consolidatorName.equals("SemanticTypesConsolidator")) {
            worksheetCommandHistory.replaceCommandFromHistory(consolidatedCommand.getKey(),
                    (ICommand) consolidatedCommand.getRight());
            effects.append(((ICommand) consolidatedCommand.getRight()).doIt(workspace));
        }
        if (consolidatorName.equals("OrganizeColumnsConsolidator")) {
            effects.append(((ICommand) consolidatedCommand.getRight()).doIt(workspace));
            worksheetCommandHistory.replaceCommandFromHistory(consolidatedCommand.getKey(),
                    (ICommand) consolidatedCommand.getRight());
        }
    } else {
        effects.append(command.doIt(workspace));
    }
    command.setExecuted(true);

    if (command.getCommandType() != CommandType.notInHistory) {
        worksheetId = worksheetCommandHistory.getWorksheetId(command);
        worksheetCommandHistory.clearRedoCommand(worksheetId);
        worksheetCommandHistory.setCurrentCommand(command, consolidatedCommand);
        if (consolidatedCommand == null) {
            worksheetCommandHistory.insertCommandToHistory(command);
        }
        effects.add(new HistoryUpdate(this));
    }

    if (saveToHistory) {
        // Save the modeling commands
        if (!(instanceOf(command, "ResetKarmaCommand"))) {
            try {
                if (isHistoryWriteEnabled && command.isSavedInHistory()
                        && (command.hasTag(CommandTag.Modeling) || command.hasTag(CommandTag.Transformation)
                                || command.hasTag(CommandTag.Selection)
                                || command.hasTag(CommandTag.SemanticType))
                        && historySavers.get(workspace.getId()) != null) {
                    writeHistoryPerWorksheet(workspace, historySavers.get(workspace.getId()));
                }
            } catch (Exception e) {
                logger.error("Error occurred while writing history!", e);
                logger.error("Error with this command: {}, Input params: {}", command.getCommandName(),
                        command.getInputParameterJson());
            }
        }
    }

    return effects;
}

From source file:gobblin.data.management.conversion.hive.validation.ValidationJob.java

/***
 * Validate all {@link Partition}s for a {@link Table} if it was updated recently by checking if its update time
 * lies within the maxLookBackTime and skipRecentThanTime window.
 * @param hiveDataset {@link HiveDataset} containing {@link Table} and {@link Partition} info.
 * @param client {@link IMetaStoreClient} to query Hive.
 * @throws IOException Issue in validating {@link HiveDataset}
 */
private void processPartitionedTable(ConvertibleHiveDataset hiveDataset,
        AutoReturnableObject<IMetaStoreClient> client) throws IOException {

    // Get partitions for the table
    List<Partition> sourcePartitions = HiveUtils.getPartitions(client.get(), hiveDataset.getTable(),
            Optional.<String>absent());

    for (final String format : hiveDataset.getDestFormats()) {
        Optional<ConvertibleHiveDataset.ConversionConfig> conversionConfigOptional = hiveDataset
                .getConversionConfigForFormat(format);

        if (conversionConfigOptional.isPresent()) {

            // Get conversion config
            ConvertibleHiveDataset.ConversionConfig conversionConfig = conversionConfigOptional.get();
            String orcTableName = conversionConfig.getDestinationTableName();
            String orcTableDatabase = conversionConfig.getDestinationDbName();
            Pair<Optional<org.apache.hadoop.hive.metastore.api.Table>, Optional<List<Partition>>> destinationMeta = getDestinationTableMeta(
                    orcTableDatabase, orcTableName, this.props);

            // Validate each partition
            for (final Partition sourcePartition : sourcePartitions) {
                try {
                    final long updateTime = this.updateProvider.getUpdateTime(sourcePartition);
                    if (shouldValidate(sourcePartition)) {
                        log.info(String.format("Validating partition: %s", sourcePartition.getCompleteName()));

                        // Generate validation queries
                        final List<String> countValidationQueries = HiveValidationQueryGenerator
                                .generateCountValidationQueries(hiveDataset, Optional.of(sourcePartition),
                                        conversionConfig);
                        final List<String> dataValidationQueries = Lists
                                .newArrayList(HiveValidationQueryGenerator.generateDataValidationQuery(
                                        hiveDataset.getTable().getTableName(),
                                        hiveDataset.getTable().getDbName(), destinationMeta.getKey().get(),
                                        Optional.of(sourcePartition), this.isNestedORC));

                        this.futures.add(this.exec.submit(new Callable<Void>() {
                            @Override
                            public Void call() throws Exception {

                                // Execute validation queries
                                log.debug(String.format(
                                        "Going to execute count validation queries: %s for format: %s "
                                                + "and partition %s",
                                        countValidationQueries, format, sourcePartition.getCompleteName()));
                                List<Long> rowCounts = ValidationJob.this
                                        .getValidationOutputFromHive(countValidationQueries);
                                log.debug(String.format(
                                        "Going to execute data validation queries: %s for format: %s and partition %s",
                                        dataValidationQueries, format, sourcePartition.getCompleteName()));
                                List<Long> rowDataValidatedCount = ValidationJob.this
                                        .getValidationOutputFromHive(dataValidationQueries);

                                // Validate and populate report
                                validateAndPopulateReport(sourcePartition.getCompleteName(), updateTime,
                                        rowCounts, rowDataValidatedCount.get(0));

                                return null;
                            }
                        }));

                    } else {
                        log.debug(String.format(
                                "Not validating partition: %s as updateTime: %s is not in range of max look back: %s "
                                        + "and skip recent than: %s",
                                sourcePartition.getCompleteName(), updateTime, this.maxLookBackTime,
                                this.skipRecentThanTime));
                    }
                } catch (UncheckedExecutionException e) {
                    log.warn(String.format("Not validating partition: %s %s", sourcePartition.getCompleteName(),
                            e.getMessage()));
                } catch (UpdateNotFoundException e) {
                    log.warn(String.format("Not validating partition: %s as update time was not found. %s",
                            sourcePartition.getCompleteName(), e.getMessage()));
                }
            }
        } else {
            log.info(String.format("No conversion config found for format %s. Ignoring data validation",
                    format));
        }
    }
}

From source file:com.act.lcms.db.analysis.FeedingAnalysis.java

private static void performFeedingAnalysis(DB db, String lcmsDir, String searchIon, String searchMassStr,
        String plateBarcode, String strainOrConstruct, String extract, String feedingCondition,
        String outPrefix, String fmt) throws SQLException, Exception {
    Plate p = Plate.getPlateByBarcode(db, plateBarcode);
    if (p == null) {
        throw new RuntimeException(String.format("Unable to find plate with barcode %s", plateBarcode));
    }
    if (p.getContentType() != Plate.CONTENT_TYPE.FEEDING_LCMS) {
        throw new RuntimeException(String.format("Plate with barcode %s is not a feeding plate (%s)",
                plateBarcode, p.getContentType()));
    }
    List<FeedingLCMSWell> allPlateWells = FeedingLCMSWell.getInstance().getFeedingLCMSWellByPlateId(db,
            p.getId());
    if (allPlateWells == null || allPlateWells.size() == 0) {
        throw new RuntimeException(
                String.format("No feeding LCMS wells available for plate %s", p.getBarcode()));
    }

    List<FeedingLCMSWell> relevantWells = new ArrayList<>();
    for (FeedingLCMSWell well : allPlateWells) {
        if (!well.getMsid().equals(strainOrConstruct) && !well.getComposition().equals(strainOrConstruct)) {
            // Ignore wells that don't have the right strain/construct (though we assume the whole plate shares one).
            continue;
        }

        if (!well.getExtract().equals(extract)) {
            // Filter by extract.
            continue;
        }

        if (!well.getChemical().equals(feedingCondition)) {
            // Filter by fed chemical.
            continue;
        }

        relevantWells.add(well);
    }

    Collections.sort(relevantWells, new Comparator<FeedingLCMSWell>() {
        @Override
        public int compare(FeedingLCMSWell o1, FeedingLCMSWell o2) {
            // Assume concentration is never null.
            return o1.getConcentration().compareTo(o2.getConcentration());
        }
    });

    Map<FeedingLCMSWell, ScanFile> wellsToScanFiles = new HashMap<>();
    Set<String> constructs = new HashSet<>(1);
    for (FeedingLCMSWell well : relevantWells) {
        List<ScanFile> scanFiles = ScanFile.getScanFileByPlateIDRowAndColumn(db, well.getPlateId(),
                well.getPlateRow(), well.getPlateColumn());
        if (scanFiles == null || scanFiles.size() == 0) {
            System.err.format("WARNING: no scan files for well at %s %s\n", p.getBarcode(),
                    well.getCoordinatesString());
            continue;
        }
        if (scanFiles.size() > 1) {
            System.err.format("WARNING: found multiple scan files for %s %s, using first\n", p.getBarcode(),
                    well.getCoordinatesString());
        }
        while (scanFiles.size() > 0 && scanFiles.get(0).getFileType() != ScanFile.SCAN_FILE_TYPE.NC) {
            scanFiles.remove(0);
        }
        if (scanFiles.size() == 0) {
            System.err.format("WARNING: no scan files with valid format for %s %s\n", p.getBarcode(),
                    well.getCoordinatesString());
            continue;
        }
        // All of the extracted wells should be unique, so there should be no collisions here.
        wellsToScanFiles.put(well, scanFiles.get(0));
        constructs.add(well.getComposition());
    }

    Pair<String, Double> searchMass = null;
    if (searchMassStr != null) {
        searchMass = Utils.extractMassFromString(db, searchMassStr);
    }
    if (searchMass == null) {
        if (constructs.size() != 1) {
            throw new RuntimeException(String.format(
                    "Found multiple growth targets for feeding analysis when no mass specified: %s",
                    StringUtils.join(constructs, ", ")));
        }
        String constructName = constructs.iterator().next();
        CuratedChemical cc = Utils.extractTargetForConstruct(db, constructName);
        if (cc == null) {
            throw new RuntimeException(
                    String.format("Unable to find curated chemical for construct %s", constructName));
        }
        System.out.format("Using target %s of construct %s as search mass (%f)\n", cc.getName(), constructName,
                cc.getMass());
        searchMass = Pair.of(cc.getName(), cc.getMass());
    }

    MS1 c = new MS1();
    // TODO: use configurable or scan-file derived ion mode. Do it the way it's done in:
    // https://github.com/20n/act/blob/d997e84f0f44a5c88a94ef935829cb47e0ca8d1a/reachables/src/main/java/com/act/lcms/db/analysis/AnalysisHelper.java#L79
    MS1.IonMode mode = MS1.IonMode.valueOf("POS");
    Map<String, Double> metlinMasses = c.getIonMasses(searchMass.getValue(), mode);

    if (searchIon == null || searchIon.isEmpty()) {
        System.err.format("No search ion defined, defaulting to M+H\n");
        searchIon = DEFAULT_ION;
    }

    List<Pair<Double, MS1ScanForWellAndMassCharge>> rampUp = new ArrayList<>();
    for (FeedingLCMSWell well : relevantWells) {
        ScanFile scanFile = wellsToScanFiles.get(well);
        if (scanFile == null) {
            System.err.format("WARNING: no scan file available for %s %s\n", p.getBarcode(),
                    well.getCoordinatesString());
            continue;
        }
        File localScanFile = new File(lcmsDir, scanFile.getFilename());
        if (!localScanFile.exists() || !localScanFile.isFile()) {
            System.err.format("WARNING: could not find regular file at expected path: %s\n",
                    localScanFile.getAbsolutePath());
            continue;
        }
        System.out.format("Processing scan data at %s\n", localScanFile.getAbsolutePath());

        MS1ScanForWellAndMassCharge ms1ScanCache = new MS1ScanForWellAndMassCharge();
        MS1ScanForWellAndMassCharge ms1ScanResults = ms1ScanCache
                .getByPlateIdPlateRowPlateColUseSnrScanFileChemical(db, p, well, true, scanFile,
                        searchMass.getKey(), metlinMasses, localScanFile);

        Double concentration = well.getConcentration();
        rampUp.add(Pair.of(concentration, ms1ScanResults));
    }

    WriteAndPlotMS1Results plotFeedingsResults = new WriteAndPlotMS1Results();
    plotFeedingsResults.plotFeedings(rampUp, searchIon, outPrefix, fmt, outPrefix + ".gnuplot");
}

From source file:com.samsung.sjs.backend.IRCBackend.java

public CompilationUnit compile() {
    CompilationUnit ccode = new CompilationUnit();
    ccode.addStatement(new IncludeDirective("runtime.h"));
    ccode.addStatement(new IncludeDirective("ffi.h"));
    ccode.addStatement(new IncludeDirective("globals.h"));
    ccode.addStatement(new IncludeDirective("map.h"));

    if (options.eflEnabled()) {
        ccode.addStatement(new IncludeDirective("Elementary.h"));
    }

    ccode.exportString("#ifdef __cplusplus");
    ccode.exportString("extern \"C\" {");
    ccode.exportString("#endif // __cplusplus");
    if (options.isGuestRuntime()) {
        ccode.exportString("extern int __sjs_main(int);");
    }

    // Process vtables exported to other runtime modules (e.g., for console)
    for (Map.Entry<String, List<String>> table_req : ffi.getTablesToGenerate()) {
        ccode.addStatement(generateObjectMap(table_req.getKey(), table_req.getValue()));
        ccode.exportIndirectionMap(table_req.getKey());
    }

    com.samsung.sjs.backend.asts.c.CompoundStatement vtables = new com.samsung.sjs.backend.asts.c.CompoundStatement();
    ccode.addStatement(vtables);

    ccode.addStatement(new IncludeDirective("array.h"));
    BackPatchDeclarations bpd = new BackPatchDeclarations();
    ccode.addStatement(bpd);

    // insert extern declarations for FFI entities
    for (Map.Entry<String, FFILinkage.LinkEntry> extern : ffi.entrySet()) {
        Type t = toplevel.get(extern.getKey());
        if (t == null) {
            System.err.println("BAD: FFI linkage declaration for [" + extern.getKey()
                    + "], but toplevel has no type for it");
            continue; // skip this entry; t is dereferenced below
        }
        if (t.isIntersectionType()) {
            continue;
            // TODO: Implement runtime representation for intersection of multiple types
        }
        CType ct = getTypeConverter().convert(t);
        if (extern.getValue().boxed) {
            ccode.addStatement(new com.samsung.sjs.backend.asts.c.ExpressionStatement(
                    new InlineCCode("extern value_t* " + extern.getKey())));
        } else {
            ccode.addStatement(new com.samsung.sjs.backend.asts.c.ExpressionStatement(
                    new InlineCCode("extern " + ct.toSource() + " " + extern.getKey())));
        }
    }

    // extern decls for module load hooks
    for (String hook : modsys.getModuleLoadCalls()) {
        ccode.addStatement(new com.samsung.sjs.backend.asts.c.ExpressionStatement(
                new InlineCCode("extern value_t " + hook + "()")));
    }

    // This emits field #defines in the C code and header
    exportPropertyOffsets(ccode, field_codes);
    // Need to include interop *after* generating the property lookup table
    if (options.interopEnabled()) {
        ccode.addStatement(new IncludeDirective("interop.h"));
    }

    // Make space for string literal decls
    com.samsung.sjs.backend.asts.c.CompoundStatement strlits = new com.samsung.sjs.backend.asts.c.CompoundStatement();
    ccode.addStatement(strlits);
    this.string_literal_decls = strlits;

    // Rhino seems to do a lot of casting from Node to AstNode
    //for (Node n : sourcetree) {
    for (IRNode n : program) {
        CNode result = n.accept(this);
        if (debug) {
            System.err.println("Converting [" + n.toSource(0) + "]");
            System.err.println(">>> [" + result.toSource(0) + "]");
        }
        if (result instanceof FunctionDeclaration) {
            bpd.preDeclare((FunctionDeclaration) result);
        }
        ccode.addStatement((com.samsung.sjs.backend.asts.c.Statement) result);
    }
    // Now that we've observed all anonymous C types, we can backtrack to generate some typedefs

    ccode.exportString("#ifdef __cplusplus");
    ccode.exportString("} // extern C");
    ccode.exportString("#endif // __cplusplus");

    for (Map.Entry<Integer, Set<Pair<int[], Integer>>> entry : vtables_by_hash.entrySet()) {
        for (Pair<int[], Integer> vt_and_id : entry.getValue()) {
            int[] vt = vt_and_id.getKey();
            int i = vt_and_id.getValue();
            CArrayLiteral arr = new CArrayLiteral();
            for (int x = 0; x < vt.length; x++) {
                arr.addElement(new com.samsung.sjs.backend.asts.c.IntLiteral(vt[x]));
            }
            // TODO: Refactor so we're not doing this hideous "int <name>[n] = " gen here
            com.samsung.sjs.backend.asts.c.VariableDeclaration vd =
                    //new com.samsung.sjs.backend.asts.c.VariableDeclaration(false, new VTablePseudoType());
                    new com.samsung.sjs.backend.asts.c.VariableDeclaration(false, new CInteger());
            vd.addVariable(new Variable("__vtable_id_" + i + "[]"), arr);
            vtables.addStatement(vd);
        }
    }

    for (String s : tts.getForwardDecls()) {
        strlits.addExpressionStatement(new InlineCCode(s));
    }
    for (String s : tts.getPropArrayDecls()) {
        strlits.addExpressionStatement(new InlineCCode(s));
    }
    for (String s : tts.getFieldArrayDecls()) {
        strlits.addExpressionStatement(new InlineCCode(s));
    }
    for (String s : tts.getArgArrayDecls()) {
        strlits.addExpressionStatement(new InlineCCode(s));
    }
    for (String s : tts.getCodeDecls()) {
        strlits.addExpressionStatement(new InlineCCode(s));
    }
    for (String s : tts.getTagDecls()) {
        strlits.addExpressionStatement(new InlineCCode(s));
    }

    return ccode;
}

From source file:de.tudarmstadt.tk.statistics.report.ReportGenerator.java

public String createPlainReport() {
    // Set locale to English globally to make reports independent of the
    // machine they're created on, e.g. use "." as decimal points on any
    // machine
    Locale.setDefault(Locale.ENGLISH);

    StringBuilder report = new StringBuilder();

    //
    // Evaluation Overview
    //
    report.append("###\n");
    report.append("Evaluation Overview\n");
    report.append("###\n\n");

    int nModels = evalResults.getSampleData().getModelMetadata().size();
    ArrayList<String> measures = evalResults.getMeasures();
    String ref = "tbl:models";

    // Separate training/testing datasets
    List<String> trainingDataList = new ArrayList<String>();
    List<String> testingDataList = new ArrayList<String>();
    List<Pair<String, String>> datasets = evalResults.getSampleData().getDatasetNames();
    Iterator<Pair<String, String>> itp = datasets.iterator();
    while (itp.hasNext()) {
        Pair<String, String> trainTest = itp.next();
        trainingDataList.add(trainTest.getKey());
        if (trainTest.getValue() != null) {
            testingDataList.add(trainTest.getValue());
        }
    }
    Set<String> trainingDataSet = new HashSet<String>(trainingDataList);
    Set<String> testingDataSet = new HashSet<String>(testingDataList);

    String pipelineDescription = null;
    String sampleOrigin = "per CV";

    ReportTypes pipelineType = this.evalResults.getSampleData().getPipelineType();
    switch (pipelineType) {
    // One-domain n-fold CV (ReportData=per Fold)
    case CV:
        pipelineDescription = String.format("%d-fold cross validation",
                evalResults.getSampleData().getnFolds());
        sampleOrigin = "per fold ";
        break;
    case MULTIPLE_CV:
        pipelineDescription = String.format("%dx%s repeated cross validation",
                evalResults.getSampleData().getnRepetitions(), evalResults.getSampleData().getnFolds());
        break;
    case CV_DATASET_LVL:
        pipelineDescription = String.format("%d-fold cross validation over %d datasets",
                evalResults.getSampleData().getnFolds(), trainingDataSet.size());
        break;
    case MULTIPLE_CV_DATASET_LVL:
        pipelineDescription = String.format("%dx%s repeated cross validation over %d datasets",
                evalResults.getSampleData().getnRepetitions(), evalResults.getSampleData().getnFolds(),
                trainingDataSet.size());
        sampleOrigin = "per dataset";
        break;
    case TRAIN_TEST_DATASET_LVL:
        // In the train/test scenario, the number of datasets only includes
        // distinct ones
        Set<String> allDataSets = new HashSet<String>(testingDataSet);
        allDataSets.addAll(trainingDataSet);
        pipelineDescription = String.format("Train/Test over %d datasets", allDataSets.size());
        sampleOrigin = "per dataset";
        break;
    default:
        pipelineDescription = "!unknown pipeline type!";
        sampleOrigin = "!unknown pipeline type!";
        break;
    }

    boolean isBaselineEvaluation = evalResults.isBaselineEvaluation();
    report.append(String.format("The system performed a %s for the following %d models. \n",
            pipelineDescription, nModels));
    if (isBaselineEvaluation) {
        report.append("The models were compared against the first baseline model. \n");
    } else {
        report.append("The models were compared against each other. \n");
    }

    ArrayList<Pair<String, String>> modelMetadata = evalResults.getSampleData().getModelMetadata();
    for (int modelIndex = 0; modelIndex < modelMetadata.size(); modelIndex++) {
        String[] algorithm = modelMetadata.get(modelIndex).getKey().split("\\.");
        String modelAlgorithm = algorithm[algorithm.length - 1];
        String modelFeatureSet = modelMetadata.get(modelIndex).getValue();
        report.append(String.format("M%d: %s; %s\n", modelIndex, modelAlgorithm, modelFeatureSet));
    }

    // List test/training datasets. Consider the case when these sets are
    // different.
    if (testingDataSet.isEmpty()) {
        if (trainingDataSet.size() == 1) {
            report.append(
                    String.format("\nThe models were evaluated on the dataset %s. ", trainingDataList.get(0)));
        } else {
            report.append(String.format("\nThe models were evaluated on the datasets %s. ",
                    this.createEnumeration(trainingDataList)));
        }
    } else {
        if (trainingDataSet.size() == 1 && testingDataSet.size() == 1) {
            report.append(
                    String.format("\nThe models were trained on the dataset %s and tested on the dataset %s. ",
                            trainingDataList.get(0), testingDataList.get(0)));
        } else if (trainingDataSet.size() > 1 && testingDataSet.size() == 1) {
            report.append(String.format(
                    "\nThe models were trained on the datasets %s and tested on the dataset %s. ",
                    this.createEnumeration(new ArrayList<String>(trainingDataSet)), testingDataList.get(0)));
        } else if (trainingDataSet.size() == 1 && testingDataSet.size() > 1) {
            report.append(String.format(
                    "\nThe models were trained on the dataset %s and tested on the datasets %s. ",
                    trainingDataList.get(0), this.createEnumeration(new ArrayList<String>(testingDataSet))));
        } else {
            report.append(String.format(
                    "\nThe models were trained on the datasets %s and tested on the datasets %s. ",
                    this.createEnumeration(new ArrayList<String>(trainingDataSet)),
                    this.createEnumeration(new ArrayList<String>(testingDataSet))));
        }
    }
    report.append(String.format("Their performance was assessed with the %s. ", createEnumeration(measures)));

    //
    // Results (for each measure separately)
    //
    report.append("\n\n###\n"); // All previous floats must be placed before
    // this point
    report.append("Results\n");
    report.append("###");

    for (int i = 0; i < measures.size(); i++) {

        // Continue for McNemar contingency matrix
        String measure = measures.get(i);
        if (!evalResults.getSampleData().getSamples().containsKey(measure)) {
            continue;
        }

        // Samples
        report.append("\n\n#\n");
        report.append(String.format("Evaluation for %s. \n", measure));
        report.append("#\n\n");

        report.append("Samples: \n");

        ArrayList<ArrayList<Double>> models = evalResults.getSampleData().getSamples().get(measure);
        for (int modelId = 0; modelId < models.size(); modelId++) {
            ArrayList<Double> samples = models.get(modelId);
            report.append(String.format("C%d: ", modelId));
            for (int j = 0; j < samples.size(); j++) {
                report.append(String.format("%.3f;", samples.get(j)));
            }
            report.append("\n");
        }
        report.append("\n");

        // Test results
        for (String testType : new String[] { "Parametric", "Non-Parametric" }) {
            report.append(String.format("%s Testing\n", testType));

            Pair<String, AbstractTestResult> result = null;
            if (testType.equals("Parametric")) {
                result = evalResults.getParametricTestResults().get(measure);
            } else {
                result = evalResults.getNonParametricTestResults().get(measure);
            }

            // Use pretty-print method descriptor if specified
            String method = result.getKey();
            if (StatsConfigConstants.PRETTY_PRINT_METHODS.containsKey(method)) {
                method = StatsConfigConstants.PRETTY_PRINT_METHODS.get(method);
            }

            TestResult r = (TestResult) result.getValue();
            report.append(String.format("The system compared the %d models using the %s. ", nModels, method));

            if (r != null && !Double.isNaN(r.getpValue())) {

                // A priori test: assumptions
                boolean assumptionViolated = false;
                Iterator<String> it = r.getAssumptions().keySet().iterator();
                while (it.hasNext()) {
                    String assumption = it.next();
                    TestResult at = (TestResult) r.getAssumptions().get(assumption);
                    if (at == null) {
                        report.append(String.format("Testing for %s failed. ", assumption));
                        assumptionViolated = true;
                        continue;
                    }
                    if (Double.isNaN(at.getpValue())) {
                        report.append(
                                String.format("Testing for %s using %s failed. ", assumption, at.getMethod()));
                        assumptionViolated = true;
                        continue;
                    }
                    double ap = at.getpValue();

                    if (ap <= this.significance_low) {
                        assumptionViolated = true;
                    }

                    // Verbalize result according to p value
                    Pair<String, Double> verbalizedP = verbalizeP(ap, true);

                    report.append(String.format("%s %s violation of %s (p=%f, alpha=%f). ", at.getMethod(),
                            verbalizedP.getKey(), assumption, ap, verbalizedP.getValue()));

                }

                if (assumptionViolated) {
                    report.append(
                            "Given that the assumptions are violated, the following test may be corrupted. ");
                }

                // A Priori test results
                Pair<String, Double> verbalizedP = verbalizeP(r.getpValue(), false);
                report.append(String.format(
                        "The %s %s differences between the performances of the models (p=%f, alpha=%f).\n\n",
                        method, verbalizedP.getKey(), r.getpValue(), verbalizedP.getValue()));

                // Post-hoc test for >2 models (pairwise comparisons)
                if (evalResults.getSampleData().getModelMetadata().size() > 2) {

                    Pair<String, AbstractTestResult> postHocResult = null;
                    HashMap<Integer, TreeSet<Integer>> postHocOrdering = null;
                    if (testType.equals("Parametric")) {
                        postHocResult = evalResults.getParametricPostHocTestResults().get(measure);
                        postHocOrdering = evalResults.getParameticPostHocOrdering().get(measure);
                    } else {
                        postHocResult = evalResults.getNonParametricPostHocTestResults().get(measure);
                        postHocOrdering = evalResults.getNonParameticPostHocOrdering().get(measure);
                    }
                    method = postHocResult.getKey();
                    if (StatsConfigConstants.PRETTY_PRINT_METHODS.containsKey(method)) {
                        method = StatsConfigConstants.PRETTY_PRINT_METHODS.get(method);
                    }

                    PairwiseTestResult rPostHoc = (PairwiseTestResult) postHocResult.getValue();
                    report.append(String.format("The system performed the %s post-hoc. ", method));

                    if (rPostHoc == null) {
                        report.append("The test failed. ");
                        continue;
                    }

                    // Assumptions
                    boolean assumptionsViolated = false;
                    it = rPostHoc.getAssumptions().keySet().iterator();
                    while (it.hasNext()) {
                        String assumption = it.next();
                        PairwiseTestResult at = (PairwiseTestResult) rPostHoc.getAssumptions().get(assumption);
                        if (at == null) {
                            report.append(String.format("Testing for %s failed. ", assumption));
                            assumptionsViolated = true;
                            continue;
                        }

                        report.append(String.format("\nTesting for %s using %s returned p-values:\n%s",
                                assumption, at.getMethod(), this.pairwiseResultsToString(at.getpValue())));

                        // Create table with pairwise p-values for
                        // assumption testing
                        double[][] ap = at.getpValue();
                        double max = getMax(ap);
                        double min = getMin(ap);
                        verbalizedP = verbalizeP(min, true);
                        if ((max > significance_low && min <= significance_low)
                                || (max > significance_medium && min <= significance_medium)
                                || (max > significance_high && min <= significance_high)) {
                            // partly significant to degree as specified by
                            // verbalized p-value
                            report.append(String.format("%s partly %s violation of %s (alpha=%.2f).\n",
                                    at.getMethod(), verbalizedP.getKey(), assumption, verbalizedP.getValue()));
                        } else {
                            report.append(String.format("%s %s violation of %s (alpha=%.2f).\n", at.getMethod(),
                                    verbalizedP.getKey(), assumption, verbalizedP.getValue()));
                        }

                        if (min <= this.significance_low) {
                            assumptionsViolated = true;
                        }

                    }

                    if (assumptionsViolated) {
                        report.append(
                                "Given that the assumptions are violated, the following test may be corrupted. ");
                    }

                    // Result
                    double[][] ap = rPostHoc.getpValue();
                    report.append(
                            String.format("P-values:\n%s", this.pairwiseResultsToString(rPostHoc.getpValue())));

                    // Already fetch pairwise adjustments here in order to
                    // determine choice of words
                    double max = getMax(ap);
                    double min = getMin(ap);
                    verbalizedP = verbalizeP(min, false);
                    ArrayList<StatsConfigConstants.CORRECTION_VALUES> adjustments = new ArrayList<StatsConfigConstants.CORRECTION_VALUES>(
                            rPostHoc.getpValueCorrections().keySet());
                    String adjustWord = "";
                    if (adjustments.size() > 0) {
                        adjustWord = " for non-adjusted p-values";
                    }
                    if ((max > significance_low && min <= significance_low)
                            || (max > significance_medium && min <= significance_medium)
                            || (max > significance_high && min <= significance_high)) {
                        // partly significant to degree as specified by
                        // verbalized p-value
                        report.append(String.format(
                                "The %s partly %s differences between the performances of the models%s ($\\alpha=%.2f$, Tbl. \\ref{%s}). ",
                                method, verbalizedP.getKey(), adjustWord, verbalizedP.getValue(), ref));
                    } else {
                        report.append(String.format(
                                "The %s %s differences between the performances of the models%s ($\\alpha=%.2f$, Tbl. \\ref{%s}). ",
                                method, verbalizedP.getKey(), adjustWord, verbalizedP.getValue(), ref));
                    }

                    // Determine ordering of models
                    String ordering = getModelOrderingRepresentation(postHocOrdering);
                    report.append(ordering);
                    report.append("\n\n");

                    // Pairwise adjustments
                    if (adjustments.size() > 0) {
                        double[] minAdjustments = new double[adjustments.size()];
                        double[] maxAdjustments = new double[adjustments.size()];
                        for (int j = 0; j < adjustments.size(); j++) {
                            StatsConfigConstants.CORRECTION_VALUES adjustmentMethod = adjustments.get(j);
                            double[][] correctedP = rPostHoc.getpValueCorrections().get(adjustmentMethod);
                            String am = adjustmentMethod.name();
                            if (StatsConfigConstants.PRETTY_PRINT_METHODS.containsKey(am)) {
                                am = StatsConfigConstants.PRETTY_PRINT_METHODS.get(am);
                            }
                            report.append(String.format("\nAdjusted p-values according to %s:\n%s", am,
                                    this.pairwiseResultsToString(correctedP)));

                            minAdjustments[j] = getMin(correctedP);
                            maxAdjustments[j] = getMax(correctedP);
                        }

                        min = getMin(minAdjustments);
                        max = getMax(maxAdjustments);
                        verbalizedP = verbalizeP(min, false);

                        if ((max > significance_low && min <= significance_low)
                                || (max > significance_medium && min <= significance_medium)
                                || (max > significance_high && min <= significance_high)) {
                            // partly significant to degree as specified by
                            // verbalized p-value
                            report.append(String.format(
                                    "It partly %s differences for adjusted p-values (alpha=%.2f).\n\n ",
                                    verbalizedP.getKey(), verbalizedP.getValue()));
                        } else {
                            report.append(
                                    String.format("It %s differences for adjusted p-values (alpha=%.2f).\n\n ",
                                            verbalizedP.getKey(), verbalizedP.getValue()));
                        }
                    }
                }
            } else {
                report.append(String.format("The %s failed.", method));
            }
        }
    }

    //
    // Contingency table and McNemar results if this test was performed
    //
    if (evalResults.getNonParametricTest().equals("McNemar")) {
        String measure = "Contingency Table";
        String testType = "Non-Parametric";
        report.append("\n\n#\n");
        report.append("Evaluation for Contingency Table\n");
        report.append("#\n\n");

        int[][] contingencyMatrix = evalResults.getSampleData().getContingencyMatrix();
        if (evalResults.getSampleData().getPipelineType() == ReportTypes.MULTIPLE_CV) {
            report.append(String.format(
                    "Contingency table drawn from the %s and the %d models. The correctly and incorrectly classified instances per fold were averaged over all repetitions:\n%s\n",
                    pipelineDescription, nModels, this.contingencyMatrixToString(contingencyMatrix)));
        } else {
            report.append(String.format("Contingency table drawn from the %s and the %d models:\n%s\n",
                    pipelineDescription, nModels, this.contingencyMatrixToString(contingencyMatrix)));
        }

        // Test results
        report.append(String.format("%s Testing\n", testType));
        report.append(String.format("The system compared the %d models using the McNemar test. ", nModels));
        Pair<String, AbstractTestResult> result = evalResults.getNonParametricTestResults().get(measure);

        TestResult r = (TestResult) result.getValue();
        if (r != null && !Double.isNaN(r.getpValue())) {
            StringBuilder parameters = new StringBuilder();
            Iterator<String> it = r.getParameter().keySet().iterator();
            while (it.hasNext()) {
                String parameter = it.next();
                double value = r.getParameter().get(parameter);
                parameters.append(String.format("%s=%.3f, ", parameter, value));
            }

            // Verbalize result according to p value
            Pair<String, Double> verbalizedP = verbalizeP(r.getpValue(), false);
            report.append(String.format(
                    "The test %s differences between the performances of the models (%sp=%.3f, alpha=%.2f).\\\\ \n",
                    verbalizedP.getKey(), parameters.toString(), r.getpValue(), verbalizedP.getValue()));

        } else {
            report.append("The test failed.\n");
        }
    }

    return report.toString();
}

From source file:com.evolveum.midpoint.schema.util.WfContextUtil.java

@NotNull
private static TriggerType createTrigger(XMLGregorianCalendar triggerTime, WorkItemActionsType actions,
        Pair<Duration, AbstractWorkItemActionType> notifyInfo, PrismContext prismContext,
        @Nullable String workItemId, @NotNull String handlerUri) throws SchemaException {
    TriggerType trigger = new TriggerType(prismContext);
    trigger.setTimestamp(triggerTime);
    trigger.setHandlerUri(handlerUri);
    ExtensionType extension = new ExtensionType(prismContext);
    trigger.setExtension(extension);

    SchemaRegistry schemaRegistry = prismContext.getSchemaRegistry();
    if (workItemId != null) {
        // work item id
        @SuppressWarnings("unchecked")
        @NotNull
        PrismPropertyDefinition<String> workItemIdDef = prismContext.getSchemaRegistry()
                .findPropertyDefinitionByElementName(SchemaConstants.MODEL_EXTENSION_WORK_ITEM_ID);
        PrismProperty<String> workItemIdProp = workItemIdDef.instantiate();
        workItemIdProp.addRealValue(workItemId);
        trigger.getExtension().asPrismContainerValue().add(workItemIdProp);
    }
    // actions
    if (actions != null) {
        @NotNull
        PrismContainerDefinition<WorkItemActionsType> workItemActionsDef = schemaRegistry
                .findContainerDefinitionByElementName(SchemaConstants.MODEL_EXTENSION_WORK_ITEM_ACTIONS);
        PrismContainer<WorkItemActionsType> workItemActionsCont = workItemActionsDef.instantiate();
        workItemActionsCont.add(actions.asPrismContainerValue().clone());
        extension.asPrismContainerValue().add(workItemActionsCont);
    }
    // time before + action
    if (notifyInfo != null) {
        @NotNull
        PrismContainerDefinition<AbstractWorkItemActionType> workItemActionDef = schemaRegistry
                .findContainerDefinitionByElementName(SchemaConstants.MODEL_EXTENSION_WORK_ITEM_ACTION);
        PrismContainer<AbstractWorkItemActionType> workItemActionCont = workItemActionDef.instantiate();
        workItemActionCont.add(notifyInfo.getValue().asPrismContainerValue().clone());
        extension.asPrismContainerValue().add(workItemActionCont);
        @SuppressWarnings("unchecked")
        @NotNull
        PrismPropertyDefinition<Duration> timeBeforeActionDef = schemaRegistry
                .findPropertyDefinitionByElementName(SchemaConstants.MODEL_EXTENSION_TIME_BEFORE_ACTION);
        PrismProperty<Duration> timeBeforeActionProp = timeBeforeActionDef.instantiate();
        timeBeforeActionProp.addRealValue(notifyInfo.getKey());
        extension.asPrismContainerValue().add(timeBeforeActionProp);
    }
    return trigger;
}

From source file:de.tudarmstadt.tk.statistics.report.ReportGenerator.java

/**
 * Creates a report of the statistical evaluation in the Latex-format
 *
 * @param outputFolder
 *            the folder to which the report and any related images will
 *            later be written; the evaluation results are taken from this
 *            generator's {@link EvaluationResults} field
 * @return A String representing the report of the statistical evaluation in
 *         Latex-format
 */
public String createLatexReport(File outputFolder) {
    // Set locale to English globally to make reports independent of the
    // machine they're created on, e.g. use "." as decimal points on any
    // machine
    Locale.setDefault(Locale.ENGLISH);
    StringBuilder report = new StringBuilder();
    Statistics stats = Statistics.getInstance(true);
    HashMap<String, String> methodsSummary = new HashMap<String, String>();
    HashMap<String, HashMap<String, List<String>>> testSummary = new HashMap<String, HashMap<String, List<String>>>();
    ArrayList<String[]> figures = new ArrayList<String[]>();
    testSummary.put("Parametric", new HashMap<String, List<String>>());
    testSummary.put("Non-Parametric", new HashMap<String, List<String>>());
    String outputFolderPath = "";
    if (outputFolder != null) {
        outputFolderPath = outputFolder.getAbsolutePath();
    }

    //
    // Header
    //
    // Packages
    report.append("\\documentclass[a4paper,12pt]{article}\n");
    report.append("\\usepackage[english]{babel}\n");
    report.append("\\usepackage[utf8]{inputenc}\n");
    report.append("\\usepackage{graphicx}\n");
    report.append("\\usepackage{titlesec}\n");
    report.append("\\usepackage{caption}\n");
    report.append("\\usepackage{subcaption}\n");
    report.append("\\usepackage{adjustbox}\n");
    report.append("\\usepackage{placeins}\n");
    report.append("\\usepackage{longtable}\n");
    report.append("\\usepackage{morefloats}\n");
    // Title definition
    report.append("\\titleformat*{\\section}{\\large\\bfseries}\n");
    report.append("\\titleformat*{\\subsection}{\\normalsize\\bfseries}\n");
    report.append("\\titleformat*{\\subsubsection}{\\vspace{-0.3cm}\\normalsize\\bfseries}\n");
    report.append("\\title{Statistical Evaluation Report}\n");
    report.append("\\date{\\vspace{-10ex}}\n");
    report.append("\\begin{document}\n");
    report.append("\\maketitle\n");

    //
    // Evaluation Overview
    //
    report.append("\\section{Evaluation Overview}");

    int nModels = evalResults.getSampleData().getModelMetadata().size();
    ArrayList<String> measures = evalResults.getMeasures();
    int nSamples = evalResults.getSampleData().getSamples().get(measures.get(0)).get(0).size();
    String ref = "tbl:models";

    // Separate training/testing datasets
    List<String> trainingDataList = new ArrayList<String>();
    List<String> testingDataList = new ArrayList<String>();
    List<Pair<String, String>> datasets = evalResults.getSampleData().getDatasetNames();
    Iterator<Pair<String, String>> itp = datasets.iterator();
    while (itp.hasNext()) {
        Pair<String, String> trainTest = itp.next();
        trainingDataList.add(trainTest.getKey());
        if (trainTest.getValue() != null) {
            testingDataList.add(trainTest.getValue());
        }
    }
    Set<String> trainingDataSet = new HashSet<String>(trainingDataList);
    Set<String> testingDataSet = new HashSet<String>(testingDataList);

    String pipelineDescription = null;
    String sampleOrigin = "per CV";

    ReportTypes pipelineType = this.evalResults.getSampleData().getPipelineType();
    switch (pipelineType) {
    // One-domain n-fold CV (ReportData=per Fold)
    case CV:
        pipelineDescription = String.format("%d-fold cross validation",
                evalResults.getSampleData().getnFolds());
        sampleOrigin = "per fold ";
        break;
    case MULTIPLE_CV:
        pipelineDescription = String.format("%dx%s repeated cross validation",
                evalResults.getSampleData().getnRepetitions(), evalResults.getSampleData().getnFolds());
        break;
    case CV_DATASET_LVL:
        pipelineDescription = String.format("%d-fold cross validation over %d datasets",
                evalResults.getSampleData().getnFolds(), trainingDataSet.size());
        break;
    case MULTIPLE_CV_DATASET_LVL:
        pipelineDescription = String.format("%dx%s repeated cross validation over %d datasets",
                evalResults.getSampleData().getnRepetitions(), evalResults.getSampleData().getnFolds(),
                trainingDataSet.size());
        sampleOrigin = "per dataset";
        break;
    case TRAIN_TEST_DATASET_LVL:
        // In the train/test scenario, the number of datasets only includes
        // distinct ones
        Set<String> allDataSets = new HashSet<String>(testingDataSet);
        allDataSets.addAll(trainingDataSet);
        pipelineDescription = String.format("Train/Test over %d datasets", allDataSets.size());
        sampleOrigin = "per dataset";
        break;
    default:
        pipelineDescription = "!unknown pipeline type!";
        sampleOrigin = "!unknown pipeline type!";
        break;
    }

    boolean isBaselineEvaluation = evalResults.isBaselineEvaluation();
    report.append(String.format("The system performed a %s for the %d models in Tbl \\ref{%s}. ",
            pipelineDescription, nModels, ref));
    if (isBaselineEvaluation) {
        report.append("The models were compared against the first baseline model. \n");
    } else {
        report.append("The models were compared against each other. \n");
    }

    String[][] values = new String[nModels][3];
    for (int r = 0; r < nModels; r++) {
        values[r][0] = String.format("M%d", r);
        // Remove package prefix for algorithms, e.g. shorten "trees.J48" to "J48".
        String[] algorithm = evalResults.getSampleData().getModelMetadata().get(r).getKey().split("\\.");
        values[r][1] = escapeLatexCharacters(algorithm[algorithm.length - 1]);
        values[r][2] = escapeLatexCharacters(evalResults.getSampleData().getModelMetadata().get(r).getValue());
    }

    String table = createLatexTable("Evaluated models with classifier algorithm and feature sets", ref,
            new String[] { "Index", "Algorithm", "Feature Set" }, "|l|l|p{11cm}|", values);
    report.append(table);

    // List test/training datasets. Consider the case when these sets are
    // different.
    if (testingDataSet.isEmpty()) {
        if (trainingDataSet.size() == 1) {
            report.append(
                    String.format("The models were evaluated on the dataset %s. ", trainingDataList.get(0)));
        } else {
            report.append(String.format("The models were evaluated on the datasets %s. ",
                    this.createEnumeration(trainingDataList)));
        }
    } else {
        if (trainingDataSet.size() == 1 && testingDataSet.size() == 1) {
            report.append(
                    String.format("The models were trained on the dataset %s and tested on the dataset %s. ",
                            trainingDataList.get(0), testingDataList.get(0)));
        } else if (trainingDataSet.size() > 1 && testingDataSet.size() == 1) {
            report.append(String.format(
                    "The models were trained on the datasets %s and tested on the dataset %s. ",
                    this.createEnumeration(new ArrayList<String>(trainingDataSet)), testingDataList.get(0)));
        } else if (trainingDataSet.size() == 1 && testingDataSet.size() > 1) {
            report.append(String.format(
                    "The models were trained on the dataset %s and tested on the datasets %s. ",
                    trainingDataList.get(0), this.createEnumeration(new ArrayList<String>(testingDataSet))));
        } else {
            report.append(
                    String.format("The models were trained on the datasets %s and tested on the datasets %s. ",
                            this.createEnumeration(new ArrayList<String>(trainingDataSet)),
                            this.createEnumeration(new ArrayList<String>(testingDataSet))));
        }
    }
    report.append(String.format("Their performance was assessed with the %s", createEnumeration(measures)));
    report.append(
            ". In the analysis, the models thus represent levels of the independent variable, while the performance measures are dependent variables.\n");

    //
    // Results (for each measure separately)
    //
    report.append("\\FloatBarrier\n"); // All previous floats must be placed
    // before this point
    report.append("\\section{Results}\n");
    report.append(String.format(
            "Throughout the report, p-values are annotated if they are significant. While {\\footnotesize *} indicates low significance ($p<\\alpha=%.2f$), the annotations {\\footnotesize **} and {\\footnotesize ***} represent medium ($p<\\alpha=%.2f$) and high significance ($p<\\alpha=%.2f$).",
            significance_low, significance_medium, significance_high));

    for (int i = 0; i < measures.size(); i++) {
        /*
         * Create table with samples for the current performance measure. If
         * samples are drawn over multiple datasets, transpose the table.
         */
        String measure = measures.get(i);
        if (!evalResults.getSampleData().getSamples().containsKey(measure)) {
            continue;
        }
        ArrayList<ArrayList<Double>> measureSamples = evalResults.getSampleData().getSamples().get(measure);
        ArrayList<Double> averageMeasureSamples = evalResults.getSampleData().getSamplesAverage().get(measure);

        report.append("\\FloatBarrier\n");
        report.append(String.format("\\subsection{%s}\n", measure));
        ref = String.format("tbl:%s", measure.replaceAll("\\s", ""));
        report.append(String.format(
                "The %s samples drawn from the %s and the %d models are presented in Tbl. \\ref{%s}.\n",
                measure, pipelineDescription, nModels, ref));

        // Plot Box-Whisker-Diagram of samples for the current measure and add the figure to the appendix
        // Use the min/max sample value as indicators for the box-plots limits
        String filename = String.format("boxPlot%s", measure.replaceAll("\\s", ""));
        String path = String.format("%s%s%s", outputFolderPath, File.separator, filename);
        String pathR = this.fixSlashes(path);
        String figRef = String.format("fig:boxPlot%s", measure.replaceAll("\\s", ""));
        String caption = String.format("Box-Whisker-Plot of %s samples. Red dots indicate means.", measure);
        double[][] samples = new double[nModels][];
        double minSample = Double.MAX_VALUE;
        // Double.MIN_VALUE is the smallest positive double and would never be
        // replaced by negative samples, so seed the maximum with negative infinity.
        double maxSample = Double.NEGATIVE_INFINITY;
        for (int k = 0; k < nModels; k++) {
            ArrayList<Double> s = measureSamples.get(k);
            samples[k] = new double[s.size()];
            for (int j = 0; j < s.size(); j++) {
                samples[k][j] = s.get(j);
                if (minSample > s.get(j)) {
                    minSample = s.get(j);
                }
                if (maxSample < s.get(j)) {
                    maxSample = s.get(j);
                }
            }
        }
        double sampleRange = maxSample - minSample;
        int lowerLimit = (int) Math.floor(minSample - sampleRange * 0.1);
        int upperLimit = (int) Math.ceil(maxSample + sampleRange * 0.1);
        boolean successful = stats.plotBoxWhisker(samples, lowerLimit, upperLimit, pathR, measure);
        if (successful) {
            figures.add(new String[] { figRef, caption, filename });
            report.append(
                    String.format("See Fig. \\ref{%s} for a Box-Whisker plot of these samples. ", figRef));
        }

        caption = String.format("Samples of the %s drawn from the %s and the %d models", measure,
                pipelineDescription, nModels);
        switch (pipelineType) {
        case CV:
        case MULTIPLE_CV:
            values = new String[nModels + 1][nSamples + 2];
            for (int r = 0; r <= nModels; r++) {
                // First line of table = Fold indices
                if (r == 0) {
                    values[r][0] = "";
                    values[r][nSamples + 1] = "";
                    for (int f = 1; f <= nSamples; f++) {
                        values[r][f] = Integer.toString(f);
                    }
                    // Next lines with model indices, samples per fold and
                    // average measure over all samples
                } else {
                    values[r][0] = String.format("M%d", (r - 1));
                    //values[r][nSamples + 1] = String.format("%.2f", averageMeasureSamples.get(r - 1) * 100);
                    values[r][nSamples + 1] = String.format("%.2f", averageMeasureSamples.get(r - 1));
                    ArrayList<Double> s = measureSamples.get(r - 1);
                    for (int j = 0; j < s.size(); j++) {
                        //values[r][j + 1] = String.format("%.2f", s.get(j) * 100);
                        values[r][j + 1] = String.format("%.2f", s.get(j));
                    }
                }
            }
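            // Heuristic page-length check: beyond 58 rows the table presumably no
            // longer fits on a single page, so a page-breaking longtable is used.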
            if (values.length > 58) {
                table = createLatexLongTable(caption, ref,
                        new String[] { "Classifier",
                                String.format("\\multicolumn{%d}{|c|}{%s %s}", nSamples, measure, sampleOrigin),
                                "Average" },
                        String.format("|%s", StringUtils.repeat("l|", nSamples + 2)), values);
            } else {
                table = createLatexTable(caption, ref,
                        new String[] { "Classifier",
                                String.format("\\multicolumn{%d}{|c|}{%s %s}", nSamples, measure, sampleOrigin),
                                "Average" },
                        String.format("|%s", StringUtils.repeat("l|", nSamples + 2)), values);
            }
            break;

        case CV_DATASET_LVL:
        case MULTIPLE_CV_DATASET_LVL:
        case TRAIN_TEST_DATASET_LVL:
            values = new String[nSamples + 2][nModels + 1];
            // double[][] valuesNumeric = new double[nSamples][nModels];
            for (int r = 0; r <= nSamples + 1; r++) {
                // First line of table = Model indices
                if (r == 0) {
                    values[r][0] = "";
                    for (int j = 0; j < nModels; j++) {
                        values[r][j + 1] = String.format("M%d", (j));
                    }
                    // Last line of table = average sums
                } else if (r == nSamples + 1) {
                    values[r][0] = "Average";
                    for (int j = 0; j < nModels; j++) {
                        //values[r][j + 1] = String.format("%.2f", averageMeasureSamples.get(j) * 100);
                        values[r][j + 1] = String.format("%.2f", averageMeasureSamples.get(j));
                    }
                    // Next lines with model indices, samples per fold and
                    // average measure over all samples
                } else {
                    // Only print both train- and test set if there is more
                    // than one training set
                    Pair<String, String> trainTest = evalResults.getSampleData().getDatasetNames().get(r - 1);
                    if (pipelineType == ReportTypes.TRAIN_TEST_DATASET_LVL) {
                        if (trainingDataSet.size() > 1) {
                            values[r][0] = String.format("%s-%s", trainTest.getKey(), trainTest.getValue());
                        } else {
                            values[r][0] = trainTest.getValue();
                        }
                    } else {
                        values[r][0] = trainTest.getKey();
                    }
                    for (int j = 0; j < nModels; j++) {
                        ArrayList<Double> s = measureSamples.get(j);
                        //values[r][j + 1] = String.format("%.2f", s.get(r - 1) * 100);
                        values[r][j + 1] = String.format("%.2f", s.get(r - 1));
                    }
                }
            }
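            // Same page-length heuristic as above: beyond 58 rows, fall back to a longtable.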
            if (values.length > 58) {
                table = createLatexLongTable(caption, ref,
                        new String[] { "Dataset",
                                String.format("\\multicolumn{%d}{|c|}{%s %s}", nModels, measure,
                                        sampleOrigin) },
                        String.format("|%s", StringUtils.repeat("l|", nModels + 1)), values);
            } else {
                table = createLatexTable(caption, ref,
                        new String[] { "Dataset",
                                String.format("\\multicolumn{%d}{|c|}{%s %s}", nModels, measure,
                                        sampleOrigin) },
                        String.format("|%s", StringUtils.repeat("l|", nModels + 1)), values);
            }
            break;
        }
        report.append(table);

        //
        // Results - first parametric tests, then non-parametric (two iterations).
        // Print results for all non-parametric tests except McNemar. McNemar is not
        // based on the same performance measures but on a contingency matrix, which
        // is printed in a separate section.
        for (String testType : new String[] { "Parametric", "Non-Parametric" }) {
            report.append(String.format("\\subsubsection{%s Testing}", testType));

            Pair<String, AbstractTestResult> result = null;
            if (testType.equals("Parametric")) {
                result = evalResults.getParametricTestResults().get(measure);
            } else {
                result = evalResults.getNonParametricTestResults().get(measure);
            }

            // Use pretty-print method descriptor if specified
            String method = result.getKey();
            if (StatsConfigConstants.PRETTY_PRINT_METHODS.containsKey(method)) {
                method = StatsConfigConstants.PRETTY_PRINT_METHODS.get(method);
            }
            methodsSummary.put(testType, method);

            TestResult r = (TestResult) result.getValue();
            report.append(
                    String.format("The system compared the %d models using the \\emph{%s}. ", nModels, method));

            if (r != null && !Double.isNaN(r.getpValue())) {

                // A priori test: assumptions
                boolean assumptionViolated = false;
                Iterator<String> it = r.getAssumptions().keySet().iterator();
                while (it.hasNext()) {
                    String assumption = it.next();

                    TestResult at = (TestResult) r.getAssumptions().get(assumption);
                    if (at == null) {
                        report.append(String.format("Testing for %s failed. ", assumption));
                        assumptionViolated = true;
                        continue;
                    }
                    if (Double.isNaN(at.getpValue())) {
                        report.append(
                                String.format("Testing for %s using %s failed. ", assumption, at.getMethod()));
                        assumptionViolated = true;
                        continue;
                    }
                    double ap = at.getpValue();

                    if (ap <= this.significance_low) {
                        assumptionViolated = true;
                    }

                    // Verbalize result according to p value
                    Pair<String, Double> verbalizedP = verbalizeP(ap, true);

                    String testResultRepresentation = getTestResultRepresentation(at, verbalizedP.getValue());
                    report.append(String.format("%s %s violation of %s (%s). ", at.getMethod(),
                            verbalizedP.getKey(), assumption, testResultRepresentation));

                }

                // Create QQ-Normal diagram to support the analysis of a
                // normality assumption
                if (result.getKey().equals("DependentT") && samples.length == 2) {
                    filename = String.format("qqNormPlot%s", measure.replaceAll("\\s", ""));
                    path = String.format("%s%s%s", outputFolderPath, File.separator, filename);
                    pathR = this.fixSlashes(path);
                    figRef = String.format("fig:qqNormPlot%s", measure.replaceAll("\\s", ""));
                    caption = String.format("QQ-Normal plot of pairwise differences between %s samples.",
                            measure);
                    double[] differences = new double[samples[0].length];
                    for (int j = 0; j < samples[0].length; j++) {
                        differences[j] = samples[0][j] - samples[1][j];
                    }
                    successful = stats.plotQQNorm(differences, "M0-M1", measure, pathR);
                    if (successful) {
                        figures.add(new String[] { figRef, caption, filename });
                        report.append(String.format("See Fig. \\ref{%s} for a QQ-Normal plot of the samples. ",
                                figRef));
                    }
                }

                if (assumptionViolated) {
                    report.append(
                            "Given that the assumptions are violated, the following test may be corrupted. ");
                }

                // A Priori test results
                // Verbalize result according to p value
                Pair<String, Double> verbalizedP = verbalizeP(r.getpValue(), false);
                String testResultRepresentation = getTestResultRepresentation(r, verbalizedP.getValue());
                report.append(String.format(
                        "The %s %s differences between the performances of the models (%s).\\\\ \n\n ", method,
                        verbalizedP.getKey(), testResultRepresentation));

                // Store result for summary
                if (testSummary.get(testType).containsKey(verbalizedP.getKey())) {
                    testSummary.get(testType).get(verbalizedP.getKey()).add(measure);
                } else {
                    ArrayList<String> list = new ArrayList<String>();
                    list.add(measure);
                    testSummary.get(testType).put(verbalizedP.getKey(), list);
                }

                // Post-hoc test for >2 models (pairwise comparisons)
                if (evalResults.getSampleData().getModelMetadata().size() > 2) {

                    Pair<String, AbstractTestResult> postHocResult = null;
                    if (testType.equals("Parametric")) {
                        postHocResult = evalResults.getParametricPostHocTestResults().get(measure);
                    } else {
                        postHocResult = evalResults.getNonParametricPostHocTestResults().get(measure);
                    }
                    method = postHocResult.getKey();
                    if (StatsConfigConstants.PRETTY_PRINT_METHODS.containsKey(method)) {
                        method = StatsConfigConstants.PRETTY_PRINT_METHODS.get(method);
                    }
                    methodsSummary.put(String.format("%sPostHoc", testType), method);

                    PairwiseTestResult rPostHoc = (PairwiseTestResult) postHocResult.getValue();
                    report.append(String.format("The system performed the \\emph{%s} post-hoc. ", method));

                    if (rPostHoc == null) {
                        report.append("The test failed. ");
                        continue;
                    }

                    // Assumptions
                    boolean assumptionsViolated = false;
                    it = rPostHoc.getAssumptions().keySet().iterator();
                    while (it.hasNext()) {
                        String assumption = it.next();
                        PairwiseTestResult at = (PairwiseTestResult) rPostHoc.getAssumptions().get(assumption);
                        if (at == null) {
                            report.append(String.format("Testing for %s failed. ", assumption));
                            assumptionsViolated = true;
                            continue;
                        }

                        // Create table with pairwise p-values for
                        // assumption testing
                        double[][] ap = at.getpValue();
                        Pair<String[], String[][]> tableData = getPValueStringArray(ap, isBaselineEvaluation); // first
                        // element
                        // is
                        // header,
                        // second
                        // are
                        // values
                        caption = String.format("P-values from the %s for %s", at.getMethod(), measure);
                        ref = String.format("tbl:%s%s", at.getMethod().replaceAll("\\s", ""),
                                measure.replaceAll("\\s", ""));
                        table = createLatexTable(caption, ref, tableData.getKey(),
                                String.format("|%s", StringUtils.repeat("l|", nModels)), tableData.getValue());

                        double max = getMax(ap);
                        double min = getMin(ap);
                        verbalizedP = verbalizeP(min, true);
                        if ((max > significance_low && min <= significance_low)
                                || (max > significance_medium && min <= significance_medium)
                                || (max > significance_high && min <= significance_high)) {
                            // partly significant to degree as specified by
                            // verbalized p-value
                            report.append(String.format(
                                    "%s partly %s violation of %s ($\\alpha=%.2f$, Tbl. \\ref{%s}).\n",
                                    at.getMethod(), verbalizedP.getKey(), assumption, verbalizedP.getValue(),
                                    ref));
                        } else {
                            report.append(String.format(
                                    "%s %s violation of %s ($\\alpha=%.2f$, Tbl. \\ref{%s}).\n", at.getMethod(),
                                    verbalizedP.getKey(), assumption, verbalizedP.getValue(), ref));
                        }
                        report.append(table);

                        if (min <= this.significance_low) {
                            assumptionsViolated = true;
                        }

                    }

                    // Guard the post-hoc result with the post-hoc assumption checks
                    // (the a priori flag was already reported above).
                    if (assumptionsViolated) {
                        report.append(
                                "Given that the assumptions are violated, the following test may be corrupted. ");
                    }

                    // Result
                    double[][] ap = rPostHoc.getpValue();
                    Pair<String[], String[][]> tableData = getPValueStringArray(ap, isBaselineEvaluation); // first
                    // element
                    // is
                    // header,
                    // second
                    // are
                    // values
                    caption = String.format("P-values from the %s for %s", method, measure);
                    ref = String.format("tbl:%s%s", method.replaceAll("\\s", ""),
                            measure.replaceAll("\\s", ""));
                    String formatting = null;
                    if (!isBaselineEvaluation) {
                        formatting = String.format("|%s", StringUtils.repeat("l|", nModels));
                    } else {
                        formatting = String.format("|l|l|");
                    }
                    String tablePNonAdjusted = createLatexTable(caption, ref, tableData.getKey(), formatting,
                            tableData.getValue());

                    // Already fetch pairwise adjustments here in order to
                    // determine choice of words
                    double max = getMax(ap);
                    double min = getMin(ap);
                    verbalizedP = verbalizeP(min, false);
                    ArrayList<StatsConfigConstants.CORRECTION_VALUES> adjustments = new ArrayList<StatsConfigConstants.CORRECTION_VALUES>(
                            rPostHoc.getpValueCorrections().keySet());
                    String adjustWord = "";
                    if (adjustments.size() > 0) {
                        adjustWord = " for non-adjusted p-values";
                    }
                    if ((max > significance_low && min <= significance_low)
                            || (max > significance_medium && min <= significance_medium)
                            || (max > significance_high && min <= significance_high)) {
                        // partly significant to degree as specified by
                        // verbalized p-value
                        report.append(String.format(
                                "The %s partly %s differences between the performances of the models%s ($\\alpha=%.2f$, Tbl. \\ref{%s}). ",
                                method, verbalizedP.getKey(), adjustWord, verbalizedP.getValue(), ref));
                    } else {
                        report.append(String.format(
                                "The %s %s differences between the performances of the models%s ($\\alpha=%.2f$, Tbl. \\ref{%s}). ",
                                method, verbalizedP.getKey(), adjustWord, verbalizedP.getValue(), ref));
                    }

                    // Determine ordering of models
                    HashMap<Integer, TreeSet<Integer>> postHocOrdering = null;
                    int[][] orderingEdgeList = null;
                    if (testType.equals("Parametric")) {
                        postHocOrdering = evalResults.getParameticPostHocOrdering().get(measure);
                        orderingEdgeList = evalResults.getParameticPostHocEdgelist().get(measure);
                    } else {
                        postHocOrdering = evalResults.getNonParameticPostHocOrdering().get(measure);
                        orderingEdgeList = evalResults.getNonParameticPostHocEdgelist().get(measure);
                    }
                    String ordering = getModelOrderingRepresentation(postHocOrdering);
                    report.append(ordering);

                    // Print graphs of ordering for the current measure and
                    // add the figure to the appendix
                    filename = String.format("graphOrdering%s%s", measure.replaceAll("\\s", ""), testType);
                    path = String.format("%s%s%s", outputFolderPath, File.separator, filename);
                    pathR = this.fixSlashes(path);
                    figRef = String.format("fig:graphOrdering%s%s", measure.replaceAll("\\s", ""), testType);
                    caption = String.format(
                            "Directed graph of significant differences for %s, as indicated by the %s post-hoc test.",
                            measure, testType.toLowerCase());
                    // int nodes[] = new int[nModels];
                    // for(int j=0; j<nModels;j++){nodes[j]=j;};
                    successful = stats.plotGraph(orderingEdgeList, nModels, pathR);
                    if (successful) {
                        figures.add(new String[] { figRef, caption, filename });
                        report.append(String.format("The ordering is visualized in Fig. \\ref{%s}. ", figRef));
                    }

                    // Pairwise adjustments
                    String tablePAdjusted = null;
                    if (adjustments.size() > 0) {
                        String[] subcaption = new String[adjustments.size()];
                        String[] header = null;
                        String[][][] overallValues = new String[adjustments.size()][][];
                        double[] minAdjustments = new double[adjustments.size()];
                        double[] maxAdjustments = new double[adjustments.size()];
                        for (int j = 0; j < adjustments.size(); j++) {
                            StatsConfigConstants.CORRECTION_VALUES adjustmentMethod = adjustments.get(j);
                            subcaption[j] = adjustmentMethod.name();
                            double[][] correctedP = rPostHoc.getpValueCorrections().get(adjustmentMethod);
                            if (StatsConfigConstants.PRETTY_PRINT_METHODS.containsKey(adjustmentMethod)) {
                                subcaption[j] = StatsConfigConstants.PRETTY_PRINT_METHODS.get(adjustmentMethod);
                            }
                            tableData = getPValueStringArray(correctedP, isBaselineEvaluation);
                            header = tableData.getKey();
                            overallValues[j] = tableData.getValue();
                            minAdjustments[j] = getMin(correctedP);
                            maxAdjustments[j] = getMax(correctedP);
                        }

                        caption = String.format("Adjusted p-values from the %s for %s", method, measure);
                        ref = String.format("tbl:%s%sAdjusted", method.replaceAll("\\s", ""),
                                measure.replaceAll("\\s", ""));
                        formatting = null;
                        if (!isBaselineEvaluation) {
                            formatting = String.format("|%s", StringUtils.repeat("l|", nModels));
                        } else {
                            formatting = String.format("|l|l|");
                        }
                        tablePAdjusted = createLatexSubTable(caption, subcaption, ref, header, formatting,
                                overallValues);

                        min = getMin(minAdjustments);
                        max = getMax(maxAdjustments);
                        verbalizedP = verbalizeP(min, false);

                        if ((max > significance_low && min <= significance_low)
                                || (max > significance_medium && min <= significance_medium)
                                || (max > significance_high && min <= significance_high)) {
                            // partly significant to degree as specified by
                            // verbalized p-value
                            report.append(String.format(
                                    "It partly %s differences for adjusted p-values ($\\alpha=%.2f$, Tbl. \\ref{%s}).\n\n ",
                                    verbalizedP.getKey(), verbalizedP.getValue(), ref));
                        } else {
                            report.append(String.format(
                                    "It %s differences for adjusted p-values ($\\alpha=%.2f$, Tbl. \\ref{%s}).\n\n ",
                                    verbalizedP.getKey(), verbalizedP.getValue(), ref));
                        }
                    }

                    report.append(tablePNonAdjusted);
                    if (tablePAdjusted != null) {
                        report.append(tablePAdjusted);
                    }

                }
            } else {
                report.append(String.format("The %s failed.", method));
            }
        }

    }

    //
    // Contingency table and McNemar results if this test was performed
    //
    if (evalResults.getNonParametricTest().equals("McNemar")) {
        String measure = "Contingency Table";
        String testType = "Non-Parametric";
        report.append("\\FloatBarrier\n");
        report.append("\\subsection{Contingency Table}\n");

        String caption = String
                .format("Contingency table with correctly and incorrectly classified folds for %s", measure);
        // Set the table reference before it is used in the report text below;
        // otherwise \ref would still point at the previous measure's table.
        ref = "tbl:ContingencyMatrix";
        if (pipelineType == ReportTypes.MULTIPLE_CV) {
            report.append(String.format(
                    "The contingency table drawn from the %s and the %d models is listed in Tbl. \\ref{%s}. The correctly and incorrectly classified instances per fold were averaged over all repetitions. \n",
                    pipelineDescription, nModels, ref));
            caption = String.format(
                    "Averaged contingency table with correctly and incorrectly classified folds for %s",
                    measure);
        } else {
            report.append(String.format(
                    "The contingency table drawn from the %s and the %d models is listed in Tbl. \\ref{%s}.\n",
                    pipelineDescription, nModels, ref));
        }

        int[][] contingencyMatrix = evalResults.getSampleData().getContingencyMatrix();
        values = new String[][] { { "Wrong", "", "" }, { "Correct", "", "" } };
        values[0][1] = String.valueOf(contingencyMatrix[0][0]);
        values[0][2] = String.valueOf(contingencyMatrix[0][1]);
        values[1][1] = String.valueOf(contingencyMatrix[1][0]);
        values[1][2] = String.valueOf(contingencyMatrix[1][1]);

        table = createLatexTable(caption, ref, new String[] { "M0/M1", "Wrong", "Correct" }, "|l|l|l|", values);
        report.append(table);

        // Test results
        report.append(String.format("\\subsubsection{%s Testing}", testType));
        report.append(
                String.format("The system compared the %d models using the \\emph{McNemar test}. ", nModels));
        Pair<String, AbstractTestResult> result = evalResults.getNonParametricTestResults().get(measure);

        // Use pretty-print method descriptor if specified
        String method = result.getKey();
        if (StatsConfigConstants.PRETTY_PRINT_METHODS.containsKey(method)) {
            method = StatsConfigConstants.PRETTY_PRINT_METHODS.get(method);
        }
        methodsSummary.put(testType, method);

        TestResult r = (TestResult) result.getValue();
        if (r != null && !Double.isNaN(r.getpValue())) {
            StringBuilder parameters = new StringBuilder();
            Iterator<String> it = r.getParameter().keySet().iterator();
            while (it.hasNext()) {
                String parameter = it.next();
                double value = r.getParameter().get(parameter);
                parameters.append(String.format("%s=%.3f, ", parameter, value));
            }

            // Verbalize result according to p value
            Pair<String, Double> verbalizedP = verbalizeP(r.getpValue(), false);
            report.append(String.format(
                    "The test %s differences between the performances of the models ($%sp=%.3f, \\alpha=%.2f$).\\\\ \n",
                    verbalizedP.getKey(), parameters.toString(), r.getpValue(), verbalizedP.getValue()));
            // Store result for summary
            if (testSummary.get(testType).containsKey(verbalizedP.getKey())) {
                testSummary.get(testType).get(verbalizedP.getKey()).add(measure);
            } else {
                ArrayList<String> list = new ArrayList<String>();
                list.add(measure);
                testSummary.get(testType).put(verbalizedP.getKey(), list);
            }

        } else {
            report.append("The test failed.\\\\ \n");
        }
    }

    //
    // Summary of results
    //
    report.append("\\FloatBarrier\n");
    report.append("\\section{Summary}\n");
    for (String testType : new String[] { "Parametric", "Non-Parametric" }) {
        String prefix = "";

        if (nModels == 2) {
            report.append(
                    String.format("The system performed %s testing of the %d models using a %s. The test ",
                            testType.toLowerCase(), nModels, methodsSummary.get(testType)));
            prefix = "It";
        } else {
            String postHocTesting = String.format("%sPostHoc", testType);
            report.append(String.format(
                    "The system performed %s testing of the %d models using a %s and a %s post-hoc. The tests ",
                    testType.toLowerCase(), nModels, methodsSummary.get(testType),
                    methodsSummary.get(postHocTesting)));
            prefix = "They";
        }

        // If all tests failed, there are no results to summarize.
        HashMap<String, List<String>> summary = testSummary.get(testType);
        if (summary.isEmpty()) {
            report.append("failed. ");
            continue;
        }

        Iterator<String> it = summary.keySet().iterator();
        boolean usePrefix = false;
        while (it.hasNext()) {
            String pVerbalization = it.next();
            List<String> affectedMeasures = summary.get(pVerbalization);
            if (!usePrefix) {
                report.append(String.format("%s differences in performance for the %s. ", pVerbalization,
                        createEnumeration(affectedMeasures)));
            } else {
                report.append(String.format("%s %s differences in performance for the %s. ", prefix,
                        pVerbalization, createEnumeration(affectedMeasures)));
            }
            usePrefix = true;
        }
        report.append("\\\\ \n\n");

    }

    //
    // Appendix
    //
    // Add all figures
    report.append("\\FloatBarrier\n");
    report.append("\\section{Appendix}\n");
    for (int i = 0; i < figures.size(); i++) {
        ref = figures.get(i)[0];
        String caption = figures.get(i)[1];
        String filename = figures.get(i)[2];
        report.append("\\begin{figure}\n");
        report.append("\\centering\n");
        report.append(String.format("\\includegraphics[width=1\\linewidth]{%s}\n", filename));
        report.append(String.format("\\caption{%s}\n", caption));
        report.append(String.format("\\label{%s}\n", ref));
        report.append("\\end{figure}\n\n");
    }

    // Close document
    report.append("\\end{document}");
    return report.toString();

}
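
The generator above leans on the Map.Entry view of Pair throughout: getKey() returns the left element (a training-set or model name) and getValue() the right one. Below is a minimal, self-contained sketch of that unpacking pattern; the class and dataset names are hypothetical and not part of the example above.

import java.util.ArrayList;
import java.util.List;

import org.apache.commons.lang3.tuple.Pair;

public class TrainTestPairDemo {

    public static void main(String[] args) {
        List<Pair<String, String>> datasetNames = new ArrayList<>();
        datasetNames.add(Pair.of("newsTrain", "newsTest"));
        // In a pure cross-validation run there may be no separate test set.
        datasetNames.add(Pair.of("blogsTrain", null));

        for (Pair<String, String> trainTest : datasetNames) {
            System.out.println("train: " + trainTest.getKey()); // left element
            if (trainTest.getValue() != null) {
                System.out.println("test:  " + trainTest.getValue()); // right element
            }
        }
    }
}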

From source file:forge.game.card.Card.java

private static String getTextForKwCantBeBlockedByType(final String keyword) {
    boolean negative = true;
    final List<String> subs = Lists.newArrayList(TextUtil.split(keyword.split(" ", 2)[1], ','));
    final List<List<String>> subsAnd = Lists.newArrayList();
    final List<String> orClauses = new ArrayList<>();
    for (final String expression : subs) {
        final List<String> parts = Lists.newArrayList(expression.split("[.+]"));
        for (int p = 0; p < parts.size(); p++) {
            final String part = parts.get(p);
            if (part.equalsIgnoreCase("creature")) {
                parts.remove(p--);
                continue;
            }
            // based on the assumption that each expression has at least one predicate besides 'creature'
            negative &= part.contains("non") || part.contains("without");
        }
        subsAnd.add(parts);
    }

    final boolean allNegative = negative;
    final String byClause = allNegative ? "except by " : "by ";

    final Function<Pair<Boolean, String>, String> withToString = new Function<Pair<Boolean, String>, String>() {
        @Override
        public String apply(Pair<Boolean, String> inp) {
            boolean useNon = inp.getKey() == allNegative;
            return (useNon ? "*NO* " : "") + inp.getRight();
        }
    };

    for (final List<String> andOperands : subsAnd) {
        final List<Pair<Boolean, String>> prependedAdjectives = Lists.newArrayList();
        final List<Pair<Boolean, String>> postponedAdjectives = Lists.newArrayList();
        String creatures = null;

        for (String part : andOperands) {
            boolean positive = true;
            if (part.startsWith("non")) {
                part = part.substring(3);
                positive = false;
            }
            if (part.startsWith("with")) {
                positive = !part.startsWith("without");
                postponedAdjectives.add(Pair.of(positive, part.substring(positive ? 4 : 7)));
            } else if (part.startsWith("powerLEX")) {// Kraken of the Straits
                postponedAdjectives.add(Pair.of(true, "power less than the number of islands you control"));
            } else if (part.startsWith("power")) {
                int kwLength = 5;
                String opName = Expressions.operatorName(part.substring(kwLength, kwLength + 2));
                String operand = part.substring(kwLength + 2);
                postponedAdjectives.add(Pair.of(true, "power" + opName + operand));
            } else if (CardType.isACreatureType(part)) {
                if (creatures != null && CardType.isACreatureType(creatures)) { // e.g. Kor Castigator
                    creatures = StringUtils.capitalize(Lang.getPlural(part)) + creatures;
                } else {
                    creatures = StringUtils.capitalize(Lang.getPlural(part))
                            + (creatures == null ? "" : " or " + creatures);
                }
            } else {
                prependedAdjectives.add(Pair.of(positive, part.toLowerCase()));
            }
        }

        StringBuilder sbShort = new StringBuilder();
        if (allNegative) {
            boolean isFirst = true;
            for (Pair<Boolean, String> pre : prependedAdjectives) {
                if (isFirst)
                    isFirst = false;
                else
                    sbShort.append(" and/or ");

                boolean useNon = pre.getKey() == allNegative;
                if (useNon)
                    sbShort.append("non-");
                sbShort.append(pre.getValue()).append(" ").append(creatures == null ? "creatures" : creatures);
            }
            if (prependedAdjectives.isEmpty())
                sbShort.append(creatures == null ? "creatures" : creatures);

            if (!postponedAdjectives.isEmpty()) {
                if (!prependedAdjectives.isEmpty()) {
                    sbShort.append(" and/or creatures");
                }

                sbShort.append(" with ");
                sbShort.append(
                        Lang.joinHomogenous(postponedAdjectives, withToString, allNegative ? "or" : "and"));
            }

        } else {
            for (Pair<Boolean, String> pre : prependedAdjectives) {
                boolean useNon = pre.getKey() == allNegative;
                if (useNon)
                    sbShort.append("non-");
                sbShort.append(pre.getValue()).append(" ");
            }
            sbShort.append(creatures == null ? "creatures" : creatures);

            if (!postponedAdjectives.isEmpty()) {
                sbShort.append(" with ");
                sbShort.append(
                        Lang.joinHomogenous(postponedAdjectives, withToString, allNegative ? "or" : "and"));
            }

        }
        orClauses.add(sbShort.toString());
    }
    return byClause + StringUtils.join(orClauses, " or ") + ".";
}
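
In this snippet, getKey() reads the boolean polarity flag from a Pair<Boolean, String> and getRight() reads the adjective text; getKey() is simply the Map.Entry alias for getLeft(). A minimal sketch of the same pattern, with an illustrative adjective:

import org.apache.commons.lang3.tuple.Pair;

public class AdjectivePairDemo {

    public static void main(String[] args) {
        // Left: is the adjective positive? Right: the adjective itself.
        Pair<Boolean, String> adjective = Pair.of(false, "artifact");
        boolean useNon = !adjective.getKey();
        System.out.println((useNon ? "non-" : "") + adjective.getRight() + " creatures");
        // Prints: non-artifact creatures
    }
}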

From source file:no.kantega.publishing.admin.topicmaps.action.EditTopicAction.java

private void addValidationError(HttpServletRequest request, String errorCode,
        Pair<String, String>... parameters) {
    ValidationErrors errors;
    if (request.getAttribute("errors") != null) {
        errors = (ValidationErrors) request.getAttribute("errors");
    } else {
        errors = new ValidationErrors();
        request.setAttribute("errors", errors);
    }
    Map<String, Object> params = new HashMap<>();
    if (parameters != null) {
        for (Pair<String, String> p : parameters) {
            params.put(p.getKey(), p.getValue());
        }
    }
    errors.add(new ValidationError(null, errorCode, params));
}
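
The helper above copies a varargs list of Pair<String, String> parameters into a Map through getKey() and getValue(). A standalone sketch of that idiom, using made-up parameter names:

import java.util.HashMap;
import java.util.Map;

import org.apache.commons.lang3.tuple.Pair;

public class VarargsPairDemo {

    @SafeVarargs
    static Map<String, Object> toParamMap(Pair<String, String>... parameters) {
        Map<String, Object> params = new HashMap<>();
        if (parameters != null) {
            for (Pair<String, String> p : parameters) {
                params.put(p.getKey(), p.getValue()); // key = left, value = right
            }
        }
        return params;
    }

    public static void main(String[] args) {
        Map<String, Object> params = toParamMap(Pair.of("field", "baseName"), Pair.of("topic", "Chess"));
        System.out.println(params); // e.g. {field=baseName, topic=Chess}; HashMap order is unspecified
    }
}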