Example usage for java.lang Float POSITIVE_INFINITY

List of usage examples for java.lang Float POSITIVE_INFINITY

Introduction

This page presents example usages of java.lang Float POSITIVE_INFINITY.

Prototype

float POSITIVE_INFINITY

Document

A constant holding the positive infinity of type float.
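
As a quick orientation before the examples, the sketch below shows how the constant arises and behaves in ordinary float arithmetic (the class name is illustrative):

public class PositiveInfinityDemo {
    public static void main(String[] args) {
        // Dividing a positive float by zero yields POSITIVE_INFINITY; it does not throw.
        float inf = 1.0f / 0.0f;
        System.out.println(inf == Float.POSITIVE_INFINITY); // true

        // Overflowing the float range saturates to infinity as well.
        System.out.println(Float.MAX_VALUE * 2.0f); // Infinity

        // Infinity compares greater than every finite float, which makes it a handy sentinel.
        System.out.println(Float.POSITIVE_INFINITY > Float.MAX_VALUE); // true

        // Float.isInfinite tests for either infinity.
        System.out.println(Float.isInfinite(inf)); // true
    }
}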

Usage

From source file:org.apache.axis2.databinding.utils.ConverterUtil.java

public static float convertToFloat(String s) {
    if ((s == null) || s.equals("")) {
        return Float.NaN;
    }
    if (s.startsWith("+")) {
        s = s.substring(1);
    }
    if (POSITIVE_INFINITY.equals(s)) {
        return Float.POSITIVE_INFINITY;
    } else if (NEGATIVE_INFINITY.equals(s)) {
        return Float.NEGATIVE_INFINITY;
    }
    return Float.parseFloat(s);
}
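
Note that POSITIVE_INFINITY and NEGATIVE_INFINITY here are String constants defined elsewhere in ConverterUtil, not the Float fields; by the XML Schema lexical convention for floats they would be "INF" and "-INF". Under that assumption, the method behaves as follows:

// Assumed constant values (XML Schema convention): POSITIVE_INFINITY = "INF",
// NEGATIVE_INFINITY = "-INF".
System.out.println(convertToFloat("INF"));  // Infinity
System.out.println(convertToFloat("+INF")); // the '+' is stripped first, so Infinity again
System.out.println(convertToFloat("-INF")); // -Infinity
System.out.println(convertToFloat(""));     // NaN
System.out.println(convertToFloat("3.14")); // 3.14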

From source file:com.ml.hadoop.nlp.SparseVectorsFromSequenceFiles.java

@Override
public int run(String[] args) throws Exception {
    DefaultOptionBuilder obuilder = new DefaultOptionBuilder();
    ArgumentBuilder abuilder = new ArgumentBuilder();
    GroupBuilder gbuilder = new GroupBuilder();

    Option inputDirOpt = DefaultOptionCreator.inputOption().create();

    Option outputDirOpt = DefaultOptionCreator.outputOption().create();

    Option minSupportOpt = obuilder.withLongName("minSupport")
            .withArgument(abuilder.withName("minSupport").withMinimum(1).withMaximum(1).create())
            .withDescription("(Optional) Minimum Support. Default Value: 2").withShortName("s").create();

    Option analyzerNameOpt = obuilder.withLongName("analyzerName")
            .withArgument(abuilder.withName("analyzerName").withMinimum(1).withMaximum(1).create())
            .withDescription("The class name of the analyzer").withShortName("a").create();

    Option dictionaryPathOpt = obuilder.withLongName("dictionaryPath")
            .withArgument(abuilder.withName("dictionaryPath").withMinimum(1).withMaximum(1).create())
            .withDescription("Dictionary path for update TFIDF").withShortName("dp").create();

    Option docFrequencyPathOpt = obuilder.withLongName("docFrequencyPath")
            .withArgument(abuilder.withName("docFrequencyPath").withMinimum(1).withMaximum(1).create())
            .withDescription("Doc frequency path for update TFIDF").withShortName("dfp").create();

    Option tfVectorsPathOpt = obuilder.withLongName("tfVectorsPath")
            .withArgument(abuilder.withName("tfVectorsPath").withMinimum(1).withMaximum(1).create())
            .withDescription("TF Vectors path").withShortName("tfvp").create();

    Option chunkSizeOpt = obuilder.withLongName("chunkSize")
            .withArgument(abuilder.withName("chunkSize").withMinimum(1).withMaximum(1).create())
            .withDescription("The chunkSize in MegaBytes. 100-10000 MB").withShortName("chunk").create();

    Option weightOpt = obuilder.withLongName("weight").withRequired(false)
            .withArgument(abuilder.withName("weight").withMinimum(1).withMaximum(1).create())
            .withDescription("The kind of weight to use. Currently TF , TFIDF or TFIDF_UPDATE")
            .withShortName("wt").create();

    Option minDFOpt = obuilder.withLongName("minDF").withRequired(false)
            .withArgument(abuilder.withName("minDF").withMinimum(1).withMaximum(1).create())
            .withDescription("The minimum document frequency.  Default is 1").withShortName("md").create();

    Option maxDFPercentOpt = obuilder.withLongName("maxDFPercent").withRequired(false)
            .withArgument(abuilder.withName("maxDFPercent").withMinimum(1).withMaximum(1).create())
            .withDescription(
                    "The max percentage of docs for the DF.  Can be used to remove really high frequency terms."
                            + " Expressed as an integer between 0 and 100. Default is 99.  If maxDFSigma is also set, "
                            + "it will override this value.")
            .withShortName("x").create();

    Option maxDFSigmaOpt = obuilder.withLongName("maxDFSigma").withRequired(false)
            .withArgument(abuilder.withName("maxDFSigma").withMinimum(1).withMaximum(1).create())
            .withDescription(
                    "What portion of the tf (tf-idf) vectors to be used, expressed in times the standard deviation (sigma) "
                            + "of the document frequencies of these vectors. Can be used to remove really high frequency terms."
                            + " Expressed as a double value. Good value to be specified is 3.0. In case the value is less "
                            + "than 0 no vectors will be filtered out. Default is -1.0.  Overrides maxDFPercent")
            .withShortName("xs").create();

    Option minLLROpt = obuilder.withLongName("minLLR").withRequired(false)
            .withArgument(abuilder.withName("minLLR").withMinimum(1).withMaximum(1).create())
            .withDescription("(Optional)The minimum Log Likelihood Ratio(Float)  Default is "
                    + LLRReducer.DEFAULT_MIN_LLR)
            .withShortName("ml").create();

    Option numReduceTasksOpt = obuilder.withLongName("numReducers")
            .withArgument(abuilder.withName("numReducers").withMinimum(1).withMaximum(1).create())
            .withDescription("(Optional) Number of reduce tasks. Default Value: 1").withShortName("nr")
            .create();

    Option powerOpt = obuilder.withLongName("norm").withRequired(false)
            .withArgument(abuilder.withName("norm").withMinimum(1).withMaximum(1).create())
            .withDescription(
                    "The norm to use, expressed as either a float or \"INF\" if you want to use the Infinite norm.  "
                            + "Must be greater or equal to 0.  The default is not to normalize")
            .withShortName("n").create();

    Option logNormalizeOpt = obuilder.withLongName("logNormalize").withRequired(false)
            .withDescription("(Optional) Whether output vectors should be logNormalize. If set true else false")
            .withShortName("lnorm").create();

    Option maxNGramSizeOpt = obuilder.withLongName("maxNGramSize").withRequired(false)
            .withArgument(abuilder.withName("ngramSize").withMinimum(1).withMaximum(1).create())
            .withDescription("(Optional) The maximum size of ngrams to create"
                    + " (2 = bigrams, 3 = trigrams, etc) Default Value:1")
            .withShortName("ng").create();

    Option sequentialAccessVectorOpt = obuilder.withLongName("sequentialAccessVector").withRequired(false)
            .withDescription(
                    "(Optional) Whether output vectors should be SequentialAccessVectors. If set true else false")
            .withShortName("seq").create();

    Option namedVectorOpt = obuilder.withLongName("namedVector").withRequired(false)
            .withDescription("(Optional) Whether output vectors should be NamedVectors. If set true else false")
            .withShortName("nv").create();

    Option overwriteOutput = obuilder.withLongName("overwrite").withRequired(false)
            .withDescription("If set, overwrite the output directory").withShortName("ow").create();
    Option helpOpt = obuilder.withLongName("help").withDescription("Print out help").withShortName("h")
            .create();

    Group group = gbuilder.withName("Options").withOption(minSupportOpt).withOption(analyzerNameOpt)
            .withOption(dictionaryPathOpt).withOption(docFrequencyPathOpt).withOption(tfVectorsPathOpt)
            .withOption(chunkSizeOpt).withOption(outputDirOpt).withOption(inputDirOpt).withOption(minDFOpt)
            .withOption(maxDFSigmaOpt).withOption(maxDFPercentOpt).withOption(weightOpt).withOption(powerOpt)
            .withOption(minLLROpt).withOption(numReduceTasksOpt).withOption(maxNGramSizeOpt)
            .withOption(overwriteOutput).withOption(helpOpt).withOption(sequentialAccessVectorOpt)
            .withOption(namedVectorOpt).withOption(logNormalizeOpt).create();
    try {
        Parser parser = new Parser();
        parser.setGroup(group);
        parser.setHelpOption(helpOpt);
        CommandLine cmdLine = parser.parse(args);

        if (cmdLine.hasOption(helpOpt)) {
            CommandLineUtil.printHelp(group);
            return -1;
        }

        Path inputDir = new Path((String) cmdLine.getValue(inputDirOpt));
        Path outputDir = new Path((String) cmdLine.getValue(outputDirOpt));

        int chunkSize = 100;
        if (cmdLine.hasOption(chunkSizeOpt)) {
            chunkSize = Integer.parseInt((String) cmdLine.getValue(chunkSizeOpt));
        }
        int minSupport = 2;
        if (cmdLine.hasOption(minSupportOpt)) {
            String minSupportString = (String) cmdLine.getValue(minSupportOpt);
            minSupport = Integer.parseInt(minSupportString);
        }

        int maxNGramSize = 1;

        if (cmdLine.hasOption(maxNGramSizeOpt)) {
            try {
                maxNGramSize = Integer.parseInt(cmdLine.getValue(maxNGramSizeOpt).toString());
            } catch (NumberFormatException ex) {
                log.warn("Could not parse ngram size option");
            }
        }
        log.info("Maximum n-gram size is: {}", maxNGramSize);

        if (cmdLine.hasOption(overwriteOutput)) {
            HadoopUtil.delete(getConf(), outputDir);
        }

        float minLLRValue = LLRReducer.DEFAULT_MIN_LLR;
        if (cmdLine.hasOption(minLLROpt)) {
            minLLRValue = Float.parseFloat(cmdLine.getValue(minLLROpt).toString());
        }
        log.info("Minimum LLR value: {}", minLLRValue);

        int reduceTasks = 1;
        if (cmdLine.hasOption(numReduceTasksOpt)) {
            reduceTasks = Integer.parseInt(cmdLine.getValue(numReduceTasksOpt).toString());
        }
        log.info("Number of reduce tasks: {}", reduceTasks);

        Class<? extends Analyzer> analyzerClass = StandardAnalyzer.class;
        if (cmdLine.hasOption(analyzerNameOpt)) {
            String className = cmdLine.getValue(analyzerNameOpt).toString();
            analyzerClass = Class.forName(className).asSubclass(Analyzer.class);
            // try instantiating it, b/c there isn't any point in setting it if
            // you can't instantiate it
            AnalyzerUtils.createAnalyzer(analyzerClass);
        }

        //default process tfidf:1, tf:2, update tfidf:3
        int processIdf;

        if (cmdLine.hasOption(weightOpt)) {
            String wString = cmdLine.getValue(weightOpt).toString();
            if ("tf".equalsIgnoreCase(wString)) {
                processIdf = 2;
            } else if ("tfidf".equalsIgnoreCase(wString)) {
                processIdf = 1;
            } else if ("tfidf_update".equalsIgnoreCase(wString)) {
                processIdf = 3;
            } else {
                throw new OptionException(weightOpt);
            }
        } else {
            processIdf = 1;
        }

        int minDf = 1;
        if (cmdLine.hasOption(minDFOpt)) {
            minDf = Integer.parseInt(cmdLine.getValue(minDFOpt).toString());
        }
        int maxDFPercent = 99;
        if (cmdLine.hasOption(maxDFPercentOpt)) {
            maxDFPercent = Integer.parseInt(cmdLine.getValue(maxDFPercentOpt).toString());
        }
        double maxDFSigma = -1.0;
        if (cmdLine.hasOption(maxDFSigmaOpt)) {
            maxDFSigma = Double.parseDouble(cmdLine.getValue(maxDFSigmaOpt).toString());
        }

        float norm = PartialVectorMerger.NO_NORMALIZING;
        if (cmdLine.hasOption(powerOpt)) {
            String power = cmdLine.getValue(powerOpt).toString();
            if ("INF".equals(power)) {
                norm = Float.POSITIVE_INFINITY;
            } else {
                norm = Float.parseFloat(power);
            }
        }

        boolean logNormalize = false;
        if (cmdLine.hasOption(logNormalizeOpt)) {
            logNormalize = true;
        }
        log.info("Tokenizing documents in {}", inputDir);
        Configuration conf = getConf();
        Path tokenizedPath = new Path(outputDir, DocumentProcessor.TOKENIZED_DOCUMENT_OUTPUT_FOLDER);
        DocumentProcessor.tokenizeDocuments(inputDir, analyzerClass, tokenizedPath, conf);

        boolean sequentialAccessOutput = false;
        if (cmdLine.hasOption(sequentialAccessVectorOpt)) {
            sequentialAccessOutput = true;
        }

        boolean namedVectors = false;
        if (cmdLine.hasOption(namedVectorOpt)) {
            namedVectors = true;
        }
        boolean shouldPrune = maxDFSigma >= 0.0 || maxDFPercent > 0.00;
        String tfDirName = shouldPrune ? DictionaryVectorizer.DOCUMENT_VECTOR_OUTPUT_FOLDER + "-toprune"
                : DictionaryVectorizer.DOCUMENT_VECTOR_OUTPUT_FOLDER;
        log.info("Creating Term Frequency Vectors, prune {}", shouldPrune);

        String dictionaryPath = null;
        if (cmdLine.hasOption(dictionaryPathOpt)) {
            dictionaryPath = (String) cmdLine.getValue(dictionaryPathOpt);
            log.info("begin dic path {}", dictionaryPath);
        }

        if (processIdf == 1) {
            DictionaryVectorizer.createTermFrequencyVectors(tokenizedPath, outputDir, tfDirName, conf,
                    minSupport, maxNGramSize, minLLRValue, -1.0f, false, reduceTasks, chunkSize,
                    sequentialAccessOutput, namedVectors);
        } else if (processIdf == 3) {
            log.info("Updating term frequency vectors");
            DictionaryVectorizer.createUpdateTermFrequencyVectors(tokenizedPath, outputDir, tfDirName, conf,
                    maxNGramSize, dictionaryPath, norm, logNormalize, reduceTasks, sequentialAccessOutput,
                    namedVectors);
        } else {
            DictionaryVectorizer.createTermFrequencyVectors(tokenizedPath, outputDir, tfDirName, conf,
                    minSupport, maxNGramSize, minLLRValue, norm, logNormalize, reduceTasks, chunkSize,
                    sequentialAccessOutput, namedVectors);
        }

        String docFrequencyPaths = null;
        if (cmdLine.hasOption(dictionaryPathOpt)) {
            docFrequencyPaths = (String) cmdLine.getValue(docFrequencyPathOpt);
            log.info("doc frequency path {}", docFrequencyPaths);
        }
        String tfVectorsPaths = null;
        if (cmdLine.hasOption(tfVectorsPathOpt)) {
            tfVectorsPaths = (String) cmdLine.getValue(tfVectorsPathOpt);
            log.info("tf vectors path {}", tfVectorsPaths);
        }

        Pair<Long[], List<Path>> docFrequenciesFeatures = null;
        // Should document frequency features be processed
        if (processIdf == 1) {
            log.info("Calculating IDF");
            docFrequenciesFeatures = TFIDFConverter.calculateDF(new Path(outputDir, tfDirName), outputDir, conf,
                    chunkSize);
            log.info("featureCount {}, vectorCount {}", docFrequenciesFeatures.getFirst()[0],
                    docFrequenciesFeatures.getFirst()[1]);
        } else if (processIdf == 3) {
            // load docFrequency path
            List<Path> docFrequencyChunks = Lists.newArrayList();
            String[] paths = docFrequencyPaths.split(",");

            long featureCount = 0;
            for (String path : paths) {
                int splitPos = path.lastIndexOf("/");
                String docFrequencyPathBase = path.substring(0, splitPos);
                String docFrequencyFile = path.substring(splitPos + 1, path.length());
                log.info("docFrequencyPathBase {}, docFrequencyFile {}", docFrequencyPathBase,
                        docFrequencyFile);
                Path docFrequencyPath = new Path(docFrequencyPathBase, docFrequencyFile);
                docFrequencyChunks.add(docFrequencyPath);

                /*for (Pair<IntWritable, LongWritable> record
                         : new SequenceFileIterable<IntWritable, LongWritable>(docFrequencyPath, true, conf)) {
                     featureCount = Math.max(record.getFirst().get(), featureCount);
                 }*/
            }
            featureCount = 107623;
            featureCount++;

            long vectorCount = Long.MAX_VALUE;
            /*Path tfDirPath = new Path(tfVectorsPaths + "/part-r-00000");
            int i = 0;
            for (Pair<Text, VectorWritable> record
                     : new SequenceFileIterable<Text, VectorWritable>(tfDirPath, true, conf)) {
               i++;
             }
            if (i > 0) {
               vectorCount = i;
            }*/
            vectorCount = 80000;
            //read docFrequencyFile to get featureCount and vectorCount
            Long[] counts = { featureCount, vectorCount };
            log.info("featureCount {}, vectorCount {}", featureCount, vectorCount);
            docFrequenciesFeatures = new Pair<Long[], List<Path>>(counts, docFrequencyChunks);
        }

        long maxDF = maxDFPercent; //if we are pruning by std dev, then this will get changed
        if (shouldPrune) {
            long vectorCount = docFrequenciesFeatures.getFirst()[1];
            if (maxDFSigma >= 0.0) {
                Path dfDir = new Path(outputDir, TFIDFConverter.WORDCOUNT_OUTPUT_FOLDER);
                Path stdCalcDir = new Path(outputDir, HighDFWordsPruner.STD_CALC_DIR);

                // Calculate the standard deviation
                double stdDev = BasicStats.stdDevForGivenMean(dfDir, stdCalcDir, 0.0, conf);
                maxDF = (int) (100.0 * maxDFSigma * stdDev / vectorCount);
            }

            long maxDFThreshold = (long) (vectorCount * (maxDF / 100.0f));

            // Prune the term frequency vectors
            Path tfDir = new Path(outputDir, tfDirName);
            Path prunedTFDir = new Path(outputDir, DictionaryVectorizer.DOCUMENT_VECTOR_OUTPUT_FOLDER);
            Path prunedPartialTFDir = new Path(outputDir,
                    DictionaryVectorizer.DOCUMENT_VECTOR_OUTPUT_FOLDER + "-partial");
            log.info("Pruning");
            if (processIdf == 1 || processIdf == 3) {
                HighDFWordsPruner.pruneVectors(tfDir, prunedTFDir, prunedPartialTFDir, maxDFThreshold, minDf,
                        conf, docFrequenciesFeatures, -1.0f, false, reduceTasks);
            } else {
                HighDFWordsPruner.pruneVectors(tfDir, prunedTFDir, prunedPartialTFDir, maxDFThreshold, minDf,
                        conf, docFrequenciesFeatures, norm, logNormalize, reduceTasks);
            }
            HadoopUtil.delete(new Configuration(conf), tfDir);
        }
        if (processIdf == 1 || processIdf == 3) {
            TFIDFConverter.processTfIdf(new Path(outputDir, DictionaryVectorizer.DOCUMENT_VECTOR_OUTPUT_FOLDER),
                    outputDir, conf, docFrequenciesFeatures, minDf, maxDF, norm, logNormalize,
                    sequentialAccessOutput, namedVectors, reduceTasks);
        }
    } catch (OptionException e) {
        log.error("Exception", e);
        CommandLineUtil.printHelp(group);
    }
    return 0;
}
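
The constant appears in this driver only when translating the --norm option: the literal "INF" selects the infinite norm, and anything else is parsed as a float. Isolated as a hypothetical helper, the mapping looks like this:

// Hypothetical helper isolating the norm-option handling above;
// PartialVectorMerger.NO_NORMALIZING is Mahout's "do not normalize" sentinel.
static float parseNormOption(String power) {
    if (power == null) {
        return PartialVectorMerger.NO_NORMALIZING; // option absent: no normalization
    }
    if ("INF".equals(power)) {
        return Float.POSITIVE_INFINITY; // infinite (maximum) norm
    }
    return Float.parseFloat(power); // e.g. "1" for L1, "2" for L2
}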

From source file:edu.umich.flowfence.service.SandboxManager.java

public synchronized Sandbox tryGetSandboxForCall(CallRecord record) {
    Sandbox cheapestSandbox = null;

    if (localLOGD) {
        Log.d(TAG, "getSandboxForCall " + record);
        dumpSandboxes();
    }
    int numHandles = record.getPredecessors().size();
    int numUnmarshalledHandles = 0;
    for (Handle h : record.getPredecessors()) {
        if (!h.isMarshalled()) {
            numUnmarshalledHandles++;
        }
    }

    // Start out with the hot spare.
    float cheapestCost = !mHotSpares.isEmpty() ? COST_LOAD_CODE + COST_RESOLVE_QM + 0.01f
            + COST_MARSHAL_OUT * numUnmarshalledHandles + COST_MARSHAL_IN * numHandles
            : Float.POSITIVE_INFINITY;
    if (localLOGD) {
        Log.d(TAG, String.format("%d handles, %d unmarshalled", numHandles, numUnmarshalledHandles));
        Log.d(TAG, String.format("Hot spare cost: %g", cheapestCost));
    }

    // Can we get away with an idle sandbox?
    TaintSet inboundTaint = record.getInboundTaints();
    if (localLOGD) {
        Log.d(TAG, String.format("Inbound taint: %s", inboundTaint));
    }
    final String packageName = record.getQM().getDescriptor().definingClass.getPackageName();
    for (Sandbox sb : mIdleSandboxes.keySet()) {
        String assignedPackage = sb.getAssignedPackage();
        if (assignedPackage != null && !assignedPackage.equals(packageName)) {
            continue;
        }

        TaintSet sandboxTaint = sb.getTaints();
        boolean mustMarshalOut = false;
        if (!sandboxTaint.isSubsetOf(inboundTaint)) {
            // SB more tainted than inbound; skip this SB
            continue;
        } else if (!inboundTaint.isSubsetOf(sandboxTaint)) {
            // Inbound more tainted than SB; must marshal things out of SB
            mustMarshalOut = true;
        }

        float cost = 0.0f;
        if (!sb.hasLoadedPackage(packageName)) {
            // Need to load this package into this sandbox.
            cost += COST_LOAD_CODE;
        }

        if (!record.getQM().isResolvedIn(sb)) {
            // Need to resolve the QM in this sandbox.
            cost += COST_RESOLVE_QM;
        }

        if (mustMarshalOut) {
            cost += COST_MARSHAL_OUT * sb.countUnmarshalledObjects();
        }

        for (Handle h : record.getPredecessors()) {
            if (!h.isLiveIn(sb)) {
                cost += COST_MARSHAL_IN;
                if (!h.isMarshalled()) {
                    cost += COST_MARSHAL_OUT;
                }
            } else if (h.getRefCount() == 1) {
                // We won't need to marshal this one out after all.
                cost -= COST_MARSHAL_OUT;
            }
        }

        if (localLOGD) {
            Log.d(TAG, String.format("%s %s: cost %g", sb, sandboxTaint, cost));
        }

        if (cost < cheapestCost) {
            cheapestCost = cost;
            cheapestSandbox = sb;
        }
    }

    if (localLOGD) {
        Log.d(TAG, String.format("Final choice: %s, cost %g",
                (cheapestSandbox == null) ? "hot spare" : cheapestSandbox, cheapestCost));
    }

    return (cheapestSandbox != null) ? cheapestSandbox : tryGetHotSpare();
}
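
When no hot spare exists, cheapestCost starts at Float.POSITIVE_INFINITY, so the first viable sandbox always wins the cost < cheapestCost comparison and no special first-iteration case is needed. The same minimum-search idiom in isolation (names are illustrative):

// Minimum search seeded with POSITIVE_INFINITY: every finite cost beats the seed.
static int indexOfCheapest(float[] costs) {
    float cheapest = Float.POSITIVE_INFINITY;
    int index = -1; // -1 means "no candidate", like the null sandbox above
    for (int i = 0; i < costs.length; i++) {
        if (costs[i] < cheapest) {
            cheapest = costs[i];
            index = i;
        }
    }
    return index;
}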

From source file:com.digitalpebble.behemoth.mahout.SparseVectorsFromBehemoth.java

public int run(String[] args) throws Exception {
    DefaultOptionBuilder obuilder = new DefaultOptionBuilder();
    ArgumentBuilder abuilder = new ArgumentBuilder();
    GroupBuilder gbuilder = new GroupBuilder();

    Option inputDirOpt = DefaultOptionCreator.inputOption().create();

    Option outputDirOpt = DefaultOptionCreator.outputOption().create();

    Option minSupportOpt = obuilder.withLongName("minSupport")
            .withArgument(abuilder.withName("minSupport").withMinimum(1).withMaximum(1).create())
            .withDescription("(Optional) Minimum Support. Default Value: 2").withShortName("s").create();

    Option typeNameOpt = obuilder.withLongName("typeToken").withRequired(false)
            .withArgument(abuilder.withName("typeToken").withMinimum(1).withMaximum(1).create())
            .withDescription("The annotation type for Tokens").withShortName("t").create();

    Option featureNameOpt = obuilder.withLongName("featureName").withRequired(false)
            .withArgument(abuilder.withName("featureName").withMinimum(1).withMaximum(1).create())
            .withDescription(
                    "The name of the feature containing the token values, uses the text if unspecified")
            .withShortName("f").create();

    Option analyzerNameOpt = obuilder.withLongName("analyzerName")
            .withArgument(abuilder.withName("analyzerName").withMinimum(1).withMaximum(1).create())
            .withDescription("The class name of the analyzer").withShortName("a").create();

    Option chunkSizeOpt = obuilder.withLongName("chunkSize")
            .withArgument(abuilder.withName("chunkSize").withMinimum(1).withMaximum(1).create())
            .withDescription("The chunkSize in MegaBytes. 100-10000 MB").withShortName("chunk").create();

    Option weightOpt = obuilder.withLongName("weight").withRequired(false)
            .withArgument(abuilder.withName("weight").withMinimum(1).withMaximum(1).create())
            .withDescription("The kind of weight to use. Currently TF or TFIDF").withShortName("wt").create();

    Option minDFOpt = obuilder.withLongName("minDF").withRequired(false)
            .withArgument(abuilder.withName("minDF").withMinimum(1).withMaximum(1).create())
            .withDescription("The minimum document frequency.  Default is 1").withShortName("md").create();

    Option maxDFPercentOpt = obuilder.withLongName("maxDFPercent").withRequired(false)
            .withArgument(abuilder.withName("maxDFPercent").withMinimum(1).withMaximum(1).create())
            .withDescription(
                    "The max percentage of docs for the DF.  Can be used to remove really high frequency terms."
                            + " Expressed as an integer between 0 and 100. Default is 99.  If maxDFSigma is also set, it will override this value.")
            .withShortName("x").create();

    Option maxDFSigmaOpt = obuilder.withLongName("maxDFSigma").withRequired(false)
            .withArgument(abuilder.withName("maxDFSigma").withMinimum(1).withMaximum(1).create())
            .withDescription(
                    "What portion of the tf (tf-idf) vectors to be used, expressed in times the standard deviation (sigma) of the document frequencies of these vectors."
                            + "  Can be used to remove really high frequency terms."
                            + " Expressed as a double value. Good value to be specified is 3.0. In case the value is less then 0 no vectors "
                            + "will be filtered out. Default is -1.0.  Overrides maxDFPercent")
            .withShortName("xs").create();

    Option minLLROpt = obuilder.withLongName("minLLR").withRequired(false)
            .withArgument(abuilder.withName("minLLR").withMinimum(1).withMaximum(1).create())
            .withDescription("(Optional)The minimum Log Likelihood Ratio(Float)  Default is "
                    + LLRReducer.DEFAULT_MIN_LLR)
            .withShortName("ml").create();

    Option numReduceTasksOpt = obuilder.withLongName("numReducers")
            .withArgument(abuilder.withName("numReducers").withMinimum(1).withMaximum(1).create())
            .withDescription("(Optional) Number of reduce tasks. Default Value: 1").withShortName("nr")
            .create();

    Option powerOpt = obuilder.withLongName("norm").withRequired(false)
            .withArgument(abuilder.withName("norm").withMinimum(1).withMaximum(1).create())
            .withDescription(
                    "The norm to use, expressed as either a float or \"INF\" if you want to use the Infinite norm.  "
                            + "Must be greater or equal to 0.  The default is not to normalize")
            .withShortName("n").create();

    Option logNormalizeOpt = obuilder.withLongName("logNormalize").withRequired(false)
            .withDescription("(Optional) Whether output vectors should be logNormalize. If set true else false")
            .withShortName("lnorm").create();

    Option maxNGramSizeOpt = obuilder.withLongName("maxNGramSize").withRequired(false)
            .withArgument(abuilder.withName("ngramSize").withMinimum(1).withMaximum(1).create())
            .withDescription("(Optional) The maximum size of ngrams to create"
                    + " (2 = bigrams, 3 = trigrams, etc) Default Value:1")
            .withShortName("ng").create();

    Option sequentialAccessVectorOpt = obuilder.withLongName("sequentialAccessVector").withRequired(false)
            .withDescription(
                    "(Optional) Whether output vectors should be SequentialAccessVectors. If set true else false")
            .withShortName("seq").create();

    Option namedVectorOpt = obuilder.withLongName("namedVector").withRequired(false)
            .withDescription("(Optional) Whether output vectors should be NamedVectors. If set true else false")
            .withShortName("nv").create();

    Option overwriteOutput = obuilder.withLongName("overwrite").withRequired(false)
            .withDescription("If set, overwrite the output directory").withShortName("ow").create();

    Option labelMDOpt = obuilder.withLongName("labelMDKey").withRequired(false)
            .withArgument(abuilder.withName("label_md_key").create())
            .withDescription("Document metadata holding the label").withShortName("label").create();

    Option helpOpt = obuilder.withLongName("help").withDescription("Print out help").withShortName("h")
            .create();

    Group group = gbuilder.withName("Options").withOption(minSupportOpt).withOption(typeNameOpt)
            .withOption(featureNameOpt).withOption(analyzerNameOpt).withOption(chunkSizeOpt)
            .withOption(outputDirOpt).withOption(inputDirOpt).withOption(minDFOpt).withOption(maxDFSigmaOpt)
            .withOption(maxDFPercentOpt).withOption(weightOpt).withOption(powerOpt).withOption(minLLROpt)
            .withOption(numReduceTasksOpt).withOption(maxNGramSizeOpt).withOption(overwriteOutput)
            .withOption(helpOpt).withOption(sequentialAccessVectorOpt).withOption(namedVectorOpt)
            .withOption(logNormalizeOpt).withOption(labelMDOpt).create();
    CommandLine cmdLine = null;
    try {
        Parser parser = new Parser();
        parser.setGroup(group);
        parser.setHelpOption(helpOpt);
        cmdLine = parser.parse(args);

        if (cmdLine.hasOption(helpOpt)) {
            CommandLineUtil.printHelp(group);
            return -1;
        }

        if (!cmdLine.hasOption(inputDirOpt)) {
            CommandLineUtil.printHelp(group);
            return -1;
        }

        if (!cmdLine.hasOption(outputDirOpt)) {
            CommandLineUtil.printHelp(group);
            return -1;
        }

    } catch (OptionException e) {
        log.error("Exception", e);
        CommandLineUtil.printHelp(group);
        return -1;
    }

    Path inputDir = new Path((String) cmdLine.getValue(inputDirOpt));
    Path outputDir = new Path((String) cmdLine.getValue(outputDirOpt));

    int chunkSize = 100;
    if (cmdLine.hasOption(chunkSizeOpt)) {
        chunkSize = Integer.parseInt((String) cmdLine.getValue(chunkSizeOpt));
    }
    int minSupport = 2;
    if (cmdLine.hasOption(minSupportOpt)) {
        String minSupportString = (String) cmdLine.getValue(minSupportOpt);
        minSupport = Integer.parseInt(minSupportString);
    }

    int maxNGramSize = 1;

    if (cmdLine.hasOption(maxNGramSizeOpt)) {
        try {
            maxNGramSize = Integer.parseInt(cmdLine.getValue(maxNGramSizeOpt).toString());
        } catch (NumberFormatException ex) {
            log.warn("Could not parse ngram size option");
        }
    }
    log.info("Maximum n-gram size is: {}", maxNGramSize);

    if (cmdLine.hasOption(overwriteOutput)) {
        HadoopUtil.delete(getConf(), outputDir);
    }

    float minLLRValue = LLRReducer.DEFAULT_MIN_LLR;
    if (cmdLine.hasOption(minLLROpt)) {
        minLLRValue = Float.parseFloat(cmdLine.getValue(minLLROpt).toString());
    }
    log.info("Minimum LLR value: {}", minLLRValue);

    int reduceTasks = 1;
    if (cmdLine.hasOption(numReduceTasksOpt)) {
        reduceTasks = Integer.parseInt(cmdLine.getValue(numReduceTasksOpt).toString());
    }
    log.info("Number of reduce tasks: {}", reduceTasks);

    Class<? extends Analyzer> analyzerClass = DefaultAnalyzer.class;
    if (cmdLine.hasOption(analyzerNameOpt)) {
        String className = cmdLine.getValue(analyzerNameOpt).toString();
        analyzerClass = Class.forName(className).asSubclass(Analyzer.class);
        // try instantiating it, b/c there isn't any point in setting it
        // if
        // you can't instantiate it
        ClassUtils.instantiateAs(analyzerClass, Analyzer.class);
    }

    String type = null;
    String featureName = "";
    if (cmdLine.hasOption(typeNameOpt)) {
        type = cmdLine.getValue(typeNameOpt).toString();
        Object tempFN = cmdLine.getValue(featureNameOpt);
        if (tempFN != null) {
            featureName = tempFN.toString();
            log.info("Getting tokens from " + type + "." + featureName);
        } else
            log.info("Getting tokens from " + type);
    }

    boolean processIdf;

    if (cmdLine.hasOption(weightOpt)) {
        String wString = cmdLine.getValue(weightOpt).toString();
        if ("tf".equalsIgnoreCase(wString)) {
            processIdf = false;
        } else if ("tfidf".equalsIgnoreCase(wString)) {
            processIdf = true;
        } else {
            throw new OptionException(weightOpt);
        }
    } else {
        processIdf = true;
    }

    int minDf = 1;
    if (cmdLine.hasOption(minDFOpt)) {
        minDf = Integer.parseInt(cmdLine.getValue(minDFOpt).toString());
    }
    int maxDFPercent = 99;
    if (cmdLine.hasOption(maxDFPercentOpt)) {
        maxDFPercent = Integer.parseInt(cmdLine.getValue(maxDFPercentOpt).toString());
    }
    double maxDFSigma = -1.0;
    if (cmdLine.hasOption(maxDFSigmaOpt)) {
        maxDFSigma = Double.parseDouble(cmdLine.getValue(maxDFSigmaOpt).toString());
    }

    float norm = PartialVectorMerger.NO_NORMALIZING;
    if (cmdLine.hasOption(powerOpt)) {
        String power = cmdLine.getValue(powerOpt).toString();
        if ("INF".equals(power)) {
            norm = Float.POSITIVE_INFINITY;
        } else {
            norm = Float.parseFloat(power);
        }
    }

    boolean logNormalize = false;
    if (cmdLine.hasOption(logNormalizeOpt)) {
        logNormalize = true;
    }

    String labelMDKey = null;
    if (cmdLine.hasOption(labelMDOpt)) {
        labelMDKey = cmdLine.getValue(labelMDOpt).toString();
    }

    Configuration conf = getConf();
    Path tokenizedPath = new Path(outputDir, DocumentProcessor.TOKENIZED_DOCUMENT_OUTPUT_FOLDER);

    // an annotation type is defined: tokenize based on the annotations
    if (type != null) {
        BehemothDocumentProcessor.tokenizeDocuments(inputDir, type, featureName, tokenizedPath);
    }
    // no annotation type defined : rely on Lucene's analysers
    else {
        BehemothDocumentProcessor.tokenizeDocuments(inputDir, analyzerClass, tokenizedPath, conf);
    }
    boolean sequentialAccessOutput = false;
    if (cmdLine.hasOption(sequentialAccessVectorOpt)) {
        sequentialAccessOutput = true;
    }

    boolean namedVectors = false;
    if (cmdLine.hasOption(namedVectorOpt)) {
        namedVectors = true;
    }
    boolean shouldPrune = maxDFSigma >= 0.0;
    String tfDirName = shouldPrune ? DictionaryVectorizer.DOCUMENT_VECTOR_OUTPUT_FOLDER + "-toprune"
            : DictionaryVectorizer.DOCUMENT_VECTOR_OUTPUT_FOLDER;

    try {
        if (!processIdf) {
            DictionaryVectorizer.createTermFrequencyVectors(tokenizedPath, outputDir, tfDirName, conf,
                    minSupport, maxNGramSize, minLLRValue, norm, logNormalize, reduceTasks, chunkSize,
                    sequentialAccessOutput, namedVectors);
        } else {
            DictionaryVectorizer.createTermFrequencyVectors(tokenizedPath, outputDir, tfDirName, conf,
                    minSupport, maxNGramSize, minLLRValue, -1.0f, false, reduceTasks, chunkSize,
                    sequentialAccessOutput, namedVectors);
        }
        Pair<Long[], List<Path>> docFrequenciesFeatures = null;
        // Should document frequency features be processed
        if (shouldPrune || processIdf) {
            docFrequenciesFeatures = TFIDFConverter.calculateDF(new Path(outputDir, tfDirName), outputDir, conf,
                    chunkSize);
        }

        long maxDF = maxDFPercent; // if we are pruning by std dev, then
                                   // this will get changed
        if (shouldPrune) {
            Path dfDir = new Path(outputDir, TFIDFConverter.WORDCOUNT_OUTPUT_FOLDER);
            Path stdCalcDir = new Path(outputDir, HighDFWordsPruner.STD_CALC_DIR);

            // Calculate the standard deviation
            double stdDev = BasicStats.stdDevForGivenMean(dfDir, stdCalcDir, 0.0, conf);
            long vectorCount = docFrequenciesFeatures.getFirst()[1];
            maxDF = (int) (100.0 * maxDFSigma * stdDev / vectorCount);

            // Prune the term frequency vectors
            Path tfDir = new Path(outputDir, tfDirName);
            Path prunedTFDir = new Path(outputDir, DictionaryVectorizer.DOCUMENT_VECTOR_OUTPUT_FOLDER);
            Path prunedPartialTFDir = new Path(outputDir,
                    DictionaryVectorizer.DOCUMENT_VECTOR_OUTPUT_FOLDER + "-partial");
            if (processIdf) {
                HighDFWordsPruner.pruneVectors(tfDir, prunedTFDir, prunedPartialTFDir, maxDF, conf,
                        docFrequenciesFeatures, -1.0f, false, reduceTasks);
            } else {
                HighDFWordsPruner.pruneVectors(tfDir, prunedTFDir, prunedPartialTFDir, maxDF, conf,
                        docFrequenciesFeatures, norm, logNormalize, reduceTasks);
            }
            HadoopUtil.delete(new Configuration(conf), tfDir);
        }
        if (processIdf) {
            TFIDFConverter.processTfIdf(new Path(outputDir, DictionaryVectorizer.DOCUMENT_VECTOR_OUTPUT_FOLDER),
                    outputDir, conf, docFrequenciesFeatures, minDf, maxDF, norm, logNormalize,
                    sequentialAccessOutput, namedVectors, reduceTasks);
        }

        // dump labels?
        if (labelMDKey != null) {
            conf.set(BehemothDocumentProcessor.MD_LABEL, labelMDKey);
            BehemothDocumentProcessor.dumpLabels(inputDir, new Path(outputDir, "labels"), conf);
        }
    } catch (RuntimeException e) {
        Log.error("Exception caught", e);
        return -1;
    }

    return 0;
}
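
A norm of Float.POSITIVE_INFINITY conventionally selects L-infinity (maximum) normalization: the vector is scaled by its largest absolute component instead of a p-norm sum. A sketch of that interpretation (not Mahout's actual implementation):

// Illustrative p-norm that treats POSITIVE_INFINITY as the maximum norm,
// the limit of the p-norm as p grows without bound.
static double norm(double[] v, float p) {
    if (p == Float.POSITIVE_INFINITY) {
        double max = 0.0;
        for (double x : v) {
            max = Math.max(max, Math.abs(x));
        }
        return max;
    }
    double sum = 0.0;
    for (double x : v) {
        sum += Math.pow(Math.abs(x), p);
    }
    return Math.pow(sum, 1.0 / p);
}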

From source file:edu.umich.oasis.service.SandboxManager.java

public synchronized Sandbox tryGetSandboxForCall(CallRecord record) {
    Sandbox cheapestSandbox = null;

    if (localLOGD) {
        Log.d(TAG, "getSandboxForCall " + record);
        dumpSandboxes();
    }
    int numHandles = record.getPredecessors().size();
    int numUnmarshalledHandles = 0;
    for (Handle h : record.getPredecessors()) {
        if (!h.isMarshalled()) {
            numUnmarshalledHandles++;
        }
    }

    // Start out with the hot spare.
    float cheapestCost = !mHotSpares.isEmpty() ? COST_LOAD_CODE + COST_RESOLVE_SODA + 0.01f
            + COST_MARSHAL_OUT * numUnmarshalledHandles + COST_MARSHAL_IN * numHandles
            : Float.POSITIVE_INFINITY;
    if (localLOGD) {
        Log.d(TAG, String.format("%d handles, %d unmarshalled", numHandles, numUnmarshalledHandles));
        Log.d(TAG, String.format("Hot spare cost: %g", cheapestCost));
    }

    // Can we get away with an idle sandbox?
    TaintSet inboundTaint = record.getInboundTaints();
    if (localLOGD) {
        Log.d(TAG, String.format("Inbound taint: %s", inboundTaint));
    }
    final String packageName = record.getSODA().getDescriptor().definingClass.getPackageName();
    for (Sandbox sb : mIdleSandboxes.keySet()) {
        String assignedPackage = sb.getAssignedPackage();
        if (assignedPackage != null && !assignedPackage.equals(packageName)) {
            continue;
        }

        TaintSet sandboxTaint = sb.getTaints();
        boolean mustMarshalOut = false;
        if (!sandboxTaint.isSubsetOf(inboundTaint)) {
            // SB more tainted than inbound; skip this SB
            continue;
        } else if (!inboundTaint.isSubsetOf(sandboxTaint)) {
            // Inbound more tainted than SB; must marshal things out of SB
            mustMarshalOut = true;
        }

        float cost = 0.0f;
        if (!sb.hasLoadedPackage(packageName)) {
            // Need to load this package into this sandbox.
            cost += COST_LOAD_CODE;
        }

        if (!record.getSODA().isResolvedIn(sb)) {
            // Need to resolve the SODA in this sandbox.
            cost += COST_RESOLVE_SODA;
        }

        if (mustMarshalOut) {
            cost += COST_MARSHAL_OUT * sb.countUnmarshalledObjects();
        }

        for (Handle h : record.getPredecessors()) {
            if (!h.isLiveIn(sb)) {
                cost += COST_MARSHAL_IN;
                if (!h.isMarshalled()) {
                    cost += COST_MARSHAL_OUT;
                }
            } else if (h.getRefCount() == 1) {
                // We won't need to marshal this one out after all.
                cost -= COST_MARSHAL_OUT;
            }
        }

        if (localLOGD) {
            Log.d(TAG, String.format("%s %s: cost %g", sb, sandboxTaint, cost));
        }

        if (cost < cheapestCost) {
            cheapestCost = cost;
            cheapestSandbox = sb;
        }
    }

    if (localLOGD) {
        Log.d(TAG, String.format("Final choice: %s, cost %g",
                (cheapestSandbox == null) ? "hot spare" : cheapestSandbox, cheapestCost));
    }

    return (cheapestSandbox != null) ? cheapestSandbox : tryGetHotSpare();
}

From source file:papaya.Rank.java

/** Decide how to proceed with the NaN values */
private static List<Integer> nanStrategy(IntFloatPair[] ranks, int nanStrategy) {
    List<Integer> nanPositions = null;

    // Recode, remove or record positions of NaNs        
    switch (nanStrategy) {
    case MAXIMAL: // Replace NaNs with +INFs
        recodeNaNs(ranks, Float.POSITIVE_INFINITY);
        break;
    case MINIMAL: // Replace NaNs with -INFs
        recodeNaNs(ranks, Float.NEGATIVE_INFINITY);
        break;
    case REMOVED: // Drop NaNs from data
        ranks = removeNaNs(ranks);
        break;
    case FIXED: // Record positions of NaNs
        nanPositions = getNanPositions(ranks);
        break;
    }
    return nanPositions;
}
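
The MAXIMAL strategy works because every relational comparison against NaN is false, so comparison-based code silently skips NaNs; recoding them to POSITIVE_INFINITY makes them behave as the largest possible values. A small demonstration:

public class NanRecodeDemo {
    public static void main(String[] args) {
        float[] data = { 3f, Float.NaN, 1f, 2f };

        // A plain max-scan silently ignores NaN, because NaN > max is always false.
        float max = Float.NEGATIVE_INFINITY;
        for (float x : data) {
            if (x > max) max = x;
        }
        System.out.println(max); // 3.0 -- the NaN was skipped

        // After recoding NaN to +Inf, the same scan treats it as the maximal element.
        for (int i = 0; i < data.length; i++) {
            if (Float.isNaN(data[i])) data[i] = Float.POSITIVE_INFINITY;
        }
        max = Float.NEGATIVE_INFINITY;
        for (float x : data) {
            if (x > max) max = x;
        }
        System.out.println(max); // Infinity
    }
}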

From source file:boundary.GraphPane.java

private void createTree(Dominoes domino) {

    // Matrix represents a relationship among elements
    ArrayList<Cell> nz = domino.getMat().getNonZeroData();
    MatrixDescriptor desc = domino.getMat().getMatrixDescriptor();
    float min = domino.getMat().findMinValue();
    float max = domino.getMat().findMaxValue();
    float dist = max - min;

    List<Cell> _cells = new ArrayList<>();

    if (domino.getType() == DominoType.SUPPORT) {
        for (Cell cell : nz) {

            boolean toAdd = true;

            for (Cell _c : _cells) {
                if (_c.row == cell.col && _c.col == cell.row) {
                    toAdd = false;
                    break;
                }
            }

            if (toAdd) {
                _cells.add(cell);
            }
        }
    } else {
        _cells = nz;
    }

    for (Cell cell : _cells) {
        NodeInfo n1 = null;
        NodeInfo n2 = null;
        String id1 = null;
        String id2 = null;

        if (domino.isSquare() && domino.getIdRow().equals(domino.getIdCol())) {

            if (cell.row == cell.col) {
                continue;
            }
        }

        if (domino.isSquare()) {
            id1 = Integer.toString(cell.row);
            id2 = Integer.toString(cell.col);
        } else {
            id1 = "R" + cell.row;
            id2 = "" + cell.col;
        }

        if (nodes.containsKey(id1)) {
            n1 = nodes.get(id1);
        } else {
            n1 = new NodeInfo(id1);
            n1.setColor(Color.BLUE);
            n1.setUserData(desc.getRowAt(cell.row));
            n1.setThreshold(Float.POSITIVE_INFINITY);
            nodes.put(n1.toString(), n1);
            graph.addVertex(n1.toString());
        }

        if (nodes.containsKey(id2)) {
            n2 = nodes.get(id2);
            n2.setThreshold(Math.max(n2.getThreshold(), cell.value));
        } else {

            n2 = new NodeInfo(id2);
            if (domino.getType() == DominoType.SUPPORT)
                n2.setColor(Color.BLUE);
            else
                n2.setColor(Color.GREEN);

            n2.setUserData(desc.getColumnAt(cell.col));
            n2.setThreshold(cell.value);
            nodes.put(n2.toString(), n2);
            graph.addVertex(n2.toString());
        }
        float perc = cell.value / dist;
        float intensityColor = Math.max(0.1f, perc);

        NodeLink edge = new NodeLink("E" + n1.toString() + n2.toString(), cell.value);
        edge.setColor(new Color(1.0f - intensityColor, 1.0f - intensityColor, 1.0f - intensityColor));

        edges.put(edge.getId(), edge);
        if (domino.getType() == DominoType.SUPPORT)
            graph.addEdge(edge.getId(), n1.toString(), n2.toString(), EdgeType.UNDIRECTED);
        else
            graph.addEdge(edge.getId(), n1.toString(), n2.toString(), EdgeType.DIRECTED);

    }

    vertexPredicate = new VertexDisplayPredicate(nodes);
    edgePredicate = new DirectionDisplayPredicate(nodes, edges);

}
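
One hazard worth noting in this method: dist = max - min is zero whenever all cell values are equal, and float division by zero does not throw, so cell.value / dist silently becomes Float.POSITIVE_INFINITY (or NaN if cell.value is also zero), and the Color constructor in the method above then rejects the out-of-range component. A guarded version of the intensity computation, as a sketch reusing the same names:

// Sketch of a guarded intensity computation; names mirror createTree above.
float dist = max - min;
float perc = (dist == 0f) ? 1.0f            // all values equal: use full intensity
                          : cell.value / dist;
// Without the guard, dist == 0 would make perc infinite (or NaN), and
// new Color(1.0f - intensityColor, ...) would throw IllegalArgumentException.
float intensityColor = Math.min(1.0f, Math.max(0.1f, perc));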

From source file:com.opera.core.systems.scope.stp.services.ScopeEcmascriptService.java

/**
 * Parses a reply and returns the result of the script execution.  The result from EcmaScript is
 * converted based on the object types specified by Scope:
 *
 * <dl> <dt>undefined</dt> <dd> The undefined type, returned when no value was found or
 * "undefined" was referenced.  Will return null. </dd>
 *
 * <dt>true</dt> <dd>Boolean true value.</dd>
 *
 * <dt>false</dt> <dd>Boolean false value.</dd>
 *
 * <dt>NaN</dt> <dd> NaN value (not a number) which cannot be exported to JSON directly.  Will be
 * treated as a number, and returns a {@link Float#NaN} reference. </dd>
 *
 * <dt>Infinity</dt> <dd> Plus infinity value which cannot be exported to JSON directly.  Will be
 * treated as a number, and returns a {@link Float#POSITIVE_INFINITY} reference. </dd>
 *
 * <dt>-Infinity</dt> <dd> Negative infinity value which cannot be exported to JSON directly. Will
 * be treated as a number, and returns a {@link Float#NEGATIVE_INFINITY} reference. </dd>
 *
 * <dt>number</dt> <dd>A number, will return a long or double value depending on its value.</dd>
 *
 * <dt>string</dt> <dd>A string.</dd>
 *
 * <dt>object</dt> <dd> A non-primitive value in EcmaScript, typically a generic object.  This
 * includes functions and arrays. </dd> </dl>
 *
 * @param result the result of a script execution
 * @return the parsed result of a reply
 */
private Object parseEvalReply(EvalResult result) {
    Status status = result.getStatus();

    switch (status) {
    case CANCELLED:
        return null;
    case EXCEPTION:
        throw new ScopeException("EcmaScript exception");
    case NO_MEMORY:
        //releaseObjects();
        throw new ScopeException("Out of memory");
    case FAILURE:
        throw new ScopeException("Could not execute script");
    }

    Value value = result.getValue();
    Type type = value.getType();

    switch (type) {
    case STRING:
        return value.getStr();
    case FALSE:
        return false;
    case TRUE:
        return true;
    case OBJECT:
        return value.getObject();
    case NUMBER:
        return parseNumber(String.valueOf(value.getNumber()));
    case NAN:
        return Float.NaN;
    case MINUS_INFINITY:
        return Float.NEGATIVE_INFINITY;
    case PLUS_INFINITY:
        return Float.POSITIVE_INFINITY;
    case UNDEFINED:
    case NULL:
    default:
        return null;
    }
}
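
parseNumber is not shown in this excerpt. A plausible reconstruction, assuming it returns a Long for whole values and a Double otherwise, as the Javadoc above describes:

// Hypothetical sketch of parseNumber, matching the documented behavior:
// whole numbers come back as Long, everything else as Double.
private Object parseNumber(String number) {
    double d = Double.parseDouble(number);
    if (d == Math.floor(d) && !Double.isInfinite(d)) {
        return (long) d; // whole value: return as long
    }
    return d; // fractional value: return as double
}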

From source file:com.cognitect.transit.TransitTest.java

public void testSpecialNumbers() throws Exception {
    assertEquals(scalar("\"~zNaN\""), writeJson(Double.NaN));
    assertEquals(scalar("\"~zINF\""), writeJson(Double.POSITIVE_INFINITY));
    assertEquals(scalar("\"~z-INF\""), writeJson(Double.NEGATIVE_INFINITY));

    assertEquals(scalar("\"~zNaN\""), writeJson(Float.NaN));
    assertEquals(scalar("\"~zINF\""), writeJson(Float.POSITIVE_INFINITY));
    assertEquals(scalar("\"~z-INF\""), writeJson(Float.NEGATIVE_INFINITY));

    assertEquals(scalarVerbose("\"~zNaN\""), writeJsonVerbose(Double.NaN));
    assertEquals(scalarVerbose("\"~zINF\""), writeJsonVerbose(Double.POSITIVE_INFINITY));
    assertEquals(scalarVerbose("\"~z-INF\""), writeJsonVerbose(Double.NEGATIVE_INFINITY));

    assertEquals(scalarVerbose("\"~zNaN\""), writeJsonVerbose(Float.NaN));
    assertEquals(scalarVerbose("\"~zINF\""), writeJsonVerbose(Float.POSITIVE_INFINITY));
    assertEquals(scalarVerbose("\"~z-INF\""), writeJsonVerbose(Float.NEGATIVE_INFINITY));
}
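
Float and Double infinities serialize to the same ~zINF token, which lines up with Java's widening rules: converting a float infinity to double preserves it exactly.

public class InfinityWideningDemo {
    public static void main(String[] args) {
        // Widening float -> double keeps infinities (and NaN) intact,
        // so both types map naturally onto a single wire representation.
        System.out.println((double) Float.POSITIVE_INFINITY == Double.POSITIVE_INFINITY); // true
        System.out.println((double) Float.NEGATIVE_INFINITY == Double.NEGATIVE_INFINITY); // true
        System.out.println(Double.isNaN((double) Float.NaN)); // true
    }
}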

From source file:gov.nih.nci.caarray.util.CaArrayUtils.java

private static String toXmlString(float value) {
    if (Float.isNaN(value)) {
        return "NaN";
    } else if (value == Float.POSITIVE_INFINITY) {
        return "INF";
    } else if (value == Float.NEGATIVE_INFINITY) {
        return "-INF";
    } else {
        return Float.toString(value);
    }
}
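
The natural counterpart is a parser for the same three XML Schema tokens; a hypothetical companion method, not part of CaArrayUtils:

// Hypothetical inverse of toXmlString; the token spellings ("NaN", "INF",
// "-INF") come from the method above and the XML Schema float lexical space.
private static float fromXmlString(String value) {
    if ("NaN".equals(value)) {
        return Float.NaN;
    } else if ("INF".equals(value)) {
        return Float.POSITIVE_INFINITY;
    } else if ("-INF".equals(value)) {
        return Float.NEGATIVE_INFINITY;
    } else {
        return Float.parseFloat(value);
    }
}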