Example usage for java.lang Math log10

Introduction

On this page you can find example usages of java.lang.Math.log10, collected from open-source projects.

Prototype

@HotSpotIntrinsicCandidate
public static double log10(double a) 

Document

Returns the base 10 logarithm of a double value.
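
The special cases follow the usual floating-point conventions: an argument of zero yields negative infinity, a negative argument or NaN yields NaN, and exact integer powers of ten map to exact integers. A minimal standalone sketch (not taken from any of the projects below):

public class Log10Demo {
    public static void main(String[] args) {
        System.out.println(Math.log10(1000.0)); // 3.0, since 10^3 = 1000
        System.out.println(Math.log10(1e17));   // 17.0: exact for integer powers of ten
        System.out.println(Math.log10(0.0));    // -Infinity
        System.out.println(Math.log10(-1.0));   // NaN: the argument must be positive
    }
}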

Usage

From source file:uk.ac.kcl.it.MagicSquare.java

@PostConstruct
public void readFile() throws IOException {
    // If included in an Eclipse project.
    InputStream stream = ClassLoader.getSystemResourceAsStream("magic-square.csv");
    BufferedReader buffer = new BufferedReader(new InputStreamReader(stream));

    // If the file sits in the working directory instead, comment out the
    // two lines above and uncomment the line that follows.
    //BufferedReader buffer = new BufferedReader(new FileReader(filename));

    String line;
    int row = 0;

    while ((line = buffer.readLine()) != null) {
        String[] vals = line.trim().split("\\s+");

        // Lazy instantiation.
        if (matrix == null) {
            size = vals.length;
            matrix = new int[size][size];
            log10 = (int) Math.floor(Math.log10(size * size)) + 1;
            numberFormat = String.format("%%%dd", log10);
        }

        for (int col = 0; col < size; col++) {
            matrix[row][col] = Integer.parseInt(vals[col]);
        }

        row++;
    }
}
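
Here Math.log10 sizes the output format: for a positive integer n, (int) Math.floor(Math.log10(n)) + 1 is its number of decimal digits, so the field width fits the largest cell value, size * size. The idiom in isolation, as a sketch (the DigitWidth class and digits helper are ours, not part of the project):

public class DigitWidth {
    // Decimal digits in a positive integer: 9 -> 1, 10 -> 2, 100 -> 3.
    static int digits(int n) {
        return (int) Math.floor(Math.log10(n)) + 1;
    }

    public static void main(String[] args) {
        System.out.println(digits(81));                         // 2: a 9x9 square holds values up to 81
        System.out.println(String.format("%%%dd", digits(81))); // "%2d", the format built in readFile()
    }
}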

From source file:org.radargun.reporting.BarPlotGenerator.java

/**
 * @param operation Name of the plotted operation
 * @param ranges ranges[0] = min, ranges[ranges.length - 1] = max
 * @param counts counts[i] is number of entries with value >= ranges[i - 1] and < ranges[i]
 * @param reportDir
 * @param filename
 * @throws IOException
 */
public static void generate(String operation, long[] ranges, long[] counts, String reportDir, String filename)
        throws IOException {
    XYSeries series = new XYSeries(operation + " response times");
    long totalCount = 0;
    for (long count : counts) {
        totalCount += count;
    }
    double left = Math.log10(ranges[0]);
    double right = Math.log10(ranges[ranges.length - 1]);

    for (int i = 0; i < counts.length; i++) {
        series.add(Math.log10(ranges[i]), (double) counts[i] / totalCount);
    }
    series.add(right, 0d);
    XYDataset dataset = new XYSeriesCollection(series);
    JFreeChart chart = ChartFactory.createXYStepAreaChart(operation + " response time histogram",
            "Response time", "Percentage", dataset, PlotOrientation.VERTICAL, false, false, false);
    XYPlot plot = (XYPlot) chart.getPlot();
    NumberAxis d = (NumberAxis) plot.getDomainAxis();
    d.setRange(left, right);
    d.setStandardTickUnits(new HistoTickUnitSource());
    plot.setDomainAxis(d);
    FileOutputStream output = new FileOutputStream(new File(reportDir + File.separator + filename));
    ChartUtilities.writeChartAsPNG(output, chart, 1024, 768);
    output.close();
}
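
The domain values are plotted as Math.log10(ranges[i]) so that response times spanning several orders of magnitude stay readable; the custom tick unit source then labels the transformed axis. The effect of the transform in isolation (the numbers are illustrative, not from RadarGun):

public class LogDomainDemo {
    public static void main(String[] args) {
        long[] ranges = { 100, 1000, 10000, 100000 }; // e.g. response times in ns
        for (long r : ranges) {
            System.out.println(Math.log10(r)); // 2.0, 3.0, 4.0, 5.0: evenly spaced on the axis
        }
    }
}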

From source file:ldbc.snb.datagen.generator.distribution.utils.Bucket.java

public static ArrayList<Bucket> bucketizeHistogram(ArrayList<Pair<Integer, Integer>> histogram,
        int num_buckets) {

    ArrayList<Bucket> buckets = new ArrayList<Bucket>();
    int population = 0;
    int num_edges = 0;
    for (Pair<Integer, Integer> i : histogram) {
        population += i.getValue();
        num_edges += i.getValue() * i.getKey();
    }
    num_edges /= 2;

    int avgDegreeAt1B = 200;
    int avgDegree = num_edges / population;
    double aCoeff = Math.log(avgDegreeAt1B) / Math.log(1000000000);
    double bCoeff = (aCoeff - (Math.log(avgDegree) / Math.log(population))) / Math.log10(population);

    int target_mean = (int) Math.round(
            Math.pow(DatagenParams.numPersons, (aCoeff - bCoeff * Math.log10(DatagenParams.numPersons))));
    System.out.println("Distribution mean degree: " + avgDegree + " Distribution target mean " + target_mean);
    int bucket_size = (int) (Math.ceil(population / (double) (num_buckets)));
    int current_histogram_index = 0;
    int current_histogram_left = histogram.get(current_histogram_index).getValue();
    for (int i = 0; i < num_buckets && (current_histogram_index < histogram.size()); ++i) {
        int current_bucket_count = 0;
        int min = population;
        int max = 0;
        while (current_bucket_count < bucket_size && current_histogram_index < histogram.size()) {
            int degree = histogram.get(current_histogram_index).getKey();
            min = degree < min ? degree : min;
            max = degree > max ? degree : max;
            if ((bucket_size - current_bucket_count) > current_histogram_left) {
                current_bucket_count += current_histogram_left;
                current_histogram_index++;
                if (current_histogram_index < histogram.size()) {
                    current_histogram_left = histogram.get(current_histogram_index).getValue();
                }
            } else {
                current_histogram_left -= (bucket_size - current_bucket_count);
                current_bucket_count = bucket_size;
            }
        }
        min = (int) (min * target_mean / (double) avgDegree);
        max = (int) (max * target_mean / (double) avgDegree);
        buckets.add(new Bucket(min, max));
    }
    return buckets;
}

From source file:ricecompression.RiceCompression.java

public String compress(int m, int n) {
    String riceCode;
    int nBitsM = (int) (Math.log10(m) / Math.log10(2)); // bits for the remainder: log2(m)
    if (n < 0)
        riceCode = "0"; // negative value
    else
        riceCode = "1"; // non-negative value
    int q = Math.abs(n) / m;
    char[] array = new char[q];
    Arrays.fill(array, '1');
    if (array.length > 0)
        riceCode = riceCode.concat(String.valueOf(array)); // if the quotient is greater than 0
    riceCode = riceCode.concat("0");
    int r = Math.abs(n) % m;
    String rBinary = String.format("%" + nBitsM + "s", Integer.toBinaryString(r)).replace(' ', '0');
    riceCode = riceCode.concat(rBinary);
    return riceCode;
}
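
Math.log10(m) / Math.log10(2) is the change-of-base formula for log2(m), here the bit width of the remainder (exact when m is a power of two, as Rice coding assumes). The same value can be had without floating point; a sketch of the alternative (ours, not from the project):

public class Log2Demo {
    public static void main(String[] args) {
        int m = 16;
        // Change of base, as in compress(); rounding can bite near exact powers of two:
        System.out.println((int) (Math.log10(m) / Math.log10(2)));
        // Integer-only floor(log2(m)) for m > 0:
        System.out.println(31 - Integer.numberOfLeadingZeros(m)); // 4
    }
}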

From source file:net.nicoulaj.benchmarks.math.DoubleLog10.java

@GenerateMicroBenchmark
public void math(BlackHole hole) {
    for (int i = 0; i < data.length; i++)
        hole.consume(Math.log10(data[i]));
}

From source file:org.dkpro.similarity.experiments.rte.util.CharacterNGramIdfValuesGenerator.java

public static void computeIdfScores(Dataset dataset, int n) throws Exception {
    System.out.println("Computing character " + n + "-grams");

    File outputFile = new File(
            UTILS_DIR + "/character-ngrams-idf/" + n + "/" + RteUtil.getCommonDatasetName(dataset) + ".txt");

    if (outputFile.exists()) {
        System.out.println(" - skipping, already exists");
    } else {
        // Input data
        File inputDir = new File(UTILS_DIR + "/plaintexts/" + RteUtil.getCommonDatasetName(dataset));

        Collection<File> files = FileUtils.listFiles(inputDir, new String[] { "txt" }, false);

        // Map to hold the idf values
        Map<String, Double> idfValues = new HashMap<String, Double>();

        CharacterNGramMeasure measure = new CharacterNGramMeasure(n, new HashMap<String, Double>());

        // Get n-gram representations of texts
        List<Set<String>> docs = new ArrayList<Set<String>>();

        for (File file : files) {
            Set<String> ngrams = measure.getNGrams(FileUtils.readFileToString(file));

            docs.add(ngrams);
        }

        // Get all ngrams
        Set<String> allNGrams = new HashSet<String>();
        for (Set<String> doc : docs) {
            allNGrams.addAll(doc);
        }

        // Count document frequencies: how many docs contain each n-gram
        for (String ngram : allNGrams) {
            double count = 0;
            for (Set<String> doc : docs) {
                if (doc.contains(ngram)) {
                    count++;
                }
            }
            idfValues.put(ngram, count);
        }

        // Compute the idf
        for (String lemma : idfValues.keySet()) {
            double idf = Math.log10(files.size() / idfValues.get(lemma));
            idfValues.put(lemma, idf);
        }

        // Store persistently
        StringBuilder sb = new StringBuilder();
        for (String key : idfValues.keySet()) {
            sb.append(key + "\t" + idfValues.get(key) + LF);
        }
        FileUtils.writeStringToFile(outputFile, sb.toString());

        System.out.println(" - done");
    }
}
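
The weight computed above is the classic inverse document frequency, idf(t) = log10(N / df(t)), with N the number of files and df(t) the number of files containing the n-gram; rarer n-grams therefore score higher. A worked instance (the numbers are illustrative):

public class IdfDemo {
    public static void main(String[] args) {
        // 1000 documents, n-gram found in 10 of them:
        System.out.println(Math.log10(1000.0 / 10.0));  // 2.0
        // Found in 500 of them:
        System.out.println(Math.log10(1000.0 / 500.0)); // ~0.301
    }
}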

From source file:math2605.gn_log.java

private static void setR(List<String[]> pairs, double a, double b, double c, RealMatrix r) {
    int row = 0;
    for (String[] p : pairs) {
        double x = Double.parseDouble(p[0]);
        double fx = a * Math.log10(x + b) + c;
        double y = Double.parseDouble(p[1]);
        double resid = y - fx;
        r.setEntry(row, 0, resid);
        row++;
    }
}

From source file:search10.DFA.java

public double dfa() {
    SimpleRegression reg = new SimpleRegression(true);
    int size = data.size();
    int n = 4;
    while (n <= size / 4) {
        reg.addData(Math.log10(n), Math.log10(getFluctuation(n)));
        n += 1;
    }
    return reg.getSlope();
}
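
The regression runs on log10-transformed pairs because a power law is linear in log-log space: if the fluctuation scales as F(n) = c * n^alpha, then log10 F(n) = log10 c + alpha * log10 n, so getSlope() is the DFA scaling exponent alpha. A numeric check of that identity (the constants are illustrative):

public class LogLogSlopeDemo {
    public static void main(String[] args) {
        double c = 2.0, alpha = 0.75, n = 64.0;
        double f = c * Math.pow(n, alpha);
        // Difference is ~0, up to floating-point rounding:
        System.out.println(Math.log10(f) - (Math.log10(c) + alpha * Math.log10(n)));
    }
}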

From source file:ttf.analysis.command.TfIdfDetectionCommand.java

@Override
public boolean execute(Context context) throws Exception {
    AnalysisContext ctx = (AnalysisContext) context;

    Article article = ctx.getProcessedArticle();
    String address = article.getAddress();

    TfIdfDetector detector = ctx.getTfIdfDetector();
    Collection<TfIdfEntity> entities = detector.getTfIdfForURL(address);

    PropertyGroup<String, NumericalValue> termGroup;
    termGroup = article.getTermGroup();

    PropertyGroup<String, NumericalValue> tokenAppearancy;
    tokenAppearancy = ctx.getTokenAppearancy();

    double NoArticles = ctx.getTotalArticles();

    for (TfIdfEntity entity : entities) {
        String key = entity.getToken().getValue();

        NumericalValue Appearancy = tokenAppearancy.get(key);
        if (Appearancy == null)
            Appearancy = new NumericalValue(0);

        double idf = Math.log10((NoArticles + 1) / (Appearancy.getDouble() + 1));

        entity.setIdf(idf);
        ctx.getIdf().put(key, new NumericalValue(idf));

        termGroup.put(key, new NumericalValue(entity.getTf()));

        System.out.println(entity);
    }

    log.debug("Found: " + termGroup.size() + " entities.");

    return false;
}
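
Unlike the previous example, this idf is add-one smoothed: log10((N + 1) / (df + 1)) stays finite even for tokens with no recorded appearances (df = 0), which the plain log10(N / df) formula cannot handle. A quick comparison (the counts are illustrative):

public class SmoothedIdfDemo {
    public static void main(String[] args) {
        double numArticles = 1000;
        System.out.println(Math.log10(numArticles / 0.0));             // Infinity: unsmoothed, unseen token
        System.out.println(Math.log10((numArticles + 1) / (0.0 + 1))); // ~3.0004: smoothed, still finite
    }
}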

From source file:gate.corpora.twitter.Population.java

/**
 * @param corpus
 * @param inputUrl
 * @param encoding
 * @param contentKeys
 * @param featureKeys
 * @param tweetsPerDoc 0 = put them all in one document; otherwise the number per document
 * @throws ResourceInstantiationException
 */
public static void populateCorpus(final Corpus corpus, URL inputUrl, String encoding, List<String> contentKeys,
        List<String> featureKeys, int tweetsPerDoc) throws ResourceInstantiationException {
    try {
        InputStream input = inputUrl.openStream();
        List<String> lines = IOUtils.readLines(input, encoding);
        IOUtils.closeQuietly(input);

        // TODO: sort this out so it processes one at a time instead of reading the
        // whole hog into memory

        // For now, we assume the streaming API format (concatenated maps, not in a list)
        List<Tweet> tweets = TweetUtils.readTweetStrings(lines, contentKeys, featureKeys);

        int digits = (int) Math.ceil(Math.log10(tweets.size()));
        int tweetCounter = 0;
        Document document = newDocument(inputUrl, tweetCounter, digits);
        StringBuilder content = new StringBuilder();
        Map<PreAnnotation, Integer> annotandaOffsets = new HashMap<PreAnnotation, Integer>();

        for (Tweet tweet : tweets) {
            if ((tweetsPerDoc > 0) && (tweetCounter > 0) && ((tweetCounter % tweetsPerDoc) == 0)) {
                closeDocument(document, content, annotandaOffsets, corpus);
                document = newDocument(inputUrl, tweetCounter, digits);
                content = new StringBuilder();
                annotandaOffsets = new HashMap<PreAnnotation, Integer>();
            }

            int startOffset = content.length();
            content.append(tweet.getString());
            for (PreAnnotation preAnn : tweet.getAnnotations()) {
                annotandaOffsets.put(preAnn, startOffset);
            }

            content.append('\n');
            tweetCounter++;
        } // end of Tweet loop

        if (content.length() > 0) {
            closeDocument(document, content, annotandaOffsets, corpus);
        } else {
            Factory.deleteResource(document);
        }

        if (corpus.getDataStore() != null) {
            corpus.getDataStore().sync(corpus);
        }

    } catch (Exception e) {
        throw new ResourceInstantiationException(e);
    }
}
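
Math.ceil(Math.log10(n)) above picks the zero-padding width for numbering documents: indices 0 .. n-1 fit in ceil(log10(n)) digits (n = 1000 gives 3, covering 000 through 999). Note the expression evaluates to 0 for n = 1, so a caller may want to clamp it; a sketch of the idiom with that guard (ours, not from GATE):

public class PadWidthDemo {
    public static void main(String[] args) {
        int n = 1000;
        int digits = Math.max(1, (int) Math.ceil(Math.log10(n))); // 3
        System.out.println(String.format("%0" + digits + "d", 7)); // "007"
    }
}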