Example usage for com.google.common.io Files append

List of usage examples for com.google.common.io Files append

Introduction

On this page you can find example usage for com.google.common.io Files.append.

Prototype

public static void append(CharSequence from, File to, Charset charset) throws IOException 
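
A minimal sketch of calling the method directly; the file name and content below are hypothetical. Files.append encodes the given character sequence with the supplied charset and appends it to the target file, creating the file first if it does not yet exist. Note that newer Guava releases deprecate this method in favor of Files.asCharSink(to, charset, FileWriteMode.APPEND).write(from).

import com.google.common.base.Charsets;
import com.google.common.io.Files;

import java.io.File;
import java.io.IOException;

public class AppendExample {
    public static void main(String[] args) throws IOException {
        // hypothetical target file; append creates it when it is missing
        File logFile = new File("example.log");
        Files.append("first line\n", logFile, Charsets.UTF_8);
        Files.append("second line\n", logFile, Charsets.UTF_8);
    }
}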

Usage

From source file:org.primefaces.extensions.optimizerplugin.optimizer.YuiCompressorOptimizer.java

@Override
public void optimize(final ResourcesSetAdapter rsAdapter, final Log log) throws MojoExecutionException {
    ResourcesSetCssAdapter rsa = (ResourcesSetCssAdapter) rsAdapter;
    Reader in = null;
    OutputStreamWriter out = null;

    try {
        if (rsa.getAggregation() == null) {
            // no aggregation
            for (File file : rsa.getFiles()) {
                log.info("Optimize CSS file " + file.getName() + " ...");

                // statistic
                addToOriginalSize(file);

                in = getReader(rsa, file);

                // generate output
                String path = file.getCanonicalPath();
                if (StringUtils.isNotBlank(rsa.getSuffix())) {
                    // create a new output stream
                    File outputFile = getFileWithSuffix(path, rsa.getSuffix());
                    out = new OutputStreamWriter(new FileOutputStream(outputFile), rsa.getEncoding());

                    // compress and write compressed content into the new file
                    CssCompressor compressor = new CssCompressor(in);
                    compressor.compress(out, 500);
                    closeStreams(in, out);

                    // statistic
                    addToOptimizedSize(outputFile);
                } else {
                    // path of temp. file
                    String pathOptimized = FileUtils.removeExtension(path) + OPTIMIZED_FILE_EXTENSION;

                    // create a new temp. file and output stream
                    File outputFile = new File(pathOptimized);
                    Files.touch(outputFile);
                    out = new OutputStreamWriter(new FileOutputStream(outputFile), rsa.getEncoding());

                    // compress and write compressed content into the new file
                    CssCompressor compressor = new CssCompressor(in);
                    compressor.compress(out, 500);
                    closeStreams(in, out);

                    // rename the new file (overwrite the original file)
                    FileUtils.rename(outputFile, file);

                    // statistic
                    addToOptimizedSize(file);
                }
            }
        } else if (rsa.getAggregation().getOutputFile() != null) {
            // aggregation to one output file
            File outputFile;
            Charset cset = Charset.forName(rsa.getEncoding());

            if (!rsa.getAggregation().isWithoutCompress()) {
                ByteArrayOutputStream baos = new ByteArrayOutputStream();
                OutputStreamWriter osw = new OutputStreamWriter(baos, rsa.getEncoding());

                // with compressing before aggregation
                for (File file : rsa.getFiles()) {
                    log.info("Optimize CSS file " + file.getName() + " ...");

                    // statistic
                    addToOriginalSize(file);

                    // create reader for the current file
                    in = getReader(rsa, file);

                    // compress and write compressed content into the output stream
                    CssCompressor compressor = new CssCompressor(in);
                    compressor.compress(osw, 500);

                    // close stream
                    IOUtil.close(in);
                }

                // close stream
                IOUtil.close(osw);

                if (rsa.getAggregation().getPrependedFile() != null) {
                    // statistic
                    addToOriginalSize(rsa.getAggregation().getPrependedFile());
                }

                // get right output file
                outputFile = getOutputFile(rsa);

                long sizeBefore = outputFile.length();

                if (rsa.getAggregation().getPrependedFile() != null) {
                    // write / append to be prepended file into / to the output file
                    prependFile(rsa.getAggregation().getPrependedFile(), outputFile, cset, rsa);
                }

                // write / append compiled content into / to the output file
                Files.append(baos.toString(rsa.getEncoding()), outputFile, cset);

                // statistic
                addToOptimizedSize(outputFile.length() - sizeBefore);
            } else {
                // only aggregation without compressing
                outputFile = aggregateFiles(rsa, cset, false);

                // statistic
                long size = addToOriginalSize(outputFile);
                addToOptimizedSize(size);
            }

            // delete single files if necessary
            deleteFilesIfNecessary(rsa, log);

            // rename aggregated file if necessary
            renameOutputFileIfNecessary(rsa, outputFile);
        } else {
            // should not happen
            log.error("Wrong plugin's internal state.");
        }
    } catch (Exception e) {
        throw new MojoExecutionException("Resources optimization failure: " + e.getLocalizedMessage(), e);
    } finally {
        closeStreams(in, out);
    }
}

From source file:com.cloudera.science.ml.kmeans.core.KMeansEvaluation.java

private void init() {
    predictionStrengths = Lists.newArrayListWithExpectedSize(testCenters.size());
    trainCosts = Lists.newArrayListWithExpectedSize(testCenters.size());
    testCosts = Lists.newArrayListWithExpectedSize(testCenters.size());
    stableClusters = Lists.newArrayListWithExpectedSize(testCenters.size());
    stablePoints = Lists.newArrayListWithExpectedSize(testCenters.size());

    for (int i = 0; i < testCenters.size(); i++) {
        Centers test = testCenters.get(i);
        Centers train = trainCenters.get(i);
        double trainCost = 0.0;
        double testCost = 0.0;
        double[][] assignments = new double[test.size()][train.size()];
        double totalPoints = 0.0;
        for (Weighted<Vector> wv : testPoints) {
            double wt = wv.weight();
            totalPoints += wt;
            Vector v = wv.thing();
            int testId = test.indexOfClosest(v);
            testCost += wt * v.getDistanceSquared(test.get(testId));
            int trainId = train.indexOfClosest(wv.thing());
            trainCost += wt * v.getDistanceSquared(train.get(trainId));
            assignments[testId][trainId] += wt;
        }
        trainCosts.add(trainCost);
        testCosts.add(testCost);

        double minScore = Double.POSITIVE_INFINITY;
        double points = 0;
        double clusters = 0;
        List<String> details = Lists.newArrayList();
        for (int j = 0; j < assignments.length; j++) {
            double[] assignment = assignments[j];
            double total = 0.0;
            double same = 0.0;
            for (double a : assignment) {
                total += a;
                same += a * (a - 1);
            }
            double score = total > 1 ? same / (total * (total - 1)) : 1.0;
            // Only consider clusters that contain a non-trivial number of obs
            if (total > assignment.length && score < minScore) {
                minScore = score;
            }
            if (score > 0.8) { // stability threshold
                clusters++;
                points += total;
            }
            if (detailsFile != null) {
                details.add(String.format("%d,%d,%d,%.4f", i, j, (int) total, score));
            }
        }
        predictionStrengths.add(minScore);
        stableClusters.add(clusters / assignments.length);
        stablePoints.add(points / totalPoints);
        if (detailsFile != null) {
            try {
                if (i == 0) {
                    Files.write("ClusteringId,CenterId,NumPoints,PredictionStrength\n", detailsFile,
                            Charsets.UTF_8);
                }
                Files.append(NEWLINE_JOINER.join(details) + '\n', detailsFile, Charsets.UTF_8);
            } catch (IOException e) {
                LOG.warn("Exception writing evaluation details file: {}", detailsFile, e);
            }
        }
    }
}

From source file:net.oneandone.maven.plugins.billofmaterials.CreateBillOfMaterialsMojo.java

/**
 * Writes content to the bomFile creating intermediate directories.
 *
 * @param content to write
 * @throws IOException when the target directory could not be created or the content could not be written.
 */
void write(final String content) throws IOException {
    final File bomFile = calculateBillOfMaterialsFile();
    final File parentDirectory = bomFile.getParentFile();
    if (!createParentDirectory(parentDirectory)) {
        throw new IOException("Could not create parent directory for " + bomFile);
    }
    Files.append(content, bomFile, Charsets.UTF_8);
}

From source file:com.andado.spark.examples.streaming.JavaWordBlacklist.java

private static JavaStreamingContext createContext(String ip, int port, String checkpointDirectory,
        String outputPath) {

    // If you do not see this printed, that means the StreamingContext has been loaded
    // from the new checkpoint
    System.out.println("Creating new context");
    final File outputFile = new File(outputPath);
    if (outputFile.exists()) {
        outputFile.delete();
    }
    SparkConf sparkConf = new SparkConf().setAppName("JavaRecoverableNetworkWordCount");
    // Create the context with a 1 second batch size
    JavaStreamingContext ssc = new JavaStreamingContext(sparkConf, Durations.seconds(1));
    ssc.checkpoint(checkpointDirectory);

    // Create a socket stream on target ip:port and count the
    // words in the input stream of \n delimited text (e.g. generated by 'nc')
    JavaReceiverInputDStream<String> lines = ssc.socketTextStream(ip, port);
    JavaDStream<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
        @Override
        public Iterator<String> call(String x) {
            return Arrays.asList(SPACE.split(x)).iterator();
        }
    });
    JavaPairDStream<String, Integer> wordCounts = words.mapToPair(new PairFunction<String, String, Integer>() {
        @Override
        public Tuple2<String, Integer> call(String s) {
            return new Tuple2<>(s, 1);
        }
    }).reduceByKey(new Function2<Integer, Integer, Integer>() {
        @Override
        public Integer call(Integer i1, Integer i2) {
            return i1 + i2;
        }
    });

    wordCounts.foreachRDD(new VoidFunction2<JavaPairRDD<String, Integer>, Time>() {
        @Override
        public void call(JavaPairRDD<String, Integer> rdd, Time time) throws IOException {
            // Get or register the blacklist Broadcast
            final Broadcast<List<String>> blacklist = JavaWordBlacklist
                    .getInstance(new JavaSparkContext(rdd.context()));
            // Get or register the droppedWordsCounter Accumulator
            final LongAccumulator droppedWordsCounter = JavaDroppedWordsCounter
                    .getInstance(new JavaSparkContext(rdd.context()));
            // Use blacklist to drop words and use droppedWordsCounter to count them
            String counts = rdd.filter(new Function<Tuple2<String, Integer>, Boolean>() {
                @Override
                public Boolean call(Tuple2<String, Integer> wordCount) {
                    if (blacklist.value().contains(wordCount._1())) {
                        droppedWordsCounter.add(wordCount._2());
                        return false;
                    } else {
                        return true;
                    }
                }
            }).collect().toString();
            String output = "Counts at time " + time + " " + counts;
            System.out.println(output);
            System.out.println("Dropped " + droppedWordsCounter.value() + " word(s) totally");
            System.out.println("Appending to " + outputFile.getAbsolutePath());
            Files.append(output + "\n", outputFile, Charset.defaultCharset());
        }
    });

    return ssc;
}

From source file:com.hxr.bigdata.spark.streaming.NetworkWordCount.java

private static JavaStreamingContext createContext(final String ip, final int port,
        final String checkpointDirectory, final String outputPath) {

    // If you do not see this printed, that means the StreamingContext has been loaded
    // from the new checkpoint
    System.out.println("Creating new context");
    final File outputFile = new File(outputPath);

    SparkConf sparkConf = new SparkConf().setAppName("NetworkWordCount");
    // Spark Streaming programs follow roughly four steps.
    // Step 1: create the StreamingContext. Just as a Spark program needs a SparkContext,
    // a Spark Streaming program needs a StreamingContext, built from the SparkConf (and
    // master) plus a batch interval: Durations.seconds(1) would cut the input stream into
    // 1 second batches; here a 10 second batch interval is used.
    JavaStreamingContext ssc = new JavaStreamingContext(sparkConf, Durations.seconds(10));

    // Set the checkpoint directory
    ssc.checkpoint(checkpointDirectory);

    // Create a socket stream on target ip:port and count the
    // words in the input stream of \n delimited text (e.g. generated by 'nc')
    // Step 2: create an InputDStream. Much like a Spout in Storm, it is the data source;
    // here socketTextStream is used, but Spark Streaming also offers other input streams
    // such as kafkaStream, flumeStream, fileStream and networkStream.
    JavaReceiverInputDStream<String> lines = ssc.socketTextStream(ip, port);
    JavaDStream<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
        public Iterable<String> call(final String x) {
            return Lists.newArrayList(SPACE.split(x));
        }
    });
    JavaPairDStream<String, Integer> wordCounts = words.mapToPair(new PairFunction<String, String, Integer>() {
        public Tuple2<String, Integer> call(final String s) {
            return new Tuple2<String, Integer>(s, 1);
        }
    }).reduceByKey(new Function2<Integer, Integer, Integer>() {
        public Integer call(final Integer i1, final Integer i2) {
            return i1 + i2;
        }
    });

    // updateStateByKey: maintains per-key state across batches, updating it as each new batch arrives.

    // map(func): returns a new DStream by applying func to every element of the source DStream.
    // flatMap(func): like map, but each input element can be mapped to 0 or more output elements.
    // reduceByKey(func, [numTasks]): on a DStream of (K, V) pairs, aggregates the values of each
    // key with func; the number of parallel tasks can be set via numTasks.
    // foreachRDD(func): applies func to every RDD generated by the DStream.
    // mapToPair(func): maps each element to a key-value pair.
    wordCounts.foreachRDD(new Function2<JavaPairRDD<String, Integer>, Time, Void>() {
        public Void call(final JavaPairRDD<String, Integer> rdd, final Time time) throws IOException {
            String counts = "Counts at time " + time + " " + rdd.collect();
            if (!outputFile.exists()) {
                try {
                    outputFile.createNewFile();

                } catch (IOException e) {
                    // TODO Auto-generated catch block
                    e.printStackTrace();
                }
            }
            Files.append(counts + "\n", outputFile, Charset.defaultCharset());
            return null;
        }
    });

    return ssc;
}

From source file:com.sdw.dream.spark.examples.streaming.JavaWordBlacklist.java

private static JavaStreamingContext createContext(String ip, int port, String checkpointDirectory,
        String outputPath) {

    // If you do not see this printed, that means the StreamingContext has been loaded
    // from the new checkpoint
    System.out.println("Creating new context");
    final File outputFile = new File(outputPath);
    if (outputFile.exists()) {
        outputFile.delete();
    }
    SparkConf sparkConf = new SparkConf().setAppName("JavaRecoverableNetworkWordCount");
    // Create the context with a 1 second batch size
    JavaStreamingContext ssc = new JavaStreamingContext(sparkConf, Durations.seconds(1));
    ssc.checkpoint(checkpointDirectory);

    // Create a socket stream on target ip:port and count the
    // words in the input stream of \n delimited text (e.g. generated by 'nc')
    JavaReceiverInputDStream<String> lines = ssc.socketTextStream(ip, port);
    JavaDStream<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
        @Override
        public Iterable<String> call(String x) {
            return Lists.newArrayList(SPACE.split(x));
        }
    });
    JavaPairDStream<String, Integer> wordCounts = words.mapToPair(new PairFunction<String, String, Integer>() {
        @Override
        public Tuple2<String, Integer> call(String s) {
            return new Tuple2<String, Integer>(s, 1);
        }
    }).reduceByKey(new Function2<Integer, Integer, Integer>() {
        @Override
        public Integer call(Integer i1, Integer i2) {
            return i1 + i2;
        }
    });

    wordCounts.foreachRDD(new Function2<JavaPairRDD<String, Integer>, Time, Void>() {
        @Override
        public Void call(JavaPairRDD<String, Integer> rdd, Time time) throws IOException {
            // Get or register the blacklist Broadcast
            final Broadcast<List<String>> blacklist = JavaWordBlacklist
                    .getInstance(new JavaSparkContext(rdd.context()));
            // Get or register the droppedWordsCounter Accumulator
            final Accumulator<Integer> droppedWordsCounter = JavaDroppedWordsCounter
                    .getInstance(new JavaSparkContext(rdd.context()));
            // Use blacklist to drop words and use droppedWordsCounter to count them
            String counts = rdd.filter(new Function<Tuple2<String, Integer>, Boolean>() {
                @Override
                public Boolean call(Tuple2<String, Integer> wordCount) throws Exception {
                    if (blacklist.value().contains(wordCount._1())) {
                        droppedWordsCounter.add(wordCount._2());
                        return false;
                    } else {
                        return true;
                    }
                }
            }).collect().toString();
            String output = "Counts at time " + time + " " + counts;
            System.out.println(output);
            System.out.println("Dropped " + droppedWordsCounter.value() + " word(s) totally");
            System.out.println("Appending to " + outputFile.getAbsolutePath());
            Files.append(output + "\n", outputFile, Charset.defaultCharset());
            return null;
        }
    });

    return ssc;
}

From source file:com.github.rinde.gpem17.evo.StatsLogger.java

public void printMore(EvolutionState state, Individual best, List<GPComputationResult> bestResults) {

    List<SimulationResult> results = new ArrayList<>();
    for (GPComputationResult res : bestResults) {
        results.add(((SingleResult) res).getSimulationResult());
    }

    appendResults(results, statsLog, Integer.toString(state.generation));

    File programFile = new File(experimentDirectory, "programs/best-individual-" + state.generation + ".txt");

    try {
        Files.createParentDirs(programFile);
        Files.append(bestResults.get(0).getTaskDataId(), programFile, Charsets.UTF_8);
    } catch (IOException e) {
        throw new IllegalStateException(e);
    }
}

From source file:org.dllearner.algorithms.qtl.experiments.PathDetectionTask.java

@Override
public List<Path> call() throws Exception {
    if (!cancelled) {

        // check if class was already processed
        String filename = UrlEscapers.urlFormParameterEscaper().escape(cls.toStringID()) + "-" + depth + ".log";
        File file = new File(dataDir, filename);

        if (file.exists()) {
            System.out.println(Thread.currentThread().getId() + ":" + cls.toStringID() + " already analyzed.");
            // load from disk
            List<String> lines;
            try {
                lines = Files.readLines(file, Charsets.UTF_8);

                List<Path> paths = new ArrayList<>();

                // each record spans 4 lines; the first line of each record contains the path
                for (int i = 0; i < lines.size(); i += 4) {
                    String line = lines.get(i);
                    ArrayList<String> split = Lists.newArrayList(Splitter.on("\t").split(line));
                    String object = split.remove(split.size() - 1);
                    List<Set<String>> propertyClusters = new ArrayList<>();

                    for (String clusterString : split) {
                        Set<String> cluster = new TreeSet<>();
                        for (String property : Splitter.on(",").trimResults().split(clusterString)) {
                            cluster.add(property.replace("[", "").replace("]", ""));
                        }
                        propertyClusters.add(cluster);
                    }

                    paths.add(new Path(cls, propertyClusters, object));
                }

                return paths;
            } catch (IOException e) {
                throw new RuntimeException("Path loading failed. ", e);
            }
        } else {

            QueryExecutionFactory qef;
            if (localMode) {
                // load data
                System.out.println(Thread.currentThread().getId() + ":" + "Loading data of depth " + depth
                        + " for " + cls.toStringID() + "...");
                long s = System.currentTimeMillis();
                Model data = loadDataFromCacheOrCompute(cls, depth, true);
                System.out.println(Thread.currentThread().getId() + ":" + "Got " + data.size() + " triples for "
                        + cls.toStringID() + " in " + (System.currentTimeMillis() - s) + "ms");

                qef = new QueryExecutionFactoryModel(data);
            } else {
                qef = ks.getQueryExecutionFactory();
            }

            // analyze
            System.out.println(Thread.currentThread().getId() + ":" + "Searching for " + cls.toStringID()
                    + " path of length " + depth + "...");
            long s = System.currentTimeMillis();
            List<Path> paths = findPathsOfDepthN(cls, qef, depth, maxPathsPerClass);
            System.out
                    .println(Thread.currentThread().getId() + ":" + "Finished searching for " + cls.toStringID()
                            + " path of length " + depth + " in " + (System.currentTimeMillis() - s) + "ms");

            if (paths.isEmpty()) {
                System.out.println(Thread.currentThread().getId() + ":" + "Could not find " + cls.toStringID()
                        + " path of length " + depth + ".");
            } else {
                System.out.println(Thread.currentThread().getId() + ":" + "Paths found:" + paths);

                // serialize
                String delimiter = "\t";
                try {
                    for (Path path : paths) {
                        String content = Joiner.on(delimiter).join(path.getProperties()) + delimiter
                                + path.getObject() + "\n";
                        content += path.asSPARQLQuery(Var.alloc("s")) + "\n";
                        content += path.asSPARQLPathQuery(Var.alloc("s"));
                        content += "\n#\n";
                        Files.append(content, file, Charsets.UTF_8);
                    }
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }

            return paths;
        }
    }
    return null;
}

From source file:com.github.rinde.gpem17.eval.ResultWriter.java

void createCSVWithHeader(File f) {
    try {
        Files.createParentDirs(f);
        Files.append(createHeader(), f, Charsets.UTF_8);
    } catch (final IOException e1) {
        throw new IllegalStateException(e1);
    }
}

From source file:dollar.learner.smart.ParagraphVectorsClassifierExample.java

public void learn(@NotNull String name, @NotNull SourceSegment source, @NotNull List<var> inputs,
        @NotNull Type type) {
    File corpus = new File(new File(new File(TYPE_LEARNING_DIR, "corpus"), type.name()), type.name() + ".txt");
    corpus.getParentFile().mkdirs();
    try {
        Files.append(signatureToText(name, inputs) + "\n", corpus, Charsets.UTF_8);
    } catch (IOException e) {
        e.printStackTrace();
    }
}