Example usage for java.io PrintWriter close

Introduction

On this page you can find example usage for java.io.PrintWriter.close().

Prototype

public void close() 

Document

Closes the stream and releases any system resources associated with it.
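
Closing also flushes any buffered output, and closing an already-closed writer has no effect. A minimal sketch of the typical call pattern (the class name CloseDemo and the path out.txt are placeholders, not taken from the projects below):

import java.io.FileNotFoundException;
import java.io.PrintWriter;

public class CloseDemo {
    public static void main(String[] args) throws FileNotFoundException {
        PrintWriter out = new PrintWriter("out.txt"); // placeholder path
        try {
            out.println("hello");
        } finally {
            // flushes buffered output and releases the file handle;
            // closing again would be a no-op
            out.close();
        }
    }
}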

Usage

From source file:dpfmanager.shell.modules.report.core.ReportGenerator.java

/**
 * Writes a precomputed individual report to the given file.
 *
 * @param filename the file name.
 * @param content  the individual report content.
 */
public static void writeProcomputedIndividual(String filename, String content) {
    try {
        PrintWriter out = new PrintWriter(filename);
        out.print(content);
        out.close();
    } catch (Exception e) {
        e.printStackTrace();
    }
}
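
In this example close() is reached only if print succeeds; if it throws, the writer stays open until garbage collection. On Java 7 and later, where PrintWriter implements AutoCloseable, a try-with-resources variant closes it on every path. A sketch, not the project's actual code (writeReport is a hypothetical name):

public static void writeReport(String filename, String content) {
    // the writer is closed automatically when the try block exits,
    // whether normally or via an exception
    try (PrintWriter out = new PrintWriter(filename)) {
        out.print(content);
    } catch (Exception e) {
        e.printStackTrace();
    }
}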

From source file:com.ebay.erl.mobius.core.mapred.ConfigurableJob.java

private static void writePartitionFile(JobConf job, Sampler sampler) {
    try {
        ////////////////////////////////////////////////
        // first, getting samples from the data sources
        ////////////////////////////////////////////////
        LOGGER.info("Running local sampling for job [" + job.getJobName() + "]");
        InputFormat inf = job.getInputFormat();
        Object[] samples = sampler.getSample(inf, job);
        LOGGER.info("Samples retrieved, sorting...");

        ////////////////////////////////////////////////
        // sort the samples
        ////////////////////////////////////////////////
        RawComparator comparator = job.getOutputKeyComparator();
        Arrays.sort(samples, comparator);

        if (job.getBoolean("mobius.print.sample", false)) {
            PrintWriter pw = new PrintWriter(
                    new OutputStreamWriter(new GZIPOutputStream(new BufferedOutputStream(new FileOutputStream(
                            new File(job.get("mobius.sample.file", "./samples.txt.gz")))))));
            for (Object obj : samples) {
                pw.println(obj);
            }
            pw.flush();
            pw.close();
        }

        ////////////////////////////////////////////////
        // start to write partition files
        ////////////////////////////////////////////////

        FileSystem fs = FileSystem.get(job);
        Path partitionFile = fs.makeQualified(new Path(TotalOrderPartitioner.getPartitionFile(job)));
        while (fs.exists(partitionFile)) {
            partitionFile = new Path(partitionFile.toString() + "." + System.currentTimeMillis());
        }
        fs.deleteOnExit(partitionFile);
        TotalOrderPartitioner.setPartitionFile(job, partitionFile);
        LOGGER.info("write partition file to:" + partitionFile.toString());

        int reducersNbr = job.getNumReduceTasks();
        Set<Object> wroteSamples = new HashSet<Object>();

        SequenceFile.Writer writer = SequenceFile.createWriter(fs, job, partitionFile, Tuple.class,
                NullWritable.class);

        // cast to float to keep integer division from truncating the average
        float avgReduceSize = (float) samples.length / reducersNbr;

        int lastBegin = 0;
        for (int i = 0; i < samples.length;) {
            // trying to distribute the load for every reducer evenly,
            // dividing the <code>samples</code> into a set of blocks
            // separated by boundaries: objects selected from the
            // <code>samples</code> array. Each block should have
            // about the same size.

            // find the last index of the element equal to samples[i],
            // since that element might appear multiple times in the samples.
            int upperBound = Util.findUpperBound(samples, samples[i], comparator);

            int lowerBound = i;//Util.findLowerBound(samples, samples[i], comparator);

            // the number of repetitions of samples[i]; if the element
            // repeats too often, select it as a boundary
            int currentElemSize = upperBound - lowerBound + 1;

            if (currentElemSize > avgReduceSize * 2) // more than twice the average reducer size
            {
                // the current element is too big, more than twice
                // the <code>avgReduceSize</code>, so
                // use it as a boundary on its own
                writer.append(((DataJoinKey) samples[i]).getKey(), NullWritable.get());
                wroteSamples.add(((DataJoinKey) samples[i]).getKey());
                //pw.println(samples[i]);

                // immediately put the next element on the boundary;
                // the next element starts at <code>upperBound + 1</code>,
                // to prevent the current one from consuming even more.
                if (upperBound + 1 < samples.length) {
                    writer.append(((DataJoinKey) samples[upperBound + 1]).getKey(), NullWritable.get());
                    wroteSamples.add(((DataJoinKey) samples[upperBound + 1]).getKey());
                    //pw.println(samples[upperBound+1]);

                    // move past all occurrences of <code>samples[upperBound + 1]</code>
                    lastBegin = Util.findUpperBound(samples, samples[upperBound + 1], comparator) + 1;
                    i = lastBegin;
                } else {
                    break;
                }
            } else {
                // the current element is small enough to be considered
                // together with the previous group
                int size = upperBound - lastBegin;
                if (size > avgReduceSize) {
                    // by including the current element, we have
                    // found a block that is big enough; select it
                    // as a boundary
                    writer.append(((DataJoinKey) samples[i]).getKey(), NullWritable.get());
                    wroteSamples.add(((DataJoinKey) samples[i]).getKey());
                    //pw.println(samples[i]);

                    i = upperBound + 1;
                    lastBegin = i;
                } else {
                    i = upperBound + 1;
                }
            }
        }

        writer.close();

        // if the number of written samples doesn't equal the number of
        // reducers minus one, the key space is too small and
        // TotalOrderPartitioner won't work, as it works only if
        // the partition boundaries are distinct.
        //
        // in that case we need to change the number of reducers
        if (wroteSamples.size() + 1 != reducersNbr) {
            LOGGER.info("Write complete, but key space is too small, sample size=" + wroteSamples.size()
                    + ", reducer size:" + (reducersNbr));
            LOGGER.info("Set the reducer size to:" + (wroteSamples.size() + 1));

            // add 1 because the written samples define boundaries; e.g., if
            // the sample size is two with the elements [300, 1000], then
            // there should be 3 reducers: one handling i < 300, one
            // handling 300 <= i < 1000, and another handling 1000 <= i
            job.setNumReduceTasks((wroteSamples.size() + 1));
        }

        samples = null;
    } catch (IOException e) {
        LOGGER.error(e.getMessage(), e);
        throw new RuntimeException(e);
    }
}

From source file:com.dumontierlab.pdb2rdf.Pdb2Rdf.java

private static void outputStats(CommandLine cmd, Map<String, Double> stats) throws FileNotFoundException {
    File outputDir = getOutputDirectory(cmd);
    File statsFile = null;
    if (outputDir != null) {
        statsFile = new File(outputDir, STATSFILE_NAME);
    } else {
        statsFile = new File(STATSFILE_NAME);
    }
    PrintWriter out = new PrintWriter(statsFile);
    try {
        for (Map.Entry<String, Double> stat : stats.entrySet()) {
            out.println(stat.getKey() + ": " + stat.getValue());
        }
        out.flush();
    } finally {
        out.close();
    }

}
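
The try/finally used here is the classic pre-Java-7 way to guarantee that close() runs even when a println fails. The explicit flush() is technically redundant, since close() flushes the stream first, but it makes the intent obvious.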

From source file:avantssar.aslanpp.testing.HTMLHelper.java

public static File toHTML(File textFile, boolean lineNumbers) {
    if (textFile != null) {
        File htmlFile = new File(textFile.getAbsolutePath() + ".html");
        try {
            BufferedReader reader = new BufferedReader(new InputStreamReader(new FileInputStream(textFile)));
            PrintWriter writer = new PrintWriter(htmlFile);
            String line;
            writer.println("<html>");
            writer.println("<body>");
            writer.println("<pre>");
            int lineCount = 1;
            while ((line = reader.readLine()) != null) {
                if (lineNumbers) {
                    line = String.format("%4d:   %s", lineCount++, line);
                }
                writer.println(line);
            }
            writer.println("</pre>");
            writer.println("</body>");
            writer.println("</html>");
            reader.close();
            writer.close();
            return htmlFile;
        } catch (IOException ex) {
            System.out.println(
                    "Failed to convert to HTML file '" + textFile.getAbsolutePath() + "': " + ex.getMessage());
            Debug.logger.error(ex);
            return null;
        }
    } else {
        return null;
    }
}

From source file:com.sshtools.common.vomanagementtool.common.VOHelper.java

private static void writeToFile(String filename, String content) throws IOException {
    File newFile = new File(filename);
    if (!newFile.isFile()) {
        newFile.createNewFile();
    }
    PrintWriter out = new PrintWriter(newFile.getAbsolutePath());
    out.println(content);
    out.close();
}

From source file:at.tuwien.ifs.somtoolbox.data.InputDataWriter.java

/** Writes the class information to a tab-separated file. */
public static void writeToFileTabSeparated(SOMLibClassInformation classInfo, String fileName)
        throws IOException, SOMLibFileFormatException {
    PrintWriter writer = FileUtils.openFileForWriting("Tab-separated class info", fileName);
    for (String element : classInfo.getDataNames()) {
        writer.println(element + "\t" + classInfo.getClassName(element));
    }
    writer.flush();
    writer.close();
}

From source file:TrainLogistic.java

private static void saveTo(FileOutputStream modelOutput, OnlineLogisticRegression lr) {
    PrintWriter w = new PrintWriter(new OutputStreamWriter(modelOutput));
    String str = new String(" ");
    //System.out.printf("%d columns\n",lr.getBeta().numCols());
    System.out.printf("Now, writing file...\n");
    for (int column = 0; column < lr.getBeta().numCols(); column++) {
        //System.out.printf("%f, ", lr.getBeta().get(0, column));
        str = java.lang.String.format("%f\n", lr.getBeta().get(0, column));
        w.write(str);
        w.flush();
    }
    w.close();
}
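
Calling flush() after every write, as above, makes partial output visible immediately at some throughput cost; since close() performs a final flush anyway, a single flush at the end (or none at all) would suffice when only the complete file matters.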

From source file:de.zib.scalaris.examples.wikipedia.data.xml.Main.java

/**
 * Filters all pages in the Wikipedia XML2DB dump from the given file and
 * creates a list of page names belonging to certain categories.
 *
 * @param filename
 * @param args
 * 
 * @throws RuntimeException
 * @throws IOException
 * @throws SAXException
 * @throws FileNotFoundException
 */
private static void doDumpdbFilter(String filename, String[] args)
        throws RuntimeException, IOException, SAXException, FileNotFoundException {
    int i = 0;
    int recursionLvl = 1;
    if (args.length > i) {
        try {
            recursionLvl = Integer.parseInt(args[i]);
        } catch (NumberFormatException e) {
            System.err.println("no number: " + args[i]);
            System.exit(-1);
        }
    }
    ++i;

    String pageListFileName = "";
    if (args.length > i && !args[i].isEmpty()) {
        pageListFileName = args[i];
    } else {
        System.err.println("need a pagelist file name for filter; arguments given: " + Arrays.toString(args));
        System.exit(-1);
    }
    ++i;

    Set<String> allowedPages0 = new HashSet<String>();
    allowedPages0.add("Main Page");
    String allowedPagesFileName = "";
    if (args.length > i && !args[i].isEmpty()) {
        allowedPagesFileName = args[i];
        addFromFile(allowedPages0, allowedPagesFileName);
    }
    ++i;

    LinkedList<String> rootCategories = new LinkedList<String>();
    if (args.length > i) {
        for (String rCat : Arrays.asList(args).subList(i, args.length)) {
            if (!rCat.isEmpty()) {
                rootCategories.add(rCat);
            }
        }
    }
    WikiDumpHandler.println(System.out, "filtering by categories " + rootCategories.toString() + " ...");
    WikiDumpHandler.println(System.out, " wiki dump     : " + filename);
    WikiDumpHandler.println(System.out, " allowed pages : " + allowedPagesFileName);
    WikiDumpHandler.println(System.out, " recursion lvl : " + recursionLvl);

    WikiDumpHandler.println(System.out,
            "creating list of pages to import (recursion level: " + recursionLvl + ") ...");
    Set<String> allowedCats0 = new HashSet<String>(rootCategories);

    WikiDumpSQLiteLinkTables handler = new WikiDumpSQLiteLinkTables(filename);
    handler.setUp();
    SortedSet<String> pages = handler.getPagesInCategories(allowedCats0, allowedPages0, recursionLvl, false);
    handler.tearDown();

    FileWriter outFile = new FileWriter(pageListFileName);
    PrintWriter out = new PrintWriter(outFile);
    for (String page : pages) {
        out.println(page);
    }
    out.close();
    exitCheckHandler(handler);
}

From source file:de.zib.scalaris.examples.wikipedia.data.xml.Main.java

/**
 * Filters all pages in the Wikipedia XML dump from the given file and
 * creates a list of page names belonging to certain categories.
 *
 * @param filename
 * @param args
 * 
 * @throws RuntimeException
 * @throws IOException
 * @throws SAXException
 * @throws FileNotFoundException
 */
private static void doFilter(String filename, String[] args)
        throws RuntimeException, IOException, SAXException, FileNotFoundException {
    int i = 0;
    int recursionLvl = 1;
    if (args.length > i) {
        try {
            recursionLvl = Integer.parseInt(args[i]);
        } catch (NumberFormatException e) {
            System.err.println("no number: " + args[i]);
            System.exit(-1);
        }
    }
    ++i;

    // a timestamp in ISO8601 format
    Calendar maxTime = null;
    if (args.length > i && !args[i].isEmpty()) {
        try {
            maxTime = Revision.stringToCalendar(args[i]);
        } catch (IllegalArgumentException e) {
            System.err.println("no date in ISO8601: " + args[i]);
            System.exit(-1);
        }
    }
    ++i;

    String pageListFileName = "";
    if (args.length > i && !args[i].isEmpty()) {
        pageListFileName = args[i];
    } else {
        System.err.println("need a pagelist file name for filter; arguments given: " + Arrays.toString(args));
        System.exit(-1);
    }
    ++i;

    Set<String> allowedPages = new HashSet<String>();
    allowedPages.add("Main Page");
    String allowedPagesFileName = "";
    if (args.length > i && !args[i].isEmpty()) {
        allowedPagesFileName = args[i];
        addFromFile(allowedPages, allowedPagesFileName);
    }
    ++i;

    LinkedList<String> rootCategories = new LinkedList<String>();
    if (args.length > i) {
        for (String rCat : Arrays.asList(args).subList(i, args.length)) {
            if (!rCat.isEmpty()) {
                rootCategories.add(rCat);
            }
        }
    }
    WikiDumpHandler.println(System.out, "filtering by categories " + rootCategories.toString() + " ...");
    WikiDumpHandler.println(System.out, " wiki dump     : " + filename);
    WikiDumpHandler.println(System.out, " max time      : " + maxTime);
    WikiDumpHandler.println(System.out, " allowed pages : " + allowedPagesFileName);
    WikiDumpHandler.println(System.out, " recursion lvl : " + recursionLvl);
    SortedSet<String> pages = getPageList(filename, maxTime, allowedPages, rootCategories, recursionLvl);

    FileWriter outFile = new FileWriter(pageListFileName);
    PrintWriter out = new PrintWriter(outFile);
    for (String page : pages) {
        out.println(page);
    }
    out.close();
}

From source file:co.raveesh.yspages.YSScraper.java

/**
 * This function writes a JSONObject to the required output file
 * @param fileName Output file name. The /output directory and .json extension are added by this method
 * @param object The required output content in JSONObject form
 */
private static void writeToFile(String fileName, JSONObject object) {
    PrintWriter writer = null;
    try {
        writer = new PrintWriter(Constants.OUTPUT_BASE + fileName + ".json", "UTF-8");
        writer.println(object.toString());
    } catch (FileNotFoundException e) {
        e.printStackTrace();
    } catch (UnsupportedEncodingException e) {
        e.printStackTrace();
    } finally {
        // close only if the writer was actually created;
        // the constructor may have thrown
        if (writer != null) {
            writer.close();
        }
    }
}