Example usage for java.io.BufferedWriter.close()

List of usage examples for java.io.BufferedWriter.close()

Introduction

On this page you can find example usage for java.io.BufferedWriter.close(), drawn from real projects.

Prototype

@SuppressWarnings("try")
public void close() throws IOException
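
For context, close() flushes any buffered characters and then closes the underlying writer; closing an already-closed stream has no effect. A minimal, self-contained sketch (the file name is illustrative):

import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;

public class CloseExample {
    public static void main(String[] args) throws IOException {
        // try-with-resources invokes close() automatically, even if a write throws
        try (BufferedWriter out = new BufferedWriter(new FileWriter("example.txt"))) {
            out.write("hello");
            out.newLine();
        } // at this point the buffer has been flushed and the FileWriter closed
    }
}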

Usage

From source file:PodbaseMetadataMigration2.java

public static void main(String[] args) throws Exception {
    System.out.println("Running data migration");

    String projectString = FileUtils.readFileToString(new File("projects.txt"));
    Map<String, Integer> projectIdMapping = new HashMap<String, Integer>();
    for (String line : projectString.split("\n")) {
        String[] split = line.split(":");
        int id = Integer.parseInt(split[0].trim());
        String name = split[1].trim();
        projectIdMapping.put(name, id);
    }

    System.out.println("Reading projects..");
    List<ProjectEntry> projects = dataFromFile("./migrate/projects.data", ProjectEntry.class);
    projectIdMap = parseProjectMap(projects, projectIdMapping);

    System.out.println("Found " + projects.size() + " projects.");

    System.out.println("Reading tags..");
    List<TagEntry> tags = dataFromFile("./migrate/tags.data", TagEntry.class);
    System.out.println("Found " + tags.size() + " tags.");

    System.out.println("Reading templates..");
    List<TemplateEntry> templates = dataFromFile("./migrate/templates.data", TemplateEntry.class);
    System.out.println("Found " + templates.size() + " templates.");

    System.out.println("Reading template fields..");
    List<TemplateFieldEntry> templateFields = dataFromFile("./migrate/template_fields.data",
            TemplateFieldEntry.class);
    System.out.println("Found " + templateFields.size() + " templateFields.");

    int entryCount = tags.size() + templates.size() + templateFields.size();

    //System.out.println("Generating Project SQL");
    //String projectSql = generateSql((List<AbstractEntry>)(List<?>)projects);
    System.out.println("Generating Attribute SQL");
    String imageAttributes = generateSql((List<AbstractEntry>) (List<?>) tags);
    System.out.println("Generating Image SQL");
    String databaseImages = generateDatabaseImageSql();
    //System.out.println("Generating Directory SQL");
    //String directorySql = generateDirectorySql(projects);

    //System.out.println("Generating Template SQL");
    //String templateSql = generateSql((List<AbstractEntry>)(List<?>)templates);
    //System.out.println("Generating Field SQL");
    //String fieldsSql = generateSql((List<AbstractEntry>)(List<?>)templateFields);

    System.out.println("Writing database.sql");
    BufferedWriter bw = new BufferedWriter(new FileWriter(new File("./database.sql")));
    //bw.append(projectSql);
    //bw.append("\n\n");
    bw.append(databaseImages);
    bw.append("\n\n");
    //bw.append(directorySql);
    //bw.append("\n\n");
    bw.append(imageAttributes);
    bw.append("\n\n");
    //      bw.append(templateSql);
    //      bw.append("\n\n");
    //      bw.append(fieldsSql);
    //      bw.append("\n\n");
    bw.close();

    System.out.println("Writing missingImages.txt");
    bw = new BufferedWriter(new FileWriter(new File("./missingImages.txt")));
    for (String img : missingImages) {
        bw.append(img + "\n");
    }
    bw.close();

    System.out.println("Migration completed successfully!");
}
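
Note that if any of the appends above throws, the first bw.close() is never reached and the file handle leaks. A try-with-resources sketch of the same writes (reusing the variables from the example) guarantees the close on every path:

    try (BufferedWriter bw = new BufferedWriter(new FileWriter(new File("./database.sql")))) {
        bw.append(databaseImages);
        bw.append("\n\n");
        bw.append(imageAttributes);
        bw.append("\n\n");
    } // close() runs here even on exceptions, flushing the buffer first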

From source file:com.bright.json.JSonRequestor.java

public static void main(String[] args) {
    String fileBasename = null;
    String[] zipArgs = null;
    JFileChooser chooser = new JFileChooser("/Users/panos/STR_GRID");
    try {

        chooser.setCurrentDirectory(new java.io.File("."));
        chooser.setDialogTitle("Select the input directory");

        chooser.setFileSelectionMode(JFileChooser.DIRECTORIES_ONLY);
        chooser.setAcceptAllFileFilterUsed(false);

        if (chooser.showOpenDialog(null) == JFileChooser.APPROVE_OPTION) {
            System.out.println("getCurrentDirectory(): " + chooser.getCurrentDirectory());
            System.out.println("getSelectedFile() : " + chooser.getSelectedFile());

            // String fileBasename =
            // chooser.getSelectedFile().toString().substring(chooser.getSelectedFile().toString().lastIndexOf(File.separator)+1,chooser.getSelectedFile().toString().lastIndexOf("."));
            fileBasename = chooser.getSelectedFile().toString()
                    .substring(chooser.getSelectedFile().toString().lastIndexOf(File.separator) + 1);
            System.out.println("Base name: " + fileBasename);

            zipArgs = new String[] { chooser.getSelectedFile().toString(),
                    chooser.getCurrentDirectory().toString() + File.separator + fileBasename + ".zip" };
            com.bright.utils.ZipFile.main(zipArgs);

        } else {
            System.out.println("No Selection ");

        }
    } catch (Exception e) {

        System.out.println(e.toString());

    }

    JTextField uiHost = new JTextField("ucs-head.brightcomputing.com");
    // TextPrompt puiHost = new
    // TextPrompt("hadoop.brightcomputing.com",uiHost);
    JTextField uiUser = new JTextField("nexus");
    // TextPrompt puiUser = new TextPrompt("nexus", uiUser);
    JTextField uiPass = new JPasswordField("system");
    // TextPrompt puiPass = new TextPrompt("", uiPass);
    JTextField uiWdir = new JTextField("/home/nexus/pp1234");
    // TextPrompt puiWdir = new TextPrompt("/home/nexus/nexus_workdir",
    // uiWdir);
    JTextField uiOut = new JTextField("foo");
    // TextPrompt puiOut = new TextPrompt("foobar123", uiOut);

    JPanel myPanel = new JPanel(new GridLayout(5, 1));
    myPanel.add(new JLabel("Bright HeadNode hostname:"));
    myPanel.add(uiHost);
    // myPanel.add(Box.createHorizontalStrut(1)); // a spacer
    myPanel.add(new JLabel("Username:"));
    myPanel.add(uiUser);
    myPanel.add(new JLabel("Password:"));
    myPanel.add(uiPass);
    myPanel.add(new JLabel("Working Directory:"));
    myPanel.add(uiWdir);
    // myPanel.add(Box.createHorizontalStrut(1)); // a spacer
    myPanel.add(new JLabel("Output Study Name ( -s ):"));
    myPanel.add(uiOut);

    int result = JOptionPane.showConfirmDialog(null, myPanel, "Please fill in all the fields.",
            JOptionPane.OK_CANCEL_OPTION);
    if (result == JOptionPane.OK_OPTION) {
        System.out.println("Input received.");

    }

    String rfile = uiWdir.getText();
    String rhost = uiHost.getText();
    String ruser = uiUser.getText();
    String rpass = uiPass.getText();
    String nexusOut = uiOut.getText();

    String[] myarg = new String[] { zipArgs[1], ruser + "@" + rhost + ":" + rfile, nexusOut, fileBasename };
    com.bright.utils.ScpTo.main(myarg);

    String cmURL = "https://" + rhost + ":8081/json";
    List<Cookie> cookies = doLogin(ruser, rpass, cmURL);
    chkVersion(cmURL, cookies);

    jobSubmit myjob = new jobSubmit();
    jobSubmit.jobObject myjobObj = new jobSubmit.jobObject();

    myjob.setService("cmjob");
    myjob.setCall("submitJob");

    myjobObj.setQueue("defq");
    myjobObj.setJobname("myNexusJob");
    myjobObj.setAccount(ruser);
    myjobObj.setRundirectory(rfile);
    myjobObj.setUsername(ruser);
    myjobObj.setGroupname("cmsupport");
    myjobObj.setPriority("1");
    myjobObj.setStdinfile(rfile + "/stdin-mpi");
    myjobObj.setStdoutfile(rfile + "/stdout-mpi");
    myjobObj.setStderrfile(rfile + "/stderr-mpi");
    myjobObj.setResourceList(Arrays.asList(""));
    myjobObj.setDependencies(Arrays.asList(""));
    myjobObj.setMailNotify(false);
    myjobObj.setMailOptions("ALL");
    myjobObj.setMaxWallClock("00:10:00");
    myjobObj.setNumberOfProcesses(1);
    myjobObj.setNumberOfNodes(1);
    myjobObj.setNodes(Arrays.asList(""));
    myjobObj.setCommandLineInterpreter("/bin/bash");
    myjobObj.setUserdefined(Arrays.asList("cd " + rfile, "date", "pwd"));
    myjobObj.setExecutable("mpirun");
    myjobObj.setArguments("-env I_MPI_FABRICS shm:tcp " + Constants.NEXUSSIM_EXEC + " -mpi -c " + rfile + "/"
            + fileBasename + "/" + fileBasename + " -s " + rfile + "/" + fileBasename + "/" + nexusOut);
    myjobObj.setModules(Arrays.asList("shared", "nexus", "intel-mpi/64"));
    myjobObj.setDebug(false);
    myjobObj.setBaseType("Job");
    myjobObj.setIsSlurm(true);
    myjobObj.setUniqueKey(0);
    myjobObj.setModified(false);
    myjobObj.setToBeRemoved(false);
    myjobObj.setChildType("SlurmJob");
    myjobObj.setJobID("Nexus test");

    // Map<String,jobSubmit.jobObject > mymap= new HashMap<String,
    // jobSubmit.jobObject>();
    // mymap.put("Slurm",myjobObj);
    ArrayList<Object> mylist = new ArrayList<Object>();
    mylist.add("slurm");
    mylist.add(myjobObj);
    myjob.setArgs(mylist);

    GsonBuilder builder = new GsonBuilder();
    builder.enableComplexMapKeySerialization();

    // Gson g = new Gson();
    Gson g = builder.create();

    String json2 = g.toJson(myjob);

    // To be used from a real console and not Eclipse
    Delete.main(zipArgs[1]);
    String message = JSonRequestor.doRequest(json2, cmURL, cookies);
    @SuppressWarnings("resource")
    Scanner resInt = new Scanner(message).useDelimiter("[^0-9]+");
    int jobID = resInt.nextInt();
    System.out.println("Job ID: " + jobID);

    JOptionPane optionPane = new JOptionPane(message);
    JDialog myDialog = optionPane.createDialog(null, "CMDaemon response: ");
    myDialog.setModal(false);
    myDialog.setVisible(true);

    ArrayList<Object> mylist2 = new ArrayList<Object>();
    mylist2.add("slurm");
    String JobID = Integer.toString(jobID);
    mylist2.add(JobID);
    myjob.setArgs(mylist2);
    myjob.setService("cmjob");
    myjob.setCall("getJob");
    String json3 = g.toJson(myjob);
    System.out.println("JSON Request No. 4 " + json3);

    cmReadFile readfile = new cmReadFile();
    readfile.setService("cmmain");
    readfile.setCall("readFile");
    readfile.setUserName(ruser);

    int fileByteIdx = 1;

    readfile.setPath(rfile + "/" + fileBasename + "/" + fileBasename + ".sum@+" + fileByteIdx);
    String json4 = g.toJson(readfile);

    String monFile = JSonRequestor.doRequest(json4, cmURL, cookies).replaceAll("^\"|\"$", "");
    if (monFile.startsWith("Unable")) {
        monFile = "";
    } else {
        fileByteIdx += countLines(monFile, "\\\\n");
        System.out.println("");
    }

    StringBuffer output = new StringBuffer();
    // Get the correct Line Separator for the OS (CRLF or LF)
    String nl = System.getProperty("line.separator");
    String filename = chooser.getCurrentDirectory().toString() + File.separator + fileBasename + ".sum.txt";
    System.out.println("Local monitoring file: " + filename);

    output.append(monFile.replaceAll("\\\\n", System.getProperty("line.separator")));

    String getJobJSON = JSonRequestor.doRequest(json3, cmURL, cookies);
    jobGet getJobObj = new Gson().fromJson(getJobJSON, jobGet.class);
    System.out.println("Job " + jobID + " status: " + getJobObj.getStatus().toString());

    while (getJobObj.getStatus().toString().equals("RUNNING")
            || getJobObj.getStatus().toString().equals("COMPLETING")) {
        try {

            getJobJSON = JSonRequestor.doRequest(json3, cmURL, cookies);
            getJobObj = new Gson().fromJson(getJobJSON, jobGet.class);
            System.out.println("Job " + jobID + " status: " + getJobObj.getStatus().toString());

            readfile.setPath(rfile + "/" + fileBasename + "/" + fileBasename + ".sum@+" + fileByteIdx);
            json4 = g.toJson(readfile);
            monFile = JSonRequestor.doRequest(json4, cmURL, cookies).replaceAll("^\"|\"$", "");
            if (monFile.startsWith("Unable")) {
                monFile = "";
            } else {

                output.append(monFile.replaceAll("\\\\n", System.getProperty("line.separator")));
                System.out.println("FILE INDEX:" + fileByteIdx);
                fileByteIdx += countLines(monFile, "\\\\n");
            }
            Thread.sleep(Constants.STATUS_CHECK_INTERVAL);
        } catch (InterruptedException ex) {
            Thread.currentThread().interrupt();
        }

    }

    Gson gson_nice = new GsonBuilder().setPrettyPrinting().create();
    String json_out = gson_nice.toJson(getJobJSON);
    System.out.println(json_out);
    System.out.println("JSON Request No. 5 " + json4);

    readfile.setPath(rfile + "/" + fileBasename + "/" + fileBasename + ".sum@+" + fileByteIdx);
    json4 = g.toJson(readfile);
    monFile = JSonRequestor.doRequest(json4, cmURL, cookies).replaceAll("^\"|\"$", "");
    if (monFile.startsWith("Unable")) {
        monFile = "";
    } else {

        output.append(monFile.replaceAll("\\\\n", System.getProperty("line.separator")));
        fileByteIdx += countLines(monFile, "\\\\n");
    }
    System.out.println("FILE INDEX:" + fileByteIdx);

    /*
     * System.out.print("Monitoring file: " + monFile.replaceAll("\\n",
     * System.getProperty("line.separator"))); try {
     * FileUtils.writeStringToFile( new
     * File(chooser.getCurrentDirectory().toString() + File.separator +
     * fileBasename + ".sum.txt"), monFile.replaceAll("\\n",
     * System.getProperty("line.separator"))); } catch (IOException e) {
     * 
     * e.printStackTrace(); }
     */

    if (getJobObj.getStatus().toString().equals("COMPLETED")) {
        String[] zipArgs_from = new String[] { chooser.getSelectedFile().toString(),
                chooser.getCurrentDirectory().toString() + File.separator + fileBasename + "_out.zip" };
        String[] myarg_from = new String[] {
                ruser + "@" + rhost + ":" + rfile + "/" + fileBasename + "_out.zip", zipArgs_from[1], rfile,
                fileBasename };
        com.bright.utils.ScpFrom.main(myarg_from);

        JOptionPane optionPaneS = new JOptionPane("Job execution completed without errors!");
        JDialog myDialogS = optionPaneS.createDialog(null, "Job status: ");
        myDialogS.setModal(false);
        myDialogS.setVisible(true);

    } else {
        JOptionPane optionPaneF = new JOptionPane("Job execution FAILED!");
        JDialog myDialogF = optionPaneF.createDialog(null, "Job status: ");
        myDialogF.setModal(false);
        myDialogF.setVisible(true);
    }

    try {
        System.out.println("Local monitoring file: " + filename);

        BufferedWriter out = new BufferedWriter(new FileWriter(filename));
        String outText = output.toString();
        String newString = outText.replace("\\\\n", nl);

        System.out.println("Text: " + outText);
        out.write(newString);

        out.close();
        rmDuplicateLines.main(filename);
    } catch (IOException e) {
        e.printStackTrace();
    }
    doLogout(cmURL, cookies);
    System.exit(0);
}
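
The @SuppressWarnings("resource") above silences a warning about the unclosed Scanner; since Scanner is Closeable, a try-with-resources form performs the same parse without the warning (a sketch, not part of the original source):

    int jobID;
    try (Scanner resInt = new Scanner(message).useDelimiter("[^0-9]+")) {
        jobID = resInt.nextInt();
    }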

From source file:at.newmedialab.ldpath.template.LDTemplate.java

public static void main(String[] args) {
    Options options = buildOptions();

    CommandLineParser parser = new PosixParser();
    try {
        CommandLine cmd = parser.parse(options, args);

        Level logLevel = Level.WARN;

        if (cmd.hasOption("loglevel")) {
            String logLevelName = cmd.getOptionValue("loglevel");
            if ("DEBUG".equals(logLevelName.toUpperCase())) {
                logLevel = Level.DEBUG;
            } else if ("INFO".equals(logLevelName.toUpperCase())) {
                logLevel = Level.INFO;
            } else if ("WARN".equals(logLevelName.toUpperCase())) {
                logLevel = Level.WARN;
            } else if ("ERROR".equals(logLevelName.toUpperCase())) {
                logLevel = Level.ERROR;
            } else {
                log.error("unsupported log level: {}", logLevelName);
            }
        }

        if (logLevel != null) {
            for (String logname : new String[] { "at", "org", "net", "com" }) {

                ch.qos.logback.classic.Logger logger = (ch.qos.logback.classic.Logger) LoggerFactory
                        .getLogger(logname);
                logger.setLevel(logLevel);
            }
        }

        File template = null;
        if (cmd.hasOption("template")) {
            template = new File(cmd.getOptionValue("template"));
        }

        GenericSesameBackend backend;
        if (cmd.hasOption("store")) {
            backend = new LDPersistentBackend(new File(cmd.getOptionValue("store")));
        } else {
            backend = new LDMemoryBackend();
        }

        Resource context = null;
        if (cmd.hasOption("context")) {
            context = backend.getRepository().getValueFactory().createURI(cmd.getOptionValue("context"));
        }

        BufferedWriter out = null;
        if (cmd.hasOption("out")) {
            File of = new File(cmd.getOptionValue("out"));
            if (of.canWrite()) {
                out = new BufferedWriter(new FileWriter(of));
            } else {
                log.error("cannot write to output file {}", of);
                System.exit(1);
            }
        } else {
            out = new BufferedWriter(new OutputStreamWriter(System.out));
        }

        if (backend != null && context != null && template != null) {
            TemplateEngine<Value> engine = new TemplateEngine<Value>(backend);

            engine.setDirectoryForTemplateLoading(template.getParentFile());
            engine.processFileTemplate(context, template.getName(), out);
            out.flush();
            out.close();
        }

        if (backend instanceof LDPersistentBackend) {
            ((LDPersistentBackend) backend).shutdown();
        }

    } catch (ParseException e) {
        System.err.println("invalid arguments");
        HelpFormatter formatter = new HelpFormatter();
        formatter.printHelp("LDQuery", options, true);
    } catch (FileNotFoundException e) {
        System.err.println("file or program could not be found");
        HelpFormatter formatter = new HelpFormatter();
        formatter.printHelp("LDQuery", options, true);
    } catch (IOException e) {
        System.err.println("could not access file");
        e.printStackTrace(System.err);
    } catch (TemplateException e) {
        System.err.println("error while processing template");
        e.printStackTrace(System.err);
    }

}

From source file:ivory.core.tokenize.Tokenizer.java

@SuppressWarnings("static-access")
public static void main(String[] args) {
    Options options = new Options();
    options.addOption(OptionBuilder.withArgName("full path to model file or directory").hasArg()
            .withDescription("model file").create("model"));
    options.addOption(OptionBuilder.withArgName("full path to input file").hasArg()
            .withDescription("input file").isRequired().create("input"));
    options.addOption(OptionBuilder.withArgName("full path to output file").hasArg()
            .withDescription("output file").isRequired().create("output"));
    options.addOption(OptionBuilder.withArgName("en | zh | de | fr | ar | tr | es").hasArg()
            .withDescription("2-character language code").isRequired().create("lang"));
    options.addOption(OptionBuilder.withArgName("path to stopwords list").hasArg()
            .withDescription("one stopword per line").create("stopword"));
    options.addOption(OptionBuilder.withArgName("path to stemmed stopwords list").hasArg()
            .withDescription("one stemmed stopword per line").create("stemmed_stopword"));
    options.addOption(OptionBuilder.withArgName("true|false").hasArg().withDescription("turn on/off stemming")
            .create("stem"));
    options.addOption(OptionBuilder.withDescription("Hadoop option to load external jars")
            .withArgName("jar packages").hasArg().create("libjars"));

    CommandLine cmdline;
    CommandLineParser parser = new GnuParser();
    try {
        String stopwordList = null, stemmedStopwordList = null, modelFile = null;
        boolean isStem = true;
        cmdline = parser.parse(options, args);
        if (cmdline.hasOption("stopword")) {
            stopwordList = cmdline.getOptionValue("stopword");
        }
        if (cmdline.hasOption("stemmed_stopword")) {
            stemmedStopwordList = cmdline.getOptionValue("stemmed_stopword");
        }
        if (cmdline.hasOption("stem")) {
            isStem = Boolean.parseBoolean(cmdline.getOptionValue("stem"));
        }
        if (cmdline.hasOption("model")) {
            modelFile = cmdline.getOptionValue("model");
        }

        ivory.core.tokenize.Tokenizer tokenizer = TokenizerFactory.createTokenizer(
                cmdline.getOptionValue("lang"), modelFile, isStem, stopwordList, stemmedStopwordList, null);
        BufferedWriter out = new BufferedWriter(
                new OutputStreamWriter(new FileOutputStream(cmdline.getOptionValue("output")), "UTF8"));
        BufferedReader in = new BufferedReader(
                new InputStreamReader(new FileInputStream(cmdline.getOptionValue("input")), "UTF8"));

        String line = null;
        while ((line = in.readLine()) != null) {
            String[] tokens = tokenizer.processContent(line);
            String s = "";
            for (String token : tokens) {
                s += token + " ";
            }
            out.write(s.trim() + "\n");
        }
        in.close();
        out.close();

    } catch (Exception exp) {
        System.out.println(exp);
        HelpFormatter formatter = new HelpFormatter();
        formatter.printHelp("Tokenizer", options);
        System.exit(-1);
    }
}
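
As an aside, the read loop above accumulates tokens into a String with +=; String.join produces the same trimmed output in a single call (assuming processContent returns no null elements):

    out.write(String.join(" ", tokens) + "\n");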

From source file:edu.cmu.lti.oaqa.knn4qa.apps.ExtractDataAndQueryAsSparseVectors.java

public static void main(String[] args) {
    String optKeys[] = { CommonParams.MAX_NUM_QUERY_PARAM, MAX_NUM_DATA_PARAM, CommonParams.MEMINDEX_PARAM,
            IN_QUERIES_PARAM, OUT_QUERIES_PARAM, OUT_DATA_PARAM, TEXT_FIELD_PARAM, TEST_QTY_PARAM, };
    String optDescs[] = { CommonParams.MAX_NUM_QUERY_DESC, MAX_NUM_DATA_DESC, CommonParams.MEMINDEX_DESC,
            IN_QUERIES_DESC, OUT_QUERIES_DESC, OUT_DATA_DESC, TEXT_FIELD_DESC, TEST_QTY_DESC };
    boolean hasArg[] = { true, true, true, true, true, true, true, true };

    ParamHelper prmHlp = null;

    try {

        prmHlp = new ParamHelper(args, optKeys, optDescs, hasArg);

        CommandLine cmd = prmHlp.getCommandLine();
        Options opt = prmHlp.getOptions();

        int maxNumQuery = Integer.MAX_VALUE;

        String tmpn = cmd.getOptionValue(CommonParams.MAX_NUM_QUERY_PARAM);
        if (tmpn != null) {
            try {
                maxNumQuery = Integer.parseInt(tmpn);
            } catch (NumberFormatException e) {
                UsageSpecify(CommonParams.MAX_NUM_QUERY_PARAM, opt);
            }
        }

        int maxNumData = Integer.MAX_VALUE;
        tmpn = cmd.getOptionValue(MAX_NUM_DATA_PARAM);
        if (tmpn != null) {
            try {
                maxNumData = Integer.parseInt(tmpn);
            } catch (NumberFormatException e) {
                UsageSpecify(MAX_NUM_DATA_PARAM, opt);
            }
        }
        String memIndexPref = cmd.getOptionValue(CommonParams.MEMINDEX_PARAM);
        if (null == memIndexPref) {
            UsageSpecify(CommonParams.MEMINDEX_PARAM, opt);
        }
        String textField = cmd.getOptionValue(TEXT_FIELD_PARAM);
        if (null == textField) {
            UsageSpecify(TEXT_FIELD_PARAM, opt);
        }

        textField = textField.toLowerCase();
        int fieldId = -1;
        for (int i = 0; i < FeatureExtractor.mFieldNames.length; ++i)
            if (FeatureExtractor.mFieldNames[i].compareToIgnoreCase(textField) == 0) {
                fieldId = i;
                break;
            }
        if (-1 == fieldId) {
            Usage("Wrong field index, should be one of the following: "
                    + String.join(",", FeatureExtractor.mFieldNames), opt);
        }

        InMemForwardIndex indx = new InMemForwardIndex(
                FeatureExtractor.indexFileName(memIndexPref, FeatureExtractor.mFieldNames[fieldId]));

        BM25SimilarityLucene bm25simil = new BM25SimilarityLucene(FeatureExtractor.BM25_K1,
                FeatureExtractor.BM25_B, indx);

        String inQueryFile = cmd.getOptionValue(IN_QUERIES_PARAM);
        String outQueryFile = cmd.getOptionValue(OUT_QUERIES_PARAM);
        if ((inQueryFile == null) != (outQueryFile == null)) {
            Usage("You should either specify both " + IN_QUERIES_PARAM + " and " + OUT_QUERIES_PARAM
                    + " or none of them", opt);
        }
        String outDataFile = cmd.getOptionValue(OUT_DATA_PARAM);

        tmpn = cmd.getOptionValue(TEST_QTY_PARAM);
        int testQty = 0;
        if (tmpn != null) {
            try {
                testQty = Integer.parseInt(tmpn);
            } catch (NumberFormatException e) {
                UsageSpecify(TEST_QTY_PARAM, opt);
            }
        }

        ArrayList<DocEntry> testDocEntries = new ArrayList<DocEntry>();
        ArrayList<DocEntry> testQueryEntries = new ArrayList<DocEntry>();
        ArrayList<TrulySparseVector> testDocVectors = new ArrayList<TrulySparseVector>();
        ArrayList<TrulySparseVector> testQueryVectors = new ArrayList<TrulySparseVector>();

        if (outDataFile != null) {
            BufferedWriter out = new BufferedWriter(
                    new OutputStreamWriter(CompressUtils.createOutputStream(outDataFile)));

            ArrayList<DocEntryExt> docEntries = indx.getDocEntries();

            for (int id = 0; id < Math.min(maxNumData, docEntries.size()); ++id) {
                DocEntry e = docEntries.get(id).mDocEntry;
                TrulySparseVector v = bm25simil.getDocSparseVector(e, false);
                if (id < testQty) {
                    testDocEntries.add(e);
                    testDocVectors.add(v);
                }
                outputVector(out, v);
            }

            out.close();

        }

        Splitter splitOnSpace = Splitter.on(' ').trimResults().omitEmptyStrings();

        if (outQueryFile != null) {
            BufferedReader inpText = new BufferedReader(
                    new InputStreamReader(CompressUtils.createInputStream(inQueryFile)));
            BufferedWriter out = new BufferedWriter(
                    new OutputStreamWriter(CompressUtils.createOutputStream(outQueryFile)));

            String queryText = XmlHelper.readNextXMLIndexEntry(inpText);

            for (int queryQty = 0; queryText != null && queryQty < maxNumQuery; queryText = XmlHelper
                    .readNextXMLIndexEntry(inpText), queryQty++) {
                Map<String, String> queryFields = null;
                // 1. Parse a query

                try {
                    queryFields = XmlHelper.parseXMLIndexEntry(queryText);
                } catch (Exception e) {
                    System.err.println("Parsing error, offending QUERY:\n" + queryText);
                    throw new Exception("Parsing error.");
                }

                String fieldText = queryFields.get(FeatureExtractor.mFieldsSOLR[fieldId]);

                if (fieldText == null) {
                    fieldText = "";
                }

                ArrayList<String> tmpa = new ArrayList<String>();
                for (String s : splitOnSpace.split(fieldText))
                    tmpa.add(s);

                DocEntry e = indx.createDocEntry(tmpa.toArray(new String[tmpa.size()]));

                TrulySparseVector v = bm25simil.getDocSparseVector(e, true);
                if (queryQty < testQty) {
                    testQueryEntries.add(e);
                    testQueryVectors.add(v);
                }
                outputVector(out, v);
            }

            out.close();
        }

        int testedQty = 0, diffQty = 0;
        // Now let's do some testing
        for (int iq = 0; iq < testQueryEntries.size(); ++iq) {
            DocEntry queryEntry = testQueryEntries.get(iq);
            TrulySparseVector queryVector = testQueryVectors.get(iq);
            for (int id = 0; id < testDocEntries.size(); ++id) {
                DocEntry docEntry = testDocEntries.get(id);
                TrulySparseVector docVector = testDocVectors.get(id);
                float val1 = bm25simil.compute(queryEntry, docEntry);
                float val2 = TrulySparseVector.scalarProduct(queryVector, docVector);
                ++testedQty;
                if (Math.abs(val1 - val2) > 1e-5) { // flag differences beyond a small floating-point tolerance
                    System.err.println(
                            String.format("Potential mismatch BM25=%f <-> scalar product=%f", val1, val2));
                    ++diffQty;
                }
            }
        }
        if (testedQty > 0)
            System.out.println(String.format("Tested %d Mismatched %d", testedQty, diffQty));

    } catch (ParseException e) {
        Usage("Cannot parse arguments: " + e, prmHlp != null ? prmHlp.getOptions() : null);
        e.printStackTrace();
    } catch (Exception e) {
        e.printStackTrace();
        System.err.println("Terminating due to an exception: " + e);
        System.exit(1);
    }
}

From source file:fr.eo.util.dumper.JSONDumper.java

/**
 * @param args main args
 */
public static void main(String[] args) {

    String appName = args[0];
    String jdbcConnectionType = args.length > 1 ? args[1] : "jtds";

    System.out.println("Starting dumper ...");

    try (Connection conn = getConnection(jdbcConnectionType)) {
        System.out.println("Getting database connection ...");

        List<RequestDefinitionBean> requests = RequestDefinitionParser.getRequests(appName);

        baseFolder = RequestDefinitionParser.getAppBaseDir(appName) + "/";

        System.out.println("Reading old table dumps...");
        Map<String, JsonTableDump> oldTables = JSONDeltaDumper.readOldTables(baseFolder);
        List<JsonTableDump> newTables = new ArrayList<>();

        for (RequestDefinitionBean request : requests) {
            try (Statement stmt = conn.createStatement()) {
                BufferedWriter bw = getWriter(request.name, baseFolder);

                if (!request.disabled) {
                    System.out.println("Dumping " + request.name + "...");
                    ResultSet rs = stmt.executeQuery(request.sql);
                    JsonTableWritable dump = new JsonTableWritable();
                    dump.name = request.table;
                    while (rs.next()) {
                        int pos = 0;
                        for (String fieldName : request.fields) {
                            Object obj = getFieldValue(request, pos, rs, fieldName);
                            dump.addColumn(obj);
                            pos++;
                        }
                        dump.commit();
                    }
                    bw.append(dump.toJson());
                    newTables.add(dump);
                    bw.flush();
                    bw.close();
                } else {
                    System.out.println("Skiping " + request.name + "...");
                }
            }

            System.out.println("done.");
        }

        newTables.addAll(BlueprintDumper.dump(baseFolder));
        newTables.addAll(TranslationsDumper.dump(baseFolder, jdbcConnectionType));

        System.out.println("Computing delta...");
        JSONDeltaDumper.computeDelta(oldTables, newTables, baseFolder);

    } catch (SQLException | ClassNotFoundException | IOException e) {
        e.printStackTrace();
    }
}

From source file:immf.EmojiUtil.java

public static void main(String[] args) {
    BufferedWriter br = null;
    try {
        br = new BufferedWriter(new OutputStreamWriter(new FileOutputStream("./emoji.html"), "UTF-8"));

        br.write("<html>" + "<head>" + "<meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\">"
                + "</head>" + "<body>" + ServerMain.Version + "<br><br>" + "<table>");
        br.write("<th>Unicode</th><th>?</th><th></th>");
        for (Emoji e : map.values()) {
            br.write("<tr>\n");
            br.write("    <td>" + String.format("0x%x", (int) e.getC()) + "</td>" + "<td><img src='"
                    + emojiToImageUrl(e.getC()) + "'></td>" + "<td>" + e.getLabel() + "</td>\n");
            br.write("</tr>\n");
        }
        br.write("</table></body></html>");
        br.close();
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        // ensure the writer is closed even if a write fails; close() is idempotent
        if (br != null) {
            try {
                br.close();
            } catch (IOException ignore) {
            }
        }
    }
}

From source file:main.Driver.java

/**
 * The path to a properties file which will supply parameter values for the tests should be passed in as argument 0 to main. 
 * The test that will be run is determined by the value of 'test_type' in the properties file, and each of the tests have their own properties:
 *      'encode+decode' - Encode and decode the given leadsheet with the autoencoder, writing the result to a leadsheet file.
 *              Params:
 *                  * autoencoder_connectome={the path to the connectome which the autoencoder will be loaded with}
 *                  * name_generator_connectome={the path to the connectome which the name generator will be loaded with}
 *                  * input_leadsheet={the path to the leadsheet file which will be encoded and decoded}
 *                  * output_folder={the path to the output folder which the result leadsheet file will be written in}
 * 
 *      'encode+write_queue' - Encode the given leadsheet with the autoencoder, then write the encoded feature queue to a queue file.
 *              Params:
 *                  * autoencoder_connectome={the path to the connectome which the autoencoder will be loaded with}
 *                  * input_leadsheet={the path to the leadsheet file which will be encoded}
 *                  * queue_folder={the path to the output folder which the result queue file will be written in}
 * 
 *      'encode+write_queue+decode' - Encode the given leadsheet with the autoencoder, write the encoded feature queue to a queue file, and then write the result leadsheet to a leadsheet file.
 *                  * autoencoder_connectome={the path to the connectome which the autoencoder will be loaded with}
 *                  * name_generator_connectome={the path to the connectome which the name generator will be loaded with}
 *                  * input_leadsheet={the path to the leadsheet file which will be encoded and decoded}
 *                  * queue_folder={the path to the output folder which the result queue file will be written in}
 *                  * output_folder={the path to the output folder which the result leadsheet file will be written in}
 *      'create_feature_property_vector' - Given a corpus folder of leadsheets, construct a vector consisting of property analysis values for each feature in the corpus data
 *                  * input_corpus_folder={the path to the corpus folder containing all leadsheets to analyze}
 *                  * feature_size={the size (in time steps) of each feature}
 *                  * feature_properties_path={the path to write the generated vector file to (the file will be a csv file containing all the values in left-to-right order}
 *                  * feature_property={the type of feature property to analyze - current options are 'rest', 'sustain', 'articulate' (these return ratios of time steps with the given property to the total time steps in the feature)}
 *      'compile_feature_queue_matrix' - Given a corpus folder of feature queues, construct a matrix of all feature vectors and write it as a csv file
 *                  * queue_folder={the path to the folder containing all queue files to compile}
 *                  * feature_matrix_path={the path to write the result csv file to}
 *      'generate_from_feature_queue_matrix' - Given a matrix of feature vectors, load the autoencoder with a queue of those features and decode from it, writing the result leadsheet to a file
 *                  * autoencoder_connectome={the path to the connectome which the autoencoder will be loaded with}
 *                  * reference_leadsheet={the path to the leadsheet we will take the chord sequence from (and loop it to match the length of the feature queue)}
 *                  * feature_queue_matrix_path={the path to the feature queue matrix file we will decode from}
 *                  * output_file_path={the path to the file we will write our result leadsheet to}
 *                  * (optional) song_title={the song title to write in the leadsheet file - by default this is "Generation from Feature Matrix {path of the feature matrix}"}
 *                  * feature_size={the size (in time steps) of features}
 *      'population_trade' - Given a leadsheet file, split it into sections of a specified size, and between sections, generate a response that plays off of a population of previously encoded feature queues
 *                  * autoencoder_connectome={the path to the connectome which the autoencoder will be loaded with}
 *                  * input_leadsheet={the path to the leadsheet file which will be encoded and traded with}     
 *                  * output_folder={the path to the output folder which the result leadsheet file will be written in}
 *                  * trading_part_size={the size (in time steps) of each trading part. The input leadsheet will be split into sections of this size, and trading responses will be generated in between.}
 *                  * interpolation_variance={a random value between zero and this will be added to the interpolation_min at each trading section to calculate the interpolation of the recently encoded queue towards the queue population before decoding the trading response}
 *                  * interpolation_min={the minimum ratio of interpolation at each trading section}
 *                  * herding_strength={the maximum strength of the herding operation at each section (all queues in the population are interpolated a random amount towards the most recent queue)}
 *                  * mutation_strength={the maximum strength of mutation at each section (each element of the feature vectors of all queues in the population are mutated at a random strength}
 *                  * crossover_strength={the maximum strength of crossover at each section (there is a chance for every queue that the queue will swap a random feature of itself with the corresponding feature of another random queue)}
 *      'interpolation' - Given a leadsheet file and a reference queue file, encode the leadsheet file with the autoencoder, and generate from the encoded queue for a number of divisions of a full interpolation towards the target queue
 *                  * autoencoder_connectome={the path to the connectome which the autoencoder will be loaded with}
 *                  * input_leadsheet={the path to the leadsheet file which will be encoded and interpolated}
 *                  * target_queue={the path to the queue to interpolate towards at each interpolation value}
 *                  * output_folder={the path to the output folder which the result leadsheet file will be written in}
 *                  * num_interpolation_divisions={the number of divisions of the interpolation strength from 0.0 to 1.0 (the length of the result leadsheet will be equal to the length of the original times 1 + number of divisions, as the first section of the result leadsheet is for interpolation 0.0)}
 *      'frankenstein' - Given a primary queue, a reference leadsheet for chords, and a corpus of queue files, construct the result leadsheet from a series of randomly weighted interpolations of the primary queue towards the set of selected queues.
 *                  * autoencoder_connectome={the path to the connectome which the autoencoder will be loaded with}
 *                  * primary_queue_path={the path to the queue which will serve as the base for all of the queue combinations (which are the result of sequential interpolations instead of a weighted sum)}
 *                  * reference_leadsheet={the path to the leadsheet we will take the chord sequence from (and loop it to match the desired length of our output}
 *                  * queue_folder={the path to the folder containing all queue files we can select from}
 *                  * output_file_path={the path to the file we will write our result leadsheet to}
 *                  * num_reference_queues={the number of reference queues we will pick at random from the queue folder to sample from}
 *                  * num_combinations={the number of queue combinations to sample and create the result leadsheet from}
 *                  * interpolation_strength={the total magnitude of all interpolation operations for each combination}
 */
public static void main(String[] args) throws FileNotFoundException, IOException, ConfigurationException {
    FileBasedConfigurationBuilder<PropertiesConfiguration> builder = new FileBasedConfigurationBuilder<>(
            PropertiesConfiguration.class).configure(
                    new Parameters().properties().setFileName(args[0]).setThrowExceptionOnMissing(true)
                            .setListDelimiterHandler(new DefaultListDelimiterHandler(';'))
                            .setIncludesAllowed(false));
    Configuration config = builder.getConfiguration();

    LogTimer.initStartTime(); //start our logging timer to keep track of our execution time

    //switch statement to run the appropriate test
    switch (config.getString("test_type")) {
    case "encode+decode": {
        //load parameter values from config file
        String autoencoderConnectomePath = config.getString("autoencoder_connectome");
        String nameGeneratorConnectomePath = config.getString("name_generator_connectome");
        String inputLeadsheetPath = config.getString("input_leadsheet");
        String outputFolderPath = config.getString("output_folder");

        //initialize networks
        NameGenerator nameGenerator = initializeNameGenerator(nameGeneratorConnectomePath);
        ProductCompressingAutoencoder autoencoder = initializeAutoencoder(autoencoderConnectomePath, false);

        //initialize input sequences and output sequence
        LeadsheetDataSequence inputSequence = leadsheetToSequence(inputLeadsheetPath);
        LeadsheetDataSequence outputSequence = inputSequence.copy();
        outputSequence.clearMelody();
        LeadsheetDataSequence decoderInputSequence = outputSequence.copy();

        //encode and decode
        encodeFromSequence(autoencoder, inputSequence);
        decodeToSequence(autoencoder, outputSequence, decoderInputSequence);

        //generate song title
        String songTitle = nameGenerator.generateName();

        //write output to specified directory with same file name + _aeOutput suffix
        writeLeadsheetFile(outputSequence, outputFolderPath, new File(inputLeadsheetPath).getName(),
                "_aeOutput", songTitle);
    }
        break;

    case "encode+write_queue": {
        //load parameter values from config file
        String autoencoderConnectomePath = config.getString("autoencoder_connectome");
        String inputLeadsheetPath = config.getString("input_leadsheet");
        String queueFolderPath = config.getString("queue_folder");

        //initialize network
        ProductCompressingAutoencoder autoencoder = initializeAutoencoder(autoencoderConnectomePath, false);

        //initialize input sequence
        LeadsheetDataSequence inputSequence = leadsheetToSequence(inputLeadsheetPath);

        //encode
        encodeFromSequence(autoencoder, inputSequence);
        //write to a queue file in the specified queue folder (the write method will handle removing/adding extensions
        writeQueueFile(autoencoder, queueFolderPath, new File(inputLeadsheetPath).getName());
    }
        break;
    case "encode+write_queue+decode": {
        //load parameter values from config file
        String autoencoderConnectomePath = config.getString("autoencoder_connectome");
        String nameGeneratorConnectomePath = config.getString("name_generator_connectome");
        String inputLeadsheetPath = config.getString("input_leadsheet");
        String queueFolderPath = config.getString("queue_folder");
        String outputFolderPath = config.getString("output_folder");

        //initialize networks
        NameGenerator nameGenerator = initializeNameGenerator(nameGeneratorConnectomePath);
        ProductCompressingAutoencoder autoencoder = initializeAutoencoder(autoencoderConnectomePath, false);

        //initialize input sequences and output sequence
        LeadsheetDataSequence inputSequence = leadsheetToSequence(inputLeadsheetPath);
        LeadsheetDataSequence outputSequence = inputSequence.copy();
        outputSequence.clearMelody();
        LeadsheetDataSequence decoderInputSequence = outputSequence.copy();

        //encode
        encodeFromSequence(autoencoder, inputSequence);
        //write to a queue file in the specified queue folder (the write method will handle removing/adding extensions
        writeQueueFile(autoencoder, queueFolderPath, new File(inputLeadsheetPath).getName());
        //decode
        decodeToSequence(autoencoder, outputSequence, decoderInputSequence);

        //generate song title
        String songTitle = nameGenerator.generateName();

        //write output to specified directory with same file name + _aeOutput suffix
        writeLeadsheetFile(outputSequence, outputFolderPath, new File(inputLeadsheetPath).getName(),
                "_aeOutput", songTitle);
    }
        break;
    case "create_feature_property_vector": {
        //load parameter values from config file
        String inputCorpusFolder = config.getString("input_corpus_folder");
        int featureSize = config.getInt("feature_size");
        String featurePropertiesPath = config.getString("feature_properties_path");
        String featureProperty = config.getString("feature_property");

        //compile array of valid leadsheet files
        File[] songFiles = new File(inputCorpusFolder)
                .listFiles((File dir, String name) -> name.endsWith(".ls"));

        //construct feature property vector from analyzed feature property values of all songs
        AVector featurePropertyValues = Vector.createLength(0);
        int featureIndex = 0;
        for (File inputFile : songFiles) {
            LeadsheetDataSequence melodySequence = leadsheetToSequence(inputFile.getPath());
            featurePropertyValues.join(melodyFeatureAnalysis(melodySequence, featureProperty, featureSize));
        }

        //write generated feature_properties
        BufferedWriter writer = new BufferedWriter(
                new FileWriter(featurePropertiesPath + "_" + featureProperty + ".v"));
        writer.write(ReadWriteUtilities.getNumpyCSVString(featurePropertyValues));
        writer.close();
    }
        break;
    case "compile_feature_queue_matrix": {
        //load parameter values from config file
        String queueFolderPath = config.getString("queue_folder");
        String featureMatrixPath = config.getString("feature_matrix_path");

        //generate feature matrix from all feature queues in specified queue folder
        File[] queueFiles = new File(queueFolderPath).listFiles((File dir, String name) -> name.endsWith(".q"));
        AMatrix totalFeatureMatrix = generateFeatureQueueMatrix(queueFiles);
        String writeData = ReadWriteUtilities.getNumpyCSVString(totalFeatureMatrix);
        BufferedWriter writer = new BufferedWriter(new FileWriter(featureMatrixPath));
        writer.write(writeData);
        writer.close();
    }
        break;
    case "generate_from_feature_queue_matrix": {
        //load parameter values from config file
        String autoencoderConnectomePath = config.getString("autoencoder_connectome");
        String referenceLeadsheetPath = config.getString("reference_leadsheet");
        String featureQueueMatrixPath = config.getString("feature_queue_matrix_path");
        String outputFilePath = config.getString("output_file_path");
        String songTitle = config.getString("song_title",
                "Generation from Feature Matrix " + featureQueueMatrixPath);
        int featureSize = config.getInt("feature_size");

        //initialize network
        ProductCompressingAutoencoder autoencoder = initializeAutoencoder(autoencoderConnectomePath, false);

        //initialize chord sequence
        LeadsheetDataSequence chordSequence = leadsheetToSequence(referenceLeadsheetPath);
        chordSequence.clearMelody();

        //call generation method
        generateFromFeatureMatrix(autoencoder, autoencoderConnectomePath, chordSequence, featureQueueMatrixPath,
                featureSize, outputFilePath, songTitle);
    }
        break;
    case "population_trade": {
        //load parameter values from config file
        String autoencoderConnectomePath = config.getString("autoencoder_connectome");
        String inputLeadsheetPath = config.getString("input_leadsheet");
        String outputFolderPath = config.getString("output_folder");
        int tradingPartSize = config.getInt("trading_part_size");
        double interpVariance = config.getDouble("interpolation_variance");
        double interpMin = config.getDouble("interpolation_min");
        double herdingStrength = config.getDouble("herding_strength");
        double mutationStrength = config.getDouble("mutation_strength");
        double crossoverStrength = config.getDouble("crossover_strength");

        //initialize network
        ProductCompressingAutoencoder autoencoder = initializeAutoencoder(autoencoderConnectomePath, true);

        //perform population trading test
        populationTradingTest(autoencoder, autoencoderConnectomePath, new File(inputLeadsheetPath),
                new File(outputFolderPath), tradingPartSize, interpVariance, interpMin, herdingStrength,
                mutationStrength, crossoverStrength);
    }
        break;
    case "interpolation": {
        //load parameter values from config file
        String autoencoderConnectomePath = config.getString("autoencoder_connectome");
        String inputLeadsheetPath = config.getString("input_leadsheet");
        String targetQueuePath = config.getString("target_queue");
        String outputFolderPath = config.getString("output_folder");
        int numInterpolationDivisions = config.getInt("num_interpolation_divisions");

        //initialize network
        ProductCompressingAutoencoder autoencoder = initializeAutoencoder(autoencoderConnectomePath, false);

        //perform the interpolation test
        interpolateTest(autoencoder, autoencoderConnectomePath, new File(inputLeadsheetPath),
                new File(targetQueuePath), new File(outputFolderPath), numInterpolationDivisions);
    }
        break;
    case "frankenstein": {
        //load parameter values from config file
        String autoencoderConnectomePath = config.getString("autoencoder_connectome");
        String primaryQueuePath = config.getString("primary_queue_path");
        String referenceLeadsheetPath = config.getString("reference_leadsheet");
        String queueFolderPath = config.getString("queue_folder");
        String outputFilePath = config.getString("output_file_path");
        int numReferenceQueues = config.getInt("num_reference_queues");
        int numCombinations = config.getInt("num_combinations");
        double interpolationMagnitude = config.getDouble("interpolation_strength");

        //initialize network
        ProductCompressingAutoencoder autoencoder = initializeAutoencoder(autoencoderConnectomePath, false);

        //initialize chord sequence
        LeadsheetDataSequence chordSequence = leadsheetToSequence(referenceLeadsheetPath);
        chordSequence.clearMelody();

        //perform frankenstein test
        frankensteinTest(autoencoder, autoencoderConnectomePath, primaryQueuePath, new File(queueFolderPath),
                outputFilePath, chordSequence, numReferenceQueues, numCombinations, interpolationMagnitude);
    }
        break;
    default:
        throw new RuntimeException("Unrecognized test type");
    }
    LogTimer.log("Process finished"); //Done!
}
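
For illustration, a minimal properties file for the 'encode+decode' test might look like the following; every path and file extension here is hypothetical except .ls, which the corpus-scanning code above expects:

    test_type=encode+decode
    autoencoder_connectome=/path/to/autoencoder.connectome
    name_generator_connectome=/path/to/namegen.connectome
    input_leadsheet=/path/to/song.ls
    output_folder=/path/to/output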

From source file:com.idega.util.Stripper.java

public static void main(String[] args) {
    // Stripper stripper1 = new Stripper();

    if (args.length != 2) {
        System.err.println("Auli.  tt a hafa tvo parametra me essu, innskr og tskr");

        return;
    }

    BufferedReader in = null;
    BufferedWriter out = null;

    try {
        in = new BufferedReader(new FileReader(args[0]));
    } catch (java.io.FileNotFoundException e) {
        System.err.println("Auli. Error : " + e.toString());

        return;
    }

    try {
        out = new BufferedWriter(new FileWriter(args[1]));
    } catch (java.io.IOException e) {
        System.err.println("Auli. Error : " + e.toString());
        IOUtils.closeQuietly(in);
        return;
    }

    try {
        String input = in.readLine();
        int count = 0;
        while (input != null) {
            int index = input.indexOf("\\CVS\\");
            if (index > -1) {
                System.out.println("Skipping : " + input);
                count++;
            } else {
                out.write(input);
                out.newLine();
            }

            input = in.readLine();
        }
        System.out.println("Skipped : " + count);
    } catch (java.io.IOException e) {
        System.err.println("Error reading or writing file : " + e.toString());
    }

    try {
        in.close();
        out.close();
    } catch (java.io.IOException e) {
        System.err.println("Error closing files : " + e.toString());
    }
}

From source file:featureExtractor.popularityMeasure.java

public static void main(String[] args) throws IOException {
    //ReadKnownPopularityScores();
    FileWriter fw = new FileWriter(Out_resultFile);
    BufferedWriter bw = new BufferedWriter(fw);

    FileReader inputFile = new FileReader(In_entities);
    BufferedReader bufferReader = new BufferedReader(inputFile);
    String line;
    while ((line = bufferReader.readLine()) != null) {
        String[] row = line.split("\t");
        double score = 0;
        String entityName = row[0].toLowerCase().trim();
        System.out.println("Searching for : " + entityName);
        if (knownScore_table.containsKey(entityName)) {
            //System.out.println("Already known for: " + entityName);
            score = knownScore_table.get(entityName);
        } else {
            System.out.println("Not known for: " + entityName);
            String json = searchTest(entityName, "&scoring=entity");
            try {
                score = ParseJSON_getScore(json);
            } catch (Exception e) {
                score = 0;
            }
            System.out.println("Putting : " + entityName);
            knownScore_table.put(entityName, score);
        }
        bw.write(row[0] + "\t" + score + "\n");
        System.out.println(row[0]);
    }
    bufferReader.close();
    bw.close();
}