Example usage for org.apache.commons.io FileUtils readLines

List of usage examples for org.apache.commons.io FileUtils readLines

Introduction

On this page you can find example usage for org.apache.commons.io FileUtils.readLines.

Prototype

public static List<String> readLines(File file) throws IOException 

Document

Reads the contents of a file line by line to a List of Strings using the default encoding for the VM.
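
For orientation, here is a minimal, self-contained sketch of a typical call; the file name input.txt is only a placeholder. Recent Commons IO versions also provide a readLines(File, Charset) overload for callers that do not want to rely on the platform default encoding.

import java.io.File;
import java.io.IOException;
import java.util.List;

import org.apache.commons.io.FileUtils;

public class ReadLinesSketch {

    public static void main(String[] args) throws IOException {
        // Reads the whole file into memory, one String per line,
        // using the platform default encoding.
        List<String> lines = FileUtils.readLines(new File("input.txt"));
        for (String line : lines) {
            System.out.println(line);
        }
    }
}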

Usage

From source file:common.ReverseWordsCount.java

public static void main(String[] args) throws IOException {
    List<String> readLines = FileUtils.readLines(new File("G:\\LTNMT\\LTNMT\\sougou\\sougou2500.txt"));
    Map<String, Integer> words = new HashMap<>();

    for (String line : readLines) {
        String[] split = line.split(" ");
        // Count occurrences of each word
        for (String word : split) {
            Integer count = words.get(word);
            if (count == null) {
                words.put(word, 1);
            } else {
                words.put(word, count + 1);
            }
        }
    }
    Set<Map.Entry<String, Integer>> entrySet = words.entrySet();
    List<Map.Entry<String, Integer>> reverseLists = new ArrayList<>(entrySet);
    Collections.sort(reverseLists, new Comparator<Map.Entry<String, Integer>>() {
        @Override
        public int compare(Map.Entry<String, Integer> o1, Map.Entry<String, Integer> o2) {
            return o2.getValue().compareTo(o1.getValue());
        }
    });
    PrintStream ps = new PrintStream("c:/reverseWords.txt");
    for (Map.Entry<String, Integer> teEntry : reverseLists) {
        ps.println(teEntry.getKey() + " " + teEntry.getValue());
    }
    ps.close();
}

From source file:eu.europeana.datamigration.ese2edm.LogCleaner.java

public static void main(String[] args) {
    try {
        List<String> lines = FileUtils.readLines(new File("/home/gmamakis/test.log"));
        List<String> newLines = new ArrayList<String>();
        for (String line : lines) {
            if (!line.startsWith("Apr")) {
                newLines.add(line);
            }
        }
        FileUtils.writeLines(new File("/home/gmamakis/rdfslabel.log"), newLines);
    } catch (IOException ex) {
        Logger.getLogger(LogCleaner.class.getName()).log(Level.SEVERE, null, ex);
    }
}

From source file:de.tudarmstadt.ukp.dkpro.keyphrases.wikipediafilter.filter.util.FileFilter.java

public static void main(String[] args) throws IOException {
    Set<String> articles = new HashSet<String>();
    articles.addAll(FileUtils.readLines(new File("target/wikipedia/articles.txt")));

    BufferedWriter writer = new BufferedWriter(new FileWriter(new File("target/passwords.txt.filtered")));
    for (String line : FileUtils.readLines(new File("target/passwords.txt"))) {
        if (articles.contains(line.split("\t")[0].toLowerCase())) {
            writer.write(line);
            writer.newLine();
        }
    }
    writer.close();
}

From source file:edu.illinois.cs.cogcomp.wikifier.utils.freebase.cleanDL.java

public static void main(String[] args) throws IOException {
    List<String> lines = FileUtils.readLines(new File("/Users/Shyam/mention.eval.dl"));

    for (String line : lines) {
        String[] parts = line.split("\\s+");
        System.out.println(parts[0] + parts[1] + parts[2]);
        StringBuilder sb = new StringBuilder();
        for (int i = 3; i < parts.length; i++)
            sb.append(parts[i] + " ");
        if (mentionFilter(parts)) {
            System.out.println("removing " + Arrays.asList(parts));
            continue;
        }
        if (mentions.containsKey(parts[0])) {
            mentions.get(parts[0]).add(new DocMention(parts[0], sb.toString(), Integer.parseInt(parts[1]),
                    Integer.parseInt(parts[2])));
        } else {
            mentions.put(parts[0], new ArrayList<DocMention>());
            mentions.get(parts[0]).add(new DocMention(parts[0], sb.toString(), Integer.parseInt(parts[1]),
                    Integer.parseInt(parts[2])));
        }
    }
    for (String doc : mentions.keySet()) {
        handleDoc(mentions.get(doc));
    }

    outputMentions();
}

From source file:bixo.tools.RunUrlNormalizerTool.java

/**
 * @param args
 */
public static void main(String[] args) {
    String curUrl = null;

    try {
        List<String> lines = FileUtils.readLines(new File(args[0]));

        BaseUrlNormalizer urlNormalizer = new SimpleUrlNormalizer();
        for (String url : lines) {
            curUrl = url;
            String normalized = urlNormalizer.normalize(curUrl);
            if (!normalized.equalsIgnoreCase(curUrl)) {
                System.out.println(curUrl + " ==> " + normalized);
            }
        }
    } catch (Throwable t) {
        System.err.println("Exception while processing URLs: " + t.getMessage());
        System.err.println("Current url: " + curUrl);
        t.printStackTrace(System.err);
        System.exit(-1);
    }
}

From source file:jsonparser.ToJSON.java

public static void main(String args[]) throws FileNotFoundException, IOException {
    String text_file = "C:/Users/Kevin/Documents/NetBeansProjects/JsonParser/src/jsonparser/sample.txt";
    File file = new File(text_file);
    String s1, s2, s3;

    // Read the file once instead of re-reading it for every line
    List<String> lines = FileUtils.readLines(file);

    s1 = lines.get(0);
    String split1[] = s1.split("=");
    contact_id = split1[1];

    s2 = lines.get(1);
    String split2[] = s2.split("=");
    confidence_level = Float.valueOf(split2[1]);

    s3 = lines.get(2);
    String split3[] = s3.split("=");
    if (split3[1].equals(" Found")) {
        is_matched = true;
    } else {
        is_matched = false;
    }

    System.out.println("Read from text file:");
    System.out.println("contact_id =" + contact_id);
    System.out.println("confidence_level = " + confidence_level);
    System.out.println("is_matched = " + is_matched);

    FacialRecognition fr = new FacialRecognition();
    fr.setContactID(contact_id);
    fr.setConfidenceLevel(confidence_level);
    fr.setIsMatched(is_matched);
    Gson gson = new GsonBuilder().setPrettyPrinting().create();

    //convert java object to JSON format
    String json = gson.toJson(fr);

    //write JSON to a file
    try {
        //write converted json data to a file named "CountryGSON.json"  
        FileWriter writer = new FileWriter(
                "C:/Users/Kevin/Documents/NetBeansProjects/JsonParser/src/jsonparser/test.json", true);
        writer.write("" + json + ",\n");
        writer.close();

    } catch (IOException e) {
        e.printStackTrace();
    }

    //eventually need to change to send over back to client-side
    System.out.println();
    System.out.println("Coverting strings into JSON...");
    System.out.println(json);

}

From source file:com.ifeng.sorter.NginxApp.java

public static void main(String[] args) {

    String input = "src/test/resources/data/nginx_report.txt";

    PrintWriter pw = null;

    Map<String, List<LogBean>> resultMap = new HashMap<String, List<LogBean>>();
    List<String> ips = new ArrayList<String>();

    try {
        List<String> lines = FileUtils.readLines(new File(input));
        List<LogBean> items = new ArrayList<LogBean>();

        for (String line : lines) {
            String[] values = line.split("\t");

            if (values != null && values.length == 3) {// ip total seria
                try {
                    String ip = values[0].trim();
                    String total = values[1].trim();
                    String seria = values[2].trim();

                    LogBean bean = new LogBean(ip, Integer.parseInt(total), seria);

                    items.add(bean);

                } catch (NumberFormatException e) {
                    e.printStackTrace();
                }
            }
        }

        Collections.sort(items);

        for (LogBean bean : items) {
            String key = bean.getIp();

            if (resultMap.containsKey(key)) {
                resultMap.get(key).add(bean);
            } else {
                List<LogBean> keyList = new ArrayList<LogBean>();
                keyList.add(bean);
                resultMap.put(key, keyList);

                ips.add(key);
            }
        }

        pw = new PrintWriter("src/test/resources/output/result.txt", "UTF-8");

        for (String ip : ips) {
            List<LogBean> list = resultMap.get(ip);

            for (LogBean bean : list) {
                pw.append(bean.toString());
                pw.println();
            }
        }

    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        if (pw != null) {
            pw.flush();
            pw.close();
        }
    }
}

From source file:com.l2jfree.tools.ProjectSettingsSynchronizer.java

public static void main(String[] args) throws IOException {
    final File src = new File(".").getCanonicalFile();
    System.out.println("Copying from: " + src);
    System.out.println();

    final List<File> destinations = new ArrayList<File>();
    for (File dest : src.getParentFile().listFiles()) {
        if (dest.isHidden() || !dest.isDirectory())
            continue;

        destinations.add(dest);
        System.out.println("Copying to: " + dest);
    }
    System.out.println();

    // .project
    System.out.println(".project");
    System.out.println("================================================================================");
    {
        final List<String> lines = FileUtils.readLines(new File(src, ".project"));

        for (File dest : destinations) {
            lines.set(2, lines.get(2).replaceAll(src.getName(), dest.getName()));
            writeLines(dest, ".project", lines);
            lines.set(2, lines.get(2).replaceAll(dest.getName(), src.getName()));
        }
    }
    System.out.println();

    // .classpath
    System.out.println(".classpath");
    System.out.println("================================================================================");
    {
        final List<String> lines = FileUtils.readLines(new File(src, ".classpath"));

        for (File dest : destinations) {
            if (dest.getName().endsWith("-main") || dest.getName().endsWith("-datapack")) {
                final ArrayList<String> tmp = new ArrayList<String>();

                for (String line : lines)
                    if (!line.contains("classpathentry"))
                        tmp.add(line);

                writeLines(dest, ".classpath", tmp);
                continue;
            }

            writeLines(dest, ".classpath", lines);
        }
    }
    System.out.println();

    // .settings
    System.out.println(".settings");
    System.out.println("================================================================================");
    for (File settingsFile : new File(src, ".settings").listFiles()) {
        if (settingsFile.getName().endsWith(".prefs")) {
            System.out.println(".settings/" + settingsFile.getName());
            System.out.println(
                    "--------------------------------------------------------------------------------");

            final List<String> lines = FileUtils.readLines(settingsFile);

            if (lines.get(0).startsWith("#"))
                lines.remove(0);

            for (File dest : destinations) {
                writeLines(new File(dest, ".settings"), settingsFile.getName(), lines);
            }
            System.out.println();
        }
    }
    System.out.println();
}

From source file:edu.bigdata.training.fileformats.compress.SequenceFileWriter.java

public static void main(String[] args) throws IOException {
    String uri = "output";
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path(uri);
    IntWritable key = new IntWritable();
    Text value = new Text();
    File infile = new File("src/main/resources/input.txt");
    SequenceFile.Writer writer = null;
    try {
        writer = SequenceFile.createWriter(conf, Writer.file(path), Writer.keyClass(key.getClass()),
                Writer.valueClass(value.getClass()),
                Writer.bufferSize(fs.getConf().getInt("io.file.buffer.size", 4096)),
                Writer.replication(fs.getDefaultReplication()), Writer.blockSize(1073741824),
                Writer.compression(SequenceFile.CompressionType.BLOCK, new DefaultCodec()),
                Writer.progressable(null), Writer.metadata(new Metadata()));
        int ctr = 100;
        List<String> lines = FileUtils.readLines(infile);
        for (String line : lines) {
            key.set(ctr++);
            value.set(line);
            if (ctr < 150) {
                System.out.printf("[%s]\t%s\t%s\n", writer.getLength(), key, value);
            }
            writer.append(key, value);
        }
    } finally {
        IOUtils.closeStream(writer);
    }
}

From source file:eu.annocultor.analyzers.SolrPropertyHitsAnalyzer.java

/**
 * @param args
 */
public static void main(String[] args) throws Exception {

    String solrUrl = args[0];
    SolrServer solr = new CommonsHttpSolrServer(solrUrl);

    String prefixOne = args[1];
    String prefixTwo = args[2];

    long prefixOneCount = 0;
    long prefixTwoCount = 0;

    long totalPassedCount = 0;

    for (File logLocation : FileUtils.listFiles(new File(args[3]), null, true)) {
        System.out.println("Parsing " + logLocation);

        for (String line : FileUtils.readLines(logLocation)) {
            if (StringUtils.contains(line, "FULL_RESULT_HMTL")) {
                line = StringUtils.substringAfter(line, "europeana_uri=");
                String solrDocumentId = StringUtils.substringBefore(line, ",");
                String query = extractQuery(line);
                if (StringUtils.startsWith(solrDocumentId, "http://") && isLongEnoughToCount(query)) {

                    SolrQuery solrQuery = new SolrQuery("europeana_uri:\"" + solrDocumentId + "\"");
                    QueryResponse response = solr.query(solrQuery);
                    SolrDocumentList sourceDocs = response.getResults();
                    if (sourceDocs.isEmpty()) {
                        System.out.println("Could not find object " + solrDocumentId);
                    } else {
                        SolrDocument document = sourceDocs.get(0);

                        if (hasWord(document, prefixOne, query)) {
                            prefixOneCount++;
                        } else {
                            if (hasWord(document, prefixTwo, query)) {
                                prefixTwoCount++;
                            }
                        }
                    }
                }
                totalPassedCount++;
            }
        }
        System.out.println(prefixOne + " : " + prefixOneCount + " " + prefixTwo + " : " + prefixTwoCount
                + " of total passed entries " + totalPassedCount);
    }
}