Example usage for the org.apache.commons.io.filefilter.SuffixFileFilter constructor

Introduction

On this page you can find example usage of the org.apache.commons.io.filefilter.SuffixFileFilter constructor.

Prototype

public SuffixFileFilter(List<String> suffixes) 

Document

Constructs a new Suffix file filter for a list of suffixes.
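
None of the usage examples below call the List-based constructor directly, so here is a minimal sketch of it. The class name and directory path are placeholder assumptions, not taken from any example on this page.

import java.io.File;
import java.io.FilenameFilter;
import java.util.Arrays;
import java.util.List;

import org.apache.commons.io.filefilter.SuffixFileFilter;

public class SuffixListExample {
    public static void main(String[] args) {
        // Build the filter from a list of suffixes (the constructor documented above).
        List<String> suffixes = Arrays.asList(".txt", ".log");
        FilenameFilter filter = new SuffixFileFilter(suffixes);

        // List matching files; "." (the working directory) is only a placeholder.
        File[] matches = new File(".").listFiles(filter);
        if (matches != null) {
            for (File f : matches) {
                System.out.println(f.getName());
            }
        }
    }
}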

Usage

From source file:org.netxilia.api.impl.utils.DynamicClassLoader.java

public DynamicClassLoader() {
    try {
        File home = new File(System.getProperty("user.home"), "netxilia");
        if (!home.exists()) {
            if (!home.mkdir()) {
                log.error("Could not create Netxilia storage directory:" + home);
                return;
            }
        }
        addFile(home);
        // listFiles can return null on I/O error, so guard before iterating
        File[] jars = home.listFiles((FilenameFilter) new SuffixFileFilter(".jar"));
        if (jars != null) {
            for (File jar : jars) {
                addFile(jar);
            }
        }
    } catch (IOException e) {
        log.error("ERROR:" + e);
    }

}
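
A note on the (FilenameFilter) cast above: SuffixFileFilter implements both java.io.FileFilter and java.io.FilenameFilter, so passing it to File.listFiles without a cast would be an ambiguous call. The cast selects the overload. A minimal sketch with a placeholder directory and class name:

import java.io.File;
import java.io.FileFilter;
import java.io.FilenameFilter;

import org.apache.commons.io.filefilter.SuffixFileFilter;

public class ListFilesCastExample {
    public static void main(String[] args) {
        File dir = new File("."); // placeholder directory
        SuffixFileFilter jars = new SuffixFileFilter(".jar");

        // Either cast compiles; both overloads apply the same suffix test.
        File[] byName = dir.listFiles((FilenameFilter) jars);
        File[] byFile = dir.listFiles((FileFilter) jars);

        System.out.println((byName == null ? 0 : byName.length) + " jar(s) via FilenameFilter");
        System.out.println((byFile == null ? 0 : byFile.length) + " jar(s) via FileFilter");
    }
}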

From source file:org.onesec.raven.ivr.impl.ExternalTextToSpeechEngineNode.java

private void readCache() throws Exception {
    File cacheFile = new File(cacheDir);
    if (!cacheFile.exists() || !cacheFile.isDirectory() || !cacheFile.canRead() || !cacheFile.canWrite())
        throw new Exception(String.format("Invalid cache path (%s): it does not exist, is not a directory, "
                + "or is not readable/writable", cacheDir));
    File[] textFiles = cacheFile.listFiles((FilenameFilter) new SuffixFileFilter(".txt"));
    if (textFiles != null && textFiles.length > 0)
        for (File textFile : textFiles) {
            String text = FileUtils.readFileToString(textFile, textFileEncoding.name());
            String wavFileName = FilenameUtils.removeExtension(textFile.getPath()) + ".wav";
            File wavFile = new File(wavFileName);
            if (!(wavFile.exists())) {
                if (isLogLevelEnabled(LogLevel.ERROR))
                    getLogger().error("Found text file ({}) but not found WAV file ({}). "
                            + "Removing invalid cached entry", textFile.getPath(), wavFileName);
                textFile.delete();
            } else
                cache.put(text, new FileInputStreamSource(wavFile, this));
        }
}
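
Suffix matching above is case-sensitive, so the ".txt" filter will not match "NOTES.TXT". Commons IO also offers a constructor taking an org.apache.commons.io.IOCase. A minimal sketch with a placeholder directory and class name:

import java.io.File;
import java.io.FilenameFilter;

import org.apache.commons.io.IOCase;
import org.apache.commons.io.filefilter.SuffixFileFilter;

public class CaseInsensitiveSuffixExample {
    public static void main(String[] args) {
        // Match .txt in any case (.txt, .TXT, .Txt, ...).
        FilenameFilter filter = new SuffixFileFilter(".txt", IOCase.INSENSITIVE);
        File[] files = new File(".").listFiles(filter); // placeholder directory
        if (files != null) {
            for (File f : files) {
                System.out.println(f.getName());
            }
        }
    }
}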

From source file:org.opensextant.howler.test.OWL2Text.java

public static void main(String[] args) throws IOException {

    File inputDirsFile = new File(args[0]);
    File resultsDir = new File(args[1]);

    FileStructure outFormat = FileStructure.DOCUMENT_PER_LINE;

    try {
        outFormat = FileStructure.valueOf(args[2]);
    } catch (Exception e) {
        System.err.println("Bad value for format: " + args[2]);
    }

    boolean ignoreBadImports = Boolean.valueOf(args[3]);

    // the to and from converters
    FromOWL fromOWL = new FromOWL(ignoreBadImports);
    ToText toText = new ToText();

    fromOWL.setRewriteAllAsSubclass(false);
    fromOWL.setMaxPairs(-1);
    fromOWL.setFlattenSingleSet(false);
    fromOWL.setNegNormal(false);

    List<String> ontoDirs = FileUtils.readLines(inputDirsFile, "UTF-8");

    File results = new File(resultsDir, "sentences.txt");
    File byOntoDir = new File(resultsDir, "byOnto");
    byOntoDir.mkdirs();

    FileUtils.writeStringToFile(results, "OntoFile\tAxiomType\tAxiom\tText\n", "UTF-8", false);

    File baseOntoTestDir = inputDirsFile.getParentFile();
    // File to the total set of Words seen
    File wDump = new File(resultsDir, "wordDump.txt");

    for (String ontoDir : ontoDirs) {

        // skip comments
        if (ontoDir.startsWith("#") || ontoDir.trim().isEmpty()) {
            continue;
        }

        File inputDir = new File(baseOntoTestDir, ontoDir);

        // find all the ontology files in the input dir
        String[] exts = { ".owl", ".ttl", ".rdf" };
        FilenameFilter filter = new SuffixFileFilter(exts);
        File[] ontos = inputDir.listFiles(filter);
        // listFiles returns null if the directory cannot be read
        if (ontos == null) {
            continue;
        }

        for (File ontoFile : ontos) {
            System.out.println("Converting " + ontoFile);

            TextDocument backText = toText.convert(fromOWL.convertOWL(ontoFile));

            // create a name from the input ontology file
            String ontoNameBase = ontoFile.getName().split("\\.")[0];
            File outText = new File(byOntoDir, ontoNameBase + ".txt");
            FileUtils.writeStringToFile(outText, backText.toString(outFormat, true), "UTF-8");
        }
    }

    // dump all words seen during all ontology conversions
    WordManager.getWordManager().dumpWordsToFile(wDump);
}

From source file:org.opensextant.howler.test.OWL2Text2OWLTest.java

public static void main(String[] args) throws IOException, OWLOntologyCreationException {

    File inputDirsFile = new File(args[0]);
    File resultsDir = new File(args[1]);
    boolean ignoreBadImports = Boolean.valueOf(args[2]);
    File resourceDir = new File(args[3]);

    File posDir = new File(resourceDir, "pos");

    File lexFile = new File(posDir, "lexicon.txt");
    File gramFile = new File(posDir, "ngrams.txt");
    File typeInfoFile = new File(resourceDir, "typeInfo.txt");
    File phraseFile = new File(resourceDir, "phrases.txt");

    // the to and from converters
    ToOWL toOWL = new ToOWL();
    toOWL.setUseHasValue(false);

    FromOWL fromOWL = new FromOWL(ignoreBadImports);
    fromOWL.setRewriteAllAsSubclass(false);
    fromOWL.setMaxPairs(-1);
    fromOWL.setFlattenSingleSet(false);
    fromOWL.setNegNormal(false);

    ToText toText = new ToText();
    FromText fromText = new FromText(lexFile, gramFile, typeInfoFile, phraseFile);

    List<String> ontoDirs = FileUtils.readLines(inputDirsFile, "UTF-8");

    // create and write header to Summary file
    File summary = new File(resultsDir, "Summary.txt");
    FileUtils.writeStringToFile(summary,
            "Ontology File\tOntology Name\tVersion\tAxiom (Missing)\tAxiom (Extra)\t Axiom (NYI)" + "\n",
            "UTF-8", false);

    File baseOntoTestDir = inputDirsFile.getParentFile();
    // File to the total set of Words seen

    File totalResults = new File(resultsDir, "total_AxiomCompare.txt");

    FileUtils.writeStringToFile(totalResults, "Ontology Name\tStatus\tAxiom Type\tAxiom" + "\n", "UTF-8",
            false);
    for (String ontoDir : ontoDirs) {

        // skip comments
        if (ontoDir.startsWith("#") || ontoDir.trim().isEmpty()) {
            continue;
        }

        File inputDir = new File(baseOntoTestDir, ontoDir);

        // find all the ontology files in the input dir
        String[] exts = { ".owl", ".ttl", ".rdf" };
        FilenameFilter filter = new SuffixFileFilter(exts);
        File[] ontos = inputDir.listFiles(filter);
        // listFiles returns null if the directory cannot be read
        if (ontos == null) {
            continue;
        }

        File results = new File(resultsDir, inputDir.getName() + "_AxiomCompare.txt");
        File wDump = new File(resultsDir, inputDir.getName() + "_wordDump.txt");
        // write header to result files
        FileUtils.writeStringToFile(results, "Ontology Name\tStatus\tAxiom Type\tAxiom" + "\n", "UTF-8", false);
        for (File ontoFile : ontos) {
            String ontoName = ontoFile.getName();

            System.out.println();
            System.out.println("Loading Ontology\t" + ontoFile);
            OWLOntology originalOnto = fromOWL.loadOWL(ontoFile);

            // expand the nary axioms to pairwise (which is what the
            // conversion does)
            expandNary(originalOnto);
            // convert ontology to text and back to ontology
            OWLOntology backOnto = toOWL
                    .convert(fromText.convertText(toText.convert(fromOWL.convertOWL(originalOnto))));
            OWLOntologyID origID = originalOnto.getOntologyID();

            int axiomErrorsMissing = 0;
            int axiomErrorsExtra = 0;
            int axiomErrorsNYI = 0;
            // compare axioms

            // compare each axiom in original to see if it appears in back
            // converted ontology
            for (OWLAxiom originalAx : OWLAPIStreamUtils.asList(originalOnto.axioms())) {
                if (!compare(backOnto, originalAx)) {

                    String axString = originalAx.toString().replaceAll("[\\n\\r\\t]", "<WHITE_SPACE>");
                    if (nyi(originalAx)) {
                        axiomErrorsNYI++;
                        FileUtils.writeStringToFile(results, ontoName + "\t" + "NYI" + "\t"
                                + originalAx.getAxiomType() + "\t" + axString + "\n", "UTF-8", true);
                        FileUtils.writeStringToFile(totalResults, ontoName + "\t" + "NYI" + "\t"
                                + originalAx.getAxiomType() + "\t" + axString + "\n", "UTF-8", true);
                    } else {
                        axiomErrorsMissing++;
                        FileUtils.writeStringToFile(results, ontoName + "\t" + "Missing" + "\t"
                                + originalAx.getAxiomType() + "\t" + axString + "\n", "UTF-8", true);
                        FileUtils.writeStringToFile(totalResults, ontoName + "\t" + "Missing" + "\t"
                                + originalAx.getAxiomType() + "\t" + axString + "\n", "UTF-8", true);
                    }
                }
            }

            // compare each axiom in back converted ontology to see if it
            // appears in original ontology
            for (OWLAxiom backAx : OWLAPIStreamUtils.asList(backOnto.axioms())) {
                if (!compare(originalOnto, backAx)) {
                    axiomErrorsExtra++;
                    String axString = backAx.toString().replaceAll("[\\n\\r\\t]", "<WHITE_SPACE>");
                    FileUtils.writeStringToFile(results,
                            ontoName + "\t" + "Extra" + "\t" + backAx.getAxiomType() + "\t" + axString + "\n",
                            "UTF-8", true);
                    FileUtils.writeStringToFile(totalResults,
                            ontoName + "\t" + "Extra" + "\t" + backAx.getAxiomType() + "\t" + axString + "\n",
                            "UTF-8", true);
                }
            }

            // write the summary
            FileUtils.writeStringToFile(summary,
                    ontoFile + "\t" + origID.getOntologyIRI().orElse(null) + "\t"
                            + origID.getVersionIRI().orElse(null) + "\t" + axiomErrorsMissing + "\t"
                            + axiomErrorsExtra + "\t" + axiomErrorsNYI + "\n",
                    "UTF-8", true);
        }
        // dump the vocabulary from the WordManager
        WordManager.getWordManager().dumpWordsToFile(wDump);
        WordManager.getWordManager().reset();
    }

}

From source file:org.opensextant.howler.test.OWLTest.java

public static void main(String[] args) throws IOException, OWLOntologyCreationException {

    File inputDirsFile = new File(args[0]);
    File resultsDir = new File(args[1]);
    boolean ignoreBadImports = Boolean.valueOf(args[2]);

    // the to and from converters
    FromOWL fromOWL = new FromOWL(ignoreBadImports);
    ToOWL toOWL = new ToOWL();
    // use same manager for both to and from
    toOWL.setOwlOntologyManager(fromOWL.getOntologyManager());

    // expand n-ary axioms including all pairs
    fromOWL.setMaxPairs(-1);

    // rewrite all axioms as subclass axioms?
    fromOWL.setRewriteAllAsSubclass(false);

    fromOWL.setFlattenSingleSet(false);

    // convert axiom to Negation Normal Form
    fromOWL.setNegNormal(false);

    // if only one individual in set, use HasValue not SomeValuesFrom
    toOWL.setUseHasValue(false);

    List<String> ontoDirs = FileUtils.readLines(inputDirsFile, "UTF-8");

    // create and write header to Summary file
    File summary = new File(resultsDir, "Summary.txt");
    FileUtils.writeStringToFile(summary,
            "Ontology File\tOntology Name\tVersion\tAxiom (Missing)\tAxiom (Extra)\t Axiom (NYI)" + "\n",
            "UTF-8", false);

    File baseOntoTestDir = inputDirsFile.getParentFile();
    // File to the total set of Words seen
    File wDump = new File(resultsDir, "wordDump.txt");

    for (String ontoDir : ontoDirs) {

        // skip comments
        if (ontoDir.startsWith("#") || ontoDir.trim().isEmpty()) {
            continue;
        }

        File inputDir = new File(baseOntoTestDir, ontoDir);

        // find all the ontology files in the input dir
        String[] exts = { ".owl", ".ttl", ".rdf" };
        FilenameFilter filter = new SuffixFileFilter(exts);
        File[] ontos = inputDir.listFiles(filter);
        // listFiles returns null if the directory cannot be read
        if (ontos == null) {
            continue;
        }

        File results = new File(resultsDir, inputDir.getName() + "_AxiomCompare.txt");

        // write header to result files
        FileUtils.writeStringToFile(results, "Ontology Name\tStatus\tAxiom Type\tAxiom" + "\n", "UTF-8", false);

        for (File ontoFile : ontos) {
            String ontoName = ontoFile.getName();

            System.out.println();
            System.out.println("Loading Ontology\t" + ontoFile);
            AutoIRIMapper mapper = new AutoIRIMapper(ontoFile.getParentFile(), true);
            fromOWL.getOntologyManager().getIRIMappers().add(mapper);
            OWLOntology originalOnto = fromOWL.getOntologyManager().loadOntologyFromOntologyDocument(ontoFile);
            // expand the nary axioms to pairwise (which is what the
            // conversion does)
            expandNary(originalOnto);
            OWLOntologyID origID = originalOnto.getOntologyID();

            // convert ontology to abstraction and back to ontology
            OWLOntology backOnto = toOWL.convert(fromOWL.convertOWL(originalOnto));

            int axiomErrorsMissing = 0;
            int axiomErrorsExtra = 0;
            int axiomErrorsNYI = 0;
            // compare axioms

            // compare each axiom in original to see if it appears in back
            // converted ontology
            for (OWLAxiom originalAx : OWLAPIStreamUtils.asList(originalOnto.axioms())) {
                if (!compare(backOnto, originalAx)) {

                    if (nyi(originalAx)) {
                        axiomErrorsNYI++;
                        FileUtils.writeStringToFile(results, ontoName + "\t" + "NYI" + "\t"
                                + originalAx.getAxiomType() + "\t" + originalAx + "\n", "UTF-8", true);
                    } else {
                        axiomErrorsMissing++;
                        FileUtils.writeStringToFile(results, ontoName + "\t" + "Missing" + "\t"
                                + originalAx.getAxiomType() + "\t" + originalAx + "\n", "UTF-8", true);
                    }
                }
            }

            // compare each axiom in back converted ontology to see if it
            // appears in original ontology
            for (OWLAxiom backAx : OWLAPIStreamUtils.asList(backOnto.axioms())) {
                if (!compare(originalOnto, backAx)) {
                    axiomErrorsExtra++;
                    FileUtils.writeStringToFile(results,
                            ontoName + "\t" + "Extra" + "\t" + backAx.getAxiomType() + "\t" + backAx + "\n",
                            "UTF-8", true);
                }
            }

            // write the summary
            FileUtils.writeStringToFile(summary,
                    ontoFile + "\t" + origID.getOntologyIRI().orElse(null) + "\t"
                            + origID.getVersionIRI().orElse(null) + "\t" + axiomErrorsMissing + "\t"
                            + axiomErrorsExtra + "\t" + axiomErrorsNYI + "\n",
                    "UTF-8", true);
        }
    }
    // dump the vocabulary from the WordManager
    WordManager.getWordManager().dumpWordsToFile(wDump);
}

From source file:org.opensextant.howler.test.ParserTest.java

public static void main(String[] args) throws IOException, OWLOntologyCreationException {

    File inputDirsFile = new File(args[0]);
    FileStructure fileMode = FileStructure.valueOf(args[1]);
    File resultsDir = new File(args[2]);
    File resourceDir = new File(args[3]);

    File posDir = new File(resourceDir, "pos");

    List<String> textDirs = FileUtils.readLines(inputDirsFile, "UTF-8");

    File lexFile = new File(posDir, "lexicon.txt");
    File gramFile = new File(posDir, "ngrams.txt");
    File typeInfoFile = new File(resourceDir, "typeInfo.txt");
    File phraseFile = new File(resourceDir, "phrases.txt");

    // FromText from = new FromText(lexFile, gramFile, typeInfoFile, phraseFile);

    File parseResults = new File(resultsDir, "ParserSentences.txt");
    File baseTextTestDir = inputDirsFile.getParentFile();

    // File to the total set of Words seen
    File wDump = new File(resultsDir, "wordDump.txt");

    FileUtils.writeStringToFile(parseResults,
            "File\toriginal text\tParse Type\tParse\tNormalized text\tPOS sequence\n", "UTF-8", false);

    for (String textDir : textDirs) {

        // skip comments
        if (textDir.startsWith("#") || textDir.trim().isEmpty()) {
            continue;
        }

        File inputDir = new File(baseTextTestDir, textDir);

        // find all the text files in the input dir
        String[] exts = { ".txt" };
        FilenameFilter filter = new SuffixFileFilter(exts);
        File[] textFiles = inputDir.listFiles(filter);
        // listFiles returns null if the directory cannot be read
        if (textFiles == null) {
            continue;
        }

        for (File textFile : textFiles) {
            String txtFileName = textFile.getName();

            // WordManager.getWordManager().reset();
            FromText from = new FromText(lexFile, gramFile, typeInfoFile, phraseFile);

            System.out.println();
            System.out.println("Loading Text File " + textFile);
            List<String> originalTextDocs = DocumentFactory.createTextDocument(textFile, fileMode);
            System.out.println("Loaded " + originalTextDocs.size() + " text documents from " + textFile);

            for (String originalTextDoc : originalTextDocs) {

                // skip comments
                if (originalTextDoc.startsWith("//") || originalTextDoc.trim().isEmpty()) {
                    continue;
                }

                // parse the text and convert it into a document
                IRI docIRI = IRI.create("http://example.org", "testDocument");

                Document doc = from.convertText(originalTextDoc, docIRI, "testDocument");

                // get the statements
                List<Statement> statements = doc.getStatements();

                // for each statement in converted document
                for (Statement statement : statements) {

                    // create sequence of views of the token sequence
                    StringBuilder posBldr = new StringBuilder();
                    StringBuilder normBldr = new StringBuilder();

                    for (Word w : statement.getWords()) {
                        normBldr.append(w.getNormalForm());
                        normBldr.append(" ");

                        posBldr.append(w.getPOS());
                        posBldr.append(" ");
                    }

                    String normSeq = normBldr.toString().trim();
                    String posSeq = posBldr.toString().trim();

                    String tree = statement.getSource();
                    // flatten tree to list of nodes (Strings)
                    List<String> nodes = Arrays.asList(tree.split(" +"));

                    // see how the statement was parsed
                    String parseType = nodes.get(0);
                    String typeString = "";
                    for (String t : nodes.subList(1, nodes.size())) {
                        typeString = typeString + " " + t;
                    }

                    // write the details to results
                    String txt = "";
                    if (!fileMode.equals(FileStructure.SINGLE_BLOCK)) {
                        txt = originalTextDoc;
                    }

                    FileUtils.writeStringToFile(parseResults,
                            txtFileName + "\t" + txt.replaceAll("[\n\r\t]", " ") + "\t" + parseType + "\t"
                                    + typeString.trim() + "\t" + normSeq.replaceAll("[\n\r\t]", " ") + "\t"
                                    + posSeq + "\n",
                            "UTF-8", true);
                }

            }
            // WordManager.getWordManager().reset();
        }
    }
    WordManager.getWordManager().dumpWordsToFile(wDump);
}

From source file:org.opensextant.howler.test.Text2OWL.java

public static void main(String[] args) throws IOException, OWLOntologyCreationException {

    File inputDirsFile = new File(args[0]);
    File resultsDir = new File(args[1]);
    File resourceDir = new File(args[2]);

    File posDir = new File(resourceDir, "pos");

    List<String> textDirs = FileUtils.readLines(inputDirsFile, "UTF-8");

    File lexFile = new File(posDir, "lexicon.txt");
    File gramFile = new File(posDir, "ngrams.txt");
    File typeInfoFile = new File(resourceDir, "typeInfo.txt");
    File phraseFile = new File(resourceDir, "phrases.txt");

    File baseTextTestDir = inputDirsFile.getParentFile();

    FromText from = new FromText(lexFile, gramFile, typeInfoFile, phraseFile);
    ToOWL toOWL = new ToOWL();

    for (String textDir : textDirs) {

        // skip comments
        if (textDir.startsWith("#") || textDir.trim().isEmpty()) {
            continue;
        }

        File inputDir = new File(baseTextTestDir, textDir);

        // find all the text files in the input dir
        String[] exts = { ".txt" };
        FilenameFilter filter = new SuffixFileFilter(exts);
        File[] textFiles = inputDir.listFiles(filter);
        // listFiles returns null if the directory cannot be read
        if (textFiles == null) {
            continue;
        }

        for (File textFile : textFiles) {
            String txtFileName = textFile.getName().split("\\.")[0];

            System.out.println();
            System.out.println("Loading Text File " + textFile);
            String txt = FileUtils.readFileToString(textFile, "UTF-8");

            IRI docIRI = IRI.create("http://example.org", "testDocument");
            // convert to ontology
            Document doc = from.convertText(txt, docIRI, "testDocument");
            System.out.println("\tConverted Text to abstraction");
            OWLOntology onto = toOWL.convert(doc);
            System.out.println("\tConverted abstraction to OWL");
            File out = new File(resultsDir, txtFileName + ".owl");
            toOWL.saveOntology(onto, out);

        }
        WordManager.getWordManager().reset();
    }
}

From source file:org.opensextant.howler.test.TextTest.java

public static void main(String[] args) throws IOException {
    File inputDirsFile = new File(args[0]);
    FileStructure fileMode = FileStructure.valueOf(args[1]);
    File resultsDir = new File(args[2]);
    File resourceDir = new File(args[3]);

    File posDir = new File(resourceDir, "pos");

    List<String> textDirs = FileUtils.readLines(inputDirsFile, "UTF-8");

    File lexFile = new File(posDir, "lexicon.txt");
    File gramFile = new File(posDir, "ngrams.txt");
    File typeInfoFile = new File(resourceDir, "typeInfo.txt");
    File phraseFile = new File(resourceDir, "phrases.txt");

    FromText from = new FromText(lexFile, gramFile, typeInfoFile, phraseFile);
    ToText to = new ToText();

    File results = new File(resultsDir, "textBack.txt");
    File wordManagerDump = new File(resultsDir, "wordManagerDump.txt");

    File baseTextTestDir = inputDirsFile.getParentFile();

    // write header to results
    FileUtils.writeStringToFile(results, "File\tStatement Type\tMatches\tOriginal text\tBack Text\n", "UTF-8",
            false);

    for (String textDir : textDirs) {

        // skip comments
        if (textDir.startsWith("#") || textDir.trim().isEmpty()) {
            continue;
        }

        File inputDir = new File(baseTextTestDir, textDir);

        // find all the text files in the input dir
        String[] exts = { ".txt" };
        FilenameFilter filter = new SuffixFileFilter(exts);
        File[] textFiles = inputDir.listFiles(filter);
        // listFiles returns null if the directory cannot be read
        if (textFiles == null) {
            continue;
        }

        for (File textFile : textFiles) {
            String txtFileName = textFile.getName();

            System.out.println();
            System.out.println("Loading Text File " + textFile);
            List<String> originalTextDocs = DocumentFactory.createTextDocument(textFile, fileMode);
            System.out.println("Loaded " + originalTextDocs.size() + " text documents from " + textFile);

            for (String originalTextDoc : originalTextDocs) {

                // skip comments
                if (originalTextDoc.startsWith("//") || originalTextDoc.trim().isEmpty()) {
                    continue;
                }

                IRI docIRI = IRI.create("http://example.org", "testDocument");
                // convert text to abstraction and back to text
                TextDocument doc = to.convert(from.convertText(originalTextDoc, docIRI, "testDocument"));
                List<Sentence> sentences = doc.getSentences();

                // for each statement in converted document
                for (Sentence sentence : sentences) {
                    String backText = sentence.toString().trim();
                    boolean matchText = compare(originalTextDoc, backText);
                    // String clean = clean(originalTextDoc, backText);
                    String parse = sentence.getParseTree();
                    String sentType = sentType(parse);

                    // write the details to results
                    FileUtils.writeStringToFile(results, txtFileName + "\t" + sentType + "\t" + matchText + "\t"
                            + originalTextDoc + "\t" + backText + "\t" + parse + "\n", "UTF-8", true);
                }

            }

        }
    }
    WordManager.getWordManager().dumpWordsToFile(wordManagerDump);
}

From source file:org.sakaiproject.archive.tool.ArchiveAction.java

/**
* Build the context for the archive download page.
*/
public String buildDownloadContext(VelocityPortlet portlet, Context context, RunData rundata,
        SessionState state) {
    context.put("tlang", rb);
    buildMenu(context);

    //get list of existing archives
    Collection<File> files = Collections.<File>emptySet();
    File archiveBaseDir = new File(
            serverConfigurationService.getString("archive.storage.path", "sakai/archive"));

    if (archiveBaseDir.exists() && archiveBaseDir.isDirectory()) {
        files = FileUtils.listFiles(archiveBaseDir, new SuffixFileFilter(".zip"), null);
    }

    List<SparseFile> zips = new ArrayList<SparseFile>();

    SimpleDateFormat dateFormatIn = new SimpleDateFormat("yyyyMMddHHmmss");
    SimpleDateFormat dateFormatOut = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");

    Calendar calendar = Calendar.getInstance();

    //process the list; also get the hash for each file if it exists
    for (File f : files) {

        String absolutePath = f.getAbsolutePath();

        SparseFile sf = new SparseFile();
        sf.setFilename(f.getName());
        sf.setAbsolutePath(absolutePath);
        sf.setSize(FileUtils.byteCountToDisplaySize(f.length()));

        //get the datetime string; it's the last part of the file name; convert it back to a date we can display
        String dateTimeStr = StringUtils.substringAfterLast(StringUtils.removeEnd(f.getName(), ".zip"), "-");

        try {
            Date date = dateFormatIn.parse(dateTimeStr);
            sf.setDateCreated(dateFormatOut.format(date));
        } catch (ParseException pe) {
            //ignore, just don't set the date
        }

        //get siteId, first part of name
        String siteId = StringUtils.substringBeforeLast(f.getName(), "-");
        sf.setSiteId(siteId);

        //try to get site title if the site still exists
        try {
            Site site = siteService.getSite(siteId);
            sf.setSiteTitle(site.getTitle());
        } catch (IdUnusedException e) {
            //ignore, no site available
        }

        //get the hash; read it from a file with the same name but a different extension
        String hashFilePath = StringUtils.removeEnd(absolutePath, ".zip");
        hashFilePath = hashFilePath + ".sha1";

        File hashFile = new File(hashFilePath);
        try {
            String hash = FileUtils.readFileToString(hashFile, "UTF-8");
            sf.setHash(hash);
        } catch (IOException e) {
            //ignore, don't use the hash
        }
        }

        zips.add(sf);
    }

    context.put("archives", zips);

    return "-download";
}
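
In the example above, the null third argument to FileUtils.listFiles means subdirectories of the archive directory are not searched. To recurse, pass a directory filter such as TrueFileFilter.INSTANCE. A minimal sketch, reusing the example's archive path as a placeholder:

import java.io.File;
import java.util.Collection;

import org.apache.commons.io.FileUtils;
import org.apache.commons.io.filefilter.SuffixFileFilter;
import org.apache.commons.io.filefilter.TrueFileFilter;

public class RecursiveZipListing {
    public static void main(String[] args) {
        File base = new File("sakai/archive"); // placeholder path
        // TrueFileFilter.INSTANCE as the directory filter makes the search recursive.
        Collection<File> zips = FileUtils.listFiles(base, new SuffixFileFilter(".zip"), TrueFileFilter.INSTANCE);
        for (File zip : zips) {
            System.out.println(zip.getAbsolutePath());
        }
    }
}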

From source file:org.seasar.uruma.eclipath.mojo.CheckCleanMojo.java

protected List<File> getExistingFiles(List<File> dirs) {
    List<File> result = new ArrayList<File>();
    for (File dir : dirs) {
        if (!dir.exists() || !dir.isDirectory()) {
            continue; // skip entries that are missing or not directories, rather than aborting the scan
        }
        File[] files = dir.listFiles((FileFilter) new SuffixFileFilter(".jar"));
        if (files != null) {
            Collections.addAll(result, files);
        }
    }
    return result;
}