Example usage for org.apache.commons.io FileUtils toFile

Introduction

This page collects example usages of org.apache.commons.io.FileUtils.toFile.

Prototype

public static File toFile(URL url) 

Document

Convert from a URL to a File.
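
A minimal sketch of the call, assuming a classpath resource named /config.properties exists (the resource name is hypothetical):

import java.io.File;
import java.net.URL;
import org.apache.commons.io.FileUtils;

public class ToFileSketch {
    public static void main(String[] args) {
        // Resolve a classpath resource to a URL (hypothetical resource name).
        URL url = ToFileSketch.class.getResource("/config.properties");

        // Convert the file: URL to a File. toFile returns null if the URL is null
        // or if its protocol is not file: (e.g. a resource inside a JAR).
        File file = FileUtils.toFile(url);
        System.out.println(file);
    }
}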

Usage

From source file:de.dfki.km.perspecting.obie.experiments.ProperNameExperiment.java

/**
 * Test method for
 * {@link de.dfki.km.perspecting.obie.dixi.service.SimpleScobieService#extractInformationFromURL(java.lang.String, java.lang.String)}
 * .
 */
@Test
public void testExtractInformationFromURL() {
    try {
        StringBuffer b = new StringBuffer();

        for (int i = 0; i < 1; i++) {

            Document document = pipeline.createDocument(
                    FileUtils.toFile(new URL("http://en.wikipedia.org/wiki/Special:Random")),
                    new URI("http://en.wikipedia.org/wiki/Special:Random"), MediaType.HTML,
                    "SELECT * WHERE {?s ?p ?o}", Language.EN);

            Evaluator evaluator = new Evaluator(pipeline);

            for (int step = 0; pipeline.hasNext(step) && step <= 5; step = pipeline.execute(step, document)) {
                System.out.println(step);
            }

            HashSet<String> wordsOfPhrases = new HashSet<String>();
            HashSet<String> wordsOfDocument = new HashSet<String>();

            for (Token token : document.getTokens()) {
                wordsOfDocument.add(token.toString());
            }

            int count = 0;
            for (TokenSequence<String> np : document.getNounPhrases()) {
                String[] words = np.toString().split("[\\s]+");
                count += words.length;
                wordsOfPhrases.addAll(Arrays.asList(words));
            }

            b.append(document.getTokens().size() + "\t" + document.getNounPhrases().size() + "\t" + count + "\t"
                    + wordsOfPhrases.size() + "\t" + wordsOfDocument.size() + "\n");

        }
        System.out.println("tok in doc\tnp in doc\ttok in nps\tdistinct tok in nps\tdistinct tok in doc");
        System.out.println(b);
    } catch (Exception e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }

}

From source file:com.itemanalysis.psychometrics.irt.estimation.ItemResponseFileSummary.java

private ItemResponseVector[] readTapData() {
    byte[][] tap = new byte[35][18];
    try {
        File f = FileUtils.toFile(this.getClass().getResource("/testdata/tap-data.txt"));
        BufferedReader br = new BufferedReader(new FileReader(f));
        String line = "";
        String[] s = null;
        int row = 0;
        while ((line = br.readLine()) != null) {
            s = line.split(",");
            for (int j = 0; j < s.length; j++) {
                tap[row][j] = Byte.parseByte(s[j]);
            }
            row++;
        }
        br.close();

    } catch (IOException ex) {
        ex.printStackTrace();
    }

    Frequency freq = new Frequency();
    for (int i = 0; i < tap.length; i++) {
        freq.addValue(Arrays.toString(tap[i]));
    }

    ItemResponseVector[] responseData = new ItemResponseVector[freq.getUniqueCount()];
    ItemResponseVector irv = null;
    Iterator<Comparable<?>> iter = freq.valuesIterator();
    int index = 0;

    //create array of ItemResponseVector objects
    while (iter.hasNext()) {
        //get response string from frequency summary and convert to byte array
        Comparable<?> value = iter.next();
        String s = value.toString();
        s = s.substring(1, s.lastIndexOf("]"));
        String[] sa = s.split(",");
        byte[] rv = new byte[sa.length];
        for (int i = 0; i < sa.length; i++) {
            rv[i] = Byte.parseByte(sa[i].trim());
        }

        //create response vector objects
        irv = new ItemResponseVector(rv, Long.valueOf(freq.getCount(value)).doubleValue());
        responseData[index] = irv;
        index++;
    }
    //        //display results of summary
    //        for(int i=0;i<responseData.length;i++){
    //            System.out.println(responseData[i].toString() + ": " + responseData[i].getFrequency());
    //        }

    return responseData;
}

From source file:de.dfki.km.perspecting.obie.experiments.PhraseExperiment.java

/**
 * Test method for
 * {@link de.dfki.km.perspecting.obie.dixi.service.SimpleScobieService#extractInformationFromURL(java.lang.String, java.lang.String)}
 * .
 */
@Test
public void analyseTokenPhraseFrequencies() {
    final String template = "SELECT * WHERE {?s ?p ?o}";

    try {
        final BufferedWriter bw = new BufferedWriter(
                new FileWriter($SCOOBIE_HOME + "results/token_phrase_frequency_wikipedia.csv"));

        final String randomWikipediaPage = "http://en.wikipedia.org/wiki/Special:Random";

        bw.append("tok in doc\tnp in doc\ttok in nps\tdistinct tok in nps\tdistinct tok in doc");
        for (int i = 0; i < 100; i++) {

            Document document = pipeline.createDocument(FileUtils.toFile(new URL(randomWikipediaPage)),
                    new URI(randomWikipediaPage), MediaType.HTML, template, Language.EN);

            for (int step = 0; pipeline.hasNext(step) && step <= 5; step = pipeline.execute(step, document)) {
                System.out.println(step);
            }

            HashSet<String> wordsOfPhrases = new HashSet<String>();
            HashSet<String> wordsOfDocument = new HashSet<String>();

            for (Token token : document.getTokens()) {
                wordsOfDocument.add(token.toString());
            }

            int count = 0;
            for (TokenSequence<String> np : document.getNounPhrases()) {
                String[] words = np.toString().split("[\\s]+");
                count += words.length;
                wordsOfPhrases.addAll(Arrays.asList(words));
            }

            bw.append(document.getTokens().size() + "\t" + document.getNounPhrases().size() + "\t" + count
                    + "\t" + wordsOfPhrases.size() + "\t" + wordsOfDocument.size());
            bw.newLine();

        }
        bw.close();
    } catch (Exception e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }

    try {
        final BufferedWriter bw = new BufferedWriter(
                new FileWriter($SCOOBIE_HOME + "results/token_phrase_frequency_reuters.csv"));

        final TextCorpus corpus = new TextCorpus(new File("../corpora/reuters/reuters.zip"), MediaType.ZIP,
                MediaType.HTML, Language.EN);

        bw.append("tok in doc\tnp in doc\ttok in nps\tdistinct tok in nps\tdistinct tok in doc");

        corpus.forEach(new DocumentProcedure<URI>() {

            @Override
            public URI process(Reader reader, URI uri) throws Exception {

                Document document = pipeline.createDocument(reader, uri, corpus.getMediatype(), template,
                        corpus.getLanguage());

                for (int step = 0; pipeline.hasNext(step)
                        && step <= 5; step = pipeline.execute(step, document)) {
                    System.out.println(step);
                }

                HashSet<String> wordsOfPhrases = new HashSet<String>();
                HashSet<String> wordsOfDocument = new HashSet<String>();

                for (Token token : document.getTokens()) {
                    wordsOfDocument.add(token.toString());
                }

                int count = 0;
                for (TokenSequence<String> np : document.getNounPhrases()) {
                    String[] words = np.toString().split("[\\s]+");
                    count += words.length;
                    wordsOfPhrases.addAll(Arrays.asList(words));
                }

                bw.append(document.getTokens().size() + "\t" + document.getNounPhrases().size() + "\t" + count
                        + "\t" + wordsOfPhrases.size() + "\t" + wordsOfDocument.size());
                bw.newLine();
                return uri;
            }
        });

        bw.close();
    } catch (Exception e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }

}

From source file:net.sf.eclipsecs.core.config.configtypes.ConfigurationType.java

/**
 * Gets the property resolver for this configuration type used to expand
 * property values within the checkstyle configuration.
 *
 * @param config
 *            the actual check configuration
 * @param configFile
 *            the Checkstyle configuration file
 * @return the property resolver
 * @throws IOException
 *             error creating the property resolver
 */
protected PropertyResolver getPropertyResolver(ICheckConfiguration config,
        CheckstyleConfigurationFile configFile) throws IOException {

    MultiPropertyResolver multiResolver = new MultiPropertyResolver();
    multiResolver.addPropertyResolver(new ResolvablePropertyResolver(config));

    File f = FileUtils.toFile(configFile.getResolvedConfigFileURL());
    if (f != null) {
        multiResolver.addPropertyResolver(new StandardPropertyResolver(f.toString()));
    } else {
        multiResolver.addPropertyResolver(
                new StandardPropertyResolver(configFile.getResolvedConfigFileURL().toString()));
    }

    multiResolver.addPropertyResolver(new ClasspathVariableResolver());
    multiResolver.addPropertyResolver(new SystemPropertyResolver());

    if (configFile.getAdditionalPropertiesBundleStream() != null) {
        ResourceBundle bundle = new PropertyResourceBundle(configFile.getAdditionalPropertiesBundleStream());
        multiResolver.addPropertyResolver(new ResourceBundlePropertyResolver(bundle));
    }

    return multiResolver;
}
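
The null check above follows the documented behavior of toFile: it returns null when the URL's protocol is not file: (for example, a configuration file resolved from inside a plugin JAR or over HTTP), in which case the code falls back to the URL string. A minimal sketch of that distinction, using hypothetical URLs:

import java.io.File;
import java.net.URL;
import org.apache.commons.io.FileUtils;

public class ToFileNullSketch {
    public static void main(String[] args) throws Exception {
        // A file: URL maps onto the local file system (path is hypothetical).
        File local = FileUtils.toFile(new URL("file:/tmp/checkstyle.xml"));
        System.out.println(local);   // /tmp/checkstyle.xml

        // A non-file URL has no File representation, so toFile returns null.
        File remote = FileUtils.toFile(new URL("http://example.org/checkstyle.xml"));
        System.out.println(remote);  // null
    }
}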

From source file:net.pms.dlna.DLNAMediaSubtitleTest.java

@Test
public void testSetExternalFile_bitmapSubs() throws Exception {
    File file_cp1251 = FileUtils.toFile(CLASS.getResource("../util/russian-cp1251.srt"));

    DLNAMediaSubtitle sub1 = new DLNAMediaSubtitle();
    sub1.setType(VOBSUB);
    sub1.setExternalFile(file_cp1251);
    assertThat(sub1.getExternalFileCharacterSet()).isNull();

    DLNAMediaSubtitle sub2 = new DLNAMediaSubtitle();
    sub2.setType(BMP);
    sub2.setExternalFile(file_cp1251);
    assertThat(sub2.getExternalFileCharacterSet()).isNull();

    DLNAMediaSubtitle sub3 = new DLNAMediaSubtitle();
    sub3.setType(DIVX);
    sub3.setExternalFile(file_cp1251);
    assertThat(sub3.getExternalFileCharacterSet()).isNull();

    DLNAMediaSubtitle sub4 = new DLNAMediaSubtitle();
    sub4.setType(PGS);
    sub4.setExternalFile(file_cp1251);
    assertThat(sub4.getExternalFileCharacterSet()).isNull();
}

From source file:net.pms.util.FileUtilTest.java

@Test
public void testConvertFileFromUtf16ToUtf8_inputFileIsUTF16LE() throws Exception {
    File file_utf8le = FileUtils.toFile(CLASS.getResource("russian-utf16-le.srt"));
    File outputFile = new File(file_utf8le.getParentFile(), "output-utf8-from-utf16-le.srt");
    outputFile.delete();
    FileUtil.convertFileFromUtf16ToUtf8(file_utf8le, outputFile);
    File file_utf8 = FileUtils.toFile(CLASS.getResource("russian-utf8-without-bom.srt"));
    assertThat(FileUtils.contentEquals(outputFile, file_utf8)).isTrue();
    outputFile.delete();
}

From source file:net.pms.dlna.DLNAMediaSubtitleTest.java

@Test
public void testSetExternalFile_textSubs() throws Exception {
    File file_cp1251 = FileUtils.toFile(CLASS.getResource("../util/russian-cp1251.srt"));

    DLNAMediaSubtitle sub1 = new DLNAMediaSubtitle();
    sub1.setType(SUBRIP);
    sub1.setExternalFile(file_cp1251);
    assertThat(sub1.getExternalFileCharacterSet()).isEqualTo(CHARSET_WINDOWS_1251);

    DLNAMediaSubtitle sub2 = new DLNAMediaSubtitle();
    sub2.setType(ASS);
    sub2.setExternalFile(file_cp1251);
    assertThat(sub2.getExternalFileCharacterSet()).isEqualTo(CHARSET_WINDOWS_1251);
}

From source file:com.dhenton9000.filedownloader.FileDownloader.java

/**
* Convert a classpath reference to a file on the file system.
* @param path the classpath reference to resolve
* @return the corresponding file
* @throws java.io.FileNotFoundException if the file at the path does
* not exist
*/
public File convertClassPathToFileRef(String path) throws FileNotFoundException {

    if (this.getClass().getResource(path) != null)

        return new File(FileUtils.toFile(getClass().getResource(path)).getAbsolutePath());
    else {
        String info = String.format("unable to find file at '%s'", path);
        throw new FileNotFoundException(info);
    }
}

From source file:com.izforge.izpack.compiler.packager.impl.Packager.java

/**
 * Write manifest in the install jar.
 */
@Override
public void writeManifest() throws IOException {
    IXMLElement data = resourceFinder.getXMLTree();
    IXMLElement guiPrefsElement = data.getFirstChildNamed("guiprefs");
    // Add splash screen configuration
    List<String> lines = IOUtils.readLines(getClass().getResourceAsStream("MANIFEST.MF"));
    IXMLElement splashNode = guiPrefsElement.getFirstChildNamed("splash");
    if (splashNode != null) {
        // Add splash image to installer jar
        File splashImage = FileUtils
                .toFile(resourceFinder.findProjectResource(splashNode.getContent(), "Resource", splashNode));
        String destination = String.format("META-INF/%s", splashImage.getName());
        mergeManager.addResourceToMerge(splashImage.getAbsolutePath(), destination);
        lines.add(String.format("SplashScreen-Image: %s", destination));
    }
    lines.add("");
    File tempManifest = com.izforge.izpack.util.file.FileUtils.createTempFile("MANIFEST", ".MF");
    FileUtils.writeLines(tempManifest, lines);
    mergeManager.addResourceToMerge(tempManifest.getAbsolutePath(), "META-INF/MANIFEST.MF");
}

From source file:net.pms.util.FileUtilTest.java

@Test
public void testConvertFileFromUtf16ToUtf8_inputFileIsUTF16BE() throws Exception {
    File file_utf8be = FileUtils.toFile(CLASS.getResource("russian-utf16-be.srt"));
    File outputFile = new File(file_utf8be.getParentFile(), "output-utf8-from-utf16-be.srt");
    outputFile.delete();
    FileUtil.convertFileFromUtf16ToUtf8(file_utf8be, outputFile);
    File file_utf8 = FileUtils.toFile(CLASS.getResource("russian-utf8-with-bom.srt"));
    assertThat(FileUtils.contentEquals(outputFile, file_utf8)).isTrue();
    outputFile.delete();
}