Example usage for org.apache.hadoop.fs Path toUri

List of usage examples for org.apache.hadoop.fs Path toUri

Introduction

On this page you can find example usages of org.apache.hadoop.fs.Path.toUri().

Prototype

public URI toUri() 

Document

Convert this Path to a URI.
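
For orientation, here is a minimal, self-contained sketch of the pattern the examples below share: construct a Path, call toUri(), and pass the resulting URI to a FileSystem factory method. The file:///tmp/example path is a purely illustrative value, not taken from any of the source files.

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PathToUriExample {
    public static void main(String[] args) throws IOException {
        // Illustrative local path; any Hadoop Path works the same way.
        Path path = new Path("file:///tmp/example");

        // Convert the Path to a URI; its scheme and authority determine
        // which FileSystem implementation is resolved.
        URI uri = path.toUri();

        // Obtain a FileSystem for that URI and use the Path with it.
        FileSystem fs = FileSystem.get(uri, new Configuration());
        System.out.println("Scheme: " + uri.getScheme() + ", exists: " + fs.exists(path));
    }
}

Most of the examples below call FileSystem.newInstance(uri, conf) rather than FileSystem.get(uri, conf); newInstance returns an uncached FileSystem instance, which is safer when the caller closes the instance itself.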

Usage

From source file:com.datatorrent.lib.dedup.DeduperBucketEvictionTest.java

License:Open Source License

@AfterClass
public static void teardown() {
    Path root = new Path(applicationPath);
    try {
        FileSystem fs = FileSystem.newInstance(root.toUri(), new Configuration());
        fs.delete(root, true);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}

From source file:com.datatorrent.lib.dedup.DedupUsingCategoricalExpiryTest.java

License:Open Source License

@AfterClass
public static void teardown() {
    Path root = new Path(applicationPath);
    try {
        FileSystem fs = FileSystem.newInstance(root.toUri(), new Configuration());
        fs.delete(root, true);
        logger.debug("Deleted path: " + applicationPath);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}

From source file:com.datatorrent.lib.io.fs.FSInputModuleAppTest.java

License:Apache License

@Test
public void testApplication() throws Exception {
    app = new Application();
    Configuration conf = new Configuration(false);
    conf.set("dt.operator.hdfsInputModule.prop.files", inputDir);
    conf.set("dt.operator.hdfsInputModule.prop.blockSize", "10");
    conf.set("dt.operator.hdfsInputModule.prop.blocksThreshold", "4");
    conf.set("dt.operator.hdfsInputModule.prop.scanIntervalMillis", "10000");

    LocalMode lma = LocalMode.newInstance();
    lma.prepareDAG(app, conf);
    LocalMode.Controller lc = lma.getController();
    lc.setHeartbeatMonitoringEnabled(true);
    lc.runAsync();

    long now = System.currentTimeMillis();
    Path outDir = new Path("file://" + new File(outputDir).getAbsolutePath());
    FileSystem fs = FileSystem.newInstance(outDir.toUri(), new Configuration());
    while (!fs.exists(outDir) && System.currentTimeMillis() - now < 20000) {
        Thread.sleep(500);
        LOG.debug("Waiting for {}", outDir);
    }

    Thread.sleep(10000);
    lc.shutdown();

    Assert.assertTrue("output dir does not exist", fs.exists(outDir));

    File dir = new File(outputDir);
    FileFilter fileFilter = new WildcardFileFilter(OUT_METADATA_FILE + "*");
    verifyFileContents(dir.listFiles(fileFilter),
            "[fileName=file1.txt, numberOfBlocks=2, isDirectory=false, relativePath=input/file1.txt]");
    verifyFileContents(dir.listFiles(fileFilter),
            "[fileName=file2.txt, numberOfBlocks=6, isDirectory=false, relativePath=input/file2.txt]");
    verifyFileContents(dir.listFiles(fileFilter),
            "[fileName=dir, numberOfBlocks=0, isDirectory=true, relativePath=input/dir]");
    verifyFileContents(dir.listFiles(fileFilter),
            "[fileName=inner.txt, numberOfBlocks=2, isDirectory=false, relativePath=input/dir/inner.txt]");

    fileFilter = new WildcardFileFilter(OUT_DATA_FILE + "*");
    verifyFileContents(dir.listFiles(fileFilter), FILE_1_DATA);
    verifyFileContents(dir.listFiles(fileFilter), FILE_2_DATA);
}

From source file:com.datatorrent.lib.io.fs.HDFSInputModuleAppTest.java

License:Apache License

@Test
public void testApplication() throws Exception {
    app = new Application();
    Configuration conf = new Configuration(false);
    conf.set("dt.operator.hdfsInputModule.prop.files", inputDir);
    conf.set("dt.operator.hdfsInputModule.prop.blockSize", "10");
    conf.set("dt.operator.hdfsInputModule.prop.scanIntervalMillis", "10000");

    LocalMode lma = LocalMode.newInstance();
    lma.prepareDAG(app, conf);
    LocalMode.Controller lc = lma.getController();
    lc.setHeartbeatMonitoringEnabled(true);
    lc.runAsync();

    long now = System.currentTimeMillis();
    Path outDir = new Path("file://" + new File(outputDir).getAbsolutePath());
    FileSystem fs = FileSystem.newInstance(outDir.toUri(), new Configuration());
    while (!fs.exists(outDir) && System.currentTimeMillis() - now < 20000) {
        Thread.sleep(500);
        LOG.debug("Waiting for {}", outDir);
    }

    Thread.sleep(10000);
    lc.shutdown();

    Assert.assertTrue("output dir does not exist", fs.exists(outDir));

    File dir = new File(outputDir);
    FileFilter fileFilter = new WildcardFileFilter(OUT_METADATA_FILE + "*");
    verifyFileContents(dir.listFiles(fileFilter),
            "[fileName=file1.txt, numberOfBlocks=2, isDirectory=false, relativePath=input/file1.txt]");
    verifyFileContents(dir.listFiles(fileFilter),
            "[fileName=file2.txt, numberOfBlocks=6, isDirectory=false, relativePath=input/file2.txt]");
    verifyFileContents(dir.listFiles(fileFilter),
            "[fileName=dir, numberOfBlocks=0, isDirectory=true, relativePath=input/dir]");
    verifyFileContents(dir.listFiles(fileFilter),
            "[fileName=inner.txt, numberOfBlocks=2, isDirectory=false, relativePath=input/dir/inner.txt]");

    fileFilter = new WildcardFileFilter(OUT_DATA_FILE + "*");
    verifyFileContents(dir.listFiles(fileFilter), FILE_1_DATA);
    verifyFileContents(dir.listFiles(fileFilter), FILE_2_DATA);
}

From source file:com.datatorrent.lib.io.fs.S3InputModuleAppTest.java

License:Apache License

@Test
public void testS3Application() throws Exception {
    app = new S3InputModuleAppTest.Application();
    Configuration conf = new Configuration(false);
    conf.set("dt.operator.s3InputModule.prop.files", files);
    conf.set("dt.operator.s3InputModule.prop.blockSize", "10");
    conf.set("dt.operator.s3InputModule.prop.scanIntervalMillis", "10000");

    LocalMode lma = LocalMode.newInstance();
    lma.prepareDAG(app, conf);
    LocalMode.Controller lc = lma.getController();
    lc.setHeartbeatMonitoringEnabled(true);
    lc.runAsync();

    long now = System.currentTimeMillis();
    Path outDir = new Path("file://" + new File(outputDir).getAbsolutePath());
    FileSystem fs = FileSystem.newInstance(outDir.toUri(), new Configuration());
    while (!fs.exists(outDir) && System.currentTimeMillis() - now < 20000) {
        Thread.sleep(500);
        LOG.debug("Waiting for {}", outDir);
    }

    Thread.sleep(10000);
    lc.shutdown();

    Assert.assertTrue("output dir does not exist", fs.exists(outDir));

    File dir = new File(outputDir);
    FileFilter fileFilter = new WildcardFileFilter(OUT_METADATA_FILE + "*");
    verifyFileContents(dir.listFiles(fileFilter),
            "[fileName=file1.txt, numberOfBlocks=2, isDirectory=false, relativePath=input/file1.txt]");
    verifyFileContents(dir.listFiles(fileFilter),
            "[fileName=file2.txt, numberOfBlocks=6, isDirectory=false, relativePath=input/file2.txt]");

    fileFilter = new WildcardFileFilter(OUT_DATA_FILE + "*");
    verifyFileContents(dir.listFiles(fileFilter), FILE_1_DATA);
    verifyFileContents(dir.listFiles(fileFilter), FILE_2_DATA);
}

From source file:com.datatorrent.lib.io.HdfsOutputOperator.java

License:Open Source License

/**
 * @param context the operator context
 */
@Override
public void setup(OperatorContext context) {
    this.contextId = context.getId();
    try {
        Path filepath = subFilePath(this.fileIndex);
        fs = FileSystem.get(filepath.toUri(), new Configuration());

        if (bytesPerFile > 0) {
            // ensure file path generates unique names
            Path p1 = subFilePath(1);
            Path p2 = subFilePath(2);
            if (p1.equals(p2)) {
                throw new IllegalArgumentException(
                        "Rolling files require %() placeholders for unique names: " + filepath);
            }
        }
        openFile(filepath);
    } catch (IOException ex) {
        throw new RuntimeException(ex);
    }
}

From source file:com.datatorrent.lib.io.IdempotentStorageManagerTest.java

License:Open Source License

@Test
public void testDelete() throws IOException {
    Map<Integer, String> dataOf1 = Maps.newHashMap();
    dataOf1.put(1, "one");
    dataOf1.put(2, "two");
    dataOf1.put(3, "three");

    Map<Integer, String> dataOf2 = Maps.newHashMap();
    dataOf2.put(4, "four");
    dataOf2.put(5, "five");
    dataOf2.put(6, "six");

    Map<Integer, String> dataOf3 = Maps.newHashMap();
    dataOf3.put(7, "seven");
    dataOf3.put(8, "eight");
    dataOf3.put(9, "nine");

    testMeta.storageManager.save(dataOf1, 1, 1);
    testMeta.storageManager.save(dataOf2, 2, 1);
    testMeta.storageManager.save(dataOf3, 3, 1);

    testMeta.storageManager.partitioned(Lists.<IdempotentStorageManager>newArrayList(testMeta.storageManager),
            Sets.newHashSet(2, 3));
    testMeta.storageManager.setup(testMeta.context);
    testMeta.storageManager.deleteUpTo(1, 1);

    Path appPath = new Path(testMeta.recoveryPath + '/' + testMeta.context.getValue(DAG.APPLICATION_ID));
    FileSystem fs = FileSystem.newInstance(appPath.toUri(), new Configuration());
    Assert.assertEquals("no data for 1", 0, fs.listStatus(new Path(appPath, Integer.toString(1))).length);
    Assert.assertEquals("no data for 2", false, fs.exists(new Path(appPath, Integer.toString(2))));
    Assert.assertEquals("no data for 3", false, fs.exists(new Path(appPath, Integer.toString(3))));
}

From source file:com.datatorrent.lib.parser.XmlParser.java

License:Apache License

@Override
public void setup(com.datatorrent.api.Context.OperatorContext context) {
    try {
        JAXBContext ctx = JAXBContext.newInstance(getClazz());
        unmarshaller = ctx.createUnmarshaller();
        if (schemaXSDFile != null) {
            Path filePath = new Path(schemaXSDFile);
            Configuration configuration = new Configuration();
            FileSystem fs = FileSystem.newInstance(filePath.toUri(), configuration);
            FSDataInputStream inputStream = fs.open(filePath);

            SchemaFactory factory = SchemaFactory.newInstance(XMLConstants.W3C_XML_SCHEMA_NS_URI);
            Schema schema = factory.newSchema(new StreamSource(inputStream));
            unmarshaller.setSchema(schema);
            validator = schema.newValidator();
            fs.close();
        }
    } catch (SAXException e) {
        DTThrowable.wrapIfChecked(e);
    } catch (JAXBException e) {
        DTThrowable.wrapIfChecked(e);
    } catch (IOException e) {
        DTThrowable.wrapIfChecked(e);
    }
}

From source file:com.datatorrent.lib.util.FSStorageAgent.java

License:Open Source License

public FSStorageAgent(String path, Configuration conf) {
    this.path = path;
    try {
        logger.debug("Initialize storage agent with {}.", path);
        Path lPath = new Path(path);
        fs = FileSystem.newInstance(lPath.toUri(), conf == null ? new Configuration() : conf);

        try {
            if (fs.mkdirs(lPath)) {
                fs.setWorkingDirectory(lPath);
            }
        } catch (IOException e) {
            // some file system (MapR) throw exception if folder exists
            if (!fs.exists(lPath)) {
                throw e;
            }
        }
    } catch (IOException ex) {
        throw new RuntimeException(ex);
    }
}

From source file:com.datatorrent.lib.util.FSWindowDataManagerTest.java

License:Apache License

@Test
public void testDelete() throws IOException {
    testMeta.storageManager.setup(testMeta.context);
    Map<Integer, String> dataOf1 = Maps.newHashMap();
    dataOf1.put(1, "one");
    dataOf1.put(2, "two");
    dataOf1.put(3, "three");

    Map<Integer, String> dataOf2 = Maps.newHashMap();
    dataOf2.put(4, "four");
    dataOf2.put(5, "five");
    dataOf2.put(6, "six");

    Map<Integer, String> dataOf3 = Maps.newHashMap();
    dataOf3.put(7, "seven");
    dataOf3.put(8, "eight");
    dataOf3.put(9, "nine");

    for (int i = 1; i <= 9; ++i) {
        testMeta.storageManager.save(dataOf1, 1, i);
    }

    testMeta.storageManager.save(dataOf2, 2, 1);
    testMeta.storageManager.save(dataOf3, 3, 1);

    testMeta.storageManager.partitioned(Lists.<WindowDataManager>newArrayList(testMeta.storageManager),
            Sets.newHashSet(2, 3));
    testMeta.storageManager.setup(testMeta.context);
    testMeta.storageManager.deleteUpTo(1, 6);

    Path appPath = new Path(testMeta.applicationPath + '/' + testMeta.storageManager.getRecoveryPath());
    FileSystem fs = FileSystem.newInstance(appPath.toUri(), new Configuration());
    FileStatus[] fileStatuses = fs.listStatus(new Path(appPath, Integer.toString(1)));
    Assert.assertEquals("number of windows for 1", 3, fileStatuses.length);
    TreeSet<String> windows = Sets.newTreeSet();
    for (FileStatus fileStatus : fileStatuses) {
        windows.add(fileStatus.getPath().getName());
    }
    Assert.assertEquals("window list for 1", Sets.newTreeSet(Arrays.asList("7", "8", "9")), windows);
    Assert.assertEquals("no data for 2", false, fs.exists(new Path(appPath, Integer.toString(2))));
    Assert.assertEquals("no data for 3", false, fs.exists(new Path(appPath, Integer.toString(3))));
    testMeta.storageManager.teardown();
}