Example usage for org.apache.hadoop.hdfs DistributedFileSystem DistributedFileSystem

Introduction

This page collects example usages of the no-argument constructor of org.apache.hadoop.hdfs.DistributedFileSystem.

Prototype

public DistributedFileSystem() 
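
The constructor returns an uninitialized instance: initialize(URI, Configuration) must be called before any filesystem operation, as the HdfsClient example below does. A minimal sketch, assuming a placeholder NameNode address hdfs://namenode:8020:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class DistributedFileSystemExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // The no-argument constructor does not connect to anything yet.
        DistributedFileSystem fs = new DistributedFileSystem();
        try {
            // initialize() binds the instance to a NameNode; the URI here is a placeholder.
            fs.initialize(URI.create("hdfs://namenode:8020"), conf);
            for (FileStatus status : fs.listStatus(new Path("/"))) {
                System.out.println(status.getPath());
            }
        } finally {
            fs.close();
        }
    }
}

In practice most callers obtain the instance through FileSystem.get(conf) instead, which performs this initialization automatically; the explicit constructor is useful when the code must be certain it is talking to HDFS.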

Usage

From source file: com.ebay.jetstream.event.processor.hdfs.HdfsClient.java

License: MIT License

protected void initHdfs() {
    hdpConf = new Configuration();
    final String hdfsUrl = config.getHdfsUrl();
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(config.getUser());

    try {
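        // doAs applies the remote user's identity to everything in the action below.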
        ugi.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                hdpConf.set("hadoop.job.ugi", config.getUser());
                hdpConf.set("fs.defaultFS", hdfsUrl);
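                // hdfs:// URLs get an explicitly initialized DistributedFileSystem;
                // any other scheme falls back to the generic FileSystem.get lookup.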
                if (hdfsUrl.startsWith("hdfs")) {
                    for (Object keyObj : config.getHadoopProperties().keySet()) {
                        String key = (String) keyObj;
                        hdpConf.set(key, config.getHadoopProperties().getProperty(key));
                    }
                    fs = new DistributedFileSystem();
                    fs.initialize(URI.create(hdfsUrl), hdpConf);
                } else {
                    fs = FileSystem.get(hdpConf);
                }
                LOGGER.log(Level.INFO, "Connected to HDFS with the following properties: hdfsUrl " + hdfsUrl);
                return null;
            }

        });
    } catch (Exception e) {
        LOGGER.log(Level.SEVERE, "Error initializing HdfsClient. Error:" + e);
    }
}

From source file: com.metamx.druid.indexer.HadoopDruidIndexerConfigTest.java

License: Open Source License

@Test
public void shouldMakeHDFSCompliantSegmentOutputPath() {
    final HadoopDruidIndexerConfig cfg;

    try {
        cfg = jsonReadWriteRead(
                "{" + "\"dataSource\": \"source\"," + " \"granularitySpec\":{" + "   \"type\":\"uniform\","
                        + "   \"gran\":\"hour\"," + "   \"intervals\":[\"2012-07-10/P1D\"]" + " },"
                        + "\"segmentOutputPath\": \"hdfs://server:9100/tmp/druid/datatest\"" + "}",
                HadoopDruidIndexerConfig.class);
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }

    cfg.setVersion("some:brand:new:version");

    Bucket bucket = new Bucket(4711, new DateTime(2012, 07, 10, 5, 30), 4712);
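    // The DistributedFileSystem is never initialized here; the test only exercises
    // the HDFS-specific escaping, turning the colons in the version into underscores.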
    Path path = cfg.makeSegmentOutputPath(new DistributedFileSystem(), bucket);
    Assert.assertEquals(
            "hdfs://server:9100/tmp/druid/datatest/source/20120710T050000.000Z_20120710T060000.000Z/some_brand_new_version/4712",
            path.toString());
}

From source file: gobblin.util.filesystem.InstrumentedHDFSFileSystem.java

License: Apache License

public InstrumentedHDFSFileSystem() {
    super(SCHEME, new DistributedFileSystem());
}

From source file: io.druid.indexer.HadoopDruidIndexerConfigTest.java

License: Apache License

@Test
public void shouldMakeHDFSCompliantSegmentOutputPath() {
    HadoopIngestionSpec schema;

    try {
        schema = jsonReadWriteRead("{\n" + "    \"dataSchema\": {\n" + "        \"dataSource\": \"source\",\n"
                + "        \"metricsSpec\": [],\n" + "        \"granularitySpec\": {\n"
                + "            \"type\": \"uniform\",\n" + "            \"segmentGranularity\": \"hour\",\n"
                + "            \"intervals\": [\"2012-07-10/P1D\"]\n" + "        }\n" + "    },\n"
                + "    \"ioConfig\": {\n" + "        \"type\": \"hadoop\",\n"
                + "        \"segmentOutputPath\": \"hdfs://server:9100/tmp/druid/datatest\"\n" + "    }\n"
                + "}", HadoopIngestionSpec.class);
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }

    HadoopDruidIndexerConfig cfg = new HadoopDruidIndexerConfig(
            schema.withTuningConfig(schema.getTuningConfig().withVersion("some:brand:new:version")));

    Bucket bucket = new Bucket(4711, new DateTime(2012, 07, 10, 5, 30), 4712);
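    // As in the previous test, the uninitialized DistributedFileSystem merely selects
    // the HDFS naming rules inside JobHelper.makeSegmentOutputPath.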
    Path path = JobHelper.makeSegmentOutputPath(new Path(cfg.getSchema().getIOConfig().getSegmentOutputPath()),
            new DistributedFileSystem(), cfg.getSchema().getDataSchema().getDataSource(),
            cfg.getSchema().getTuningConfig().getVersion(),
            cfg.getSchema().getDataSchema().getGranularitySpec().bucketInterval(bucket.time).get(),
            bucket.partitionNum);
    Assert.assertEquals(
            "hdfs://server:9100/tmp/druid/datatest/source/20120710T050000.000Z_20120710T060000.000Z/some_brand_new_version/4712",
            path.toString());
}

From source file: io.druid.storage.hdfs.HdfsDataSegmentPusherTest.java

License: Apache License

@Test
public void shouldMakeHDFSCompliantSegmentOutputPath() {
    HadoopIngestionSpec schema;

    try {
        schema = objectMapper.readValue("{\n" + "    \"dataSchema\": {\n"
                + "        \"dataSource\": \"source\",\n" + "        \"metricsSpec\": [],\n"
                + "        \"granularitySpec\": {\n" + "            \"type\": \"uniform\",\n"
                + "            \"segmentGranularity\": \"hour\",\n"
                + "            \"intervals\": [\"2012-07-10/P1D\"]\n" + "        }\n" + "    },\n"
                + "    \"ioConfig\": {\n" + "        \"type\": \"hadoop\",\n"
                + "        \"segmentOutputPath\": \"hdfs://server:9100/tmp/druid/datatest\"\n" + "    }\n"
                + "}", HadoopIngestionSpec.class);
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }

    //DataSchema dataSchema = new DataSchema("dataSource", null, null, Gra)
    //schema = new HadoopIngestionSpec(dataSchema, ioConfig, HadoopTuningConfig.makeDefaultTuningConfig());
    HadoopDruidIndexerConfig cfg = new HadoopDruidIndexerConfig(
            schema.withTuningConfig(schema.getTuningConfig().withVersion("some:brand:new:version")));

    Bucket bucket = new Bucket(4711, new DateTime(2012, 07, 10, 5, 30), 4712);
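    // Same pattern again, applied to the index.zip, descriptor.json, and temporary
    // attempt-path variants produced by makeFileNamePath and makeTmpPath.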
    Path path = JobHelper
            .makeFileNamePath(new Path(cfg.getSchema().getIOConfig().getSegmentOutputPath()),
                    new DistributedFileSystem(),
                    new DataSegment(cfg.getSchema().getDataSchema().getDataSource(),
                            cfg.getSchema().getDataSchema().getGranularitySpec().bucketInterval(bucket.time)
                                    .get(),
                            cfg.getSchema().getTuningConfig().getVersion(), null, null, null,
                            new NumberedShardSpec(bucket.partitionNum, 5000), -1, -1),
                    JobHelper.INDEX_ZIP, hdfsDataSegmentPusher);
    Assert.assertEquals(
            "hdfs://server:9100/tmp/druid/datatest/source/20120710T050000.000Z_20120710T060000.000Z/some_brand_new_version"
                    + "/4712_index.zip",
            path.toString());

    path = JobHelper
            .makeFileNamePath(new Path(cfg.getSchema().getIOConfig().getSegmentOutputPath()),
                    new DistributedFileSystem(),
                    new DataSegment(cfg.getSchema().getDataSchema().getDataSource(),
                            cfg.getSchema().getDataSchema().getGranularitySpec().bucketInterval(bucket.time)
                                    .get(),
                            cfg.getSchema().getTuningConfig().getVersion(), null, null, null,
                            new NumberedShardSpec(bucket.partitionNum, 5000), -1, -1),
                    JobHelper.DESCRIPTOR_JSON, hdfsDataSegmentPusher);
    Assert.assertEquals(
            "hdfs://server:9100/tmp/druid/datatest/source/20120710T050000.000Z_20120710T060000.000Z/some_brand_new_version"
                    + "/4712_descriptor.json",
            path.toString());

    path = JobHelper
            .makeTmpPath(new Path(cfg.getSchema().getIOConfig().getSegmentOutputPath()),
                    new DistributedFileSystem(),
                    new DataSegment(cfg.getSchema().getDataSchema().getDataSource(),
                            cfg.getSchema().getDataSchema().getGranularitySpec().bucketInterval(bucket.time)
                                    .get(),
                            cfg.getSchema().getTuningConfig().getVersion(), null, null, null,
                            new NumberedShardSpec(bucket.partitionNum, 5000), -1, -1),
                    new TaskAttemptID("abc", 123, TaskType.REDUCE, 1, 0), hdfsDataSegmentPusher);
    Assert.assertEquals(
            "hdfs://server:9100/tmp/druid/datatest/source/20120710T050000.000Z_20120710T060000.000Z/some_brand_new_version"
                    + "/4712_index.zip.0",
            path.toString());

}

From source file: org.apache.carbondata.core.carbon.datastorage.filesystem.AlluxioCarbonFileTest.java

License: Apache License

@Test
public void testListFilesForNullListStatus() {
    alluxioCarbonFile = new AlluxioCarbonFile(fileStatusWithOutDirectoryPermission);
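    // JMockit MockUps: Path.getFileSystem is redirected to a DistributedFileSystem,
    // whose listStatus is then stubbed to return null; listFiles() must still
    // return an empty array rather than fail.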
    new MockUp<Path>() {
        @Mock
        public FileSystem getFileSystem(Configuration conf) throws IOException {
            return new DistributedFileSystem();
        }

    };
    new MockUp<DistributedFileSystem>() {
        @Mock
        public FileStatus[] listStatus(Path var1) throws IOException {

            return null;
        }

    };
    alluxioCarbonFile = new AlluxioCarbonFile(fileStatus);
    assertTrue(alluxioCarbonFile.listFiles().length == 0);
}

From source file: org.apache.carbondata.core.carbon.datastorage.filesystem.AlluxioCarbonFileTest.java

License: Apache License

@Test
public void testListDirectory() {
    alluxioCarbonFile = new AlluxioCarbonFile(fileStatus);
    new MockUp<Path>() {
        @Mock
        public FileSystem getFileSystem(Configuration conf) throws IOException {
            return new DistributedFileSystem();
        }

    };
    new MockUp<DistributedFileSystem>() {
        @Mock
        public FileStatus[] listStatus(Path var1) throws IOException {

            FileStatus[] fileStatus = new FileStatus[] {
                    new FileStatus(12L, true, 60, 120L, 180L, new Path(fileName)) };
            return fileStatus;
        }

    };

    assertTrue(alluxioCarbonFile.listFiles().length == 1);
}

From source file: org.apache.carbondata.core.carbon.datastorage.filesystem.AlluxioCarbonFileTest.java

License: Apache License

@Test
public void testlistFilesWithoutFilter() {
    CarbonFileFilter carbonFileFilter = new CarbonFileFilter() {

        @Override
        public boolean accept(CarbonFile file) {
            return false;
        }
    };
    new MockUp<Path>() {
        @Mock
        public FileSystem getFileSystem(Configuration conf) throws IOException {
            return new DistributedFileSystem();
        }

    };
    new MockUp<DistributedFileSystem>() {
        @Mock
        public FileStatus[] listStatus(Path var1) throws IOException {

            FileStatus[] fileStatus = new FileStatus[] {
                    new FileStatus(12L, true, 60, 120L, 180L, new Path(fileName)) };
            return fileStatus;
        }

    };
    alluxioCarbonFile = new AlluxioCarbonFile(fileStatus);
    assertTrue(alluxioCarbonFile.listFiles(carbonFileFilter).length == 0);
}

From source file: org.apache.carbondata.core.carbon.datastorage.filesystem.AlluxioCarbonFileTest.java

License: Apache License

@Test
public void testGetParentFile() {
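    // The chain Path.getFileSystem, Path.getParent, FileStatus.getPath and
    // DistributedFileSystem.getFileStatus is mocked so getParentFile() can resolve
    // a parent without a live cluster.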
    new MockUp<Path>() {
        @Mock
        public FileSystem getFileSystem(Configuration conf) throws IOException {
            return new DistributedFileSystem();
        }

    };
    new MockUp<Path>() {
        @Mock
        public Path getParent() {
            return new Path(file.getAbsolutePath());
        }

    };
    new MockUp<FileStatus>() {
        @Mock
        public Path getPath() {
            return new Path(file.getAbsolutePath());
        }

    };
    new MockUp<DistributedFileSystem>() {
        @Mock
        public FileStatus getFileStatus(Path path) throws IOException {

            return new FileStatus(12L, true, 60, 120L, 180L, new Path(file.getAbsolutePath()));
        }

    };

    alluxioCarbonFile = new AlluxioCarbonFile(fileStatus);
    assertFalse(alluxioCarbonFile.getParentFile().equals(null));
}

From source file: org.apache.carbondata.core.carbon.datastorage.filesystem.AlluxioCarbonFileTest.java

License: Apache License

@Test
public void testRenameForceForDistributedSystem() {
    new MockUp<Path>() {
        @Mock
        public FileSystem getFileSystem(Configuration conf) throws IOException {
            return new DistributedFileSystem();
        }

    };
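    // rename(Path, Path, Options.Rename...) is stubbed to a no-op, so renameForce
    // reports success without touching a real filesystem.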
    new MockUp<DistributedFileSystem>() {
        @Mock
        public void rename(Path src, Path dst, final Options.Rename... options) throws IOException {

        }

    };

    alluxioCarbonFile = new AlluxioCarbonFile(fileStatus);
    assertTrue(alluxioCarbonFile.renameForce(fileName));

}