Example usage for org.apache.hadoop.hdfs HdfsConfiguration HdfsConfiguration

List of usage examples for org.apache.hadoop.hdfs HdfsConfiguration HdfsConfiguration

Introduction

In this page you can find the example usage for org.apache.hadoop.hdfs HdfsConfiguration HdfsConfiguration.

Prototype

public HdfsConfiguration() 

Source Link

Usage

From source file:INotifyUtil.java

License:Apache License

/**
 * Poll HDFS inotify events and print each one to stdout.
 * Runs until the polling thread is interrupted (Ctrl + C).
 * @param args the parameter is not used.
 * @throws IOException if configuration error or I/O error happens.
 */
public static void main(String args[]) throws IOException {
    Configuration conf = new HdfsConfiguration();
    // DFSClient is Closeable; try-with-resources releases the namenode
    // connection on every exit path (the original leaked it).
    try (DFSClient client = new DFSClient(DFSUtilClient.getNNAddress(conf), conf)) {
        DFSInotifyEventInputStream iStream = client.getInotifyEventStream();
        while (true) {
            try {
                // take() blocks until the namenode delivers the next batch.
                EventBatch eventBatch = iStream.take();
                for (Event event : eventBatch.getEvents()) {
                    System.out.println(event.toString());
                }
            } catch (InterruptedException e) {
                // Restore the interrupt flag so any outer code / shutdown
                // hooks can still observe the interruption.
                Thread.currentThread().interrupt();
                System.out.println("Interrupted. Exiting...");
                return;
            } catch (MissingEventsException e) {
                e.printStackTrace();
                return;
            }
        }
    }
}

From source file:co.cask.cdap.common.logging.SyncTest.java

License:Apache License

@Before
public void startDFS() throws IOException {

    // Redirect the mini cluster's build/cache directories into a fresh
    // JUnit temp folder so each test run starts from a clean slate.
    File dfsPath = tempFolder.newFolder();
    System.setProperty("test.build.data", dfsPath.toString());
    System.setProperty("test.cache.data", dfsPath.toString());
    System.out.println("Starting up Mini DFS cluster...");
    config = new HdfsConfiguration();
    // config.setInt("dfs.block.size", 4 * 1024);
    dfsCluster = new MiniDFSCluster.Builder(config)
            //    .nameNodePort(0)
            .numDataNodes(2).format(true)
            //    .manageDataDfsDirs(true)
            //    .manageNameDfsDirs(true)
            .build();
    // Block until all datanodes have registered with the namenode.
    dfsCluster.waitClusterUp();
    System.out.println("Mini DFS is started.");
    // Point subsequent filesystem clients at the just-started namenode.
    config.set("fs.defaultFS", dfsCluster.getFileSystem().getUri().toString());
}

From source file:co.cask.cdap.gateway.runtime.Main.java

License:Apache License

@Override
public void init(String[] args) {
    // Load our configuration from our resource files
    CConfiguration cConf = CConfiguration.create();
    // Layer HBase settings on top of the HDFS defaults so hConf carries both.
    Configuration hConf = HBaseConfiguration.create(new HdfsConfiguration());

    // Set the HTTP keep alive max connection property to allow more keep-alive connections.
    // Only set it if the operator has not already overridden it on the command line.
    if (System.getProperty("http.maxConnections") == null) {
        System.setProperty("http.maxConnections", cConf.get(Constants.Gateway.STREAM_FLUME_THREADS));
    }

    // ZooKeeper is mandatory in the distributed deployment; fail fast without it.
    String zookeeper = cConf.get(Constants.Zookeeper.QUORUM);
    if (zookeeper == null) {
        LOG.error("No zookeeper quorum provided.");
        throw new IllegalStateException("No zookeeper quorum provided.");
    }

    // Set up our Guice injections (distributed-mode modules throughout).
    Injector injector = Guice.createInjector(new ConfigModule(cConf, hConf), new AuthModule(), new IOModule(),
            new ZKClientModule(), new KafkaClientModule(), new LocationRuntimeModule().getDistributedModules(),
            new DiscoveryRuntimeModule().getDistributedModules(),
            new MetricsClientRuntimeModule().getDistributedModules(),
            new GatewayModule().getDistributedModules(), new DataFabricModules().getDistributedModules(),
            new DataSetsModules().getDistributedModule(), new LoggingModules().getDistributedModules());

    zkClientService = injector.getInstance(ZKClientService.class);
    kafkaClientService = injector.getInstance(KafkaClientService.class);

    // Get the metrics collection service
    metricsCollectionService = injector.getInstance(MetricsCollectionService.class);

    // Get our fully wired Flume Collector
    flumeCollector = injector.getInstance(NettyFlumeCollector.class);

}

From source file:com.cloudera.llama.am.MiniLlama.java

License:Apache License

/**
 * Starts an embedded HDFS + YARN mini cluster sized by {@code MINI_CLUSTER_NODES_KEY}
 * and returns the merged configuration produced by the running YARN cluster.
 *
 * @return the configuration of the started mini cluster.
 * @throws Exception if the HDFS or YARN mini cluster fails to start.
 */
private Configuration startMiniHadoop() throws Exception {
    int clusterNodes = getConf().getInt(MINI_CLUSTER_NODES_KEY, 1);
    if (System.getProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA) == null) {
        String testBuildData = new File("target").getAbsolutePath();
        System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, testBuildData);
    }
    //to trigger hdfs-site.xml registration as default resource
    new HdfsConfiguration();
    Configuration conf = new YarnConfiguration();
    // Allow the current user to proxy for anyone so Llama can impersonate.
    String llamaProxyUser = System.getProperty("user.name");
    conf.set("hadoop.security.authentication", "simple");
    conf.set("hadoop.proxyuser." + llamaProxyUser + ".hosts", "*");
    conf.set("hadoop.proxyuser." + llamaProxyUser + ".groups", "*");
    String[] userGroups = new String[] { "g" };
    UserGroupInformation.createUserForTesting(llamaProxyUser, userGroups);

    // Reuse the port from fs.defaultFS if one was configured; 0 = ephemeral.
    int hdfsPort = 0;
    String fsUri = conf.get("fs.defaultFS");
    if (fsUri != null && !fsUri.equals("file:///")) {
        int i = fsUri.lastIndexOf(":");
        if (i > -1) {
            try {
                hdfsPort = Integer.parseInt(fsUri.substring(i + 1));
            } catch (Exception ex) {
                // Preserve the original exception as the cause so the bad
                // value's parse failure is not lost (it was dropped before).
                throw new RuntimeException(
                        "Could not parse port from Hadoop's 'fs.defaultFS' property: " + fsUri, ex);
            }
        }
    }
    miniHdfs = new MiniDFSCluster(hdfsPort, conf, clusterNodes, !skipDfsFormat, true, null, null);
    miniHdfs.waitActive();
    conf = miniHdfs.getConfiguration(0);
    miniYarn = new MiniYARNCluster("minillama", clusterNodes, 1, 1);
    conf.setBoolean(YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME, true);
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, 0);
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 0);

    miniYarn.init(conf);
    miniYarn.start();
    conf = miniYarn.getConfig();

    // Make the proxy-user settings effective for the running cluster.
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
    return conf;
}

From source file:com.github.sakserv.minicluster.impl.KdcLocalClusterHdfsIntegrationTest.java

License:Apache License

@BeforeClass
public static void setUp() throws Exception {

    //System.setProperty("sun.security.krb5.debug", "true");

    // KDC: start a local Kerberos KDC configured entirely from test properties.
    kdcLocalCluster = new KdcLocalCluster.Builder()
            .setPort(Integer.parseInt(propertyParser.getProperty(ConfigVars.KDC_PORT_KEY)))
            .setHost(propertyParser.getProperty(ConfigVars.KDC_HOST_KEY))
            .setBaseDir(propertyParser.getProperty(ConfigVars.KDC_BASEDIR_KEY))
            .setOrgDomain(propertyParser.getProperty(ConfigVars.KDC_ORG_DOMAIN_KEY))
            .setOrgName(propertyParser.getProperty(ConfigVars.KDC_ORG_NAME_KEY))
            .setPrincipals(propertyParser.getProperty(ConfigVars.KDC_PRINCIPALS_KEY).split(","))
            .setKrbInstance(propertyParser.getProperty(ConfigVars.KDC_KRBINSTANCE_KEY))
            .setInstance(propertyParser.getProperty(ConfigVars.KDC_INSTANCE_KEY))
            .setTransport(propertyParser.getProperty(ConfigVars.KDC_TRANSPORT))
            .setMaxTicketLifetime(
                    Integer.parseInt(propertyParser.getProperty(ConfigVars.KDC_MAX_TICKET_LIFETIME_KEY)))
            .setMaxRenewableLifetime(
                    Integer.parseInt(propertyParser.getProperty(ConfigVars.KDC_MAX_RENEWABLE_LIFETIME)))
            .setDebug(Boolean.parseBoolean(propertyParser.getProperty(ConfigVars.KDC_DEBUG))).build();
    kdcLocalCluster.start();

    // The KDC publishes the Kerberos-enabled Hadoop settings for the clusters to inherit.
    Configuration baseConf = kdcLocalCluster.getBaseConf();

    //HDFS: layer the KDC's security settings on top of the HDFS defaults.
    Configuration hdfsConfig = new HdfsConfiguration();
    hdfsConfig.addResource(baseConf);
    hdfsLocalCluster = new HdfsLocalCluster.Builder()
            .setHdfsNamenodePort(
                    Integer.parseInt(propertyParser.getProperty(ConfigVars.HDFS_NAMENODE_PORT_KEY)))
            .setHdfsNamenodeHttpPort(
                    Integer.parseInt(propertyParser.getProperty(ConfigVars.HDFS_NAMENODE_HTTP_PORT_KEY)))
            .setHdfsTempDir(propertyParser.getProperty(ConfigVars.HDFS_TEMP_DIR_KEY))
            .setHdfsNumDatanodes(
                    Integer.parseInt(propertyParser.getProperty(ConfigVars.HDFS_NUM_DATANODES_KEY)))
            .setHdfsEnablePermissions(
                    Boolean.parseBoolean(propertyParser.getProperty(ConfigVars.HDFS_ENABLE_PERMISSIONS_KEY)))
            .setHdfsFormat(Boolean.parseBoolean(propertyParser.getProperty(ConfigVars.HDFS_FORMAT_KEY)))
            .setHdfsEnableRunningUserAsProxyUser(Boolean.parseBoolean(
                    propertyParser.getProperty(ConfigVars.HDFS_ENABLE_RUNNING_USER_AS_PROXY_USER)))
            .setHdfsConfig(hdfsConfig).build();
    hdfsLocalCluster.start();
}

From source file:com.mellanox.r4h.MiniDFSClusterManager.java

License:Apache License

/**
 * Parses arguments and fills out the member variables.
 * /*from   w w w. j  a  v a 2  s .c o  m*/
 * @param args
 *            Command-line arguments.
 * @return true on successful parse; false to indicate that the
 *         program should exit.
 */
private boolean parseArguments(String[] args) {
    Options options = makeOptions();
    CommandLine cli;
    try {
        CommandLineParser parser = new GnuParser();
        cli = parser.parse(options, args);
    } catch (ParseException e) {
        LOG.warn("options parsing failed:  " + e.getMessage());
        new HelpFormatter().printHelp("...", options);
        return false;
    }

    if (cli.hasOption("help")) {
        new HelpFormatter().printHelp("...", options);
        return false;
    }

    if (cli.getArgs().length > 0) {
        for (String arg : cli.getArgs()) {
            LOG.error("Unrecognized option: " + arg);
            new HelpFormatter().printHelp("...", options);
            return false;
        }
    }

    // HDFS
    numDataNodes = intArgument(cli, "datanodes", 1);
    nameNodePort = intArgument(cli, "nnport", 0);
    if (cli.hasOption("format")) {
        dfsOpts = StartupOption.FORMAT;
        format = true;
    } else {
        dfsOpts = StartupOption.REGULAR;
        format = false;
    }

    // Runner
    writeDetails = cli.getOptionValue("writeDetails");
    writeConfig = cli.getOptionValue("writeConfig");

    // General
    conf = new HdfsConfiguration();
    updateConfiguration(conf, cli.getOptionValues("D"));

    return true;
}

From source file:com.mellanox.r4h.TestFSOutputSummer.java

License:Apache License

/**
 * Writes a test file three ways on a mini cluster configured with the given
 * checksum type, verifying FSOutputSummer against that checksum algorithm.
 *
 * @param checksumType the DFS checksum type to configure (e.g. "CRC32C").
 * @throws Exception if cluster startup or any write/verify step fails.
 */
private void doTestFSOutputSummer(String checksumType) throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BYTES_PER_CHECKSUM);
    conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, checksumType);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES).build();
    fileSys = cluster.getFileSystem();
    try {
        Path file = new Path("try.dat");
        Random rand = new Random(seed);
        rand.nextBytes(expected);
        writeFile1(file);
        writeFile2(file);
        writeFile3(file);
    } finally {
        // Nested finally: the cluster must shut down even when closing the
        // filesystem throws (a bare close();shutdown(); would leak it).
        try {
            fileSys.close();
        } finally {
            cluster.shutdown();
        }
    }
}

From source file:com.mellanox.r4h.TestFSOutputSummer.java

License:Apache License

/**
 * Verifies a file can be written when the DFS checksum type is "NULL"
 * (checksumming disabled).
 *
 * @throws Exception if cluster startup or the write fails.
 */
@Test
public void TestDFSCheckSumType() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BYTES_PER_CHECKSUM);
    conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, "NULL");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES).build();
    fileSys = cluster.getFileSystem();
    try {
        Path file = new Path("try.dat");
        Random rand = new Random(seed);
        rand.nextBytes(expected);
        writeFile1(file);
    } finally {
        // Nested finally: the cluster must shut down even when closing the
        // filesystem throws (a bare close();shutdown(); would leak it).
        try {
            fileSys.close();
        } finally {
            cluster.shutdown();
        }
    }
}

From source file:com.mellanox.r4h.TestHFlush.java

License:Apache License

/**
 * Writes a file with the standard block size via
 * {@link #doTheJob(Configuration, String, long, short, boolean, EnumSet)}.
 */
@Test
public void hFlush_01() throws IOException {
    Configuration conf = new HdfsConfiguration();
    long blockSize = MiniDFSClusterBridge.getAppendTestUtil_BLOCK_SIZE();
    doTheJob(conf, fName, blockSize, (short) 2, false, EnumSet.noneOf(SyncFlag.class));
}

From source file:com.mellanox.r4h.TestHFlush.java

License:Apache License

/**
 * The test uses/*from   ww w  . ja  va 2 s  .  c  o m*/
 * {@link #doTheJob(Configuration, String, long, short, boolean, EnumSet)} 
 * to write a file with a custom block size so the writes will be 
 * happening across block' boundaries
 */
@Test
public void hFlush_02() throws IOException {
    Configuration conf = new HdfsConfiguration();
    int customPerChecksumSize = 512;
    int customBlockSize = customPerChecksumSize * 3;
    // Modify defaul filesystem settings
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);

    doTheJob(conf, fName, customBlockSize, (short) 2, false, EnumSet.noneOf(SyncFlag.class));
}