Example usage for org.apache.hadoop.fs FileSystem getHomeDirectory

Introduction

This page lists usage examples for org.apache.hadoop.fs.FileSystem.getHomeDirectory().

Prototype

public Path getHomeDirectory() 

Document

Return the current user's home directory in this FileSystem.
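
Before the longer examples, here is a minimal, self-contained sketch of the call itself (not taken from the usage examples below; the class name is illustrative, and it assumes a Configuration that can reach your cluster):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HomeDirectoryExample {
    public static void main(String[] args) throws Exception {
        // Picks up core-site.xml/hdfs-site.xml from the classpath, if present.
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // On HDFS this is typically /user/<current user>; on the local
        // filesystem it is the process owner's home directory.
        Path home = fs.getHomeDirectory();
        System.out.println("Home directory: " + home);
    }
}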

Usage

From source file: PerformanceEvaluation.java

License: Apache License

private void runTest(final Class<? extends Test> cmd)
        throws IOException, InterruptedException, ClassNotFoundException {
    MiniHBaseCluster hbaseMiniCluster = null;
    MiniDFSCluster dfsCluster = null;
    MiniZooKeeperCluster zooKeeperCluster = null;
    if (this.miniCluster) {
        dfsCluster = new MiniDFSCluster(conf, 2, true, (String[]) null);
        zooKeeperCluster = new MiniZooKeeperCluster();
        int zooKeeperPort = zooKeeperCluster.startup(new File(System.getProperty("java.io.tmpdir")));

        // mangle the conf so that the fs parameter points to the minidfs we
        // just started up
        FileSystem fs = dfsCluster.getFileSystem();
        conf.set("fs.default.name", fs.getUri().toString());
        conf.set("hbase.zookeeper.property.clientPort", Integer.toString(zooKeeperPort));
        Path parentdir = fs.getHomeDirectory();
        conf.set(HConstants.HBASE_DIR, parentdir.toString());
        fs.mkdirs(parentdir);
        FSUtils.setVersion(fs, parentdir);
        hbaseMiniCluster = new MiniHBaseCluster(this.conf, N);
    }

    try {
        if (N == 1) {
            // If there is only one client and one HRegionServer, we assume nothing
            // has been set up at all.
            runNIsOne(cmd);
        } else {
            // Else, run
            runNIsMoreThanOne(cmd);
        }
    } finally {
        if (this.miniCluster) {
            if (hbaseMiniCluster != null)
                hbaseMiniCluster.shutdown();
            if (zooKeeperCluster != null)
                zooKeeperCluster.shutdown();
            HBaseTestCase.shutdownDfs(dfsCluster);
        }
    }
}

From source file: JavaCustomReceiver.java

License: Apache License

/** Create a socket connection and receive data until receiver is stopped */
private void receive() {
    Socket socket = null;
    String userInput = null;

    try {
        // connect to the server
        socket = new Socket(host, port);

        //   BufferedReader reader = new BufferedReader(new InputStreamReader(socket.getInputStream()));

        //      Path pt=new Path("hdfs://192.168.0.1:9000/equinox-sanjose.20120119-netflow.txt");
        //      FileSystem fs = FileSystem.get(new Configuration());
        //      BufferedReader in=new BufferedReader(new InputStreamReader(fs.open(pt)));
        Path pt = new Path("hdfs://192.168.0.1:9000/user/hduser/equinox-sanjose.20120119-netflow.txt");

        Configuration conf = new Configuration();
        conf.addResource(new Path("/usr/local/hadoop/conf/core-site.xml"));
        conf.addResource(new Path("/usr/local/hadoop/conf/hdfs-site.xml"));
        //      FileSystem fs = FileSystem.get(conf);
        FileSystem fs = pt.getFileSystem(conf);
        System.out.println(fs.getHomeDirectory());
        BufferedReader in = new BufferedReader(new InputStreamReader(fs.open(pt)));

        //      BufferedReader in = new BufferedReader(
        //            new FileReader(
        //                  "/home/hduser/spark_scratchPad/equinox-sanjose.20120119-netflow.txt"));
        //      
        // Until stopped or connection broken continue reading
        while (!isStopped() && (userInput = in.readLine()) != null) {
            System.out.println("Received data '" + userInput + "'");
            store(userInput);
        }
        in.close();
        socket.close();

        // Restart in an attempt to connect again when server is active again
        restart("Trying to connect again");
    } catch (ConnectException ce) {
        // restart if could not connect to server
        restart("Could not connect", ce);
    } catch (Throwable t) {
        restart("Error receiving data", t);
    }
}
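
The getHomeDirectory() call here is purely diagnostic: printing it confirms which filesystem (and which user context) the assembled Configuration actually resolves to before fs.open(pt) is attempted.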

From source file: JaqlShell.java

License: Apache License

/**
 * @param dir the directory used for test data
 * @param numNodes the number of nodes in the mini clusters
 * @throws Exception
 */
public void init(String dir, int numNodes) throws Exception {
    String vInfo = VersionInfo.getVersion();
    System.setProperty("test.build.data", dir);
    m_conf = new Configuration();

    // setup conf according to the Hadoop version
    if (vInfo.indexOf("0.20") < 0) {
        throw new Exception("Unsupported Hadoop version: " + vInfo);
    }

    // setup the mini dfs cluster
    m_fs = new MiniDFSCluster(m_conf, numNodes, true, (String[]) null);
    FileSystem filesystem = m_fs.getFileSystem();
    m_conf.set("fs.default.name", filesystem.getUri().toString());
    Path parentdir = filesystem.getHomeDirectory();
    filesystem.mkdirs(parentdir);
    //FSUtils.setVersion(filesystem, parentdir);

    // setup hbase cluster (only if OS is not windows)
    //    if(!System.getProperty("os.name").toLowerCase().contains("win")) {
    //      m_conf.set(HConstants.HBASE_DIR, parentdir.toString());      
    //      Path hdfsTestDir = filesystem.makeQualified(new Path(m_conf.get(HConstants.HBASE_DIR)));
    //
    //      // prime the hdfs for hbase information...
    //      HRegion root = HRegion.createHRegion(HRegionInfo.ROOT_REGIONINFO, hdfsTestDir, (HBaseConfiguration)m_conf);
    //      HRegion meta = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO, hdfsTestDir, (HBaseConfiguration)m_conf);
    //      HRegion.addRegionToMETA(root, meta);
    //
    //      // ... and close the root and meta
    //      if (meta != null) {
    //        meta.close();
    //        meta.getLog().closeAndDelete();
    //      }
    //      if (root != null) {
    //        root.close();
    //        root.getLog().closeAndDelete();
    //      }
    //
    //      try
    //      {
    //        this.zooKeeperCluster = new MiniZooKeeperCluster();
    //        File testDir = new File(dir);
    //        int clientPort = this.zooKeeperCluster.startup(testDir);
    //        m_conf.set("hbase.zookeeper.property.clientPort", Integer.toString(clientPort));
    //      } catch(Exception e) {
    //        LOG.error("Unable to startup zookeeper");
    //        throw new IOException(e);
    //      }
    //      try {
    //        // start the mini cluster
    //        m_base = new MiniHBaseCluster((HBaseConfiguration)m_conf, numNodes);
    //      } catch(Exception e) {
    //        LOG.error("Unable to startup hbase");
    //        throw new IOException(e);
    //      }
    //      try {
    //        // opening the META table ensures that cluster is running
    //        new HTable((HBaseConfiguration)m_conf, HConstants.META_TABLE_NAME);        
    //
    //        //setupOverride(conf);
    //      }
    //      catch (Exception e)
    //      {
    //        LOG.warn("Could not verify that hbase is up", e);
    //      }
    //      setupOverride();
    //    }

    m_mr = startMRCluster(numNodes, m_fs.getFileSystem().getName(), m_conf);

    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // make the home directory if it does not exist (the working directory
    // defaults to the user's home directory)
    Path hd = fs.getWorkingDirectory();
    if (!fs.exists(hd))
        fs.mkdirs(hd);

    // make the $USER/_temporary directory if it does not exist
    Path tmpPath = new Path(hd, "_temporary");
    if (!fs.exists(tmpPath))
        fs.mkdirs(tmpPath);

    //    if (m_base != null)
    //    {
    //      try {
    //        m_admin = new HBaseAdmin((HBaseConfiguration) m_conf);
    //        HTableDescriptor[] tables = m_admin.listTables();
    //        if (tables != null)
    //        {
    //          for (int i = 0; i < tables.length; i++)
    //          {
    //            m_admin.enableTable(tables[i].getName());
    //          }
    //        }
    //      } catch(Exception e) {
    //        LOG.warn("failed to enable hbase tables");
    //      }
    //    }
}
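
The second half of init() prepares the home directory through fs.getWorkingDirectory() rather than getHomeDirectory(); the two coincide at this point because a FileSystem's working directory defaults to the user's home directory until setWorkingDirectory() is called.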

From source file: azkaban.viewer.hdfs.HdfsBrowserServlet.java

License: Apache License

private String getHomeDir(FileSystem fs) {
    String homeDirString = fs.getHomeDirectory().toString();
    if (homeDirString.startsWith("file:")) {
        return homeDirString.substring("file:".length());
    }
    return homeDirString.substring(fs.getUri().toString().length());
}
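
Two cases are handled: a local filesystem home directory renders as file:/some/home/path, so only the file: prefix is dropped, while a distributed filesystem home directory renders as a fully qualified URI (for example hdfs://namenode:8020/user/azkaban, hostname illustrative), so stripping fs.getUri() leaves just the absolute path /user/azkaban.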

From source file: classTest.fileTest.java

public static void main(String args[]) throws IOException {
    hbaseDB connHB = new hbaseDB("/Users/andresbenitez/Documents/app/ABTViewer3/srvConf.properties", "HBConf2");

    FileSystem hdfs = org.apache.hadoop.fs.FileSystem.get(connHB.getHcfg());

    JOptionPane.showMessageDialog(null, hdfs.getHomeDirectory().toString());

    JOptionPane.showMessageDialog(null, hdfs.getWorkingDirectory());

    hdfs.setWorkingDirectory(new Path("hdfs://hortonserver.com:8020/user/guest/"));

    System.out.println(hdfs.getWorkingDirectory().toString());

    String dirName = "TestDirectory";
    Path destPath = new Path(
            "hdfs://hortonserver.e-contact.cl:8020/user/guest/20160413_000118_00011008887674_98458726_TTR42-1460516478.154581.WAV");
    Path sr1 = new Path("hdfs://hortonserver.com:8020/user/guest/Test");

    //hdfs.mkdirs(sr1);

    //FileSystem lhdfs = LocalFileSystem.get(hbconf);

    //System.out.println(lhdfs.getWorkingDirectory().toString());
    //System.out.println(hdfs.getWorkingDirectory().toString());

    //Path sourcePath = new Path("/Users/andresbenitez/Documents/Apps/test.txt");

    //Path destPath = new Path("/Users/andresbenitez/Documents/Apps/test4.txt");

    //hdfs.copyFromLocalFile(sourcePath, destPath);

    //hdfs.copyToLocalFile(false, new Path("hdfs://sandbox.hortonworks.com:8020/user/guest/installupload.log"), new Path("/Users/andresbenitez/Documents/instaldown3.log"), true);

    //hdfs.copyToLocalFile(false, new Path("/Users/andresbenitez/Documents/instaldown.log"), new Path("hdfs://sandbox.hortonworks.com:8020/user/guest/installupload.log"), false);

    //File f=new File("http://srv-gui-g.e-contact.cl/e-recorder/audio/20160413/08/01_20160413_084721_90010990790034__1460548041.4646.wav");
    URL url = new URL(
            "http://grabacionesclaro.e-contact.cl/2011/2016041300/20160413_000118_00011008887674_98458726_TTR42-1460516478.154581.WAV");

    File filePaso = new File("/Users/andresbenitez/Documents/paso/JOJOJO.WAV");

    File f2 = new File(
            "/grabacionesclaro.e-contact.cl/2011/2016041300/20160413_000118_00011008887674_98458726_TTR42-1460516478.154581.WAV");

    org.apache.commons.io.FileUtils.copyURLToFile(url, filePaso);

    //org.apache.commons.io.FileUtils.copyFile(f2, filePaso);

    //&hdfs.copyToLocalFile(false, new Path("/Users/andresbenitez/Documents/paso/JOJOJO.mp3"), destPath);

    //hdfs.copyFromLocalFile(false, new Path("/Users/andresbenitez/Documents/paso/JOJOJO.WAV"), destPath);

}
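
getHomeDirectory() and getWorkingDirectory() appear side by side here: the home directory is fixed per user, while the working directory can be redirected, as the subsequent setWorkingDirectory(...) call does.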

From source file: cn.edu.buaa.act.petuumOnYarn.ApplicationMaster.java

License: Apache License

private void processMachineFile(List<String> allocatedIpList) {
    try {
        String text = "";
        String lineTxt = "";
        for (int i = 0; i < allocatedIpList.size(); i++) {
            lineTxt = i + " " + allocatedIpList.get(i) + " " + startPort;
            text = text + lineTxt + "\n";
        }
        LOG.info("server text:" + text.trim());
        FileSystem fs = FileSystem.get(conf);
        if (petuumHDFSPathPrefix.equals("")) {
            hostfileHDFSPath = new Path(fs.getHomeDirectory(), hostfileIdentifier).toUri().toString();
        } else {
            hostfileHDFSPath = new Path(fs.getHomeDirectory(), petuumHDFSPathPrefix + hostfileIdentifier)
                    .toUri().toString();
        }
        LOG.info("Hostfile being writen to " + hostfileHDFSPath);
        YarnUtil.writeFileHDFS(fs, hostfileHDFSPath, text.trim());
    } catch (Exception e) {
        System.out.println("read file error");
        e.printStackTrace();
    }
}
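
fs.getHomeDirectory() anchors the hostfile under the submitting user's HDFS home directory, so even the optional petuumHDFSPathPrefix is resolved relative to a per-user location rather than an absolute cluster-wide path.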

From source file: co.cask.tigon.data.hbase.HBaseTestBase.java

License: Apache License

public Path createHBaseRootDir(Configuration conf) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    Path hbaseRootdir = new Path(fs.makeQualified(fs.getHomeDirectory()), "hbase");
    conf.set(HConstants.HBASE_DIR, hbaseRootdir.toString());
    fs.mkdirs(hbaseRootdir);
    FSUtils.setVersion(fs, hbaseRootdir);
    return hbaseRootdir;
}
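
fs.makeQualified(fs.getHomeDirectory()) ensures that HConstants.HBASE_DIR is set to a fully qualified URI (scheme and authority included) before "hbase" is appended, which HBase expects for its root directory setting.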

From source file: co.nubetech.hiho.common.HihoTestCase.java

License: Apache License

public void createTextFileInHDFS(String inputData, String filePath, String nameOfFile) throws IOException {
    FileSystem fs = getFileSystem();
    FSDataOutputStream out = null;
    Path inputFile = new Path(filePath + "/" + nameOfFile);
    try {
        out = fs.create(inputFile, false);
        out.write(inputData.getBytes(), 0, inputData.getBytes().length);
        out.close();
        out = null;
        // Check that the input file exists.
        Path inputPath = new Path(fs.getHomeDirectory(), filePath + "/" + nameOfFile);
        assertTrue(fs.exists(inputPath));
    } finally {
        if (out != null) {
            out.close();
        }
    }
}
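
The existence check qualifies the relative filePath against fs.getHomeDirectory() because relative paths handed to fs.create(...) are resolved against the working directory, which defaults to the home directory, so both spellings name the same file.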

From source file: co.nubetech.hiho.dedup.TestDedupJob.java

License: Apache License

@Test
public void testDedupByValueWithDelimitedTextInputFormat() throws Exception {
    final String inputData1 = "Xavier Wilson,Mason Holloway,Carlos Johnston,Martin Noel,Drake Mckinney\n"
            + "Drake Mckinney,Murphy Baird,Theodore Lindsey,Nehru Wilcox,Harper Klein\n"
            + "Kennedy Bailey,Jerome Perry,David Cabrera,Edan Fleming,Orlando Tyson";
    final String inputData2 = "Zephania Bauer,Jermaine Gordon,Vincent Moon,Steven Pierce,Jasper Campos\n"
            + "Drake Mckinney,Murphy Baird,Theodore Lindsey,Nehru Wilcox,Harper Klein\n"
            + "Kennedy Bailey,Plato Atkinson,Stuart Guy,Rooney Levy,Judah Benson";
    createTextFileInHDFS(inputData1, "/input1", "testFile1.txt");
    createTextFileInHDFS(inputData2, "/input2", "testFile2.txt");
    String[] args = new String[] { "-inputFormat", "co.nubetech.hiho.dedup.DelimitedTextInputFormat",
            "-inputKeyClassName", "org.apache.hadoop.io.Text", "-inputValueClassName",
            "org.apache.hadoop.io.Text", "-inputPath", "/input1,/input2", "-outputPath", "output", "-delimeter",
            ",", "-column", "1", "-dedupBy", "value" };
    DedupJob job = runDedupJob(args);
    assertEquals(6, job.getTotalRecordsRead());
    assertEquals(0, job.getBadRecords());
    assertEquals(5, job.getOutput());
    assertEquals(1, job.getDuplicateRecords());

    FileSystem outputFS = getFileSystem();
    Path outputPath = new Path(outputFS.getHomeDirectory(), "output");
    FileStatus[] status = outputFS.listStatus(outputPath, getOutputPathFilter());
    assertTrue(outputFS.exists(outputPath));
    List<String> expectedOutput = new ArrayList<String>();
    expectedOutput.add("Xavier Wilson,Mason Holloway,Carlos Johnston,Martin Noel,Drake Mckinney");
    expectedOutput.add("Zephania Bauer,Jermaine Gordon,Vincent Moon,Steven Pierce,Jasper Campos");
    expectedOutput.add("Drake Mckinney,Murphy Baird,Theodore Lindsey,Nehru Wilcox,Harper Klein");
    expectedOutput.add("Kennedy Bailey,Jerome Perry,David Cabrera,Edan Fleming,Orlando Tyson");
    expectedOutput.add("Kennedy Bailey,Plato Atkinson,Stuart Guy,Rooney Levy,Judah Benson");
    int count = 0;
    for (FileStatus fileStat : status) {
        logger.debug("File status is " + fileStat.getPath() + " and is it a dir? " + fileStat.isDirectory());
        FSDataInputStream in = outputFS.open(fileStat.getPath());
        String line = null;
        while ((line = in.readLine()) != null) {
            logger.debug("Output is " + line);
            assertTrue("Matched output " + line, expectedOutput.contains(line));
            expectedOutput.remove(line);
            count++;
        }
        in.close();
    }
    assertEquals(5, count);
}
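
Because the -outputPath argument "output" is relative, it lands under the running user's home directory; the assertions therefore qualify it explicitly with outputFS.getHomeDirectory(). The next test follows the same pattern.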

From source file: co.nubetech.hiho.dedup.TestDedupJob.java

License: Apache License

@Test
public void testDedupByValueWithTextInputFormat() throws Exception {
    final String inputData1 = "Xavier Wilson,Mason Holloway,Carlos Johnston,Martin Noel,Drake Mckinney\n"
            + "Drake Mckinney,Murphy Baird,Theodore Lindsey,Nehru Wilcox,Harper Klein\n"
            + "Kennedy Bailey,Jerome Perry,David Cabrera,Edan Fleming,Orlando Tyson";
    final String inputData2 = "Zephania Bauer,Jermaine Gordon,Vincent Moon,Steven Pierce,Jasper Campos\n"
            + "Drake Mckinney,Murphy Baird,Theodore Lindsey,Nehru Wilcox,Harper Klein\n"
            + "Kennedy Bailey,Plato Atkinson,Stuart Guy,Rooney Levy,Judah Benson";
    createTextFileInHDFS(inputData1, "/input1", "testFile1.txt");
    createTextFileInHDFS(inputData2, "/input2", "testFile2.txt");
    String[] args = new String[] { "-inputFormat", "org.apache.hadoop.mapreduce.lib.input.TextInputFormat",
            "-inputPath", "/input1,/input2", "-outputPath", "output", "-outputFormat",
            "co.nubetech.hiho.mapreduce.lib.output.NoKeyOnlyValueOutputFormat", "-dedupBy", "value" };
    DedupJob job = runDedupJob(args);
    assertEquals(6, job.getTotalRecordsRead());
    assertEquals(0, job.getBadRecords());
    assertEquals(5, job.getOutput());
    assertEquals(1, job.getDuplicateRecords());

    FileSystem outputFS = getFileSystem();
    Path outputPath = new Path(outputFS.getHomeDirectory(), "output");
    FileStatus[] status = outputFS.listStatus(outputPath, getOutputPathFilter());
    assertTrue(outputFS.exists(outputPath));
    List<String> expectedOutput = new ArrayList<String>();
    expectedOutput.add("Xavier Wilson,Mason Holloway,Carlos Johnston,Martin Noel,Drake Mckinney");
    expectedOutput.add("Zephania Bauer,Jermaine Gordon,Vincent Moon,Steven Pierce,Jasper Campos");
    expectedOutput.add("Drake Mckinney,Murphy Baird,Theodore Lindsey,Nehru Wilcox,Harper Klein");
    expectedOutput.add("Kennedy Bailey,Jerome Perry,David Cabrera,Edan Fleming,Orlando Tyson");
    expectedOutput.add("Kennedy Bailey,Plato Atkinson,Stuart Guy,Rooney Levy,Judah Benson");
    int count = 0;
    for (FileStatus fileStat : status) {
        logger.debug("File status is " + fileStat.getPath() + " and is it a dir? " + fileStat.isDirectory());
        FSDataInputStream in = outputFS.open(fileStat.getPath());
        String line = null;
        while ((line = in.readLine()) != null) {
            logger.debug("Output is " + line);
            assertTrue("Matched output " + line, expectedOutput.contains(line));
            expectedOutput.remove(line);
            count++;
        }
        in.close();
    }
    assertEquals(5, count);
}