Example usage for org.apache.hadoop.fs FileSystem setWorkingDirectory

List of usage examples for org.apache.hadoop.fs FileSystem setWorkingDirectory

Introduction

On this page you can find usage examples for org.apache.hadoop.fs FileSystem setWorkingDirectory.

Prototype

public abstract void setWorkingDirectory(Path new_dir);

Document

Set the current working directory for the given FileSystem.
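
Below is a minimal, self-contained sketch (not taken from the usage listings that follow) of a typical setWorkingDirectory call. The fs.defaultFS value and the /user/guest path are placeholders, not values from any of the examples.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WorkingDirectoryExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder cluster address; point fs.defaultFS at your own NameNode
        conf.set("fs.defaultFS", "hdfs://namenode:8020");

        FileSystem fs = FileSystem.get(conf);
        try {
            // Relative paths passed to this FileSystem are now resolved against /user/guest
            fs.setWorkingDirectory(new Path("/user/guest"));
            System.out.println("Working directory: " + fs.getWorkingDirectory());
        } finally {
            fs.close();
        }
    }
}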

Usage

From source file:classTest.fileTest.java

public static void main(String args[]) throws IOException {
    hbaseDB connHB = new hbaseDB("/Users/andresbenitez/Documents/app/ABTViewer3/srvConf.properties", "HBConf2");

    FileSystem hdfs = org.apache.hadoop.fs.FileSystem.get(connHB.getHcfg());

    JOptionPane.showMessageDialog(null, hdfs.getHomeDirectory().toString());

    JOptionPane.showMessageDialog(null, hdfs.getWorkingDirectory());

    // Point the working directory at a fully qualified HDFS path; relative paths are resolved against it
    hdfs.setWorkingDirectory(new Path("hdfs://hortonserver.com:8020/user/guest/"));

    System.out.println(hdfs.getWorkingDirectory().toString());

    String dirName = "TestDirectory";
    Path destPath = new Path(
            "hdfs://hortonserver.e-contact.cl:8020/user/guest/20160413_000118_00011008887674_98458726_TTR42-1460516478.154581.WAV");
    Path sr1 = new Path("hdfs://hortonserver.com:8020/user/guest/Test");

    //hdfs.mkdirs(sr1);

    //FileSystem lhdfs = LocalFileSystem.get(hbconf);

    //System.out.println(lhdfs.getWorkingDirectory().toString());
    //System.out.println(hdfs.getWorkingDirectory().toString());

    //Path sourcePath = new Path("/Users/andresbenitez/Documents/Apps/test.txt");

    //Path destPath = new Path("/Users/andresbenitez/Documents/Apps/test4.txt");

    //hdfs.copyFromLocalFile(sourcePath, destPath);

    //hdfs.copyToLocalFile(false, new Path("hdfs://sandbox.hortonworks.com:8020/user/guest/installupload.log"), new Path("/Users/andresbenitez/Documents/instaldown3.log"), true);

    //hdfs.copyToLocalFile(false, new Path("/Users/andresbenitez/Documents/instaldown.log"), new Path("hdfs://sandbox.hortonworks.com:8020/user/guest/installupload.log"), false);

    //File f=new File("http://srv-gui-g.e-contact.cl/e-recorder/audio/20160413/08/01_20160413_084721_90010990790034__1460548041.4646.wav");
    URL url = new URL(
            "http://grabacionesclaro.e-contact.cl/2011/2016041300/20160413_000118_00011008887674_98458726_TTR42-1460516478.154581.WAV");

    File filePaso = new File("/Users/andresbenitez/Documents/paso/JOJOJO.WAV");

    File f2 = new File(
            "/grabacionesclaro.e-contact.cl/2011/2016041300/20160413_000118_00011008887674_98458726_TTR42-1460516478.154581.WAV");

    org.apache.commons.io.FileUtils.copyURLToFile(url, filePaso);

    //org.apache.commons.io.FileUtils.copyFile(f2, filePaso);

    //&hdfs.copyToLocalFile(false, new Path("/Users/andresbenitez/Documents/paso/JOJOJO.mp3"), destPath);

    //hdfs.copyFromLocalFile(false, new Path("/Users/andresbenitez/Documents/paso/JOJOJO.WAV"), destPath);

}

From source file:com.cloudera.hoop.client.fs.TestHoopFileSystem.java

License:Open Source License

private void testWorkingdirectory() throws Exception {
    FileSystem fs = FileSystem.get(getHadoopConf());
    Path workingDir = fs.getWorkingDirectory();
    fs.close();

    Configuration conf = new Configuration();
    conf.set("fs.http.impl", HoopFileSystem.class.getName());
    fs = FileSystem.get(getJettyURL().toURI(), conf);
    Path hoopWorkingDir = fs.getWorkingDirectory();
    fs.close();
    Assert.assertEquals(hoopWorkingDir.toUri().getPath(), workingDir.toUri().getPath());

    conf = new Configuration();
    conf.set("fs.http.impl", HoopFileSystem.class.getName());
    fs = FileSystem.get(getJettyURL().toURI(), conf);
    fs.setWorkingDirectory(new Path("/tmp"));
    workingDir = fs.getWorkingDirectory();
    fs.close();
    Assert.assertEquals(workingDir.toUri().getPath(), new Path("/tmp").toUri().getPath());
}

From source file:com.hdfs.concat.crush.CrushOptionParsingTest.java

License:Apache License

@Before
public void before() throws IOException {
    crush = new Crush();

    JobConf job = new JobConf(false);
    crush.setConf(job);

    job.set("fs.default.name", "file:///");
    job.set("fs.file.impl", "org.apache.hadoop.fs.LocalFileSystem");
    job.setInt("mapred.reduce.tasks", 20);
    job.setLong("dfs.block.size", 1024 * 1024 * 64);

    FileSystem fs = FileSystem.get(job);
    fs.setWorkingDirectory(new Path(tmp.getRoot().getAbsolutePath()));

    crush.setFileSystem(fs);
}

From source file:com.ibm.jaql.JaqlScriptTestCase.java

License:Apache License

protected void runScript(Mode mode) throws Exception {
    String testLabel = script + "." + mode;
    try {
        String runMode = System.getProperty("test." + mode, "true");
        if (!runMode.equals("true")) {
            System.err.println("\nSkipping disabled jaql test " + testLabel + " (test." + mode + "=" + runMode
                    + " != true)\n");
            return;
        }

        String jaqlHome = System.getProperty("jaql.home", ".");
        jaqlHome = new File(jaqlHome).getAbsolutePath().toString().replace('\\', '/') + "/";

        String scriptDir = jaqlHome + getScriptDir();
        String[] moduleDirs = getModuleDirs();
        String queriesName = scriptDir + script + "Queries.txt";
        String goldName = scriptDir + testLabel + ".gold";

        if (!new File(goldName).exists()) {
            // look for the mode-independent gold file
            if (mode == Mode.COUNT) {
                System.err.println("\nSkipping jaql count test " + testLabel + " (no gold file)\n");
                return;
            }
            goldName = scriptDir + script + ".gold";
            if (!new File(goldName).exists()) {
                Assert.fail("\nNo gold file for jaql test " + testLabel + "at path: " + goldName);
                return;
            }
        }

        System.err.println("\nRunning jaql test " + testLabel + "\n");

        String outDir = jaqlHome + "build/test/";
        String workDir = outDir + "run." + testLabel + "/";
        String outName = workDir + testLabel + ".out";
        new File(workDir).mkdirs();

        // Set the default directories
        System.setProperty("jaql.local.dir", workDir);
        Configuration conf = new Configuration();
        LocalFileSystem lfs = FileSystem.getLocal(conf);
        lfs.setWorkingDirectory(new Path(workDir));
        FileSystem fs = FileSystem.get(conf);
        if (!(fs instanceof LocalFileSystem)) {
            String user = UnixUserGroupInformation.login(conf).getUserName();
            fs.setWorkingDirectory(new Path("/temp/" + user + "/com.ibm.jaql/test/" + script));
        }
        // mapred.working.dir is automatically set from the fs, but only once. 
        // When running multiple tests in the same JVM, it only picks up the first setting.
        if (Globals.getJobConf() != null) {
            Globals.getJobConf().setWorkingDirectory(fs.getWorkingDirectory());
        }

        // make tests work the same on windows as unix.
        System.setProperty("line.separator", "\n");
        final FastPrintWriter resultStream = new FastPrintWriter(new FileWriter(outName));
        Reader queryReader = new InputStreamReader(new FileInputStream(queriesName), "UTF-8");
        queryReader = new EchoedReader(queryReader, new FastPrintStream(System.err));
        queryReader = new EchoedReader(queryReader, resultStream);

        ClassLoaderMgr.reset();

        Jaql jaql = new Jaql(queriesName, queryReader);
        jaql.setModulePath(moduleDirs);

        if (mode == Mode.COUNT) {
            final Class<?>[] exprsToCount = new Class<?>[] { AbstractReadExpr.class, AbstractWriteExpr.class,
                    MapReduceBaseExpr.class, };
            jaql.setExplainHandler(new CountExplainHandler(resultStream, exprsToCount));
            jaql.setExplainOnly(true);
        } else if (mode == Mode.DECOMPILE) {
            jaql.setExplainHandler(new DecompileExplainHandler(System.err));
            jaql.setExplainOnly(true);
        }

        jaql.setExceptionHandler(new TestExceptionHandler(resultStream, jaqlHome));
        jaql.enableRewrite(mode != Mode.NO_REWRITE);
        boolean schemaPrinting = "schemaPrinting".equals(script);
        jaql.setJaqlPrinter(new TestPrinter(resultStream, schemaPrinting));

        String extJar = getExtensionJar();
        if (extJar != null)
            jaql.addJar(jaqlHome + extJar);
        jaql.setVar(DATADIR_NAME, DATADIR_VALUE);

        // run the script
        jaql.run();

        // finish up
        jaql.close();
        queryReader.close();
        resultStream.close();

        // compare with expected output
        boolean diff = compareResults(outName, goldName);
        if (diff) {
            String msg = "Found differences during jaql test " + testLabel;
            System.err.println("\n" + msg);
            Assert.fail(msg);
        }

        System.err.println("\nSuccessfully ran jaql test " + testLabel + "\n");
    } catch (Exception e) {
        e.printStackTrace(System.err);
        System.err.println("\n\nFailure of jaql test " + testLabel);
        Assert.fail(e.getMessage());
    }
}

From source file:com.m6d.filecrush.crush.CrushOptionParsingTest.java

License:Apache License

@Before
public void before() throws IOException {
    crush = new Crush();

    JobConf job = new JobConf(false);
    crush.setConf(job);

    job.set("fs.default.name", "file:///");
    job.set("fs.file.impl", "org.apache.hadoop.fs.LocalFileSystem");
    job.setInt("mapreduce.job.reduces", 20);
    job.setLong("dfs.blocksize", 1024 * 1024 * 64);

    FileSystem fs = FileSystem.get(job);
    fs.setWorkingDirectory(new Path(tmp.getRoot().getAbsolutePath()));

    crush.setFileSystem(fs);
}

From source file:org.anon.smart.d2cache.store.fileStore.hadoop.HadoopStore.java

License:Open Source License

@Override
public void create(String name, Class cls) throws CtxException {
    FileSystem fs = ((HadoopFileStoreConnection) _connection).getHadoopFS();
    String repo = ((HadoopFileStoreConnection) _connection).getHadoopConf().baseDirectory();

    String baseDir = repo + "/" + name;

    try {

        Path wDir = new Path(baseDir);
        if (!fs.exists(wDir))
            fs.mkdirs(wDir);
        fs.setWorkingDirectory(wDir);
    } catch (IOException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }

}

From source file:org.apache.ignite.internal.processors.hadoop.impl.delegate.HadoopBasicFileSystemFactoryDelegate.java

License:Apache License

/**
 * Internal file system creation routine, invoked in correct class loader context.
 *
 * @param usrName User name.
 * @return File system.
 * @throws IOException If failed.
 * @throws InterruptedException if the current thread is interrupted.
 */
protected FileSystem create(String usrName) throws IOException, InterruptedException {
    FileSystem fs = FileSystem.get(fullUri, cfg, usrName);

    if (workDir != null)
        fs.setWorkingDirectory(workDir);

    return fs;
}

From source file:org.apache.ignite.internal.processors.hadoop.impl.delegate.HadoopKerberosFileSystemFactoryDelegate.java

License:Apache License

/** {@inheritDoc} */
@Override
protected FileSystem create(String usrName) throws IOException, InterruptedException {
    UserGroupInformation proxyUgi = UserGroupInformation.createProxyUser(usrName,
            UserGroupInformation.getLoginUser());

    return proxyUgi.doAs(new PrivilegedExceptionAction<FileSystem>() {
        @Override
        public FileSystem run() throws Exception {
            FileSystem fs = FileSystem.get(fullUri, cfg);

            if (workDir != null)
                fs.setWorkingDirectory(workDir);

            return fs;
        }
    });
}

From source file:org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigInputFormat.java

License:Apache License

@SuppressWarnings({ "unchecked", "rawtypes" })
@Override
public List<InputSplit> getSplits(JobContext jobcontext) throws IOException, InterruptedException {

    Configuration conf = jobcontext.getConfiguration();

    ArrayList<FileSpec> inputs;
    ArrayList<ArrayList<OperatorKey>> inpTargets;
    PigContext pigContext;
    try {
        inputs = (ArrayList<FileSpec>) ObjectSerializer.deserialize(conf.get("pig.inputs"));
        inpTargets = (ArrayList<ArrayList<OperatorKey>>) ObjectSerializer
                .deserialize(conf.get("pig.inpTargets"));
        pigContext = (PigContext) ObjectSerializer.deserialize(conf.get("pig.pigContext"));
        PigContext.setPackageImportList(
                (ArrayList<String>) ObjectSerializer.deserialize(conf.get("udf.import.list")));
        MapRedUtil.setupUDFContext(conf);
    } catch (Exception e) {
        int errCode = 2094;
        String msg = "Unable to deserialize object.";
        throw new ExecException(msg, errCode, PigException.BUG, e);
    }

    ArrayList<InputSplit> splits = new ArrayList<InputSplit>();
    for (int i = 0; i < inputs.size(); i++) {
        try {
            Path path = new Path(inputs.get(i).getFileName());

            FileSystem fs;
            boolean isFsPath = true;
            try {
                fs = path.getFileSystem(conf);
            } catch (Exception e) {
                // If an application specific
                // scheme was used
                // (e.g.: "hbase://table") we will fail
                // getting the file system. That's
                // ok, we just use the dfs in that case.
                fs = new Path("/").getFileSystem(conf);
                isFsPath = false;
            }

            // if the execution is against Mapred DFS, set
            // working dir to /user/<userid>
            if (!Utils.isLocal(pigContext, conf)) {
                fs.setWorkingDirectory(jobcontext.getWorkingDirectory());
            }

            // first pass input location to the loader - for this send a
            // clone of the configuration we have - this is so that if the
            // loader (or the inputformat of the loader) decide to store the
            // input location into the configuration (for example,
            // FileInputFormat stores this in mapred.input.dir in the conf),
            // then for different inputs, the loader's don't end up
            // over-writing the same conf.
            FuncSpec loadFuncSpec = inputs.get(i).getFuncSpec();
            LoadFunc loadFunc = (LoadFunc) PigContext.instantiateFuncFromSpec(loadFuncSpec);
            boolean combinable = !(loadFunc instanceof MergeJoinIndexer || loadFunc instanceof IndexableLoadFunc
                    || (loadFunc instanceof CollectableLoadFunc && loadFunc instanceof OrderedLoadFunc));
            if (combinable)
                combinable = !conf.getBoolean("pig.noSplitCombination", false);
            JobConf confClone = new JobConf(conf);
            Job inputSpecificJob = new Job(confClone);
            // Pass loader signature to LoadFunc and to InputFormat through
            // the conf
            passLoadSignature(loadFunc, i, inputSpecificJob.getConfiguration());
            loadFunc.setLocation(inputs.get(i).getFileName(), inputSpecificJob);
            // The above setLocation call could write to the conf within
            // the inputSpecificJob - use this updated conf

            // get the InputFormat from it and ask for splits
            InputFormat inpFormat = loadFunc.getInputFormat();
            List<InputSplit> oneInputSplits = inpFormat.getSplits(
                    HadoopShims.createJobContext(inputSpecificJob.getConfiguration(), jobcontext.getJobID()));
            List<InputSplit> oneInputPigSplits = getPigSplits(oneInputSplits, i, inpTargets.get(i),
                    HadoopShims.getDefaultBlockSize(fs, isFsPath ? path : fs.getWorkingDirectory()), combinable,
                    confClone);
            splits.addAll(oneInputPigSplits);
        } catch (ExecException ee) {
            throw ee;
        } catch (Exception e) {
            int errCode = 2118;
            String msg = "Unable to create input splits for: " + inputs.get(i).getFileName();
            if (e.getMessage() != null && (!e.getMessage().isEmpty())) {
                throw new ExecException(e.getMessage(), errCode, PigException.BUG, e);
            } else {
                throw new ExecException(msg, errCode, PigException.BUG, e);
            }
        }
    }

    // XXX hadoop 20 new API integration: get around a hadoop 20 bug by
    // passing total # of splits to each split so that it can be retrieved
    // in the RecordReader method when called by mapreduce framework later.
    int n = splits.size();
    // also passing the multi-input flag to the back-end so that
    // the multi-input record counters can be created
    int m = inputs.size();

    boolean disableCounter = conf.getBoolean("pig.disable.counter", false);
    if ((m > 1) && disableCounter) {
        log.info("Disable Pig custom input counters");
    }

    for (InputSplit split : splits) {
        ((PigSplit) split).setTotalSplits(n);
        if (m > 1)
            ((PigSplit) split).setMultiInputs(true);
        ((PigSplit) split).setDisableCounter(disableCounter);
    }

    return splits;
}

From source file:org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigInputSplitFormat.java

License:Apache License

@SuppressWarnings({ "unchecked", "rawtypes" })
@Override
public List<InputSplit> getSplits(JobContext jobcontext) throws IOException {

    Configuration conf = jobcontext.getConfiguration();

    ArrayList<FileSpec> inputs;
    ArrayList<ArrayList<OperatorKey>> inpTargets;
    PigContext pigContext;
    try {
        inputs = (ArrayList<FileSpec>) ObjectSerializer.deserialize(conf.get("pig.inputs"));
        inpTargets = (ArrayList<ArrayList<OperatorKey>>) ObjectSerializer
                .deserialize(conf.get("pig.inpTargets"));
        pigContext = (PigContext) ObjectSerializer.deserialize(conf.get("pig.pigContext"));
        PigContext.setPackageImportList(
                (ArrayList<String>) ObjectSerializer.deserialize(conf.get("udf.import.list")));
        MapRedUtil.setupUDFContext(conf);
    } catch (Exception e) {
        int errCode = 2094;
        String msg = "Unable to deserialize object.";
        throw new ExecException(msg, errCode, PigException.BUG, e);
    }

    ArrayList<InputSplit> splits = new ArrayList<InputSplit>();
    for (int i = 0; i < inputs.size(); i++) {
        try {
            Path path = new Path(inputs.get(i).getFileName());

            FileSystem fs;
            boolean isFsPath = true;
            try {
                fs = path.getFileSystem(conf);
            } catch (Exception e) {
                // If an application specific
                // scheme was used
                // (e.g.: "hbase://table") we will fail
                // getting the file system. That's
                // ok, we just use the dfs in that case.
                fs = new Path("/").getFileSystem(conf);
                isFsPath = false;
            }

            // if the execution is against Mapred DFS, set
            // working dir to /user/<userid>
            if (!Utils.isLocal(pigContext, conf)) {
                fs.setWorkingDirectory(jobcontext.getWorkingDirectory());
            }

            // first pass input location to the loader - for this send a
            // clone of the configuration we have - this is so that if the
            // loader (or the inputformat of the loader) decide to store the
            // input location into the configuration (for example,
            // FileInputFormat stores this in mapred.input.dir in the conf),
            // then for different inputs, the loader's don't end up
            // over-writing the same conf.
            FuncSpec loadFuncSpec = inputs.get(i).getFuncSpec();
            LoadFunc loadFunc = (LoadFunc) PigContext.instantiateFuncFromSpec(loadFuncSpec);
            boolean combinable = !(loadFunc instanceof MergeJoinIndexer || loadFunc instanceof IndexableLoadFunc
                    || (loadFunc instanceof CollectableLoadFunc && loadFunc instanceof OrderedLoadFunc));
            if (combinable)
                combinable = !conf.getBoolean("pig.noSplitCombination", false);
            Configuration confClone = new Configuration(conf);
            Job inputSpecificJob = new Job(confClone);
            // Pass loader signature to LoadFunc and to InputFormat through
            // the conf
            passLoadSignature(loadFunc, i, inputSpecificJob.getConfiguration());
            loadFunc.setLocation(inputs.get(i).getFileName(), inputSpecificJob);
            // The above setLocation call could write to the conf within
            // the inputSpecificJob - use this updated conf

            // get the InputFormat from it and ask for splits
            InputFormat inpFormat = loadFunc.getInputFormat();
            // List<InputSplit> oneInputSplits = inpFormat.getSplits(
            // HadoopShims.createJobContext(inputSpecificJob.getConfiguration(),
            // jobcontext.getJobID()));

            List<InputSplit> oneInputSplits = getSplitsSample(jobcontext);

            List<InputSplit> oneInputPigSplits = getPigSplits(oneInputSplits, i, inpTargets.get(i),
                    HadoopShims.getDefaultBlockSize(fs, isFsPath ? path : fs.getWorkingDirectory()), combinable,
                    confClone);
            splits.addAll(oneInputPigSplits);
        } catch (ExecException ee) {
            throw ee;
        } catch (Exception e) {
            int errCode = 2118;
            String msg = "Unable to create input splits for: " + inputs.get(i).getFileName();
            if (e.getMessage() != null && (!e.getMessage().isEmpty())) {
                throw new ExecException(e.getMessage(), errCode, PigException.BUG, e);
            } else {
                throw new ExecException(msg, errCode, PigException.BUG, e);
            }
        }
    }

    // XXX hadoop 20 new API integration: get around a hadoop 20 bug by
    // passing total # of splits to each split so that it can be retrieved
    // in the RecordReader method when called by mapreduce framework later.
    int n = splits.size();
    // also passing the multi-input flag to the back-end so that
    // the multi-input record counters can be created
    int m = inputs.size();

    boolean disableCounter = conf.getBoolean("pig.disable.counter", false);
    if ((m > 1) && disableCounter) {
        log.info("Disable Pig custom input counters");
    }

    for (InputSplit split : splits) {
        ((PigSplit) split).setTotalSplits(n);
        if (m > 1)
            ((PigSplit) split).setMultiInputs(true);
        ((PigSplit) split).setDisableCounter(disableCounter);
    }
    // shuffle --> return splits
    return splits;
}