Example usage for org.apache.hadoop.fs Path toString

Introduction

This page collects usage examples for org.apache.hadoop.fs.Path.toString().

Prototype

@Override
public String toString()
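
For orientation, a minimal sketch of what the method returns (paths and host below are hypothetical): toString() yields the full string form of the path, including the scheme and authority when the Path was constructed with them.

Path local = new Path("/tmp/data/input.csv");
System.out.println(local.toString());     // /tmp/data/input.csv

Path qualified = new Path("hdfs://namenode:8020/user/alice/out");
System.out.println(qualified.toString()); // hdfs://namenode:8020/user/alice/out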


Usage

From source file:com.ibm.bi.dml.runtime.transform.TfUtils.java

License:Open Source License

public static boolean isPartFileWithHeader(JobConf job) throws IOException {
    FileSystem fs = FileSystem.get(job);

    String thisfile = getPartFileName(job);
    Path smallestFilePath = new Path(job.get(MRJobConfiguration.TF_SMALLEST_FILE)).makeQualified(fs);

    //this part file carries the header iff it is the designated smallest file
    return thisfile.equals(smallestFilePath.toString());
}
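
The string comparison above works only because both sides are fully qualified. A short sketch of the difference (host and paths hypothetical; fs is a FileSystem handle as in the snippet):

Path raw = new Path("/user/alice/part-00000");
raw.toString();                   // "/user/alice/part-00000"
raw.makeQualified(fs).toString(); // e.g. "hdfs://namenode:8020/user/alice/part-00000"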

From source file:com.ibm.bi.dml.runtime.transform.TfUtils.java

License:Open Source License

private Reader initOffsetsReader(JobConf job) throws IOException {
    Path path = new Path(job.get(CSVReblockMR.ROWID_FILE_NAME));
    FileSystem fs = FileSystem.get(job);

    //the row-id counters are expected in exactly one sequence file
    Path[] files = MatrixReader.getSequenceFilePaths(fs, path);
    if (files.length != 1)
        throw new IOException("Expecting a single file under counters file: " + path.toString());

    return new SequenceFile.Reader(fs, files[0], job);
}

From source file:com.ibm.bi.dml.runtime.util.MapReduceTool.java

License:Open Source License

public static MatrixCharacteristics[] processDimsFiles(String dir, MatrixCharacteristics[] stats)
        throws IOException {
    Path pt = new Path(dir);
    FileSystem fs = FileSystem.get(_rJob);

    if (!fs.exists(pt))
        return stats;

    FileStatus fstat = fs.getFileStatus(pt);

    if (fstat.isDirectory()) {
        FileStatus[] files = fs.listStatus(pt);
        for (int i = 0; i < files.length; i++) {
            Path filePath = files[i].getPath();
            BufferedReader br = setupInputFile(filePath.toString());

            //each line: <resultIndex> <maxRows> <maxCols>
            String line = null;
            while ((line = br.readLine()) != null) {
                String[] parts = line.split(" ");
                int resultIndex = Integer.parseInt(parts[0]);
                long maxRows = Long.parseLong(parts[1]);
                long maxCols = Long.parseLong(parts[2]);

                //keep the maximum dimensions seen across all dims files
                stats[resultIndex].setDimension(Math.max(stats[resultIndex].getRows(), maxRows),
                        Math.max(stats[resultIndex].getCols(), maxCols));
            }

            br.close();
        }
    } else {
        throw new IOException(dir + " is expected to be a folder!");
    }

    return stats;
}

From source file:com.ibm.bi.dml.yarn.DMLYarnClient.java

License:Open Source License

/**
 * Copies the serialized DML config, the DML script, and the application
 * jar into the given HDFS working directory of the application master.
 * 
 * @param yconf the yarn configuration
 * @param hdfsWD the HDFS working directory
 * @throws ParseException
 * @throws IOException
 * @throws DMLRuntimeException
 * @throws InterruptedException 
 */
@SuppressWarnings("deprecation")
private void copyResourcesToHdfsWorkingDir(YarnConfiguration yconf, String hdfsWD)
        throws ParseException, IOException, DMLRuntimeException, InterruptedException {
    FileSystem fs = FileSystem.get(yconf);

    //create working directory
    MapReduceTool.createDirIfNotExistOnHDFS(hdfsWD, DMLConfig.DEFAULT_SHARED_DIR_PERMISSION);

    //serialize the dml config to HDFS file 
    //NOTE: we do not modify and ship the absolute scratch space path of the current user
    //because this might result in permission issues if the app master is run with a different user
    //(runtime plan migration during resource reoptimizations now needs to use qualified names
    //for shipping/reading intermediates) TODO modify resource reoptimizer on prototype integration.
    Path confPath = new Path(hdfsWD, DML_CONFIG_NAME);
    FSDataOutputStream fout = fs.create(confPath, true);
    //_dmlConfig.makeQualifiedScratchSpacePath(); 
    fout.writeBytes(_dmlConfig.serializeDMLConfig() + "\n");
    fout.close();
    _hdfsDMLConfig = confPath.makeQualified(fs).toString();
    LOG.debug("DML config written to HDFS file: " + _hdfsDMLConfig + "");

    //serialize the dml script to HDFS file
    Path scriptPath = new Path(hdfsWD, DML_SCRIPT_NAME);
    FSDataOutputStream fout2 = fs.create(scriptPath, true);
    fout2.writeBytes(_dmlScript);
    fout2.close();
    _hdfsDMLScript = scriptPath.makeQualified(fs).toString();
    LOG.debug("DML script written to HDFS file: " + _hdfsDMLScript + "");

    // copy local jar file to HDFS (try to get the original jar filename)
    String fname = getLocalJarFileNameFromEnvConst();
    if (fname == null) {
        //get location of unpacked jar classes and repackage (if required)
        String lclassFile = DMLYarnClient.class.getProtectionDomain().getCodeSource().getLocation().getPath();
        File flclassFile = new File(lclassFile);
        if (!flclassFile.isDirectory()) //called w/ jar 
            fname = lclassFile;
        else //called w/ unpacked jar (need to be repackaged)   
            fname = createJar(lclassFile);
    }
    Path srcPath = new Path(fname);
    Path dstPath = new Path(hdfsWD, srcPath.getName());
    FileUtil.copy(FileSystem.getLocal(yconf), srcPath, fs, dstPath, false, true, yconf);
    _hdfsJarFile = dstPath.makeQualified(fs).toString();
    LOG.debug(
            "Jar file copied from local file: " + srcPath.toString() + " to HDFS file: " + dstPath.toString());
}

From source file:com.ibm.crail.hdfs.CrailHadoopFileSystem.java

License:Apache License

@Override
public FSDataOutputStream create(Path path, FsPermission permission, boolean overwrite, int bufferSize,
        short replication, long blockSize, Progressable progress) throws IOException {
    CrailFile fileInfo = null;
    try {
        fileInfo = dfs.create(path.toUri().getRawPath(), CrailNodeType.DATAFILE, CrailStorageClass.PARENT,
                CrailLocationClass.PARENT).get().asFile();
    } catch (Exception e) {
        if (e.getMessage().contains(RpcErrors.messages[RpcErrors.ERR_PARENT_MISSING])) {
            fileInfo = null;
        } else {
            throw new IOException(e);
        }
    }

    if (fileInfo == null) {
        Path parent = path.getParent();
        this.mkdirs(parent, FsPermission.getDirDefault());
        try {
            fileInfo = dfs.create(path.toUri().getRawPath(), CrailNodeType.DATAFILE, CrailStorageClass.PARENT,
                    CrailLocationClass.PARENT).get().asFile();
        } catch (Exception e) {
            throw new IOException(e);
        }
    }

    CrailBufferedOutputStream outputStream = null;
    if (fileInfo != null) {
        try {
            fileInfo.syncDir();
            outputStream = fileInfo.getBufferedOutputStream(Integer.MAX_VALUE);
        } catch (Exception e) {
            throw new IOException(e);
        }
    }

    if (outputStream != null) {
        return new CrailHDFSOutputStream(outputStream, statistics);
    } else {
        throw new IOException("Failed to create file, path " + path.toString());
    }
}
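
Note the two renderings of the same path in this snippet: dfs.create receives the scheme-less raw path via path.toUri().getRawPath(), while the error message uses path.toString(), which preserves any scheme and authority. A minimal sketch (URI hypothetical):

Path p = new Path("crail://namenode:9060/tmp/file.dat");
p.toUri().getRawPath(); // "/tmp/file.dat"
p.toString();           // "crail://namenode:9060/tmp/file.dat"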

From source file:com.ibm.crail.hdfs.CrailHDFS.java

License:Apache License

@Override
public FSDataOutputStream createInternal(Path path, EnumSet<CreateFlag> flag, FsPermission absolutePermission,
        int bufferSize, short replication, long blockSize, Progressable progress, ChecksumOpt checksumOpt,
        boolean createParent) throws AccessControlException, FileAlreadyExistsException, FileNotFoundException,
        ParentNotDirectoryException, UnsupportedFileSystemException, UnresolvedLinkException, IOException {
    CrailFile fileInfo = null;
    try {
        fileInfo = dfs.create(path.toUri().getRawPath(), CrailNodeType.DATAFILE, CrailStorageClass.PARENT,
                CrailLocationClass.PARENT).get().asFile();
    } catch (Exception e) {
        if (e.getMessage().contains(RpcErrors.messages[RpcErrors.ERR_PARENT_MISSING])) {
            fileInfo = null;
        } else {
            throw new IOException(e);
        }
    }

    if (fileInfo == null) {
        Path parent = path.getParent();
        this.mkdir(parent, FsPermission.getDirDefault(), true);
        try {
            fileInfo = dfs.create(path.toUri().getRawPath(), CrailNodeType.DATAFILE, CrailStorageClass.PARENT,
                    CrailLocationClass.PARENT).get().asFile();
        } catch (Exception e) {
            throw new IOException(e);
        }
    }

    CrailBufferedOutputStream outputStream = null;
    if (fileInfo != null) {
        try {
            fileInfo.syncDir();
            outputStream = fileInfo.getBufferedOutputStream(Integer.MAX_VALUE);
        } catch (Exception e) {
            throw new IOException(e);
        }
    } else {
        throw new IOException("Failed to create file, path " + path.toString());
    }

    if (outputStream != null) {
        return new CrailHDFSOutputStream(outputStream, statistics);
    } else {
        throw new IOException("Failed to create file, path " + path.toString());
    }
}

From source file:com.ibm.crail.hdfs.CrailHDFS.java

License:Apache License

@Override
public FSDataInputStream open(Path path, int bufferSize)
        throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException {
    CrailFile fileInfo = null;
    try {
        fileInfo = dfs.lookup(path.toUri().getRawPath()).get().asFile();
    } catch (Exception e) {
        throw new IOException(e);
    }

    CrailBufferedInputStream inputStream = null;
    if (fileInfo != null) {
        try {
            inputStream = fileInfo.getBufferedInputStream(fileInfo.getCapacity());
        } catch (Exception e) {
            throw new IOException(e);
        }
    }

    if (inputStream != null) {
        return new CrailHDFSInputStream(inputStream);
    } else {
        throw new IOException("Failed to open file, path " + path.toString());
    }
}

From source file:com.ibm.jaql.io.hadoop.DirectFileOutputCommiter.java

License:Apache License

@Override
public void setupJob(JobContext context) throws IOException {
    // Create the parent of the output path, if needed.
    JobConf conf = context.getJobConf();
    Path outputPath = FileOutputFormat.getOutputPath(conf);
    if (outputPath != null) {
        Path tmpDir = outputPath.getParent();
        FileSystem fileSys = outputPath.getFileSystem(conf);
        if (!fileSys.mkdirs(tmpDir)) {
            throw new IOException("Mkdirs failed to create " + tmpDir.toString());
        }
    }
}

From source file:com.ibm.stocator.fs.common.StocatorPath.java

License:Open Source License

public boolean isTemporaryPathContain(Path path) {
    return path.toString().contains(tempIdentifier);
}

From source file:com.ibm.stocator.fs.common.StocatorPath.java

License:Open Source License

public boolean isTemporaryPathTaget(Path path) {
    return path.toString().endsWith(tempIdentifier);
}