Example usage for org.apache.hadoop.fs Path makeQualified

Introduction

This page shows example usages of org.apache.hadoop.fs.Path.makeQualified.

Prototype

@InterfaceAudience.LimitedPrivate({ "HDFS", "MapReduce" })
public Path makeQualified(URI defaultUri, Path workingDir) 

Document

Returns a qualified path object.
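
As a quick sketch of what this does (the filesystem address and working directory below are hypothetical, not taken from the examples that follow), qualifying a relative path fills in the scheme, authority, and working directory from the defaults you supply:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);

// A relative path carries no scheme or authority of its own.
Path relative = new Path("data/input.txt");

// With a default filesystem of hdfs://namenode:8020 and a working directory
// of /user/alice, this yields hdfs://namenode:8020/user/alice/data/input.txt.
Path qualified = relative.makeQualified(fs.getUri(), fs.getWorkingDirectory());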

Usage

From source file:org.apache.sentry.tests.e2e.hive.fs.TestFSBase.java

License:Apache License

/**
 * Return a full path starting with scheme and authority
 * hdfs:/nameserver/relativePath; s3a://bucketname/relativePath
 * @param relativePath
 * @return full path
 */
protected static Path getFullPathWithSchemeAndAuthority(Path relativePath) {
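    // Note (added comment): makeQualified ignores the working-directory argument
    // when the path is already absolute, so passing the path itself here is harmless.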
    return relativePath.makeQualified(defaultStorageUri, relativePath);
}

From source file:org.apache.tez.dag.utils.RelocalizationUtils.java

License:Apache License

private static Path downloadResource(String destName, URI uri, Configuration conf, String destDir)
        throws IOException {
    FileSystem fs = FileSystem.get(uri, conf);
    Path cwd = new Path(destDir);
    Path dFile = new Path(cwd, destName);
    Path srcPath = new Path(uri);
    fs.copyToLocalFile(srcPath, dFile);
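    // Note (added comment): the file now lives in the local destDir, so the result
    // is qualified against the local filesystem's URI, with cwd as the working directory.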
    return dFile.makeQualified(FileSystem.getLocal(conf).getUri(), cwd);
}

From source file:org.apache.twill.filesystem.FileContextLocationFactory.java

License:Apache License

@Override
public Location create(String path) {
    if (path.startsWith("/")) {
        path = path.substring(1);
    }
    Path locationPath;
    if (path.isEmpty()) {
        locationPath = pathBase;
    } else {
        locationPath = new Path(path);
    }
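    // Note (added comment): relative paths are resolved under the factory's base
    // path and qualified against the default filesystem's URI.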
    locationPath = locationPath.makeQualified(fc.getDefaultFileSystem().getUri(), pathBase);
    return new FileContextLocation(this, fc, locationPath);
}

From source file:org.kitesdk.apps.spi.oozie.OozieScheduling.java

License:Apache License

public static void writeBundle(Class appClass, AppContext context, Path appPath, List<Schedule> schedules,
        OutputStream output) throws IOException {

    Configuration conf = context.getHadoopConf();

    XmlStreamWriter streamWriter = WriterFactory.newXmlWriter(output);

    PrettyPrintXMLWriter writer = new PrettyPrintXMLWriter(streamWriter);

    writer.startElement(BUNDLE_ELEMENT);
    writer.addAttribute("name", appClass.getCanonicalName());
    writer.addAttribute("xmlns", OOZIE_BUNDLE_NS);

    writer.startElement("parameters");

    // Default to the HDFS scheme for the root path if none is provided.
    Path qualifiedPath = appPath.toUri().getScheme() == null
            ? appPath.makeQualified(URI.create("hdfs:/"), appPath)
            : appPath;

    property(writer, "kiteAppRoot", qualifiedPath.toString());

    property(writer, "oozie.libpath", "${kiteAppRoot}/lib");
    property(writer, "nameNode", conf.get("fs.default.name"));

    String resourceManager = conf.get("yarn.resourcemanager.address");

    // MR2 uses YARN for the job tracker, but some Hadoop deployments
    // don't have the resource manager setting visible. We work around this
    // by grabbing the job tracker setting and swapping to the resource
    // manager port.
    // TODO: is there a better way to deal with this?
    if (resourceManager == null) {

        String jobTracker = conf.get("mapred.job.tracker");

        if (jobTracker != null)
            resourceManager = jobTracker.replace("8021", "8032");
    }

    if (resourceManager != null)
        property(writer, "jobTracker", resourceManager);

    // TODO: handle application configuration.
    //    if (appConfigPath != null)
    //     property(writer, "appConfigPath", appConfigPath.toString());
    writer.endElement(); // parameters

    int i = 0;

    for (Schedule schedule : schedules) {
        writer.startElement("coordinator");
        writer.addAttribute("name", schedule.getName());

        element(writer, "app-path", "${kiteAppRoot}/" + coordPath(schedule));
        writer.endElement(); // coordinator
    }

    writer.endElement(); // bundle
    streamWriter.flush();
}

From source file:org.trafodion.sql.HBaseAccess.HBaseClient.java

License:Apache License

public boolean cleanSnpScanTmpLocation(String pathStr) throws Exception {
    if (logger.isDebugEnabled())
        logger.debug("HbaseClient.cleanSnpScanTmpLocation() - start - Path: " + pathStr);
    try {
        Path delPath = new Path(pathStr);
        delPath = delPath.makeQualified(delPath.toUri(), null);
        FileSystem fs = FileSystem.get(delPath.toUri(), config);
        fs.delete(delPath, true);
    } catch (IOException e) {
        if (logger.isDebugEnabled())
            logger.debug("HbaseClient.cleanSnpScanTmpLocation() --exception:" + e);
        throw e;
    }

    return true;
}
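
This and the remaining Trafodion examples share a self-qualifying idiom: the path is qualified against its own URI, and the working-directory argument is null, which is safe because that argument is only consulted for relative paths. A minimal sketch (the namenode address below is hypothetical):

// When the incoming path string already carries its scheme and authority,
// qualifying it against its own URI simply normalizes the Path object.
Configuration config = new Configuration();
Path p = new Path("hdfs://namenode:8020/tmp/scratch");
p = p.makeQualified(p.toUri(), null);

// The qualified URI then selects the matching FileSystem instance.
FileSystem fs = FileSystem.get(p.toUri(), config);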

From source file:org.trafodion.sql.HBaseAccess.HBulkLoadClient.java

License:Apache License

public boolean doCreateHFile() throws IOException, URISyntaxException {
    if (logger.isDebugEnabled())
        logger.debug("HBulkLoadClient.doCreateHFile() called.");

    if (hFileLocation == null)
        throw new NullPointerException("hFileLocation is not set");
    if (hFileName == null)
        throw new NullPointerException("hFileName is not set");

    closeHFile();

    if (fileSys == null)
        fileSys = FileSystem.get(config);

    Path hfilePath = new Path(new Path(hFileLocation), hFileName + "_" + System.currentTimeMillis());
    hfilePath = hfilePath.makeQualified(hfilePath.toUri(), null);

    if (logger.isDebugEnabled())
        logger.debug("HBulkLoadClient.createHFile Path: " + hfilePath);

    try {
        HFileContext hfileContext = new HFileContextBuilder().withBlockSize(blockSize)
                .withCompression(Compression.getCompressionAlgorithmByName(compression))
                .withDataBlockEncoding(dataBlockEncoding).build();

        writer = HFile.getWriterFactory(config, new CacheConfig(config)).withPath(fileSys, hfilePath)
                .withFileContext(hfileContext).withComparator(KeyValue.COMPARATOR).create();
        if (logger.isDebugEnabled())
            logger.debug("HBulkLoadClient.createHFile Path: " + writer.getPath() + "Created");
    } catch (IOException e) {
        if (logger.isDebugEnabled())
            logger.debug("HBulkLoadClient.doCreateHFile Exception" + e.getMessage());
        throw e;
    }
    return true;
}

From source file:org.trafodion.sql.HBaseAccess.HBulkLoadClient.java

License:Apache License

public boolean doBulkLoad(String prepLocation, String tableName, boolean quasiSecure, boolean snapshot)
        throws Exception {
    if (logger.isDebugEnabled())
        logger.debug("HBulkLoadClient.doBulkLoad() - start");
    if (logger.isDebugEnabled())
        logger.debug("HBulkLoadClient.doBulkLoad() - Prep Location: " + prepLocation + ", Table Name:"
                + tableName + ", quasisecure : " + quasiSecure + ", snapshot: " + snapshot);

    HTable table = new HTable(config, tableName);
    LoadIncrementalHFiles loader = new LoadIncrementalHFiles(config);
    Path prepPath = new Path(prepLocation);
    prepPath = prepPath.makeQualified(prepPath.toUri(), null);
    FileSystem prepFs = FileSystem.get(prepPath.toUri(), config);

    Path[] hFams = FileUtil.stat2Paths(prepFs.listStatus(prepPath));

    if (quasiSecure) {
        throw new Exception(
                "HBulkLoadClient.doBulkLoad() - cannot perform load. Trafodion on secure HBase mode is not implemented yet");
    } else {
        if (logger.isDebugEnabled())
            logger.debug("HBulkLoadClient.doBulkLoad() - adjusting hfiles permissions");
        for (Path hfam : hFams) {
            Path[] hfiles = FileUtil.stat2Paths(prepFs.listStatus(hfam));
            prepFs.setPermission(hfam, PERM_ALL_ACCESS);
            for (Path hfile : hfiles) {
                if (logger.isDebugEnabled())
                    logger.debug("HBulkLoadClient.doBulkLoad() - adjusting hfile permissions:" + hfile);
                prepFs.setPermission(hfile, PERM_ALL_ACCESS);
            }
            //create _tmp dir used as temp space for Hfile processing
            FileSystem.mkdirs(prepFs, new Path(hfam, "_tmp"), PERM_ALL_ACCESS);
        }
        if (logger.isDebugEnabled())
            logger.debug(
                    "HBulkLoadClient.doBulkLoad() - bulk load started. Loading directly from preparation directory");
        doSnapshotNBulkLoad(prepPath, tableName, table, loader, snapshot);
        if (logger.isDebugEnabled())
            logger.debug("HBulkLoadClient.doBulkLoad() - bulk load is done ");
    }
    return true;
}

From source file:org.trafodion.sql.HBaseAccess.HBulkLoadClient.java

License:Apache License

public boolean bulkLoadCleanup(String location) throws Exception {
    Path dir = new Path(location);
    dir = dir.makeQualified(dir.toUri(), null);
    FileSystem fs = FileSystem.get(dir.toUri(), config);
    fs.delete(dir, true);

    return true;
}

From source file:org.trafodion.sql.HBaseAccess.SequenceFileWriter.java

License:Apache License

public boolean hdfsMergeFiles(String srcPathStr, String dstPathStr) throws Exception {
    if (logger.isDebugEnabled())
        logger.debug("SequenceFileWriter.hdfsMergeFiles() - start");
    if (logger.isDebugEnabled())
        logger.debug("SequenceFileWriter.hdfsMergeFiles() - source Path: " + srcPathStr + ", destination File:"
                + dstPathStr);
    try {
        Path srcPath = new Path(srcPathStr);
        srcPath = srcPath.makeQualified(srcPath.toUri(), null);
        FileSystem srcFs = FileSystem.get(srcPath.toUri(), conf);

        Path dstPath = new Path(dstPathStr);
        dstPath = dstPath.makeQualified(dstPath.toUri(), null);
        FileSystem dstFs = FileSystem.get(dstPath.toUri(), conf);

        if (dstFs.exists(dstPath)) {
            if (logger.isDebugEnabled())
                logger.debug("SequenceFileWriter.hdfsMergeFiles() - destination files exists");
            // for this prototype we just delete the file-- will change in next code drops
            dstFs.delete(dstPath, false);
            // The caller should already have checked existence of file-- throw exception 
            //throw new FileAlreadyExistsException(dstPath.toString());
        }

        Path tmpSrcPath = new Path(srcPath, "tmp");

        FileSystem.mkdirs(srcFs, tmpSrcPath, srcFs.getFileStatus(srcPath).getPermission());
        logger.debug("SequenceFileWriter.hdfsMergeFiles() - tmp folder created.");
        Path[] files = FileUtil.stat2Paths(srcFs.listStatus(srcPath));
        for (Path f : files) {
            srcFs.rename(f, tmpSrcPath);
        }
        // copyMerge and use false for the delete option since it removes the whole directory
        if (logger.isDebugEnabled())
            logger.debug("SequenceFileWriter.hdfsMergeFiles() - copyMerge");
        FileUtil.copyMerge(srcFs, tmpSrcPath, dstFs, dstPath, false, conf, null);

        if (logger.isDebugEnabled())
            logger.debug("SequenceFileWriter.hdfsMergeFiles() - delete intermediate files");
        srcFs.delete(tmpSrcPath, true);
    } catch (IOException e) {
        if (logger.isDebugEnabled())
            logger.debug("SequenceFileWriter.hdfsMergeFiles() --exception:" + e);
        throw e;
    }

    return true;
}

From source file:org.trafodion.sql.HBaseAccess.SequenceFileWriter.java

License:Apache License

public boolean hdfsCleanUnloadPath(String uldPathStr /*, boolean checkExistence, String mergeFileStr*/) throws Exception {
    if (logger.isDebugEnabled())
        logger.debug("SequenceFileWriter.hdfsCleanUnloadPath() - start");
    logger.debug("SequenceFileWriter.hdfsCleanUnloadPath() - unload Path: " + uldPathStr);

    try {
        Path uldPath = new Path(uldPathStr);
        uldPath = uldPath.makeQualified(uldPath.toUri(), null);
        FileSystem srcFs = FileSystem.get(uldPath.toUri(), conf);
        if (!srcFs.exists(uldPath)) {
            // Unload location does not exist; hdfscreate will create it later.
            // Nothing to do.
            logger.debug("SequenceFileWriter.hdfsCleanUnloadPath() -- unload location does not exist.");
            return true;
        }

        Path[] files = FileUtil.stat2Paths(srcFs.listStatus(uldPath));
        logger.debug("SequenceFileWriter.hdfsCleanUnloadPath() - delete files");
        for (Path f : files) {
            srcFs.delete(f, false);
        }
    } catch (IOException e) {
        logger.debug("SequenceFileWriter.hdfsCleanUnloadPath() -exception:" + e);
        throw e;
    }

    return true;
}