Example usage for org.apache.hadoop.fs Path isAbsolute

List of usage examples for org.apache.hadoop.fs Path isAbsolute

Introduction

On this page you can find example usages of org.apache.hadoop.fs.Path.isAbsolute, drawn from open-source projects.

Prototype

public boolean isAbsolute() 

Document

Returns true if the path component (i.e. directory) of this URI is absolute.
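
Before the project examples, a minimal sketch of the method's behavior; the class name and path strings here are illustrative only:

import org.apache.hadoop.fs.Path;

public class IsAbsoluteDemo {
    public static void main(String[] args) {
        System.out.println(new Path("/user/data").isAbsolute());     // true
        System.out.println(new Path("data/file.txt").isAbsolute());  // false
        // Only the URI's path component is tested, so a fully qualified
        // URI with an absolute path is also absolute:
        System.out.println(new Path("hdfs://namenode:8020/tmp").isAbsolute()); // true

        // The recurring idiom in the examples below: resolve a relative
        // path against a base directory before using it.
        Path p = new Path("logs/app");
        Path resolved = p.isAbsolute() ? p : new Path(new Path("/user/alice"), p);
        System.out.println(resolved); // /user/alice/logs/app
    }
}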

Usage

From source file:org.apache.gobblin.data.management.retention.profile.ConfigurableGlobDatasetFinder.java

License:Apache License

public ConfigurableGlobDatasetFinder(FileSystem fs, Properties jobProps, Config config) {
    for (String property : requiredProperties()) {
        Preconditions.checkArgument(config.hasPath(property) || config.hasPath(DEPRECATIONS.get(property)),
                String.format("Missing required property %s", property));
    }

    if (ConfigUtils.hasNonEmptyPath(config, DATASET_BLACKLIST_KEY)) {
        this.blacklist = Optional.of(Pattern.compile(config.getString(DATASET_BLACKLIST_KEY)));
    } else if (ConfigUtils.hasNonEmptyPath(config, DATASET_FINDER_BLACKLIST_KEY)) {
        this.blacklist = Optional.of(Pattern.compile(config.getString(DATASET_FINDER_BLACKLIST_KEY)));
    } else {
        this.blacklist = Optional.absent();
    }

    if (ConfigUtils.hasNonEmptyPath(config, DATASET_FINDER_GLOB_BLACKLIST_KEY)) {
        this.globPatternBlacklist = Optional
                .of(GlobPattern.compile(config.getString(DATASET_FINDER_GLOB_BLACKLIST_KEY)));
    } else {
        this.globPatternBlacklist = Optional.absent();
    }

    this.fs = fs;

    Path tmpDatasetPattern;
    if (config.hasPath(DATASET_FINDER_PATTERN_KEY)) {
        tmpDatasetPattern = new Path(config.getString(DATASET_FINDER_PATTERN_KEY));
    } else {
        tmpDatasetPattern = new Path(config.getString(DATASET_PATTERN_KEY));
    }
    this.datasetPattern = tmpDatasetPattern.isAbsolute() ? tmpDatasetPattern
            : new Path(this.fs.getWorkingDirectory(), tmpDatasetPattern);

    this.commonRoot = PathUtils.deepestNonGlobPath(this.datasetPattern);
    this.props = jobProps;
}
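
A relative dataset pattern is resolved against the filesystem's working directory before the deepest non-glob root is computed from it.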

From source file:org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem.java

License:Apache License

/**
 * Convert Hadoop path into IGFS path.
 *
 * @param path Hadoop path.
 * @return IGFS path.
 */
@Nullable
private IgfsPath convert(@Nullable Path path) {
    if (path == null)
        return null;

    return path.isAbsolute() ? new IgfsPath(path.toUri().getPath())
            : new IgfsPath(convert(workingDir.get()), path.toUri().getPath());
}
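
Conversion keeps only the URI's path component; a relative path is first resolved against the adapter's working directory.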

From source file:org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem.java

License:Apache License

/**
 * Convert Hadoop path into IGFS path.
 *
 * @param path Hadoop path.
 * @return IGFS path.
 */
@Nullable
private IgfsPath convert(Path path) {
    if (path == null)
        return null;

    return path.isAbsolute() ? new IgfsPath(path.toUri().getPath())
            : new IgfsPath(workingDir, path.toUri().getPath());
}
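
The v2 adapter applies the same rule: keep only the URI's path component, resolving relative paths against the working directory.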

From source file:org.apache.ignite.internal.processors.hadoop.fs.GridHadoopRawLocalFileSystem.java

License:Apache License

/**
 * Converts Hadoop path to local path.
 *
 * @param path Hadoop path.
 * @return Local path.
 */
File convert(Path path) {
    checkPath(path);

    if (path.isAbsolute())
        return new File(path.toUri().getPath());

    return new File(getWorkingDirectory().toUri().getPath(), path.toUri().getPath());
}
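
A local filesystem variant: an absolute Hadoop path maps directly onto a java.io.File, while a relative one is resolved under the working directory.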

From source file:org.apache.kylin.common.KylinConfigBase.java

License:Apache License

public String getHdfsWorkingDirectory() {
    if (cachedHdfsWorkingDirectory != null)
        return cachedHdfsWorkingDirectory;

    String root = getOptional("kylin.env.hdfs-working-dir", "/kylin");

    Path path = new Path(root);
    if (!path.isAbsolute())
        throw new IllegalArgumentException("kylin.env.hdfs-working-dir must be absolute, but got " + root);

    // make sure path is qualified
    try {
        FileSystem fs = path.getFileSystem(HadoopUtil.getCurrentConfiguration());
        path = fs.makeQualified(path);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }

    // append metadata-url prefix
    root = new Path(path, StringUtils.replaceChars(getMetadataUrlPrefix(), ':', '-')).toString();

    if (!root.endsWith("/"))
        root += "/";

    cachedHdfsWorkingDirectory = root;
    if (cachedHdfsWorkingDirectory.startsWith("file:")) {
        cachedHdfsWorkingDirectory = cachedHdfsWorkingDirectory.replace("file:", "file://");
    } else if (cachedHdfsWorkingDirectory.startsWith("maprfs:")) {
        cachedHdfsWorkingDirectory = cachedHdfsWorkingDirectory.replace("maprfs:", "maprfs://");
    }
    return cachedHdfsWorkingDirectory;
}
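
Here isAbsolute serves as validation: a relative kylin.env.hdfs-working-dir is rejected with an exception before the path is qualified against the filesystem.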

From source file:org.apache.kylin.engine.mr.common.AbstractHadoopJob.java

License:Apache License

private void setJobTmpJarsAndFiles(Job job, String kylinDependency) {
    if (StringUtils.isBlank(kylinDependency))
        return;

    String[] fNameList = kylinDependency.split(",");

    try {
        Configuration jobConf = job.getConfiguration();
        FileSystem localfs = FileSystem.getLocal(jobConf);
        FileSystem hdfs = HadoopUtil.getWorkingFileSystem(jobConf);

        StringBuilder jarList = new StringBuilder();
        StringBuilder fileList = new StringBuilder();

        for (String fileName : fNameList) {
            Path p = new Path(fileName);
            if (!p.isAbsolute()) {
                logger.warn("The directory of kylin dependency '" + fileName + "' is not absolute, skip");
                continue;
            }
            FileSystem fs;
            if (exists(hdfs, p)) {
                fs = hdfs;
            } else if (exists(localfs, p)) {
                fs = localfs;
            } else {
                logger.warn("The directory of kylin dependency '" + fileName + "' does not exist, skip");
                continue;
            }

            if (fs.getFileStatus(p).isDirectory()) {
                appendTmpDir(job, fs, p, jarList, fileList);
                continue;
            }

            StringBuilder list = (p.getName().endsWith(".jar")) ? jarList : fileList;
            if (list.length() > 0)
                list.append(",");
            list.append(fs.getFileStatus(p).getPath());
        }

        appendTmpFiles(fileList.toString(), jobConf);
        appendTmpJars(jarList.toString(), jobConf);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
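
Relative dependency paths are not resolved here at all; they are skipped with a warning, and only absolute paths are looked up on HDFS or the local filesystem.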

From source file:org.apache.lens.cube.metadata.Storage.java

License:Apache License

/**
 * Add given partitions in the underlying hive table and update latest partition links
 *
 * @param client                hive client instance
 * @param factOrDimTable        fact or dim name
 * @param updatePeriod          update period of partitions.
 * @param storagePartitionDescs all partitions to be added
 * @param latestInfos           new latest info. at least one partition for the latest value exists for each part
 *                              column
 * @throws HiveException
 */
public List<Partition> addPartitions(Hive client, String factOrDimTable, UpdatePeriod updatePeriod,
        List<StoragePartitionDesc> storagePartitionDescs, Map<Map<String, String>, LatestInfo> latestInfos,
        String tableName) throws HiveException {
    preAddPartitions(storagePartitionDescs);
    Map<Map<String, String>, Map<String, Integer>> latestPartIndexForPartCols = Maps.newHashMap();
    boolean success = false;
    try {
        String dbName = SessionState.get().getCurrentDatabase();
        AddPartitionDesc addParts = new AddPartitionDesc(dbName, tableName, true);
        Table storageTbl = client.getTable(dbName, tableName);
        for (StoragePartitionDesc addPartitionDesc : storagePartitionDescs) {
            String location = null;
            if (addPartitionDesc.getLocation() != null) {
                Path partLocation = new Path(addPartitionDesc.getLocation());
                if (partLocation.isAbsolute()) {
                    location = addPartitionDesc.getLocation();
                } else {
                    location = new Path(storageTbl.getPath(), partLocation).toString();
                }
            }
            Map<String, String> partParams = addPartitionDesc.getPartParams();
            if (partParams == null) {
                partParams = new HashMap<String, String>();
            }
            partParams.put(MetastoreConstants.PARTITION_UPDATE_PERIOD,
                    addPartitionDesc.getUpdatePeriod().name());
            addParts.addPartition(addPartitionDesc.getStoragePartSpec(), location);
            int curIndex = addParts.getPartitionCount() - 1;
            addParts.getPartition(curIndex).setPartParams(partParams);
            addParts.getPartition(curIndex).setInputFormat(addPartitionDesc.getInputFormat());
            addParts.getPartition(curIndex).setOutputFormat(addPartitionDesc.getOutputFormat());
            addParts.getPartition(curIndex).setNumBuckets(addPartitionDesc.getNumBuckets());
            addParts.getPartition(curIndex).setCols(addPartitionDesc.getCols());
            addParts.getPartition(curIndex).setSerializationLib(addPartitionDesc.getSerializationLib());
            addParts.getPartition(curIndex).setSerdeParams(addPartitionDesc.getSerdeParams());
            addParts.getPartition(curIndex).setBucketCols(addPartitionDesc.getBucketCols());
            addParts.getPartition(curIndex).setSortCols(addPartitionDesc.getSortCols());
            if (latestInfos != null && latestInfos.get(addPartitionDesc.getNonTimePartSpec()) != null) {
                for (Map.Entry<String, LatestPartColumnInfo> entry : latestInfos
                        .get(addPartitionDesc.getNonTimePartSpec()).latestParts.entrySet()) {
                    if (addPartitionDesc.getTimePartSpec().containsKey(entry.getKey()) && entry.getValue()
                            .get(MetastoreUtil.getLatestPartTimestampKey(entry.getKey())).equals(updatePeriod
                                    .format(addPartitionDesc.getTimePartSpec().get(entry.getKey())))) {
                        if (latestPartIndexForPartCols.get(addPartitionDesc.getNonTimePartSpec()) == null) {
                            latestPartIndexForPartCols.put(addPartitionDesc.getNonTimePartSpec(),
                                    Maps.<String, Integer>newHashMap());
                        }
                        latestPartIndexForPartCols.get(addPartitionDesc.getNonTimePartSpec())
                                .put(entry.getKey(), curIndex);
                    }
                }
            }
        }
        if (latestInfos != null) {
            for (Map.Entry<Map<String, String>, LatestInfo> entry1 : latestInfos.entrySet()) {
                Map<String, String> nonTimeParts = entry1.getKey();
                LatestInfo latestInfo = entry1.getValue();
                for (Map.Entry<String, LatestPartColumnInfo> entry : latestInfo.latestParts.entrySet()) {
                    // symlink this partition to latest
                    List<Partition> latest;
                    String latestPartCol = entry.getKey();
                    try {
                        latest = client.getPartitionsByFilter(storageTbl,
                                StorageConstants.getLatestPartFilter(latestPartCol, nonTimeParts));
                    } catch (Exception e) {
                        throw new HiveException("Could not get latest partition", e);
                    }
                    if (!latest.isEmpty()) {
                        client.dropPartition(storageTbl.getTableName(), latest.get(0).getValues(), false);
                    }
                    if (latestPartIndexForPartCols.get(nonTimeParts).containsKey(latestPartCol)) {
                        AddPartitionDesc.OnePartitionDesc latestPartWithFullTimestamp = addParts
                                .getPartition(latestPartIndexForPartCols.get(nonTimeParts).get(latestPartCol));
                        addParts.addPartition(StorageConstants
                                .getLatestPartSpec(latestPartWithFullTimestamp.getPartSpec(), latestPartCol),
                                latestPartWithFullTimestamp.getLocation());
                        int curIndex = addParts.getPartitionCount() - 1;
                        addParts.getPartition(curIndex).setPartParams(
                                entry.getValue().getPartParams(latestPartWithFullTimestamp.getPartParams()));
                        addParts.getPartition(curIndex)
                                .setInputFormat(latestPartWithFullTimestamp.getInputFormat());
                        addParts.getPartition(curIndex)
                                .setOutputFormat(latestPartWithFullTimestamp.getOutputFormat());
                        addParts.getPartition(curIndex)
                                .setNumBuckets(latestPartWithFullTimestamp.getNumBuckets());
                        addParts.getPartition(curIndex).setCols(latestPartWithFullTimestamp.getCols());
                        addParts.getPartition(curIndex)
                                .setSerializationLib(latestPartWithFullTimestamp.getSerializationLib());
                        addParts.getPartition(curIndex)
                                .setSerdeParams(latestPartWithFullTimestamp.getSerdeParams());
                        addParts.getPartition(curIndex)
                                .setBucketCols(latestPartWithFullTimestamp.getBucketCols());
                        addParts.getPartition(curIndex).setSortCols(latestPartWithFullTimestamp.getSortCols());
                    }
                }
            }
        }
        client = Hive.get();

        List<Partition> partitionsAdded = client.createPartitions(addParts);
        success = true;
        return partitionsAdded;
    } finally {
        if (success) {
            commitAddPartitions(storagePartitionDescs);
        } else {
            rollbackAddPartitions(storagePartitionDescs);
        }
    }
}
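
Within this larger partition-registration flow, isAbsolute decides whether a partition location is used verbatim or resolved against the storage table's path.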

From source file:org.apache.oozie.action.hadoop.FsActionExecutor.java

License:Apache License

Path resolveToFullPath(Path nameNode, Path path, boolean withScheme) throws ActionExecutorException {
    Path fullPath;

    // If no nameNode is given, validate the path as-is and return it as-is
    if (nameNode == null) {
        validatePath(path, withScheme);
        fullPath = path;
    } else {
        // If the path doesn't have a scheme or authority, use the nameNode which should have already been verified earlier
        String pathScheme = path.toUri().getScheme();
        String pathAuthority = path.toUri().getAuthority();
        if (pathScheme == null || pathAuthority == null) {
            if (path.isAbsolute()) {
                String nameNodeSchemeAuthority = nameNode.toUri().getScheme() + "://"
                        + nameNode.toUri().getAuthority();
                fullPath = new Path(nameNodeSchemeAuthority + path.toString());
            } else {
                throw new ActionExecutorException(ActionExecutorException.ErrorType.ERROR, "FS011",
                        "Path [{0}] cannot be relative", path);
            }
        } else {
            // If the path has a scheme and authority but it's not the nameNode, then validate the path as-is and return it as-is
            // If it is the nameNode, then it should have already been verified earlier so return it as-is
            if (!nameNode.toUri().getScheme().equals(pathScheme)
                    || !nameNode.toUri().getAuthority().equals(pathAuthority)) {
                validatePath(path, withScheme);
            }
            fullPath = path;
        }
    }
    return fullPath;
}
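
For paths lacking a scheme or authority, only absolute ones are accepted; the name node's scheme and authority are prepended, and relative paths trigger an error.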

From source file:org.apache.oozie.action.hadoop.JarFilter.java

License:Apache License

/**
 * @param listUris List of URIs to be filtered
 * @param jarPath Application jar
 * @throws IOException
 * @throws URISyntaxException
 */
JarFilter(final Collection<URI> listUris, final String jarPath) throws URISyntaxException, IOException {
    this.listUris = listUris;
    applicationJar = jarPath;
    final Path p = new Path(jarPath);
    if (p.isAbsolute()) {
        applicationJar = HadoopUriFinder.getFixedUri(p.toUri()).toString();
    }
}
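
isAbsolute gates whether the application jar path is rewritten into a fixed, fully qualified URI.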

From source file:org.apache.oozie.action.hadoop.JavaActionExecutor.java

License:Apache License

public static void parseJobXmlAndConfiguration(Context context, Element element, Path appPath,
        Configuration conf)
        throws IOException, ActionExecutorException, HadoopAccessorException, URISyntaxException {
    Namespace ns = element.getNamespace();
    Iterator<Element> it = element.getChildren("job-xml", ns).iterator();
    HashMap<String, FileSystem> filesystemsMap = new HashMap<String, FileSystem>();
    HadoopAccessorService has = Services.get().get(HadoopAccessorService.class);
    while (it.hasNext()) {
        Element e = it.next();
        String jobXml = e.getTextTrim();
        Path pathSpecified = new Path(jobXml);
        Path path = pathSpecified.isAbsolute() ? pathSpecified : new Path(appPath, jobXml);
        FileSystem fs;
        if (filesystemsMap.containsKey(path.toUri().getAuthority())) {
            fs = filesystemsMap.get(path.toUri().getAuthority());
        } else {
            if (path.toUri().getAuthority() != null) {
                fs = has.createFileSystem(context.getWorkflow().getUser(), path.toUri(),
                        has.createJobConf(path.toUri().getAuthority()));
            } else {
                fs = context.getAppFileSystem();
            }
            filesystemsMap.put(path.toUri().getAuthority(), fs);
        }
        Configuration jobXmlConf = new XConfiguration(fs.open(path));
        try {
            String jobXmlConfString = XmlUtils.prettyPrint(jobXmlConf).toString();
            jobXmlConfString = XmlUtils.removeComments(jobXmlConfString);
            jobXmlConfString = context.getELEvaluator().evaluate(jobXmlConfString, String.class);
            jobXmlConf = new XConfiguration(new StringReader(jobXmlConfString));
        } catch (ELEvaluationException ex) {
            throw new ActionExecutorException(ActionExecutorException.ErrorType.TRANSIENT, "EL_EVAL_ERROR",
                    ex.getMessage(), ex);
        } catch (Exception ex) {
            context.setErrorInfo("EL_ERROR", ex.getMessage());
        }
        checkForDisallowedProps(jobXmlConf, "job-xml");
        XConfiguration.copy(jobXmlConf, conf);
    }
    Element e = element.getChild("configuration", ns);
    if (e != null) {
        String strConf = XmlUtils.prettyPrint(e).toString();
        XConfiguration inlineConf = new XConfiguration(new StringReader(strConf));
        checkForDisallowedProps(inlineConf, "inline configuration");
        XConfiguration.copy(inlineConf, conf);
    }
}
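
Each job-xml entry that is not absolute is resolved relative to the application path before being opened on the appropriate filesystem.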