Example usage for org.apache.hadoop.fs Path getPathWithoutSchemeAndAuthority

Introduction

On this page you can find example usages of org.apache.hadoop.fs.Path#getPathWithoutSchemeAndAuthority.

Prototype

public static Path getPathWithoutSchemeAndAuthority(Path path) 

Document

Return a version of the given Path with the scheme and authority removed.
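
For illustration, a minimal sketch of the transformation (the HDFS URI here is hypothetical):

Path qualified = new Path("hdfs://namenode:8020/user/data/file.txt");
Path stripped = Path.getPathWithoutSchemeAndAuthority(qualified);
// stripped.toString() returns "/user/data/file.txt"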

Usage

From source file: org.apache.drill.exec.store.parquet.ParquetGroupScan.java

License: Apache License

private void getFiles(String path, List<FileStatus> fileStatuses) throws IOException {
    // Strip the scheme and authority so the path resolves against this file system.
    Path p = Path.getPathWithoutSchemeAndAuthority(new Path(path));
    FileStatus fileStatus = fs.getFileStatus(p);
    if (fileStatus.isDirectory()) {
        // Recurse into subdirectories, applying the Drill path filter.
        for (FileStatus f : fs.listStatus(p, new DrillPathFilter())) {
            getFiles(f.getPath().toString(), fileStatuses);
        }
    } else {
        fileStatuses.add(fileStatus);
    }
}
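
Note that each recursive call round-trips the child path through a String and re-parses it, so stripping the scheme and authority at the top keeps every level resolving bare paths against the same fs instance.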

From source file: org.apache.drill.exec.store.parquet.ParquetGroupScan.java

License: Apache License

@Override
public String toString() {
    String cacheFileString = "";
    if (usedMetadataCache) {
        // For EXPLAIN, remove the URI prefix from cacheFileRoot.  If cacheFileRoot is null, we
        // would have read the cache file from selectionRoot
        String str = (cacheFileRoot == null)
                ? Path.getPathWithoutSchemeAndAuthority(new Path(selectionRoot)).toString()
                : Path.getPathWithoutSchemeAndAuthority(new Path(cacheFileRoot)).toString();
        cacheFileString = ", cacheFileRoot=" + str;
    }
    return "ParquetGroupScan [entries=" + entries + ", selectionRoot=" + selectionRoot + ", numFiles="
            + getEntries().size() + ", usedMetadataFile=" + usedMetadataCache + cacheFileString + ", columns="
            + columns + "]";
}
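
As the inline comment notes, stripping the path to its scheme-less form keeps the EXPLAIN output free of URI prefixes such as the NameNode address.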

From source file: org.apache.falcon.extensions.store.ExtensionStore.java

License: Apache License

public Map<String, String> getExtensionResources(final String extensionName) throws StoreAccessException {
    Map<String, String> extensionFileMap = new HashMap<>();
    try {
        Path extensionPath = new Path(storePath, extensionName.toLowerCase());

        Path resourcesPath = null;
        FileStatus[] files = fs.listStatus(extensionPath);

        for (FileStatus fileStatus : files) {
            if (fileStatus.getPath().getName().equalsIgnoreCase(RESOURCES_DIR)) {
                resourcesPath = fileStatus.getPath();
                break;
            }
        }

        if (resourcesPath == null) {
            throw new StoreAccessException(" For extension " + extensionName + " there is no " + RESOURCES_DIR
                    + "at the extension store path " + storePath);
        }
        RemoteIterator<LocatedFileStatus> fileStatusListIterator = fs.listFiles(resourcesPath, true);
        while (fileStatusListIterator.hasNext()) {
            LocatedFileStatus fileStatus = fileStatusListIterator.next();
            Path filePath = Path.getPathWithoutSchemeAndAuthority(fileStatus.getPath());
            extensionFileMap.put(filePath.getName(), filePath.toString());
        }
    } catch (IOException e) {
        throw new StoreAccessException(e);
    }
    return extensionFileMap;
}
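
The returned map keys each resource by file name and stores its scheme-less path as the value, so callers can resolve the resources against whichever FileSystem they hold.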

From source file: org.apache.falcon.extensions.store.ExtensionStore.java

License: Apache License

public String getExtensionLibPath(final String extensionName) throws StoreAccessException {
    try {
        Path extensionPath = new Path(storePath, extensionName.toLowerCase());

        Path libsPath = null;
        FileStatus[] files = fs.listStatus(extensionPath);

        for (FileStatus fileStatus : files) {
            if (fileStatus.getPath().getName().equalsIgnoreCase(LIBS_DIR)) {
                libsPath = Path.getPathWithoutSchemeAndAuthority(fileStatus.getPath());
                break;
            }
        }

        if (libsPath == null) {
            LOG.info("For extension " + extensionName + " there is no " + LIBS_DIR
                    + "at the extension store path " + extensionPath);
            return null;
        } else {
            return libsPath.toString();
        }
    } catch (IOException e) {
        throw new StoreAccessException(e);
    }
}

From source file: org.apache.falcon.extensions.store.ExtensionStore.java

License: Apache License

private List<String> getTrustedExtensions() throws StoreAccessException {
    List<String> extensionList = new ArrayList<>();
    try {
        FileStatus[] fileStatuses = fs.listStatus(storePath);

        for (FileStatus fileStatus : fileStatuses) {
            if (fileStatus.isDirectory()) {
                Path filePath = Path.getPathWithoutSchemeAndAuthority(fileStatus.getPath());
                extensionList.add(filePath.getName());
            }
        }
    } catch (IOException e) {
        throw new StoreAccessException(e);
    }
    return extensionList;
}

From source file: org.apache.falcon.service.SharedLibraryHostingService.java

License: Apache License

private void pushExtensionArtifactsToCluster(final Cluster cluster, final FileSystem clusterFs)
        throws FalconException {
    if (!Services.get().isRegistered(ExtensionService.SERVICE_NAME)) {
        LOG.info("ExtensionService not registered, return");
        return;
    }

    ExtensionStore store = ExtensionStore.get();
    if (!store.isExtensionStoreInitialized()) {
        LOG.info("Extension store not initialized by Extension service. Make sure Extension service is added in "
                + "startup properties");
        return;
    }

    final String filterPath = "/apps/falcon/extensions/mirroring/";
    Path extensionStorePath = store.getExtensionStorePath();
    LOG.info("extensionStorePath :{}", extensionStorePath);
    FileSystem falconFileSystem = HadoopClientFactory.get().createFalconFileSystem(extensionStorePath.toUri());
    String nameNode = StringUtils
            .removeEnd(falconFileSystem.getConf().get(HadoopClientFactory.FS_DEFAULT_NAME_KEY), File.separator);

    String clusterStorageUrl = StringUtils.removeEnd(ClusterHelper.getStorageUrl(cluster), File.separator);

    // If default fs for Falcon server is same as cluster fs abort copy
    if (nameNode.equalsIgnoreCase(clusterStorageUrl)) {
        LOG.info("clusterStorageUrl :{} same return", clusterStorageUrl);
        return;
    }

    try {
        RemoteIterator<LocatedFileStatus> fileStatusListIterator = falconFileSystem
                .listFiles(extensionStorePath, true);

        while (fileStatusListIterator.hasNext()) {
            LocatedFileStatus srcfileStatus = fileStatusListIterator.next();
            Path filePath = Path.getPathWithoutSchemeAndAuthority(srcfileStatus.getPath());

            if (filePath != null && filePath.toString().startsWith(filterPath)) {
                // HiveDR uses the filter path as the store path in DRStatusStore, so skip it and
                // copy only the extension artifacts.
                continue;
            }

            if (srcfileStatus.isDirectory()) {
                if (!clusterFs.exists(filePath)) {
                    HadoopClientFactory.mkdirs(clusterFs, filePath, srcfileStatus.getPermission());
                }
            } else {
                if (clusterFs.exists(filePath)) {
                    FileStatus targetfstat = clusterFs.getFileStatus(filePath);
                    if (targetfstat.getLen() == srcfileStatus.getLen()) {
                        continue;
                    }
                }

                Path parentPath = filePath.getParent();
                if (!clusterFs.exists(parentPath)) {
                    FsPermission dirPerm = falconFileSystem.getFileStatus(parentPath).getPermission();
                    HadoopClientFactory.mkdirs(clusterFs, parentPath, dirPerm);
                }

                FileUtil.copy(falconFileSystem, srcfileStatus, clusterFs, filePath, false, true,
                        falconFileSystem.getConf());
                FileUtil.chmod(clusterFs.makeQualified(filePath).toString(),
                        srcfileStatus.getPermission().toString());
            }
        }
    } catch (IOException | InterruptedException e) {
        throw new FalconException("Failed to copy extension artifacts to cluster " + cluster.getName(), e);
    }
}
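
Because filePath has its scheme and authority removed, the same absolute path can be tested and created on clusterFs, mirroring the extension store's layout from the Falcon filesystem onto the cluster filesystem.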

From source file: org.apache.gobblin.compliance.restore.RestorableHivePartitionDataset.java

License: Apache License

private Path getTrashPartitionLocation() {
    Preconditions.checkArgument(this.state.contains(ComplianceConfigurationKeys.TRASH_DIR),
            "Missing required property " + ComplianceConfigurationKeys.TRASH_DIR);
    return new Path(StringUtils.join(Arrays.asList(this.state.getProp(ComplianceConfigurationKeys.TRASH_DIR),
            Path.getPathWithoutSchemeAndAuthority(getLocation()).toString()), '/'));
}
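
For example, with a hypothetical trash dir of /trash and a partition located at hdfs://nn:8020/db/table/dt=2017-01-01, the join yields /trash//db/table/dt=2017-01-01, which the Path constructor normalizes to /trash/db/table/dt=2017-01-01.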

From source file: org.apache.gobblin.compliance.retention.HivePartitionVersionRetentionReaper.java

License: Apache License

private Path getNewVersionLocation() {
    Preconditions.checkArgument(this.state.contains(ComplianceConfigurationKeys.BACKUP_DIR),
            "Missing required property " + ComplianceConfigurationKeys.BACKUP_DIR);
    HivePartitionRetentionVersion version = (HivePartitionRetentionVersion) this.datasetVersion;
    if (PartitionUtils.isUnixTimeStamp(version.getLocation().getName())) {
        return new Path(
                StringUtils.join(Arrays.asList(this.state.getProp(ComplianceConfigurationKeys.BACKUP_DIR),
                        Path.getPathWithoutSchemeAndAuthority(version.getLocation().getParent()).toString(),
                        version.getTimeStamp()), '/'));
    } else {
        return new Path(
                StringUtils.join(Arrays.asList(this.state.getProp(ComplianceConfigurationKeys.BACKUP_DIR),
                        Path.getPathWithoutSchemeAndAuthority(version.getLocation()).toString(),
                        version.getTimeStamp()), '/'));
    }
}

From source file: org.apache.gobblin.data.management.conversion.hive.dataset.ConvertibleHiveDataset.java

License: Apache License

private DatasetDescriptor createSourceDataset() {
    try {
        String sourceTable = getTable().getDbName() + "." + getTable().getTableName();
        DatasetDescriptor source = new DatasetDescriptor(DatasetConstants.PLATFORM_HIVE, sourceTable);
        Path sourcePath = getTable().getDataLocation();
        log.info(String.format("[%s]Source path %s being used in conversion", this.getClass().getName(),
                sourcePath));
        String sourceLocation = Path.getPathWithoutSchemeAndAuthority(sourcePath).toString();
        FileSystem sourceFs = sourcePath.getFileSystem(new Configuration());
        source.addMetadata(DatasetConstants.FS_SCHEME, sourceFs.getScheme());
        source.addMetadata(DatasetConstants.FS_LOCATION, sourceLocation);
        return source;
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
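
Note that the filesystem scheme is recorded separately under DatasetConstants.FS_SCHEME, which is why FS_LOCATION can hold the scheme-less location.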

From source file: org.apache.gobblin.data.management.copy.CopyableFile.java

License: Apache License

/**
 * Set file system based source and destination dataset for this {@link CopyableFile}
 *
 * @param originFs {@link FileSystem} where this {@link CopyableFile} originates
 * @param targetFs {@link FileSystem} where this {@link CopyableFile} is copied to
 */
public void setFsDatasets(FileSystem originFs, FileSystem targetFs) {
    /*
     * By default, the raw Gobblin dataset for CopyableFile lineage is the file's parent folder;
     * if the file is itself a folder, the dataset is the folder itself.
     */
    boolean isDir = origin.isDirectory();

    Path fullSourcePath = Path.getPathWithoutSchemeAndAuthority(origin.getPath());
    String sourceDatasetName = isDir ? fullSourcePath.toString() : fullSourcePath.getParent().toString();
    DatasetDescriptor sourceDataset = new DatasetDescriptor(originFs.getScheme(), sourceDatasetName);
    sourceDataset.addMetadata(DatasetConstants.FS_URI, originFs.getUri().toString());
    sourceData = sourceDataset;

    Path fullDestinationPath = Path.getPathWithoutSchemeAndAuthority(destination);
    String destinationDatasetName = isDir ? fullDestinationPath.toString()
            : fullDestinationPath.getParent().toString();
    DatasetDescriptor destinationDataset = new DatasetDescriptor(targetFs.getScheme(), destinationDatasetName);
    destinationDataset.addMetadata(DatasetConstants.FS_URI, targetFs.getUri().toString());
    destinationData = destinationDataset;
}
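
As with the source dataset, the destination dataset name is the scheme-less path (or its parent when copying a single file), while the owning filesystem is captured separately through the FS_URI metadata.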