List of usage examples for org.apache.hadoop.fs.Path#depth()
public int depth()
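Before the examples, a minimal sketch of what depth() itself reports; the sample paths are invented for illustration, and the expected values assume the usual behavior that the root path has depth 0 and each name component adds one:

import org.apache.hadoop.fs.Path;

public class PathDepthDemo {
    public static void main(String[] args) {
        // depth() counts name components; the root path "/" counts as 0
        System.out.println(new Path("/").depth());                        // 0
        System.out.println(new Path("/user").depth());                    // 1
        System.out.println(new Path("/user/hive/warehouse").depth());     // 3
        // scheme and authority do not add to the depth
        System.out.println(new Path("hdfs://namenode:8020/a/b").depth()); // 2
    }
}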
From source file:cascading.tap.hadoop.Hfs.java
License:Open Source License
private boolean deleteFullIdentifier(Configuration conf, String fullIdentifier) throws IOException {
    if (LOG.isDebugEnabled())
        LOG.debug("deleting: {}", fullIdentifier);

    Path fullPath = new Path(fullIdentifier);

    // do not delete the root directory
    if (fullPath.depth() == 0)
        return true;

    FileSystem fileSystem = getFileSystem(conf);

    try {
        return fileSystem.delete(fullPath, true);
    } catch (NullPointerException exception) {
        // hack to get around npe thrown when fs reaches root directory
        // removes coupling to the new aws hadoop artifacts that may not be deployed
        if (!(fileSystem.getClass().getSimpleName().equals("NativeS3FileSystem")))
            throw exception;
    }

    return true;
}
From source file:com.aliyun.odps.volume.VolumeFSUtil.java
License:Apache License
/**
 * Get volume name from a specific {@link Path}
 *
 * @param path
 * @throws VolumeException
 */
public static String getVolumeFromPath(Path path) throws VolumeException {
    path = Path.getPathWithoutSchemeAndAuthority(path);
    if (path.depth() == 0) {
        throw new VolumeException(VolumeFSErrorCode.VolumeMissing, "No volume found!");
    } else {
        String p = path.toUri().getPath();
        String volume = p.split(VolumeFSConstants.SEPARATOR)[1];
        return volume;
    }
}
From source file:com.asakusafw.runtime.directio.hadoop.HadoopDataSourceUtil.java
License:Apache License
/**
 * Returns whether the parent path contains the child path, or not.
 * If the parent and child is same, this returns {@code false}.
 * @param parent the parent path
 * @param child the child path
 * @return {@code true} if parent path strictly contains the child, otherwise {@code false}
 * @throws IllegalArgumentException if some parameters were {@code null}
 */
public static boolean contains(Path parent, Path child) {
    if (parent == null) {
        throw new IllegalArgumentException("parent must not be null"); //$NON-NLS-1$
    }
    if (child == null) {
        throw new IllegalArgumentException("child must not be null"); //$NON-NLS-1$
    }
    if (parent.depth() >= child.depth()) {
        return false;
    }
    URI parentUri = parent.toUri();
    URI childUri = child.toUri();
    URI relative = parentUri.relativize(childUri);
    if (relative.equals(childUri) == false) {
        return true;
    }
    return false;
}
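A short usage sketch of the contains(...) helper above; the paths are made up for illustration and assume the method is reachable as shown:

Path parent = new Path("/data/input");
// strictly contained child: true
System.out.println(HadoopDataSourceUtil.contains(parent, new Path("/data/input/2024/01")));
// the same path is not "strictly" contained: false
System.out.println(HadoopDataSourceUtil.contains(parent, new Path("/data/input")));
// sibling directory with a shared name prefix: false
System.out.println(HadoopDataSourceUtil.contains(parent, new Path("/data/inputs/file")));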
From source file:com.facebook.presto.hive.metastore.SemiTransactionalHiveMetastore.java
License:Apache License
private static boolean isSameOrParent(Path parent, Path child) {
    int parentDepth = parent.depth();
    int childDepth = child.depth();
    if (parentDepth > childDepth) {
        return false;
    }
    for (int i = childDepth; i > parentDepth; i--) {
        child = child.getParent();
    }
    return parent.equals(child);
}
From source file:com.inmobi.conduit.distcp.DistcpBaseService.java
License:Apache License
protected String getTopicNameFromDestnPath(Path destnPath) {
    String destnPathAsString = destnPath.toString();
    String destnDirAsString = new Path(destCluster.getFinalDestDirRoot()).toString();
    String pathWithoutRoot = destnPathAsString.substring(destnDirAsString.length());
    Path tmpPath = new Path(pathWithoutRoot);
    while (tmpPath.depth() != 1)
        tmpPath = tmpPath.getParent();
    return tmpPath.getName();
}
From source file:com.inmobi.conduit.distcp.MergedStreamService.java
License:Apache License
private boolean isValidYYMMDDHHMMPath(Path prefix, Path path) {
    if (path.depth() < prefix.depth() + 5)
        return false;
    return true;
}
From source file:com.inmobi.conduit.distcp.tools.mapred.CopyMapper.java
License:Apache License
/**
 * Implementation of the Mapper<>::map(). Does the copy.
 * @param relPath: The target path.
 * @param sourceFileStatus: The source path.
 * @throws IOException
 */
@Override
public void map(Text relPath, FileStatus sourceFileStatus, Context context)
        throws IOException, InterruptedException {
    Path sourcePath = sourceFileStatus.getPath();
    Map<Long, Long> received = null;
    if (context.getConfiguration().getBoolean(ConduitConstants.AUDIT_ENABLED_KEY, true)) {
        received = new HashMap<Long, Long>();
    }

    if (LOG.isDebugEnabled())
        LOG.debug("DistCpMapper::map(): Received " + sourcePath + ", " + relPath);

    Path target = new Path(targetWorkPath.makeQualified(targetFS) + relPath.toString());

    EnumSet<DistCpOptions.FileAttribute> fileAttributes = getFileAttributeSettings(context);

    final String description = "Copying " + sourcePath + " to " + target;
    context.setStatus(description);
    LOG.info(description);

    try {
        FileStatus sourceCurrStatus;
        FileSystem sourceFS;
        try {
            sourceFS = sourcePath.getFileSystem(conf);
            sourceCurrStatus = sourceFS.getFileStatus(sourcePath);
        } catch (FileNotFoundException e) {
            throw new IOException(new RetriableFileCopyCommand.CopyReadException(e));
        }

        FileStatus targetStatus = null;
        try {
            targetStatus = targetFS.getFileStatus(target);
        } catch (FileNotFoundException ignore) {
        }

        if (targetStatus != null && (targetStatus.isDir() != sourceCurrStatus.isDir())) {
            throw new IOException("Can't replace " + target + ". Target is " + getFileType(targetStatus)
                    + ", Source is " + getFileType(sourceCurrStatus));
        }

        if (sourceCurrStatus.isDir()) {
            createTargetDirsWithRetry(description, target, context);
            return;
        }

        if (skipFile(sourceFS, sourceCurrStatus, target)) {
            LOG.info("Skipping copy of " + sourceCurrStatus.getPath() + " to " + target);
            updateSkipCounters(context, sourceCurrStatus);
        } else {
            String streamName = null;
            if (!relPath.toString().isEmpty()) {
                Path relativePath = new Path(relPath.toString());
                if (relativePath.depth() > 2) {
                    // path is for mirror service and is of format
                    // /conduit/streams/<streamName>/2013/09/12
                    Path tmpPath = relativePath;
                    while (tmpPath.getParent() != null
                            && !tmpPath.getParent().getName().equals("streams")) {
                        tmpPath = tmpPath.getParent();
                    }
                    streamName = tmpPath.getName();
                } else {
                    // path is for merge service and of form /<stream name>/filename.gz
                    streamName = relativePath.getParent().getName();
                }
            }
            copyFileWithRetry(description, sourceCurrStatus, target, context, fileAttributes, received);
            // generate audit counters
            if (received != null) {
                for (Entry<Long, Long> entry : received.entrySet()) {
                    String counterNameValue = getCounterNameValue(streamName, sourcePath.getName(),
                            entry.getKey(), entry.getValue());
                    context.write(NullWritable.get(), new Text(counterNameValue));
                }
            }
        }

        DistCpUtils.preserve(target.getFileSystem(conf), target, sourceCurrStatus, fileAttributes);
    } catch (IOException exception) {
        handleFailures(exception, sourceFileStatus, target, context);
    }
}
From source file:com.inmobi.conduit.local.LocalStreamService.java
License:Apache License
public String getTopicNameFromDestnPath(Path destnPath) {
    String destnPathAsString = destnPath.toString();
    String destnDirAsString = new Path(srcCluster.getLocalFinalDestDirRoot()).toString();
    String pathWithoutRoot = destnPathAsString.substring(destnDirAsString.length());
    Path tmpPath = new Path(pathWithoutRoot);
    while (tmpPath.depth() != 1)
        tmpPath = tmpPath.getParent();
    return tmpPath.getName();
}
From source file:com.uber.hoodie.common.model.HoodiePartitionMetadata.java
License:Apache License
/**
 * Construct metadata object to be written out.
 */
public HoodiePartitionMetadata(FileSystem fs, String commitTime, Path basePath, Path partitionPath) {
    this(fs, partitionPath);
    props.setProperty(COMMIT_TIME_KEY, commitTime);
    props.setProperty(PARTITION_DEPTH_KEY, String.valueOf(partitionPath.depth() - basePath.depth()));
}
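The stored partition depth is simply the difference of the two depth() values; a small hypothetical example (paths invented for illustration):

Path basePath = new Path("/data/hoodie/trips");                    // depth() == 3
Path partitionPath = new Path("/data/hoodie/trips/2016/05/01");    // depth() == 6
int partitionDepth = partitionPath.depth() - basePath.depth();     // 3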
From source file:common.NameNode.java
License:Apache License
/**
 * Check path length does not exceed maximum. Returns true if
 * length and depth are okay. Returns false if length is too long
 * or depth is too great.
 */
private boolean checkPathLength(String src) {
    Path srcPath = new Path(src);
    return (src.length() <= MAX_PATH_LENGTH && srcPath.depth() <= MAX_PATH_DEPTH);
}