List of usage examples for org.apache.hadoop.fs FileStatus getPath
public Path getPath()
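Before the per-project listings below, a minimal sketch of the typical getPath() idiom may help: list a directory with FileSystem.listStatus and read each entry's Path. The directory name /tmp/input and the default Configuration are illustrative assumptions, not taken from any of the projects below.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class GetPathExample {
    public static void main(String[] args) throws IOException {
        // Illustrative assumption: default configuration and an existing /tmp/input directory.
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // listStatus returns one FileStatus per child entry of the directory.
        for (FileStatus status : fs.listStatus(new Path("/tmp/input"))) {
            // getPath() returns the fully qualified Path of the entry;
            // getName() reduces it to the last path component.
            System.out.println(status.getPath() + " -> " + status.getPath().getName());
        }
    }
}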
From source file:acromusashi.stream.bolt.hdfs.HdfsPreProcessor.java
License:Open Source License
/**
 * Logs the list of files targeted for preprocessing.
 *
 * @param targetTmpFiles files targeted for preprocessing
 */
private static void printTargetPathList(FileStatus[] targetTmpFiles) {
    StringBuilder builder = new StringBuilder();
    builder.append("Preprocess target files:");
    String lineSeparator = System.getProperty("line.separator");
    for (FileStatus targetFile : targetTmpFiles) {
        builder.append(targetFile.getPath() + lineSeparator);
    }
    logger.info(builder.toString());
}
From source file:alluxio.client.hadoop.FileSystemAclIntegrationTest.java
License:Apache License
/**
 * Deletes files in the given filesystem.
 *
 * @param fs given filesystem
 */
public static void cleanup(org.apache.hadoop.fs.FileSystem fs) throws IOException {
    FileStatus[] statuses = fs.listStatus(new Path("/"));
    for (FileStatus f : statuses) {
        fs.delete(f.getPath(), true);
    }
}
From source file:alluxio.hadoop.AbstractFileSystem.java
License:Apache License
@Override
public BlockLocation[] getFileBlockLocations(FileStatus file, long start, long len) throws IOException {
    if (file == null) {
        return null;
    }
    if (mStatistics != null) {
        mStatistics.incrementReadOps(1);
    }
    AlluxioURI path = new AlluxioURI(HadoopUtils.getPathWithoutScheme(file.getPath()));
    List<FileBlockInfo> blocks = getFileBlocks(path);
    List<BlockLocation> blockLocations = new ArrayList<>();
    for (FileBlockInfo fileBlockInfo : blocks) {
        long offset = fileBlockInfo.getOffset();
        long end = offset + fileBlockInfo.getBlockInfo().getLength();
        // Check if there is any overlapping between [start, start+len] and [offset, end]
        if (end >= start && offset <= start + len) {
            ArrayList<String> names = new ArrayList<>();
            ArrayList<String> hosts = new ArrayList<>();
            // add the existing in-memory block locations
            for (alluxio.wire.BlockLocation location : fileBlockInfo.getBlockInfo().getLocations()) {
                HostAndPort address = HostAndPort.fromParts(location.getWorkerAddress().getHost(),
                        location.getWorkerAddress().getDataPort());
                names.add(address.toString());
                hosts.add(address.getHostText());
            }
            // add under file system locations
            for (String location : fileBlockInfo.getUfsLocations()) {
                names.add(location);
                hosts.add(HostAndPort.fromString(location).getHostText());
            }
            blockLocations.add(new BlockLocation(CommonUtils.toStringArray(names),
                    CommonUtils.toStringArray(hosts), offset, fileBlockInfo.getBlockInfo().getLength()));
        }
    }
    BlockLocation[] ret = new BlockLocation[blockLocations.size()];
    blockLocations.toArray(ret);
    return ret;
}
From source file:alluxio.hadoop.FileSystemAclIntegrationTest.java
License:Apache License
public static void cleanup(org.apache.hadoop.fs.FileSystem fs) throws IOException {
    FileStatus[] statuses = fs.listStatus(new Path("/"));
    for (FileStatus f : statuses) {
        fs.delete(f.getPath(), true);
    }
}
From source file:alluxio.hadoop.HadoopUtils.java
License:Apache License
/**
 * Returns a string representation of a Hadoop {@link FileStatus}.
 *
 * @param fs Hadoop {@link FileStatus}
 * @return its string representation
 */
public static String toStringHadoopFileStatus(FileStatus fs) {
    StringBuilder sb = new StringBuilder();
    sb.append("HadoopFileStatus: Path: ").append(fs.getPath());
    sb.append(" , Length: ").append(fs.getLen());
    // Use isDir instead of isDirectory for compatibility with hadoop 1.
    sb.append(" , IsDir: ").append(fs.isDir());
    sb.append(" , BlockReplication: ").append(fs.getReplication());
    sb.append(" , BlockSize: ").append(fs.getBlockSize());
    sb.append(" , ModificationTime: ").append(fs.getModificationTime());
    sb.append(" , AccessTime: ").append(fs.getAccessTime());
    sb.append(" , Permission: ").append(fs.getPermission());
    sb.append(" , Owner: ").append(fs.getOwner());
    sb.append(" , Group: ").append(fs.getGroup());
    return sb.toString();
}
From source file:alluxio.underfs.hdfs.HdfsUnderFileSystem.java
License:Apache License
@Override
public String[] list(String path) throws IOException {
    FileStatus[] files;
    try {
        files = mFileSystem.listStatus(new Path(path));
    } catch (FileNotFoundException e) {
        return null;
    }
    if (files != null && !isFile(path)) {
        String[] rtn = new String[files.length];
        int i = 0;
        for (FileStatus status : files) {
            // only return the relative path, to keep consistent with java.io.File.list()
            rtn[i++] = status.getPath().getName();
        }
        return rtn;
    } else {
        return null;
    }
}
From source file:alluxio.underfs.hdfs.HdfsUnderFileSystem.java
License:Apache License
@Override
public void setOwner(String path, String user, String group) throws IOException {
    try {
        FileStatus fileStatus = mFileSystem.getFileStatus(new Path(path));
        LOG.info("Changing file '{}' user from: {} to {}, group from: {} to {}", fileStatus.getPath(),
                fileStatus.getOwner(), user, fileStatus.getGroup(), group);
        mFileSystem.setOwner(fileStatus.getPath(), user, group);
    } catch (IOException e) {
        LOG.error("Fail to set owner for {} with user: {}, group: {}", path, user, group, e);
        LOG.warn("In order for Alluxio to create HDFS files with the correct user and groups, "
                + "Alluxio should be added to the HDFS superusers.");
        throw e;
    }
}
From source file:alluxio.underfs.hdfs.HdfsUnderFileSystem.java
License:Apache License
@Override
public void setMode(String path, short mode) throws IOException {
    try {
        FileStatus fileStatus = mFileSystem.getFileStatus(new Path(path));
        LOG.info("Changing file '{}' permissions from: {} to {}", fileStatus.getPath(),
                fileStatus.getPermission(), mode);
        mFileSystem.setPermission(fileStatus.getPath(), new FsPermission(mode));
    } catch (IOException e) {
        LOG.error("Fail to set permission for {} with perm {}", path, mode, e);
        throw e;
    }
}
From source file:at.illecker.hama.hybrid.examples.summation.SummationBSP.java
License:Apache License
static void printOutput(BSPJob job, BigDecimal sum) throws IOException {
    FileSystem fs = FileSystem.get(job.getConfiguration());
    FileStatus[] listStatus = fs.listStatus(FileOutputFormat.getOutputPath(job));
    for (FileStatus status : listStatus) {
        if (!status.isDir()) {
            try {
                SequenceFile.Reader reader = new SequenceFile.Reader(fs, status.getPath(),
                        job.getConfiguration());
                Text key = new Text();
                DoubleWritable value = new DoubleWritable();
                if (reader.next(key, value)) {
                    LOG.info("Output File: " + status.getPath());
                    LOG.info("key: '" + key + "' value: '" + value + "' expected: '" + sum.doubleValue() + "'");
                    Assert.assertEquals("Expected value: '" + sum.doubleValue() + "' != '" + value + "'",
                            sum.doubleValue(), value.get(), Math.pow(10, (DOUBLE_PRECISION * -1)));
                }
                reader.close();
            } catch (IOException e) {
                if (status.getLen() > 0) {
                    System.out.println("Output File " + status.getPath());
                    FSDataInputStream in = fs.open(status.getPath());
                    IOUtils.copyBytes(in, System.out, job.getConfiguration(), false);
                    in.close();
                }
            }
        }
    }
    // fs.delete(FileOutputFormat.getOutputPath(job), true);
}
From source file:azkaban.jobtype.hiveutils.azkaban.hive.actions.Utils.java
License:Apache License
static ArrayList<String> fetchDirectories(FileSystem fs, String location, boolean returnFullPath)
        throws IOException, HiveViaAzkabanException {
    LOG.info("Fetching directories in " + location);
    Path p = new Path(location);
    FileStatus[] statuses = fs.listStatus(p);
    if (statuses == null || statuses.length == 0) {
        throw new HiveViaAzkabanException("Couldn't find any directories in " + location);
    }
    ArrayList<String> files = new ArrayList<String>(statuses.length);
    for (FileStatus status : statuses) {
        if (!status.isDir()) {
            continue;
        }
        if (status.getPath().getName().startsWith(".")) {
            continue;
        }
        files.add(returnFullPath ? status.getPath().toString() : status.getPath().getName());
    }
    return files;
}