List of usage examples for org.apache.hadoop.fs.permission.FsPermission#getFileDefault()
public static FsPermission getFileDefault()
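In Hadoop's FsPermission, getFileDefault() returns the 0666 (rw-rw-rw-) permission used when a caller supplies none, and getDirDefault() returns 0777 (rwxrwxrwx); file systems normally mask these with the configured umask (fs.permissions.umask-mode, 022 by default). A minimal standalone sketch, with the class name chosen for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;

public class FsPermissionDefaultsDemo {
    public static void main(String[] args) {
        FsPermission fileDefault = FsPermission.getFileDefault(); // rw-rw-rw- (0666)
        FsPermission dirDefault = FsPermission.getDirDefault();   // rwxrwxrwx (0777)

        // File systems typically apply the configured umask on top of the default.
        Configuration conf = new Configuration();
        FsPermission umask = FsPermission.getUMask(conf);
        System.out.println("file default: " + fileDefault);
        System.out.println("dir default : " + dirDefault);
        System.out.println("after umask : " + fileDefault.applyUMask(umask)); // rw-r--r-- with umask 022
    }
}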
From source file: com.ibm.crail.hdfs.CrailHadoopFileSystem.java
License: Apache License
@Override
public FileStatus[] listStatus(Path path) throws FileNotFoundException, IOException {
    try {
        CrailNode node = dfs.lookup(path.toUri().getRawPath()).get();
        Iterator<String> iter = node.getType() == CrailNodeType.DIRECTORY ? node.asDirectory().listEntries()
                : node.asMultiFile().listEntries();
        ArrayList<FileStatus> statusList = new ArrayList<FileStatus>();
        while (iter.hasNext()) {
            String filepath = iter.next();
            CrailNode directFile = dfs.lookup(filepath).get();
            if (directFile != null) {
                // Files get the default file permission; directories get the directory default.
                FsPermission permission = FsPermission.getFileDefault();
                if (directFile.getType().isDirectory()) {
                    permission = FsPermission.getDirDefault();
                }
                FileStatus status = new FileStatus(directFile.getCapacity(),
                        directFile.getType().isContainer(), CrailConstants.SHADOW_REPLICATION,
                        CrailConstants.BLOCK_SIZE, directFile.getModificationTime(),
                        directFile.getModificationTime(), permission, CrailConstants.USER,
                        CrailConstants.USER, new Path(filepath).makeQualified(this.getUri(), this.workingDir));
                statusList.add(status);
            }
        }
        FileStatus[] list = new FileStatus[statusList.size()];
        statusList.toArray(list);
        return list;
    } catch (Exception e) {
        throw new FileNotFoundException(path.toUri().getRawPath());
    }
}
From source file: com.ibm.crail.hdfs.CrailHadoopFileSystem.java
License: Apache License
@Override
public FileStatus getFileStatus(Path path) throws IOException {
    CrailNode directFile = null;
    try {
        directFile = dfs.lookup(path.toUri().getRawPath()).get();
    } catch (Exception e) {
        throw new IOException(e);
    }
    if (directFile == null) {
        throw new FileNotFoundException("File does not exist: " + path);
    }
    // Files get the default file permission; directories get the directory default.
    FsPermission permission = FsPermission.getFileDefault();
    if (directFile.getType().isDirectory()) {
        permission = FsPermission.getDirDefault();
    }
    FileStatus status = new FileStatus(directFile.getCapacity(), directFile.getType().isContainer(),
            CrailConstants.SHADOW_REPLICATION, CrailConstants.BLOCK_SIZE, directFile.getModificationTime(),
            directFile.getModificationTime(), permission, CrailConstants.USER, CrailConstants.USER,
            path.makeQualified(this.getUri(), this.workingDir));
    return status;
}
From source file: com.ibm.crail.hdfs.CrailHDFS.java
License: Apache License
@Override
public FileStatus getFileStatus(Path path)
        throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException {
    CrailNode directFile = null;
    try {
        directFile = dfs.lookup(path.toUri().getRawPath()).get();
    } catch (Exception e) {
        throw new IOException(e);
    }
    if (directFile == null) {
        throw new FileNotFoundException("filename " + path);
    }
    FsPermission permission = FsPermission.getFileDefault();
    if (directFile.getType().isDirectory()) {
        permission = FsPermission.getDirDefault();
    }
    FileStatus status = new FileStatus(directFile.getCapacity(), directFile.getType().isContainer(),
            CrailConstants.SHADOW_REPLICATION, CrailConstants.BLOCK_SIZE, directFile.getModificationTime(),
            directFile.getModificationTime(), permission, CrailConstants.USER, CrailConstants.USER,
            path.makeQualified(this.getUri(), this.workingDir));
    return status;
}
From source file: com.ibm.crail.hdfs.CrailHDFS.java
License: Apache License
@Override
public FileStatus[] listStatus(Path path)
        throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException {
    try {
        CrailNode node = dfs.lookup(path.toUri().getRawPath()).get();
        Iterator<String> iter = node.getType() == CrailNodeType.DIRECTORY ? node.asDirectory().listEntries()
                : node.asMultiFile().listEntries();
        ArrayList<FileStatus> statusList = new ArrayList<FileStatus>();
        while (iter.hasNext()) {
            String filepath = iter.next();
            CrailNode directFile = dfs.lookup(filepath).get();
            if (directFile != null) {
                FsPermission permission = FsPermission.getFileDefault();
                if (directFile.getType().isDirectory()) {
                    permission = FsPermission.getDirDefault();
                }
                FileStatus status = new FileStatus(directFile.getCapacity(),
                        directFile.getType().isContainer(), CrailConstants.SHADOW_REPLICATION,
                        CrailConstants.BLOCK_SIZE, directFile.getModificationTime(),
                        directFile.getModificationTime(), permission, CrailConstants.USER,
                        CrailConstants.USER, new Path(filepath).makeQualified(this.getUri(), workingDir));
                statusList.add(status);
            }
        }
        FileStatus[] list = new FileStatus[statusList.size()];
        statusList.toArray(list);
        return list;
    } catch (Exception e) {
        throw new FileNotFoundException(path.toUri().getRawPath());
    }
}
From source file: com.mellanox.r4h.DFSClient.java
License: Apache License
/**
 * Call {@link #create(String, FsPermission, EnumSet, short, long, Progressable, int, ChecksumOpt)}
 * with default <code>permission</code> {@link FsPermission#getFileDefault()}.
 *
 * @param src File name
 * @param overwrite overwrite an existing file if true
 * @param replication replication factor for the file
 * @param blockSize maximum block size
 * @param progress interface for reporting client progress
 * @param buffersize underlying buffer size
 *
 * @return output stream
 */
public OutputStream create(String src, boolean overwrite, short replication, long blockSize,
        Progressable progress, int buffersize) throws IOException {
    return create(src, FsPermission.getFileDefault(),
            overwrite ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE) : EnumSet.of(CreateFlag.CREATE),
            replication, blockSize, progress, buffersize, null);
}
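A hypothetical call site for this convenience overload, assuming an open DFSClient instance named client (the path, replication factor, and buffer size are illustrative):

// Creates /tmp/example with FsPermission.getFileDefault() (masked by the
// client's umask), overwriting any existing file; no progress callback.
OutputStream out = client.create("/tmp/example", true, (short) 3,
        128L * 1024 * 1024, null, 4096);
out.write(new byte[] { 1, 2, 3 });
out.close();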
From source file: com.mellanox.r4h.DFSClient.java
License: Apache License
/**
 * Same as {@link #create(String, FsPermission, EnumSet, boolean, short, long, Progressable, int, ChecksumOpt)}
 * with the addition of favoredNodes, a hint about where the namenode should place the file blocks.
 * The favored-nodes hint is not persisted in HDFS, so it may be honored at creation time only;
 * HDFS may later move the blocks away from the favored nodes during balancing or replication.
 * A value of null means no favored nodes for this create.
 */
public DFSOutputStream create(String src, FsPermission permission, EnumSet<CreateFlag> flag,
        boolean createParent, short replication, long blockSize, Progressable progress, int buffersize,
        ChecksumOpt checksumOpt, InetSocketAddress[] favoredNodes) throws IOException {
    checkOpen();
    if (permission == null) {
        // Fall back to the default file permission when the caller supplies none.
        permission = FsPermission.getFileDefault();
    }
    FsPermission masked = permission.applyUMask(dfsClientConf.getuMask());
    if (LOG.isDebugEnabled()) {
        LOG.debug(src + ": masked=" + masked);
    }
    final DFSOutputStream result = DFSOutputStream.newStreamForCreate(this, src, masked, flag, createParent,
            replication, blockSize, progress, buffersize, dfsClientConf.createChecksum(checksumOpt),
            getFavoredNodesStr(favoredNodes));
    beginFileLease(result.getFileId(), result);
    return result;
}
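A hypothetical caller can pass null for both permission and favoredNodes to get the default-permission behavior shown above (the path and sizes are illustrative):

// null permission   => the method substitutes FsPermission.getFileDefault()
// null favoredNodes => no block-placement hint
DFSOutputStream out = client.create("/tmp/data", null, EnumSet.of(CreateFlag.CREATE),
        true, (short) 3, 128L * 1024 * 1024, null, 4096, null, null);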
From source file: net.arp7.HdfsPerfTest.WriteFile.java
License: Apache License
/**
 * Write a single file to HDFS.
 *
 * @param file
 * @param fs
 * @param data
 * @param stats object to accumulate write stats.
 * @throws IOException
 * @throws InterruptedException
 */
private static void writeOneFile(final Path file, final FileSystem fs, final byte[] data,
        final FileIoStats stats) throws IOException, InterruptedException {
    final long startTime = System.nanoTime();
    final EnumSet<CreateFlag> createFlags = EnumSet.of(CREATE, OVERWRITE);
    if (params.isLazyPersist()) {
        createFlags.add(LAZY_PERSIST);
    }
    LOG.info("Writing file " + file.toString());
    // Create the file with the default file permission (0666 before umask).
    final FSDataOutputStream os = fs.create(file, FsPermission.getFileDefault(), createFlags,
            Constants.BUFFER_SIZE, params.getReplication(), params.getBlockSize(), null);
    final long createEndTime = System.nanoTime();
    stats.addCreateTime(createEndTime - startTime);

    final boolean isThrottled = params.maxWriteBps() > 0;
    final long expectedIoTimeNs = isThrottled
            ? ((long) data.length * 1_000_000_000) / params.maxWriteBps()
            : 0;
    try {
        long lastLoggedPercent = 0;
        long writeStartTime = System.nanoTime();
        for (long j = 0; j < params.getFileSize() / params.getIoSize(); ++j) {
            final long ioStartTimeNs = isThrottled ? System.nanoTime() : 0;
            os.write(data, 0, data.length);
            if (params.isHsync()) {
                os.hsync();
            } else if (params.isHflush()) {
                os.hflush();
            }
            final long ioEndTimeNs = isThrottled ? System.nanoTime() : 0;
            Utils.enforceThrottle(ioEndTimeNs - ioStartTimeNs, expectedIoTimeNs);
            if (LOG.isDebugEnabled()) {
                long percentWritten = (j * params.getIoSize() * 100) / params.getFileSize();
                if (percentWritten > lastLoggedPercent) {
                    LOG.debug(" >> Wrote " + j * params.getIoSize() + "/" + params.getFileSize() + " ["
                            + percentWritten + "%]");
                    lastLoggedPercent = percentWritten;
                }
            }
        }
        final long writeEndTime = System.nanoTime();
        stats.addWriteTime(writeEndTime - writeStartTime);
        stats.incrFilesWritten();
        stats.incrBytesWritten(params.getFileSize());
    } finally {
        final long closeStartTime = System.nanoTime();
        os.close();
        final long closeEndTime = System.nanoTime();
        stats.addCloseTime(closeEndTime - closeStartTime);
    }
}
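The explicit FsPermission.getFileDefault() above matches what the shorter FileSystem#create overloads fall back to when no permission is given; a minimal equivalent sketch (the path is illustrative):

// The two-argument overload fills in FsPermission.getFileDefault()
// (masked by the configured umask) internally.
FSDataOutputStream out = fs.create(new Path("/bench/file0"), true /* overwrite */);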