List of usage examples for org.apache.hadoop.fs.permission.FsPermission#valueOf
Signature: public static FsPermission valueOf(String unixSymbolicPermission)
From source file: ParascaleFileStatus.java
License: Apache License
void loadPermissionInfo() { if (permissionLoaded.get()) { return;/*from ww w . jav a 2 s . com*/ } IOException e = null; try { final StringTokenizer t = new StringTokenizer(getPermissionString()); // expected format // -rw------- 1 username groupname ... String permission = t.nextToken(); if (permission.length() > 10) { permission = permission.substring(0, 10); } setPermission(FsPermission.valueOf(permission)); t.nextToken(); setOwner(t.nextToken()); setGroup(t.nextToken()); } catch (final Shell.ExitCodeException ioe) { if (ioe.getExitCode() != 1) { e = ioe; } else { setPermission(null); setOwner(null); setGroup(null); } } catch (final IOException ioe) { e = ioe; } finally { if (e != null) { throw new RuntimeException("Error while running command to get " + "file permissions : " + StringUtils.stringifyException(e)); } permissionLoaded.set(true); } }
From source file: com.cloudera.hoop.client.fs.HoopFileSystem.java
License: Open Source License
/** * Creates a <code>FileStatus</code> object using a JSON file-status payload * received from a Hoop server./* w ww. j ava 2 s .c o m*/ * * @param json a JSON file-status payload received from a Hoop server * @return the corresponding <code>FileStatus</code> */ private FileStatus createFileStatus(JSONObject json) { Path path = new Path((String) json.get("path")); boolean isDir = (Boolean) json.get("isDir"); long len = (Long) json.get("len"); String owner = (String) json.get("owner"); String group = (String) json.get("group"); FsPermission permission = FsPermission.valueOf((String) json.get("permission")); long aTime = (Long) json.get("accessTime"); long mTime = (Long) json.get("modificationTime"); long blockSize = (Long) json.get("blockSize"); short replication = (short) (long) (Long) json.get("replication"); return new FileStatus(len, isDir, replication, blockSize, mTime, aTime, permission, owner, group, path); }
From source file: com.cloudera.hoop.fs.FSUtils.java
License: Open Source License
/** * Converts a Unix permission symbolic representation * (i.e. -rwxr--r--) into a Hadoop permission. * * @param str Unix permission symbolic representation. * @return the Hadoop permission. If the given string was * 'default', it returns <code>FsPermission.getDefault()</code>. *///w ww . j av a2s. com public static FsPermission getPermission(String str) { FsPermission permission; if (str.equals(DEFAULT_PERMISSION)) { permission = FsPermission.getDefault(); } else { //TODO: there is something funky here, it does not detect 'x' permission = FsPermission.valueOf(str); } return permission; }
From source file: com.cloudera.recordbreaker.analyzer.FileSummaryData.java
License: Open Source License
public FileSummaryData(FSAnalyzer fsAnalyzer, boolean isDir, long fid, long crawlid, String fname, String owner, String group, String permissions, long size, String lastModified, String path) { this.fsAnalyzer = fsAnalyzer; this.isDir = isDir; this.fid = fid; this.crawlid = crawlid; this.fname = fname; this.owner = owner; this.group = group; this.permissions = FsPermission.valueOf(permissions); this.size = size; this.lastModified = lastModified; this.path = path; this.dd = null; }
From source file: com.indeed.imhotep.builder.tsv.TsvConverter.java
License: Apache License
private static void makeWorldWritable(FileSystem fs, Path path) throws IOException { fs.setPermission(path, FsPermission.valueOf("-rwxrwxrwx")); }
From source file: com.indeed.imhotep.iql.cache.HDFSQueryCache.java
License: Apache License
private void makeSurePathExists(Path path) throws IOException { if (!hdfs.exists(path)) { hdfs.mkdirs(cachePath);/*from w w w . ja v a 2 s. c om*/ if (cacheDirWorldWritable) { hdfs.setPermission(path, FsPermission.valueOf("-rwxrwxrwx")); } } }
From source file: com.streamsets.pipeline.stage.destination.hdfs.TestHDFSTargetWholeFile.java
License: Apache License
@Test public void testWholeFilePermission() throws Exception { java.nio.file.Path filePath1 = Paths.get(getTestDir() + "/source_testWholeFilePermissionFiles1.txt"); java.nio.file.Path filePath2 = Paths.get(getTestDir() + "/source_testWholeFilePermissionFiles2.txt"); java.nio.file.Path filePath3 = Paths.get(getTestDir() + "/source_testWholeFilePermissionFiles3.txt"); Files.write(filePath1, "This is a sample file 1 with some text".getBytes()); Files.write(filePath2, "This is a sample file 2 with some text".getBytes()); Files.write(filePath3, "This is a sample file 3 with some text".getBytes()); HdfsTarget hdfsTarget = HdfsTargetUtil.newBuilder().hdfsUri(uri.toString()).dirPathTemplate(getTestDir()) .timeDriver("${time:now()}").dataForamt(DataFormat.WHOLE_FILE).fileType(HdfsFileType.WHOLE_FILE) .fileNameEL("${record:value('/fileInfo/filename')}").maxRecordsPerFile(1).maxFileSize(0) .uniquePrefix("sdc-").idleTimeout("-1").permissionEL("${record:value('/fileInfo/permissions')}") .lateRecordsAction(LateRecordsAction.SEND_TO_LATE_RECORDS_FILE).build(); TargetRunner runner = new TargetRunner.Builder(HdfsDTarget.class, hdfsTarget) .setOnRecordError(OnRecordError.STOP_PIPELINE).build(); runner.runInit();/*w w w. 
j a v a 2s .c om*/ try { runner.runWrite(Arrays.asList(getFileRefRecordForFile(filePath1, "755"), //posix style getFileRefRecordForFile(filePath2, "rwxr--r--"), //unix style getFileRefRecordForFile(filePath3, "-rw-rw----"))); org.apache.hadoop.fs.Path targetPath1 = new org.apache.hadoop.fs.Path( getTestDir() + "/sdc-" + filePath1.getFileName()); org.apache.hadoop.fs.Path targetPath2 = new org.apache.hadoop.fs.Path( getTestDir() + "/sdc-" + filePath2.getFileName()); org.apache.hadoop.fs.Path targetPath3 = new org.apache.hadoop.fs.Path( getTestDir() + "/sdc-" + filePath3.getFileName()); FileSystem fs = FileSystem.get(uri, new HdfsConfiguration()); Assert.assertTrue(fs.exists(targetPath1)); Assert.assertTrue(fs.exists(targetPath2)); Assert.assertTrue(fs.exists(targetPath3)); FsPermission actual1 = fs.listStatus(targetPath1)[0].getPermission(); FsPermission actual2 = fs.listStatus(targetPath2)[0].getPermission(); FsPermission actual3 = fs.listStatus(targetPath3)[0].getPermission(); FsPermission expected1 = new FsPermission("755"); FsPermission expected2 = FsPermission.valueOf("-rwxr--r--"); FsPermission expected3 = FsPermission.valueOf("-rw-rw----"); Assert.assertEquals(expected1, actual1); Assert.assertEquals(expected2, actual2); Assert.assertEquals(expected3, actual3); } finally { runner.runDestroy(); } }
From source file: com.streamsets.pipeline.stage.destination.hdfs.util.HdfsUtils.java
License: Apache License
/** * Parse String representation of permissions into HDFS FsPermission class. * * This method accepts the following formats: * * Octal like '777' or '770'/* w ww.j a v a2s. co m*/ * * HDFS style changes like 'a-rwx' * * Unix style write up with 9 characters like 'rwxrwx---' * * @param permissions String representing the permissions * @return Parsed FsPermission object */ public static FsPermission parseFsPermission(String permissions) throws IllegalArgumentException { try { // Octal or symbolic representation return new FsPermission(permissions); } catch (IllegalArgumentException e) { // FsPermission.valueOf will work with unix style permissions which is 10 characters // where the first character says the type of file if (permissions.length() == 9) { // This means it is a posix standard without the first character for file type // We will simply set it to '-' suggesting regular file permissions = "-" + permissions; } // Try to parse unix style format. return FsPermission.valueOf(permissions); } }
From source file: datafu.hourglass.jobs.StagedOutputJob.java
License: Apache License
/** * Writes Hadoop counters and other task statistics to a file in the file system. * //from ww w .ja va2 s .c o m * @param fs * @throws IOException */ private void writeCounters(final FileSystem fs) throws IOException { final Path actualOutputPath = FileOutputFormat.getOutputPath(this); SimpleDateFormat timestampFormat = new SimpleDateFormat("yyyyMMddHHmmss"); String suffix = timestampFormat.format(new Date()); if (_countersParentPath != null) { if (!fs.exists(_countersParentPath)) { _log.info("Creating counter parent path " + _countersParentPath); fs.mkdirs(_countersParentPath, FsPermission.valueOf("-rwxrwxr-x")); } // make the name as unique as possible in this case because this may be a directory // where other counter files will be dropped _countersPath = new Path(_countersParentPath, ".counters." + suffix); } else { _countersPath = new Path(actualOutputPath, ".counters." + suffix); } _log.info(String.format("Writing counters to %s", _countersPath)); FSDataOutputStream counterStream = fs.create(_countersPath); BufferedOutputStream buffer = new BufferedOutputStream(counterStream, 256 * 1024); OutputStreamWriter writer = new OutputStreamWriter(buffer); for (String groupName : getCounters().getGroupNames()) { for (Counter counter : getCounters().getGroup(groupName)) { writeAndLog(writer, String.format("%s=%d", counter.getName(), counter.getValue())); } } JobID jobID = this.getJobID(); org.apache.hadoop.mapred.JobID oldJobId = new org.apache.hadoop.mapred.JobID(jobID.getJtIdentifier(), jobID.getId()); long minStart = Long.MAX_VALUE; long maxFinish = 0; long setupStart = Long.MAX_VALUE; long cleanupFinish = 0; DescriptiveStatistics mapStats = new DescriptiveStatistics(); DescriptiveStatistics reduceStats = new DescriptiveStatistics(); boolean success = true; JobClient jobClient = new JobClient(this.conf); Map<String, String> taskIdToType = new HashMap<String, String>(); TaskReport[] setupReports = jobClient.getSetupTaskReports(oldJobId); if (setupReports.length > 
0) { _log.info("Processing setup reports"); for (TaskReport report : jobClient.getSetupTaskReports(oldJobId)) { taskIdToType.put(report.getTaskID().toString(), "SETUP"); if (report.getStartTime() == 0) { _log.warn("Skipping report with zero start time"); continue; } setupStart = Math.min(setupStart, report.getStartTime()); } } else { _log.error("No setup reports"); } TaskReport[] mapReports = jobClient.getMapTaskReports(oldJobId); if (mapReports.length > 0) { _log.info("Processing map reports"); for (TaskReport report : mapReports) { taskIdToType.put(report.getTaskID().toString(), "MAP"); if (report.getFinishTime() == 0 || report.getStartTime() == 0) { _log.warn("Skipping report with zero start or finish time"); continue; } minStart = Math.min(minStart, report.getStartTime()); mapStats.addValue(report.getFinishTime() - report.getStartTime()); } } else { _log.error("No map reports"); } TaskReport[] reduceReports = jobClient.getReduceTaskReports(oldJobId); if (reduceReports.length > 0) { _log.info("Processing reduce reports"); for (TaskReport report : reduceReports) { taskIdToType.put(report.getTaskID().toString(), "REDUCE"); if (report.getFinishTime() == 0 || report.getStartTime() == 0) { _log.warn("Skipping report with zero start or finish time"); continue; } maxFinish = Math.max(maxFinish, report.getFinishTime()); reduceStats.addValue(report.getFinishTime() - report.getStartTime()); } } else { _log.error("No reduce reports"); } TaskReport[] cleanupReports = jobClient.getCleanupTaskReports(oldJobId); if (cleanupReports.length > 0) { _log.info("Processing cleanup reports"); for (TaskReport report : cleanupReports) { taskIdToType.put(report.getTaskID().toString(), "CLEANUP"); if (report.getFinishTime() == 0) { _log.warn("Skipping report with finish time of zero"); continue; } cleanupFinish = Math.max(cleanupFinish, report.getFinishTime()); } } else { _log.error("No cleanup reports"); } if (minStart == Long.MAX_VALUE) { _log.error("Could not determine map-reduce start 
time"); success = false; } if (maxFinish == 0) { _log.error("Could not determine map-reduce finish time"); success = false; } if (setupStart == Long.MAX_VALUE) { _log.error("Could not determine setup start time"); success = false; } if (cleanupFinish == 0) { _log.error("Could not determine cleanup finish time"); success = false; } // Collect statistics on successful/failed/killed task attempts, categorized by setup/map/reduce/cleanup. // Unfortunately the job client doesn't have an easier way to get these statistics. Map<String, Integer> attemptStats = new HashMap<String, Integer>(); _log.info("Processing task attempts"); for (TaskCompletionEvent event : getTaskCompletionEvents(jobClient, oldJobId)) { String type = taskIdToType.get(event.getTaskAttemptId().getTaskID().toString()); String status = event.getTaskStatus().toString(); String key = String.format("%s_%s_ATTEMPTS", status, type); if (!attemptStats.containsKey(key)) { attemptStats.put(key, 0); } attemptStats.put(key, attemptStats.get(key) + 1); } if (success) { writeAndLog(writer, String.format("SETUP_START_TIME_MS=%d", setupStart)); writeAndLog(writer, String.format("CLEANUP_FINISH_TIME_MS=%d", cleanupFinish)); writeAndLog(writer, String.format("COMPLETE_WALL_CLOCK_TIME_MS=%d", cleanupFinish - setupStart)); writeAndLog(writer, String.format("MAP_REDUCE_START_TIME_MS=%d", minStart)); writeAndLog(writer, String.format("MAP_REDUCE_FINISH_TIME_MS=%d", maxFinish)); writeAndLog(writer, String.format("MAP_REDUCE_WALL_CLOCK_TIME_MS=%d", maxFinish - minStart)); writeAndLog(writer, String.format("MAP_TOTAL_TASKS=%d", (long) mapStats.getN())); writeAndLog(writer, String.format("MAP_MAX_TIME_MS=%d", (long) mapStats.getMax())); writeAndLog(writer, String.format("MAP_MIN_TIME_MS=%d", (long) mapStats.getMin())); writeAndLog(writer, String.format("MAP_AVG_TIME_MS=%d", (long) mapStats.getMean())); writeAndLog(writer, String.format("MAP_STD_TIME_MS=%d", (long) mapStats.getStandardDeviation())); writeAndLog(writer, 
String.format("MAP_SUM_TIME_MS=%d", (long) mapStats.getSum())); writeAndLog(writer, String.format("REDUCE_TOTAL_TASKS=%d", (long) reduceStats.getN())); writeAndLog(writer, String.format("REDUCE_MAX_TIME_MS=%d", (long) reduceStats.getMax())); writeAndLog(writer, String.format("REDUCE_MIN_TIME_MS=%d", (long) reduceStats.getMin())); writeAndLog(writer, String.format("REDUCE_AVG_TIME_MS=%d", (long) reduceStats.getMean())); writeAndLog(writer, String.format("REDUCE_STD_TIME_MS=%d", (long) reduceStats.getStandardDeviation())); writeAndLog(writer, String.format("REDUCE_SUM_TIME_MS=%d", (long) reduceStats.getSum())); writeAndLog(writer, String.format("MAP_REDUCE_SUM_TIME_MS=%d", (long) mapStats.getSum() + (long) reduceStats.getSum())); for (Map.Entry<String, Integer> attemptStat : attemptStats.entrySet()) { writeAndLog(writer, String.format("%s=%d", attemptStat.getKey(), attemptStat.getValue())); } } writer.close(); buffer.close(); counterStream.close(); }
From source file: etl.cmd.test.XFsTestCase.java
License: Open Source License
private Path initFileSystem(FileSystem fs) throws Exception { Path path = new Path(fs.getWorkingDirectory(), java.util.UUID.randomUUID().toString()); Path testDirInFs = fs.makeQualified(path); System.out.println(XLog.format("Setting FS testcase work dir[{0}]", testDirInFs)); if (fs.exists(testDirInFs)) { setAllPermissions(fs, testDirInFs); }/*from w w w . j a v a 2s.c om*/ fs.delete(testDirInFs, true); if (!fs.mkdirs(path)) { throw new IOException(XLog.format("Could not create FS testcase dir [{0}]", testDirInFs)); } fs.setOwner(testDirInFs, getTestUser(), getTestGroup()); fs.setPermission(testDirInFs, FsPermission.valueOf("-rwxrwx--x")); return testDirInFs; }