List of usage examples for org.apache.hadoop.fs.Path#getName()
public String getName()
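Path.getName() returns the final component of the path, with all parent directories stripped. A minimal sketch of that behavior (the path below is hypothetical):

import org.apache.hadoop.fs.Path;

public class PathGetNameExample {
    public static void main(String[] args) {
        // getName() keeps only the last path component.
        Path part = new Path("/user/hadoop/output/part-00000"); // hypothetical path
        System.out.println(part.getName()); // prints "part-00000"
    }
}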
From source file:com.philiphubbard.digraph.MRBuildVerticesTest.java
License:Open Source License
private static void readVertices(FileStatus status, ArrayList<MRVertex> vertices, Configuration conf)
        throws IOException {
    Path path = status.getPath();
    // Read vertices only from the reducer output files, whose names start with "part".
    if (path.getName().startsWith("part")) {
        System.out.println(path);
        SequenceFile.Reader reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(path));
        IntWritable key = new IntWritable();
        BytesWritable value = new BytesWritable();
        while (reader.next(key, value))
            vertices.add(new MRVertex(value, conf));
        reader.close();
    }
}
From source file:com.philiphubbard.digraph.MRCompressChainsTest.java
License:Open Source License
private static void cleanupTest(Configuration conf) throws IOException {
    FileSystem fileSystem = FileSystem.get(conf);
    ArrayList<MRVertex> vertices = new ArrayList<MRVertex>();
    FileStatus[] files = fileSystem.listStatus(new Path(testOutput));
    for (FileStatus status : files) {
        Path path = status.getPath();
        // Read vertices only from the reducer output files, whose names start with "part".
        if (path.getName().startsWith("part")) {
            System.out.println(path);
            SequenceFile.Reader reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(path));
            IntWritable key = new IntWritable();
            BytesWritable value = new BytesWritable();
            while (reader.next(key, value))
                vertices.add(new MRVertex(value, conf));
            reader.close();
        }
    }
    for (MRVertex vertex : vertices)
        System.out.println(vertex.toDisplayString());
    fileSystem.delete(new Path(testInput), true);
    fileSystem.delete(new Path(testOutput), true);
    fileSystem.close();
}
From source file:com.philiphubbard.sabe.MRAssembler.java
License:Open Source License
private void readVertices(FileStatus status, ArrayList<MRMerVertex> vertices, Configuration conf)
        throws IOException {
    Path path = status.getPath();
    if (path.getName().startsWith("part")) {
        SequenceFile.Reader reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(path));
        IntWritable key = new IntWritable();
        BytesWritable value = new BytesWritable();
        while (reader.next(key, value))
            vertices.add(new MRMerVertex(value, conf));
        reader.close();
    }
}
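The three examples above share one idiom: path.getName().startsWith("part") selects the reducer output files (part-00000, part-r-00000, ...) while skipping markers such as _SUCCESS. A minimal sketch of the same filter expressed as a PathFilter, assuming a hypothetical output directory:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;

public class PartFileFilterExample {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // List only the "part-*" files in a job output directory (path is hypothetical).
        FileStatus[] parts = fs.listStatus(new Path("/tmp/job-output"), new PathFilter() {
            @Override
            public boolean accept(Path path) {
                return path.getName().startsWith("part");
            }
        });
        for (FileStatus status : parts)
            System.out.println(status.getPath());
    }
}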
From source file:com.pinterest.terrapin.storage.HFileReaderTest.java
License:Apache License
@Test
public void testTerrapinPath() {
    String partName = TerrapinUtil.formatPartitionName(0);
    Path path = new HFileReader.TerrapinPath("/terrapin/data/meta_user_join/1234/" + partName);
    // TerrapinPath overrides getName() to fold the file set and version into the returned name.
    assertEquals(partName + "_meta_user_join_1234", path.getName());
}
From source file:com.quixey.hadoop.fs.oss.OSSFileSystem.java
License:Apache License
@Override
public boolean rename(Path src, Path dst) throws IOException {
    src = checkNotNull(src);
    dst = checkNotNull(dst);
    String srcKey = pathToKey(makeAbsolute(src));
    final String debugPreamble = "Renaming '" + src + "' to '" + dst + "' - ";

    if (isRoot(srcKey)) {
        // Cannot rename the root of the file system.
        LOG.debug("{} returning false as cannot rename the root of a filesystem", debugPreamble);
        return false;
    }

    // get status of source
    boolean srcIsFile;
    try {
        srcIsFile = getFileStatus(src).isFile();
    } catch (FileNotFoundException e) {
        // bail out fast if the source does not exist
        LOG.debug("{} returning false as src does not exist", debugPreamble);
        return false;
    }

    // figure out the final destination
    String dstKey = pathToKey(makeAbsolute(dst));
    try {
        boolean dstIsFile = getFileStatus(dst).isFile();
        if (dstIsFile) {
            // Destination is a file: a file or directory cannot be copied onto
            // an existing file, except for the special no-op case of dst == src.
            LOG.debug("{} returning without rename as dst is an already existing file", debugPreamble);
            // exit, returning true iff the rename is onto self
            return srcKey.equals(dstKey);
        } else {
            // Destination exists and is a directory: the source entry goes
            // under the dst path, keeping the name of the source entry.
            LOG.debug("{} using dst as output directory", debugPreamble);
            dstKey = pathToKey(makeAbsolute(new Path(dst, src.getName())));
        }
    } catch (FileNotFoundException e) {
        // Destination does not exist: the source file or directory is copied
        // over with the name of the destination.
        LOG.debug("{} using dst as output destination", debugPreamble);
        try {
            if (getFileStatus(dst.getParent()).isFile()) {
                LOG.debug("{} returning false as dst parent exists and is a file", debugPreamble);
                return false;
            }
        } catch (FileNotFoundException ex) {
            LOG.debug("{} returning false as dst parent does not exist", debugPreamble);
            return false;
        }
    }

    // Rename to self follows POSIX rules and differs for directories and
    // files: the return code is driven by the source type.
    if (srcKey.equals(dstKey)) {
        // The fully resolved destination key matches the source: treat as a no-op.
        LOG.debug("{} renamingToSelf; returning true", debugPreamble);
        return true;
    }

    if (srcIsFile) {
        renameOneFile(srcKey, dstKey, debugPreamble);
    } else {
        // src is a directory
        LOG.debug("{} src is directory, so copying contents", debugPreamble);
        // Verify that dst is not a subdirectory of src.
        if (isSubDir(dstKey, srcKey)) {
            LOG.debug("{} cannot rename a directory to a subdirectory of self", debugPreamble);
            return false;
        }
        renameDir(srcKey, dstKey, debugPreamble);
        LOG.debug("{} done", debugPreamble);
    }
    return true;
}
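A detail worth noting in the rename logic above: when the destination is an existing directory, the source keeps its base name under that directory via new Path(dst, src.getName()), mirroring POSIX mv semantics. A minimal sketch of that composition (paths hypothetical):

import org.apache.hadoop.fs.Path;

public class RenameTargetExample {
    public static void main(String[] args) {
        Path src = new Path("/data/in/file.txt"); // hypothetical source file
        Path dst = new Path("/data/out");         // hypothetical existing directory
        // Compose the final destination from the directory and the source's base name.
        Path target = new Path(dst, src.getName());
        System.out.println(target); // prints "/data/out/file.txt"
    }
}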
From source file:com.quixey.hadoop.fs.oss.OSSFileSystemContractBaseTest.java
License:Apache License
/**
 * Checks that when
 *
 * - x/y exists, but
 * - x/ does not exist,
 *
 * list("x") still works as expected.
 */
public void testListUnmarkedDir() throws Exception {
    Path dir = path("test/hadoop");
    assertFalse(fs.exists(dir));
    Path file = path("test/hadoop/file");
    createFile(file);
    assertTrue(fs.exists(dir));
    FileStatus[] paths = fs.listStatus(dir);
    assertEquals(1, paths.length);
    assertEquals(file.getName(), paths[0].getPath().getName());
}
From source file:com.redsqirl.workflow.server.connect.HDFSInterface.java
License:Open Source License
/**
 * Copy from local fs to HDFS
 *
 * @param local_path
 * @param hdfs_path
 * @return Error message
 * @throws RemoteException
 */
@Override
public String copyFromLocal(String local_path, String hdfs_path) throws RemoteException {
    String error = null;
    Path localP = new Path(local_path), hdfsP = new Path(hdfs_path);
    // Hadoop may leave a hidden ".<name>.crc" checksum file beside the local copy.
    File failFile = new File(localP.getParent().toString(), "." + localP.getName() + ".crc");
    try {
        FileChecker hChO = new FileChecker(new File(local_path));
        if (hChO.exists()) {
            FileSystem fs = NameNodeVar.getFS();
            if (failFile.exists()) {
                failFile.delete();
            }
            fs.copyFromLocalFile(false, localP, hdfsP);
        } else {
            error = LanguageManagerWF.getText("HdfsInterface.ouputexists");
        }
    } catch (IOException e) {
        logger.error(e.getMessage());
        error = LanguageManagerWF.getText("HdfsInterface.errormove", new Object[] { e.getMessage() });
    }
    if (error != null) {
        if (failFile.exists()) {
            failFile.delete();
        }
        logger.debug(error);
    }
    return error;
}
From source file:com.redsqirl.workflow.server.connect.HDFSInterface.java
License:Open Source License
public String copyToLocal(String hdfs_path, String local_path, boolean writtableByAll) throws RemoteException {
    String error = null;
    Path localP = new Path(local_path), hdfsP = new Path(hdfs_path);
    // Hadoop may leave a hidden ".<name>.crc" checksum file beside the local copy.
    File failFile = new File(localP.getParent().toString(), "." + localP.getName() + ".crc");
    try {
        FileChecker hChN = new FileChecker(new File(local_path));
        HdfsFileChecker hChO = new HdfsFileChecker(hdfsP);
        if (!hChN.exists() && hChO.exists()) {
            FileSystem fs = NameNodeVar.getFS();
            if (failFile.exists()) {
                failFile.delete();
            }
            fs.copyToLocalFile(false, hdfsP, localP);
            if (writtableByAll) {
                new File(local_path).setWritable(true, false);
            }
        } else {
            error = LanguageManagerWF.getText("HdfsInterface.ouputexists");
        }
    } catch (IOException e) {
        logger.error(e.getMessage());
        error = LanguageManagerWF.getText("HdfsInterface.errormove", new Object[] { e.getMessage() });
    }
    if (error != null) {
        logger.warn(error);
        if (failFile.exists()) {
            failFile.delete();
        }
    }
    return error;
}
From source file:com.redsqirl.workflow.server.connect.HDFSInterface.java
License:Open Source License
/**
 * Change the permissions of a path
 *
 * @param fs
 * @param path
 * @param permission
 * @param recursive
 * @return Error Message
 */
protected String changePermission(FileSystem fs, Path path, String permission, boolean recursive) {
    String error = null;
    try {
        FileStatus stat = fs.getFileStatus(path);
        if (stat.getOwner().equals(System.getProperty("user.name"))) {
            if (recursive) {
                FileStatus[] child = fs.listStatus(path);
                for (int i = 0; i < child.length && error == null; ++i) {
                    error = changePermission(fs, child[i].getPath(), permission, recursive);
                }
            }
            if (error == null) {
                logger.debug("1 ----- path " + path.getName() + " new perms " + permission);
                fs.setPermission(path, new FsPermission(permission));
            }
        } else {
            error = LanguageManagerWF.getText("HdfsInterface.changeprop.ownererror",
                    new Object[] { path.toString() });
        }
    } catch (IOException e) {
        logger.error("Cannot operate on the file or directory: " + path.toString());
        logger.error(e.getMessage());
        error = LanguageManagerWF.getText("HdfsInterface.changeprop.fileaccess", new Object[] { path });
    }
    if (error != null) {
        logger.debug(error);
    }
    return error;
}
From source file:com.redsqirl.workflow.server.connect.HDFSInterface.java
License:Open Source License
/**
 * Change the permission of a path
 *
 * @param path
 * @param permission
 * @param recursive
 * @return Error Message
 */
protected String changePermission(Path path, String permission, boolean recursive) {
    String error = null;
    try {
        logger.debug("1 " + path.getName());
        FileSystem fs = NameNodeVar.getFS();
        FileStatus stat = fs.getFileStatus(path);
        if (stat.getOwner().equals(System.getProperty("user.name"))) {
            FileStatus[] child = fs.listStatus(path);
            if (recursive) {
                logger.debug("children : " + child.length);
                for (int i = 0; i < child.length && error == null; ++i) {
                    error = changePermission(fs, child[i].getPath(), permission, recursive);
                }
            }
            if (error == null) {
                logger.debug("set permissions : " + path.toString() + " , "
                        + new FsPermission(permission).toString());
                fs.setPermission(path, new FsPermission(permission));
                logger.debug(getProperties(path.getName()));
            }
        } else {
            error = LanguageManagerWF.getText("HdfsInterface.changeprop.ownererror",
                    new Object[] { path.toString() });
        }
        // fs.close();
    } catch (IOException e) {
        logger.error("Cannot operate on the file or directory: " + path.toString());
        logger.error(e.getMessage());
        error = LanguageManagerWF.getText("HdfsInterface.changeprop.fileaccess", new Object[] { path });
    }
    if (error != null) {
        logger.debug(error);
    }
    return error;
}