List of usage examples for org.apache.hadoop.fs FileSystem rename
public abstract boolean rename(Path src, Path dst) throws IOException;
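Before the project-specific examples below, a minimal sketch of calling the method directly. The configuration and paths here are hypothetical and not taken from any of the sources listed on this page:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RenameExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // Hypothetical paths; rename reports most failures by returning false
        // rather than throwing, so the boolean result should be checked.
        Path src = new Path("/tmp/staging/part-00000");
        Path dst = new Path("/data/final/part-00000");
        if (!fs.rename(src, dst)) {
            throw new IOException("Failed to rename " + src + " to " + dst);
        }
        fs.close();
    }
}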
From source file: tv.icntv.log.crawl.store.HdfsDefaultStore.java
License: Apache License
@Override
public boolean rename(String srcName, String name) {
    FSDataOutputStream out = null;
    FileSystem fileSystem = null;
    try {
        fileSystem = FileSystem.get(configuration);
        if (fileSystem.exists(new Path(srcName))) {
            return fileSystem.rename(new Path(srcName), new Path(name));
        }
        // Source does not exist: create an empty destination file and report failure.
        logger.info("try rename, but name={} not exist, create file {}", srcName, name);
        out = fileSystem.create(new Path(name));
        out.flush();
        return false;
    } catch (IOException e) {
        e.printStackTrace();
        logger.error("rename error:", e);
        return false;
    } finally {
        if (null != out) {
            IOUtils.closeStream(out);
        }
        if (null != fileSystem) {
            try {
                fileSystem.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
}
From source file: tv.icntv.log.stb.commons.HadoopUtils.java
License: Apache License
/**
 * Move (mv) a path from one location to another.
 *
 * @param from source path
 * @param to   destination path
 * @throws IOException
 */
public static void rename(Path from, Path to) throws IOException {
    FileSystem fileSystem = null;
    try {
        fileSystem = FileSystem.get(configuration);
        fileSystem.rename(from, to);
    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        if (null != fileSystem) {
            fileSystem.close();
        }
    }
}
From source file: voldemort.store.readonly.disk.HadoopStoreWriter.java
License: Apache License
@Override
public void close() throws IOException {

    this.indexFileStream.close();
    this.valueFileStream.close();

    if (this.nodeId == -1 || this.chunkId == -1 || this.partitionId == -1) {
        // Issue 258 - No data was read in the reduce phase, do not create
        // any output
        return;
    }

    // If the replica type read was not valid, shout out
    if (getSaveKeys() && this.replicaType == -1) {
        throw new RuntimeException("Could not read the replica type correctly for node " + nodeId
                + " ( partition - " + this.partitionId + " )");
    }

    String fileNamePrefix = null;
    if (getSaveKeys()) {
        fileNamePrefix = new String(Integer.toString(this.partitionId) + "_"
                + Integer.toString(this.replicaType) + "_" + Integer.toString(this.chunkId));
    } else {
        fileNamePrefix = new String(Integer.toString(this.partitionId) + "_"
                + Integer.toString(this.chunkId));
    }

    // Initialize the node directory
    Path nodeDir = new Path(this.outputDir, "node-" + this.nodeId);

    // Create output directory, if it doesn't exist
    FileSystem outputFs = nodeDir.getFileSystem(this.conf);
    outputFs.mkdirs(nodeDir);
    outputFs.setPermission(nodeDir, new FsPermission(HadoopStoreBuilder.HADOOP_FILE_PERMISSION));
    logger.info("Setting permission to 755 for " + nodeDir);

    // Write the checksum and output files
    if (this.checkSumType != CheckSumType.NONE) {

        if (this.checkSumDigestIndex != null && this.checkSumDigestValue != null) {
            Path checkSumIndexFile = new Path(nodeDir, fileNamePrefix + ".index.checksum");
            Path checkSumValueFile = new Path(nodeDir, fileNamePrefix + ".data.checksum");

            if (outputFs.exists(checkSumIndexFile)) {
                outputFs.delete(checkSumIndexFile);
            }
            FSDataOutputStream output = outputFs.create(checkSumIndexFile);
            outputFs.setPermission(checkSumIndexFile,
                    new FsPermission(HadoopStoreBuilder.HADOOP_FILE_PERMISSION));
            output.write(this.checkSumDigestIndex.getCheckSum());
            output.close();

            if (outputFs.exists(checkSumValueFile)) {
                outputFs.delete(checkSumValueFile);
            }
            output = outputFs.create(checkSumValueFile);
            outputFs.setPermission(checkSumValueFile,
                    new FsPermission(HadoopStoreBuilder.HADOOP_FILE_PERMISSION));
            output.write(this.checkSumDigestValue.getCheckSum());
            output.close();
        } else {
            throw new RuntimeException("Failed to open checksum digest for node " + nodeId
                    + " ( partition - " + this.partitionId + ", chunk - " + chunkId + " )");
        }
    }

    // Generate the final chunk files
    Path indexFile = new Path(nodeDir, fileNamePrefix + ".index");
    Path valueFile = new Path(nodeDir, fileNamePrefix + ".data");

    logger.info("Moving " + this.taskIndexFileName + " to " + indexFile);
    if (outputFs.exists(indexFile)) {
        outputFs.delete(indexFile);
    }
    outputFs.rename(taskIndexFileName, indexFile);

    logger.info("Moving " + this.taskValueFileName + " to " + valueFile);
    if (outputFs.exists(valueFile)) {
        outputFs.delete(valueFile);
    }
    outputFs.rename(this.taskValueFileName, valueFile);
}
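The close() method above deletes any existing destination before calling rename, because FileSystem.rename refuses to overwrite an existing path and simply returns false. A condensed sketch of that delete-then-rename idiom; the helper name and error handling are illustrative, not part of the Voldemort source:

// Illustrative helper, not from the sources above: move src over dst,
// removing any stale destination first so the rename can succeed.
static void moveReplacing(FileSystem fs, Path src, Path dst) throws IOException {
    if (fs.exists(dst)) {
        fs.delete(dst, false); // clear the old file before renaming
    }
    if (!fs.rename(src, dst)) {
        throw new IOException("Could not move " + src + " to " + dst);
    }
}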
From source file: voldemort.store.readonly.mapreduce.HadoopStoreBuilderReducer.java
License: Apache License
@Override
public void cleanup(Context context) throws IOException {

    this.indexFileStream.close();
    this.valueFileStream.close();

    if (this.nodeId == -1 || this.chunkId == -1 || this.partitionId == -1) {
        // No data was read in the reduce phase, do not create any output
        // directory (Also Issue 258)
        return;
    }

    Path nodeDir = new Path(this.outputDir, "node-" + this.nodeId);
    Path indexFile = new Path(nodeDir, this.partitionId + "_" + this.chunkId + ".index");
    Path valueFile = new Path(nodeDir, this.partitionId + "_" + this.chunkId + ".data");

    // create output directory
    FileSystem fs = indexFile.getFileSystem(this.conf);
    fs.mkdirs(nodeDir);

    if (this.checkSumType != CheckSumType.NONE) {

        if (this.checkSumDigestIndex != null && this.checkSumDigestValue != null) {
            Path checkSumIndexFile = new Path(nodeDir,
                    this.partitionId + "_" + this.chunkId + ".index.checksum");
            Path checkSumValueFile = new Path(nodeDir,
                    this.partitionId + "_" + this.chunkId + ".data.checksum");

            FSDataOutputStream output = fs.create(checkSumIndexFile);
            output.write(this.checkSumDigestIndex.getCheckSum());
            output.close();

            output = fs.create(checkSumValueFile);
            output.write(this.checkSumDigestValue.getCheckSum());
            output.close();
        } else {
            throw new VoldemortException("Failed to open CheckSum digest");
        }
    }

    logger.info("Moving " + this.taskIndexFileName + " to " + indexFile + ".");
    fs.rename(taskIndexFileName, indexFile);

    logger.info("Moving " + this.taskValueFileName + " to " + valueFile + ".");
    fs.rename(this.taskValueFileName, valueFile);
}
From source file: voldemort.store.readonly.mr.utils.HadoopUtils.java
License: Apache License
/**
 * Move the file from one place to another. Unlike the raw Hadoop API this
 * will throw an exception if it fails. Like the Hadoop api it will fail if
 * a file exists in the destination.
 *
 * @param fs The filesystem
 * @param from The source file to move
 * @param to The destination location
 * @throws IOException
 */
public static void move(FileSystem fs, Path from, Path to) throws IOException {
    boolean success = fs.rename(from, to);
    if (!success)
        throw new RuntimeException("Failed to move " + from + " to " + to);
}
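Because move() converts the boolean result of rename into an exception, callers can use it without checking a return value. A possible call site; the paths are hypothetical, not from the Voldemort source:

// Hypothetical usage: promote a completed temporary output to its final name.
FileSystem fs = FileSystem.get(new Configuration());
HadoopUtils.move(fs, new Path("/tmp/job-output/_tmp.index"), new Path("/data/store/0_0.index"));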
From source file: voldemort.store.readonly.mr.utils.HadoopUtils.java
License: Apache License
/**
 * Move the given file to the given location. Delete any existing file in
 * that location. Use the temp directory to make the operation as
 * transactional as possible. Throws an exception if the move fails.
 *
 * @param fs The filesystem
 * @param from The source file
 * @param to The destination file
 * @param temp A temp directory to use
 * @throws IOException
 */
public static void replaceFile(FileSystem fs, Path from, Path to, Path temp) throws IOException {
    fs.delete(temp, true);
    move(fs, to, temp);
    try {
        move(fs, from, to);
        fs.delete(temp, true);
    } catch (IOException e) {
        // hmm something went wrong, attempt to restore
        fs.rename(temp, to);
        throw e;
    }
}
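A sketch of how replaceFile() might be invoked to swap a freshly built file into the place of one that already exists; the paths, including the scratch location, are hypothetical:

FileSystem fs = FileSystem.get(new Configuration());
Path fresh = new Path("/tmp/build/store.data");        // newly built file (hypothetical)
Path live = new Path("/data/store/store.data");        // existing file to be replaced (hypothetical)
Path scratch = new Path("/tmp/build/store.data.old");  // temp location holding the old copy (hypothetical)
HadoopUtils.replaceFile(fs, fresh, live, scratch);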