Example usage for org.apache.hadoop.fs FileSystem rename

Introduction

On this page you can find usage examples for the org.apache.hadoop.fs.FileSystem rename method.

Prototype

public abstract boolean rename(Path src, Path dst) throws IOException;

Document

Renames Path src to Path dst.
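
A minimal sketch of the typical calling pattern is shown below, assuming a default Configuration and hypothetical paths (it is not taken from any of the projects listed under Usage). Because rename reports many failures by returning false rather than throwing, callers usually check the boolean result.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.IOException;

public class RenameExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // Hypothetical paths; adjust to your own layout.
        Path src = new Path("/tmp/data/part-00000");
        Path dst = new Path("/tmp/archive/part-00000");

        // rename returns false (rather than throwing) for many failure
        // cases, e.g. a missing source or an existing destination file,
        // so the boolean result should be checked.
        if (!fs.rename(src, dst)) {
            throw new IOException("Could not rename " + src + " to " + dst);
        }
    }
}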

Usage

From source file: com.conversantmedia.mapreduce.tool.BaseTool.java

License: Apache License

/**
 * Moves our inputs into the 'archive' path for
 * long term storage, or perhaps further processing.
 * @param context      the job's driver context bean
 * @throws IOException   if the inputs cannot be moved to
 *          the archive path.
 */
protected void archiveInputs(T context) throws IOException {
    FileSystem fs = FileSystem.get(getConf());
    fs.mkdirs(context.getArchive());

    for (Path input : context.getInput()) {
        List<FileStatus> status = getInputFiles(input);
        for (FileStatus file : status) {
            Path dest = new Path(context.getArchive(), file.getPath().getName());
            fs.rename(file.getPath(), dest);
            logger().debug("Moved [" + input + "] to [" + dest + "]");
        }
    }
}

From source file: com.datatorrent.stram.StreamingContainerManager.java

License: Apache License

/**
 * This method is for saving meta information about this application in HDFS -- the meta information that generally
 * does not change across multiple attempts
 */
private void saveMetaInfo() throws IOException {
    Path path = new Path(this.vars.appPath, APP_META_FILENAME + "." + System.nanoTime());
    FileSystem fs = FileSystem.newInstance(path.toUri(), new Configuration());
    try {
        FSDataOutputStream os = fs.create(path);
        try {
            JSONObject top = new JSONObject();
            JSONObject attributes = new JSONObject();
            for (Map.Entry<Attribute<?>, Object> entry : this.plan.getLogicalPlan().getAttributes()
                    .entrySet()) {
                attributes.put(entry.getKey().getSimpleName(), entry.getValue());
            }
            JSONObject customMetrics = new JSONObject();
            for (Map.Entry<String, Map<String, Object>> entry : latestLogicalMetrics.entrySet()) {
                customMetrics.put(entry.getKey(), new JSONArray(entry.getValue().keySet()));
            }
            top.put(APP_META_KEY_ATTRIBUTES, attributes);
            top.put(APP_META_KEY_CUSTOM_METRICS, customMetrics);
            os.write(top.toString().getBytes());
        } catch (JSONException ex) {
            throw new RuntimeException(ex);
        } finally {
            os.close();
        }
        Path origPath = new Path(this.vars.appPath, APP_META_FILENAME);
        fs.rename(path, origPath);
    } finally {
        fs.close();
    }
}

From source file: com.facebook.hiveio.common.FileSystems.java

License: Apache License

/**
 * Move a file or directory from source to destination, recursively copying
 * subdirectories.
 *
 * @param fs FileSystem
 * @param file path to copy (file or directory)
 * @param src path to source directory
 * @param dest path to destination directory
 * @throws IOException I/O problems
 */
public static void move(FileSystem fs, Path file, Path src, Path dest) throws IOException {
    Path destFilePath = pathInDestination(file, src, dest);
    if (fs.isFile(file)) {
        if (fs.exists(destFilePath)) {
            if (!fs.delete(destFilePath, true)) {
                throw new IllegalArgumentException("Could not remove existing file " + destFilePath);
            }
        }
        if (!fs.rename(file, destFilePath)) {
            throw new IllegalArgumentException("Could not move " + file + " to " + destFilePath);
        }
    } else if (fs.getFileStatus(file).isDir()) {
        FileStatus[] statuses = fs.listStatus(file);
        fs.mkdirs(destFilePath);
        if (statuses != null) {
            for (FileStatus status : statuses) {
                move(fs, status.getPath(), src, dest);
            }
        }
    }
}

From source file: com.facebook.presto.hive.AbstractTestHiveFileSystem.java

License: Apache License

@Test
public void testRename() throws Exception {
    Path basePath = new Path(getBasePath(), UUID.randomUUID().toString());
    FileSystem fs = hdfsEnvironment.getFileSystem(TESTING_CONTEXT, basePath);
    assertFalse(fs.exists(basePath));

    // create file foo.txt
    Path path = new Path(basePath, "foo.txt");
    assertTrue(fs.createNewFile(path));
    assertTrue(fs.exists(path));

    // rename foo.txt to bar.txt when bar does not exist
    Path newPath = new Path(basePath, "bar.txt");
    assertFalse(fs.exists(newPath));
    assertTrue(fs.rename(path, newPath));
    assertFalse(fs.exists(path));
    assertTrue(fs.exists(newPath));

    // rename foo.txt to foo.txt when foo.txt does not exist
    assertFalse(fs.rename(path, path));

    // create file foo.txt and rename to existing bar.txt
    assertTrue(fs.createNewFile(path));
    assertFalse(fs.rename(path, newPath));

    // rename foo.txt to foo.txt when foo.txt exists
    assertFalse(fs.rename(path, path));

    // delete foo.txt
    assertTrue(fs.delete(path, false));
    assertFalse(fs.exists(path));

    // create directory source with file
    Path source = new Path(basePath, "source");
    assertTrue(fs.createNewFile(new Path(source, "test.txt")));

    // rename source to non-existing target
    Path target = new Path(basePath, "target");
    assertFalse(fs.exists(target));
    assertTrue(fs.rename(source, target));
    assertFalse(fs.exists(source));
    assertTrue(fs.exists(target));

    // create directory source with file
    assertTrue(fs.createNewFile(new Path(source, "test.txt")));

    // rename source to existing target
    assertTrue(fs.rename(source, target));
    assertFalse(fs.exists(source));
    target = new Path(target, "source");
    assertTrue(fs.exists(target));
    assertTrue(fs.exists(new Path(target, "test.txt")));

    // delete target
    target = new Path(basePath, "target");
    assertTrue(fs.exists(target));
    assertTrue(fs.delete(target, true));
    assertFalse(fs.exists(target));

    // cleanup
    fs.delete(basePath, true);
}
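
As the assertions above show, rename reports most failures through its boolean return value rather than by throwing: renaming a missing source returns false, as does renaming a file onto an existing file, while renaming a directory onto an existing directory moves the source underneath the target (target/source). These are the semantics exercised by this particular test; behavior can vary across FileSystem implementations.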

From source file: com.facebook.presto.hive.metastore.SemiTransactionalHiveMetastore.java

License: Apache License

private static void asyncRename(HdfsEnvironment hdfsEnvironment, Executor executor, AtomicBoolean cancelled,
        List<CompletableFuture<?>> fileRenameFutures, String user, Path currentPath, Path targetPath,
        List<String> fileNames) {
    FileSystem fileSystem;
    try {
        fileSystem = hdfsEnvironment.getFileSystem(user, currentPath);
    } catch (IOException e) {
        throw new PrestoException(HIVE_FILESYSTEM_ERROR,
                format("Error moving data files to final location. Error listing directory %s", currentPath),
                e);
    }

    for (String fileName : fileNames) {
        Path source = new Path(currentPath, fileName);
        Path target = new Path(targetPath, fileName);
        fileRenameFutures.add(CompletableFuture.runAsync(() -> {
            if (cancelled.get()) {
                return;
            }
            try {
                if (!fileSystem.rename(source, target)) {
                    throw new PrestoException(HIVE_FILESYSTEM_ERROR,
                            format("Error moving data files from %s to final location %s", source, target));
                }
            } catch (IOException e) {
                throw new PrestoException(HIVE_FILESYSTEM_ERROR,
                        format("Error moving data files from %s to final location %s", source, target), e);
            }
        }, executor));
    }
}

From source file: com.fullcontact.sstable.index.SSTableIndexIndex.java

License: Apache License

/**
 * Create and write an index index based on the input Cassandra Index.db file. Read the Index.db and generate chunks
 * (splits) based on the configured chunk size.
 *
 * @param fileSystem Hadoop file system.
 * @param sstablePath SSTable Index.db.
 * @throws IOException
 */
public static void writeIndex(final FileSystem fileSystem, final Path sstablePath) throws IOException {

    final Configuration configuration = fileSystem.getConf();

    final long splitSize = configuration.getLong(HadoopSSTableConstants.HADOOP_SSTABLE_SPLIT_MB,
            HadoopSSTableConstants.DEFAULT_SPLIT_MB) * 1024 * 1024;

    final Closer closer = Closer.create();

    final Path outputPath = sstablePath.suffix(SSTABLE_INDEX_SUFFIX);
    final Path inProgressOutputPath = sstablePath.suffix(SSTABLE_INDEX_IN_PROGRESS_SUFFIX);

    boolean success = false;
    try {
        final FSDataOutputStream os = closer.register(fileSystem.create(inProgressOutputPath));

        final TLongArrayList splitOffsets = new TLongArrayList();
        long currentStart = 0;
        long currentEnd = 0;
        final IndexOffsetScanner index = new IndexOffsetScanner(sstablePath, fileSystem);

        while (index.hasNext()) {
            // NOTE: This does not give an exact size of this split in bytes but a rough estimate.
            // This should be good enough since it's only used for sorting splits by size in hadoop land.
            while (currentEnd - currentStart < splitSize && index.hasNext()) {
                currentEnd = index.next();
                splitOffsets.add(currentEnd);
            }

            // Record the split
            final long[] offsets = splitOffsets.toArray();
            os.writeLong(offsets[0]); // Start
            os.writeLong(offsets[offsets.length - 1]); // End

            // Clear the offsets
            splitOffsets.clear();

            if (index.hasNext()) {
                currentStart = index.next();
                currentEnd = currentStart;
                splitOffsets.add(currentStart);
            }
        }

        success = true;
    } finally {
        closer.close();

        if (!success) {
            fileSystem.delete(inProgressOutputPath, false);
        } else {
            fileSystem.rename(inProgressOutputPath, outputPath);
        }
    }
}

From source file: com.github.dryangkun.hbase.tidx.hive.HiveHFileOutputFormat.java

License: Apache License

@Override
public RecordWriter getHiveRecordWriter(final JobConf jc, final Path finalOutPath,
        Class<? extends Writable> valueClass, boolean isCompressed, Properties tableProperties,
        final Progressable progressable) throws IOException {

    // Read configuration for the target path, first from jobconf, then from table properties
    String hfilePath = getFamilyPath(jc, tableProperties);
    if (hfilePath == null) {
        throw new RuntimeException("Please set " + HFILE_FAMILY_PATH + " to target location for HFiles");
    }

    // Target path's last component is also the column family name.
    final Path columnFamilyPath = new Path(hfilePath);
    final String columnFamilyName = columnFamilyPath.getName();
    final byte[] columnFamilyNameBytes = Bytes.toBytes(columnFamilyName);
    final Job job = new Job(jc);
    setCompressOutput(job, isCompressed);
    setOutputPath(job, finalOutPath);

    // Create the HFile writer
    final org.apache.hadoop.mapreduce.TaskAttemptContext tac = ShimLoader.getHadoopShims()
            .newTaskAttemptContext(job.getConfiguration(), progressable);

    final Path outputdir = FileOutputFormat.getOutputPath(tac);
    final org.apache.hadoop.mapreduce.RecordWriter<ImmutableBytesWritable, KeyValue> fileWriter = getFileWriter(
            tac);

    // Individual columns are going to be pivoted to HBase cells,
    // and for each row, they need to be written out in order
    // of column name, so sort the column names now, creating a
    // mapping to their column position.  However, the first
    // column is interpreted as the row key.
    String columnList = tableProperties.getProperty("columns");
    String[] columnArray = columnList.split(",");
    final SortedMap<byte[], Integer> columnMap = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
    int i = 0;
    for (String columnName : columnArray) {
        if (i != 0) {
            columnMap.put(Bytes.toBytes(columnName), i);
        }
        ++i;
    }

    return new RecordWriter() {

        @Override
        public void close(boolean abort) throws IOException {
            try {
                fileWriter.close(null);
                if (abort) {
                    return;
                }
                // Move the hfiles file(s) from the task output directory to the
                // location specified by the user.
                FileSystem fs = outputdir.getFileSystem(jc);
                fs.mkdirs(columnFamilyPath);
                Path srcDir = outputdir;
                for (;;) {
                    FileStatus[] files = fs.listStatus(srcDir, FileUtils.STAGING_DIR_PATH_FILTER);
                    if ((files == null) || (files.length == 0)) {
                        throw new IOException("No family directories found in " + srcDir);
                    }
                    if (files.length != 1) {
                        throw new IOException("Multiple family directories found in " + srcDir);
                    }
                    srcDir = files[0].getPath();
                    if (srcDir.getName().equals(columnFamilyName)) {
                        break;
                    }
                }
                for (FileStatus regionFile : fs.listStatus(srcDir, FileUtils.STAGING_DIR_PATH_FILTER)) {
                    fs.rename(regionFile.getPath(), new Path(columnFamilyPath, regionFile.getPath().getName()));
                }
                // Hive actually wants a file as task output (not a directory), so
                // replace the empty directory with an empty file to keep it happy.
                fs.delete(outputdir, true);
                fs.createNewFile(outputdir);
            } catch (InterruptedException ex) {
                throw new IOException(ex);
            }
        }

        private void writeText(Text text) throws IOException {
            // Decompose the incoming text row into fields.
            String s = text.toString();
            String[] fields = s.split("\u0001");
            assert (fields.length <= (columnMap.size() + 1));
            // First field is the row key.
            byte[] rowKeyBytes = Bytes.toBytes(fields[0]);
            // Remaining fields are cells addressed by column name within row.
            for (Map.Entry<byte[], Integer> entry : columnMap.entrySet()) {
                byte[] columnNameBytes = entry.getKey();
                int iColumn = entry.getValue();
                String val;
                if (iColumn >= fields.length) {
                    // trailing blank field
                    val = "";
                } else {
                    val = fields[iColumn];
                    if ("\\N".equals(val)) {
                        // omit nulls
                        continue;
                    }
                }
                byte[] valBytes = Bytes.toBytes(val);
                KeyValue kv = new KeyValue(rowKeyBytes, columnFamilyNameBytes, columnNameBytes, valBytes);
                try {
                    fileWriter.write(null, kv);
                } catch (IOException e) {
                    LOG.error("Failed while writing row: " + s);
                    throw e;
                } catch (InterruptedException ex) {
                    throw new IOException(ex);
                }
            }
        }

        private void writePut(PutWritable put) throws IOException {
            ImmutableBytesWritable row = new ImmutableBytesWritable(put.getPut().getRow());
            SortedMap<byte[], List<Cell>> cells = put.getPut().getFamilyCellMap();
            for (Map.Entry<byte[], List<Cell>> entry : cells.entrySet()) {
                Collections.sort(entry.getValue(), new CellComparator());
                for (Cell c : entry.getValue()) {
                    try {
                        fileWriter.write(row, KeyValueUtil.copyToNewKeyValue(c));
                    } catch (InterruptedException e) {
                        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
                    }
                }
            }
        }

        @Override
        public void write(Writable w) throws IOException {
            if (w instanceof Text) {
                writeText((Text) w);
            } else if (w instanceof PutWritable) {
                writePut((PutWritable) w);
            } else {
                throw new IOException("Unexpected writable " + w);
            }
        }
    };
}

From source file: com.hadoop.compression.lzo.LzoIndex.java

License: Open Source License

/**
 * Index an lzo file to allow the input format to split them into separate map
 * jobs.
 *
 * @param fs File system that contains the file.
 * @param lzoFile the lzo file to index.  For filename.lzo, the created index file will be
 * filename.lzo.index.
 * @throws IOException
 */
public static void createIndex(FileSystem fs, Path lzoFile) throws IOException {

    Configuration conf = fs.getConf();
    CompressionCodecFactory factory = new CompressionCodecFactory(conf);
    CompressionCodec codec = factory.getCodec(lzoFile);
    if (null == codec) {
        throw new IOException("Could not find codec for file " + lzoFile
                + " - you may need to add the LZO codec to your io.compression.codecs "
                + "configuration in core-site.xml");
    }
    ((Configurable) codec).setConf(conf);

    FSDataInputStream is = null;
    FSDataOutputStream os = null;
    Path outputFile = lzoFile.suffix(LZO_INDEX_SUFFIX);
    Path tmpOutputFile = lzoFile.suffix(LZO_TMP_INDEX_SUFFIX);

    // Track whether an exception was thrown or not, so we know to either
    // delete the tmp index file on failure, or rename it to the new index file on success.
    boolean indexingSucceeded = false;
    try {
        is = fs.open(lzoFile);
        os = fs.create(tmpOutputFile);
        LzopDecompressor decompressor = (LzopDecompressor) codec.createDecompressor();
        // Solely for reading the header
        codec.createInputStream(is, decompressor);
        int numCompressedChecksums = decompressor.getCompressedChecksumsCount();
        int numDecompressedChecksums = decompressor.getDecompressedChecksumsCount();

        while (true) {
            // read and ignore, we just want to get to the next int
            int uncompressedBlockSize = is.readInt();
            if (uncompressedBlockSize == 0) {
                break;
            } else if (uncompressedBlockSize < 0) {
                throw new EOFException();
            }

            int compressedBlockSize = is.readInt();
            if (compressedBlockSize <= 0) {
                throw new IOException("Could not read compressed block size");
            }

            // See LzopInputStream.getCompressedData
            boolean isUncompressedBlock = (uncompressedBlockSize == compressedBlockSize);
            int numChecksumsToSkip = isUncompressedBlock ? numDecompressedChecksums
                    : numDecompressedChecksums + numCompressedChecksums;
            long pos = is.getPos();
            // write the pos of the block start
            os.writeLong(pos - 8);
            // seek to the start of the next block, skip any checksums
            is.seek(pos + compressedBlockSize + (4 * numChecksumsToSkip));
        }
        // If we're here, indexing was successful.
        indexingSucceeded = true;
    } finally {
        // Close any open streams.
        if (is != null) {
            is.close();
        }

        if (os != null) {
            os.close();
        }

        if (!indexingSucceeded) {
            // If indexing didn't succeed (i.e. an exception was thrown), clean up after ourselves.
            fs.delete(tmpOutputFile, false);
        } else {
            // Otherwise, rename filename.lzo.index.tmp to filename.lzo.index.
            fs.rename(tmpOutputFile, outputFile);
        }
    }
}
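
Note the write-to-temporary-then-rename pattern here: the index is built in filename.lzo.index.tmp and only renamed to filename.lzo.index after indexing succeeds, so readers never see a partially written index. The boolean returned by rename is ignored in this snippet; a stricter variant could check it and fail loudly, as the FileSystems.move example above does.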

From source file: com.hadoop.mapreduce.LzoTextInputFormat.java

License: Open Source License

/**
 * Index an lzo file to allow the input format to split them into separate map
 * jobs.
 * 
 * @param fs
 *          File system that contains the file.
 * @param lzoFile
 *          the lzo file to index.
 * @throws IOException
 */
public static void createIndex(FileSystem fs, Path lzoFile) throws IOException {

    Configuration conf = fs.getConf();
    CompressionCodecFactory factory = new CompressionCodecFactory(fs.getConf());
    CompressionCodec codec = factory.getCodec(lzoFile);
    ((Configurable) codec).setConf(conf);

    InputStream lzoIs = null;
    FSDataOutputStream os = null;
    Path outputFile = new Path(lzoFile.toString() + LzoTextInputFormat.LZO_INDEX_SUFFIX);
    Path tmpOutputFile = outputFile.suffix(".tmp");

    try {
        FSDataInputStream is = fs.open(lzoFile);
        os = fs.create(tmpOutputFile);
        LzopDecompressor decompressor = (LzopDecompressor) codec.createDecompressor();
        // for reading the header
        lzoIs = codec.createInputStream(is, decompressor);

        int numChecksums = decompressor.getChecksumsCount();

        while (true) {
            // read and ignore, we just want to get to the next int
            int uncompressedBlockSize = is.readInt();
            if (uncompressedBlockSize == 0) {
                break;
            } else if (uncompressedBlockSize < 0) {
                throw new EOFException();
            }

            int compressedBlockSize = is.readInt();
            if (compressedBlockSize <= 0) {
                throw new IOException("Could not read compressed block size");
            }

            long pos = is.getPos();
            // write the pos of the block start
            os.writeLong(pos - 8);
            // seek to the start of the next block, skip any checksums
            is.seek(pos + compressedBlockSize + (4 * numChecksums));
        }
    } finally {
        if (lzoIs != null) {
            lzoIs.close();
        }

        if (os != null) {
            os.close();
        }
    }

    fs.rename(tmpOutputFile, outputFile);
}

From source file: com.ibm.bi.dml.runtime.controlprogram.parfor.ResultMergeLocalFile.java

License: Open Source License

/**
 *
 * @param fnameNew
 * @param inMO
 * @throws CacheException
 * @throws IOException
 */
private void copyAllFiles(String fnameNew, ArrayList<MatrixObject> inMO) throws CacheException, IOException {
    JobConf job = new JobConf(ConfigurationManager.getCachedJobConf());
    FileSystem fs = FileSystem.get(job);
    Path path = new Path(fnameNew);

    //create output dir
    fs.mkdirs(path);

    //merge in all input matrix objects
    IDSequence seq = new IDSequence();
    for (MatrixObject in : inMO) {
        LOG.trace("ResultMerge (local, file): Merge input " + in.getVarName() + " (fname=" + in.getFileName()
                + ") via file rename.");

        //copy over files (just rename file or entire dir)
        Path tmpPath = new Path(in.getFileName());
        String lname = tmpPath.getName();
        fs.rename(tmpPath, new Path(fnameNew + "/" + lname + seq.getNextID()));
    }
}