Example usage for org.apache.hadoop.fs FileSystem rename

List of usage examples for org.apache.hadoop.fs FileSystem rename

Introduction

On this page you can find example usages of org.apache.hadoop.fs.FileSystem.rename().

Prototype

public abstract boolean rename(Path src, Path dst) throws IOException;

Documentation

Renames Path src to Path dst. Returns true if the rename succeeds; depending on the FileSystem implementation, failure is reported either by returning false or by throwing an IOException.
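
Before the examples, here is a minimal, self-contained sketch (not taken from any of the sources below; the paths are hypothetical) showing the basic call pattern with the return value checked:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RenameExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // Hypothetical paths; rename() requires src and dst to be on this FileSystem.
        Path src = new Path("/tmp/part-00000");
        Path dst = new Path("/data/output/part-00000");

        // Many implementations report failure by returning false, not by throwing.
        if (!fs.rename(src, dst)) {
            throw new IOException("Could not rename " + src + " to " + dst);
        }
    }
}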

Usage

From source file: eu.stratosphere.hadoopcompatibility.mapreduce.HadoopOutputFormat.java

License: Apache License

/**
 * Commit the task by moving the output file out of the temporary directory.
 * @throws IOException
 */
@Override
public void close() throws IOException {
    try {
        this.recordWriter.close(this.context);
    } catch (InterruptedException e) {
        throw new IOException("Could not close RecordReader.", e);
    }

    if (this.fileOutputCommitter.needsTaskCommit(this.context)) {
        this.fileOutputCommitter.commitTask(this.context);
    }
    this.fileOutputCommitter.commitJob(this.context);

    // rename tmp-* files to final name
    FileSystem fs = FileSystem.get(this.configuration);

    Path outputPath = new Path(this.configuration.get("mapred.output.dir"));

    final Pattern p = Pattern.compile("tmp-(.)-([0-9]+)");

    // isDirectory does not work in hadoop 1
    if (fs.getFileStatus(outputPath).isDir()) {
        FileStatus[] files = fs.listStatus(outputPath);

        for (FileStatus f : files) {
            Matcher m = p.matcher(f.getPath().getName());
            if (m.matches()) {
                int part = Integer.valueOf(m.group(2));
                fs.rename(f.getPath(), new Path(outputPath.toString() + "/" + part));
            }
        }
    }
}
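
Two details worth noting in this example: the boolean result of rename() is ignored, so a failed move passes silently, and FileStatus.isDir() is deprecated from Hadoop 2 onward (it is used here only because, as the comment notes, isDirectory() does not exist in Hadoop 1). Where Hadoop 1 compatibility is not required, a sketch of the stricter form:

    // isDirectory() replaces the deprecated isDir() in Hadoop 2+
    if (fs.getFileStatus(outputPath).isDirectory()) {
        for (FileStatus f : fs.listStatus(outputPath)) {
            Matcher m = p.matcher(f.getPath().getName());
            if (m.matches()) {
                int part = Integer.parseInt(m.group(2));
                Path dst = new Path(outputPath, Integer.toString(part));
                if (!fs.rename(f.getPath(), dst)) {
                    throw new IOException("Could not rename " + f.getPath() + " to " + dst);
                }
            }
        }
    }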

From source file: fi.tkk.ics.hadoop.bam.cli.plugins.chipster.Summarize.java

License: Open Source License

@Override
protected int run(CmdLineParser parser) {

    final List<String> args = parser.getRemainingArgs();
    switch (args.size()) {
    case 0:
        return missingArg("WORKDIR");
    case 1:
        return missingArg("LEVELS");
    case 2:
        return missingArg("INPATH");
    default:
        break;
    }
    if (!cacheAndSetProperties(parser))
        return 3;

    levels = args.get(1).split(",");
    for (String l : levels) {
        try {
            int lvl = Integer.parseInt(l);
            if (lvl > 0)
                continue;
            System.err.printf("summarize :: summary level '%d' is not positive!\n", lvl);
        } catch (NumberFormatException e) {
            System.err.printf("summarize :: summary level '%s' is not an integer!\n", l);
        }
        return 3;
    }

    wrkDir = new Path(args.get(0));
    final Path bam = new Path(args.get(2));

    final boolean sort = parser.getBoolean(sortOpt);

    final Configuration conf = getConf();

    conf.setBoolean(AnySAMInputFormat.TRUST_EXTS_PROPERTY, !parser.getBoolean(noTrustExtsOpt));

    // Used by Utils.getMergeableWorkFile() to name the output files.
    wrkFile = bam.getName();
    conf.set(Utils.WORK_FILENAME_PROPERTY, wrkFile);

    conf.setStrings(SummarizeReducer.SUMMARY_LEVELS_PROP, levels);

    try {
        try {
            // There's a lot of different Paths here, and it can get a bit
            // confusing. Here's how it works:
            //
            // - outPath is the output dir for the final merged output, given
            //   with the -o parameter.
            //
            // - wrkDir is the user-given path where the outputs of the
            //   reducers go.
            //
            // - mergedTmpDir (defined further below) is $wrkDir/sort.tmp: if
            //   we are sorting, the summaries output in the first Hadoop job
            //   are merged in there.
            //
            // - mainSortOutputDir is $wrkDir/sorted.tmp: getSortOutputDir()
            //   gives a per-level/strand directory under it, which is used by
            //   doSorting() and mergeOne(). This is necessary because we
            //   cannot have multiple Hadoop jobs outputting into the same
            //   directory at the same time, as explained in the comment in
            //   sortMerged().

            // Required for path ".", for example.
            wrkDir = wrkDir.getFileSystem(conf).makeQualified(wrkDir);

            mainSortOutputDir = sort ? new Path(wrkDir, "sorted.tmp") : null;

            if (!runSummary(bam))
                return 4;
        } catch (IOException e) {
            System.err.printf("summarize :: Summarizing failed: %s\n", e);
            return 4;
        }

        Path mergedTmpDir = null;
        try {
            if (sort) {
                mergedTmpDir = new Path(wrkDir, "sort.tmp");
                mergeOutputs(mergedTmpDir);

            } else if (outPath != null)
                mergeOutputs(outPath);

        } catch (IOException e) {
            System.err.printf("summarize :: Merging failed: %s\n", e);
            return 5;
        }

        if (sort) {
            if (!doSorting(mergedTmpDir))
                return 6;

            // Reset this since SummarySort uses it.
            conf.set(Utils.WORK_FILENAME_PROPERTY, wrkFile);

            tryDelete(mergedTmpDir);

            if (outPath != null)
                try {
                    sorted = true;
                    mergeOutputs(outPath);
                } catch (IOException e) {
                    System.err.printf("summarize :: Merging sorted output failed: %s\n", e);
                    return 7;
                }
            else {
                // Move the unmerged results out of the mainSortOutputDir
                // subdirectories to wrkDir.

                System.out.println("summarize :: Moving outputs from temporary directories...");
                t.start();

                try {
                    final FileSystem fs = wrkDir.getFileSystem(conf);
                    for (String lvl : levels) {
                        final FileStatus[] parts;

                        try {
                            parts = fs.globStatus(new Path(new Path(mainSortOutputDir, lvl + "[fr]"),
                                    "*-[0-9][0-9][0-9][0-9][0-9][0-9]"));
                        } catch (IOException e) {
                            System.err.printf("summarize :: Couldn't move level %s results: %s", lvl, e);
                            continue;
                        }

                        for (FileStatus part : parts) {
                            final Path path = part.getPath();
                            try {
                                fs.rename(path, new Path(wrkDir, path.getName()));
                            } catch (IOException e) {
                                System.err.printf("summarize :: Couldn't move '%s': %s", path, e);
                            }
                        }
                    }
                } catch (IOException e) {
                    System.err.printf("summarize :: Moving results failed: %s", e);
                }
                System.out.printf("summarize :: Moved in %d.%03d s.\n", t.stopS(), t.fms());
            }
            tryDelete(mainSortOutputDir);
        }
    } catch (ClassNotFoundException e) {
        throw new RuntimeException(e);
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    }

    return 0;
}

From source file: finderbots.recommenders.hadoop.RecommenderUpdateJob.java

License: Apache License

private void moveMatrices() throws IOException {
    //so it can output to Solr if options specify
    FileSystem fs = FileSystem.get(getConf());
    Path from = new Path(options.getPrimarySimilarityMatrixPath());
    Path to = new Path(options.getPrimaryOutputDir(), XRecommenderJob.SIMS_MATRIX_DIR);//steal the dir name from Xrec
    fs.rename(from, to);
    //move the primary user action matrix to output
    from = new Path(new Path(options.getPrimaryTempDir(), RecommenderJob.DEFAULT_PREPARE_PATH),
            PreparePreferenceMatrixJob.USER_VECTORS);
    to = new Path(options.getOutputDir(), options.getPrimaryActionHistoryDir());
    fs.rename(from, to);
    //if it was created move the secondary user action matrix to output
    if (options.getDoXRecommender()) {
        from = new Path(new Path(options.getSecondaryTempDir(), XRecommenderJob.DEFAULT_PREPARE_DIR),
                PrepareActionMatricesJob.USER_VECTORS_A);
        to = new Path(options.getOutputDir(), options.getSecondaryActionHistoryDir());
        fs.rename(from, to);
    }
}
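
The three renames above ignore rename()'s boolean result, so a failed move (for example, a missing destination parent or a pre-existing destination) passes silently. A defensive wrapper, as a sketch (checkedRename is a hypothetical helper, not part of RecommenderUpdateJob):

private static void checkedRename(FileSystem fs, Path from, Path to) throws IOException {
    // rename() commonly returns false, rather than throwing, when the
    // destination's parent directory is missing or the destination exists.
    fs.mkdirs(to.getParent());
    if (!fs.rename(from, to)) {
        throw new IOException("Could not rename " + from + " to " + to);
    }
}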

From source file: FormatStorage1.MetaUtil.java

License: Open Source License

private static void rebuild(String[] args) throws IOException {
    int segs = args.length - 2;
    int[] starts, lens;
    String inputfile = null;
    String outputfile = null;
    inputfile = args[1];
    outputfile = inputfile;
    int first = 2;
    if (!args[2].startsWith("start=")) {
        outputfile = args[2];
        segs = args.length - 3;
        first = 3;
    }
    if (args.length - first == 0 || (args.length - first) % 2 != 0) {
        throw new IOException("error");
    }
    starts = new int[segs / 2];
    lens = new int[segs / 2];
    for (int i = first; i < args.length; i = i + 2) {
        String[] strs = args[i].split("=");
        if (strs[0].equals("start")) {
            starts[(i - first) / 2] = Integer.valueOf(strs[1]);
        } else {
            throw new IOException("error");
        }
        strs = args[i + 1].split("=");
        if (strs[0].equals("len")) {
            lens[(i - first) / 2] = Integer.valueOf(strs[1]);
        } else if (strs[0].equals("end")) {
            lens[(i - first) / 2] = Integer.valueOf(strs[1]) - starts[(i - first) / 2] + 1;
        } else {
            throw new IOException("error");
        }
    }
    Configuration conf = new Configuration();
    IFormatDataFile ifdfin = new IFormatDataFile(conf);
    IFormatDataFile ifdfout = new IFormatDataFile(conf);
    ifdfin.open(inputfile);
    ifdfout.create(outputfile + "_rebuild_tmp", ifdfin.fileInfo().head());
    for (int i = 0; i < starts.length; i++) {
        ifdfin.seek(starts[i]);
        for (int j = 0; j < lens[i]; j++) {
            ifdfout.addRecord(ifdfin.next());
        }
    }
    ifdfin.close();
    ifdfout.close();
    FileSystem fs = FileSystem.get(conf);
    if (inputfile.equals(outputfile)) {
        fs.delete(new Path(inputfile), true);
    }
    fs.rename(new Path(outputfile + "_rebuild_tmp"), new Path(outputfile));
}
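
Note the crash window in the last two statements: if the process dies after delete() but before rename(), the original file is gone and only the _rebuild_tmp file remains. The same window exists in delete() below. A safer sequence, as a sketch (the .bak backup path is hypothetical), keeps the original until the replacement is in place:

Path tmp = new Path(outputfile + "_rebuild_tmp");
Path dst = new Path(outputfile);
Path backup = new Path(outputfile + ".bak");
if (inputfile.equals(outputfile) && fs.exists(dst)) {
    fs.rename(dst, backup);      // park the original instead of deleting it
}
if (fs.rename(tmp, dst)) {
    fs.delete(backup, true);     // replacement is in place; drop the backup
} else {
    fs.rename(backup, dst);      // restore the original on failure
}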

From source file: FormatStorage1.MetaUtil.java

License: Open Source License

private static void delete(String[] args) throws IOException {
    String inputfile = args[1];
    String outputfile = inputfile;
    int idx = 2;

    if (!args[2].startsWith("segid=") && !args[2].startsWith("unitid=") && !args[2].startsWith("lines=")) {
        outputfile = args[2];
        idx = 3;
    }

    int segid = -1, unitid = -1;
    boolean line = false;
    TreeSet<Integer> lines = new TreeSet<Integer>();
    TreeMap<Integer, TreeSet<Integer>> seg2unitID = new TreeMap<Integer, TreeSet<Integer>>();

    while (idx < args.length) {
        String[] strs = args[idx++].split("=");
        if (strs[0].equals("segid")) {
            segid = Integer.valueOf(strs[1]);
            seg2unitID.put(segid, new TreeSet<Integer>());
        } else if (strs[0].equals("unitid")) {
            unitid = Integer.valueOf(strs[1]);
            seg2unitID.lastEntry().getValue().add(unitid);
        } else if (strs[0].equals("lines")) {
            String[] linestr = strs[1].split(",");
            line = true;
            for (int j = 0; j < linestr.length; j++) {
                lines.add(Integer.valueOf(linestr[j]));
            }
        } else {
            throw new IOException("error");
        }
    }
    Configuration conf = new Configuration();
    IFormatDataFile ifdfin = new IFormatDataFile(conf);
    ifdfin.open(inputfile);

    ArrayList<Integer> starts = new ArrayList<Integer>();
    ArrayList<Integer> ends = new ArrayList<Integer>();
    if (line) {
        starts.add(0);
        for (int i : lines) {
            ends.add(i);
            starts.add(i + 1);
        }
        ends.add(ifdfin.recnum());
    } else {
        starts.add(0);
        for (int sid : seg2unitID.keySet()) {
            int segbl = ifdfin.segIndex().getILineIndex(sid).beginline();
            int segel = ifdfin.segIndex().getILineIndex(sid).endline();
            if (seg2unitID.get(sid) == null || seg2unitID.get(sid).size() == 0) {
                ends.add(segbl);
                starts.add(segel + 1);
            } else {
                ifdfin.seek(segbl);
                for (Integer uid : seg2unitID.get(sid)) {
                    int ubl = ifdfin.currSegment().unitindex().getLineIndex(uid).beginline();
                    int uel = ifdfin.currSegment().unitindex().getLineIndex(uid).endline();
                    ends.add(ubl);
                    starts.add(uel + 1);
                }
            }
        }
        ends.add(ifdfin.recnum());
    }
    System.out.println("starts:\t" + starts);
    System.out.println("ends:\t" + ends);

    IFormatDataFile ifdfout = new IFormatDataFile(conf);
    ifdfout.create(outputfile + "_delete_tmp", ifdfin.fileInfo().head());
    for (int i = 0; i < starts.size(); i++) {
        int bl = starts.get(i);
        ifdfin.seek(bl);
        for (int j = bl; j < ends.get(i); j++) {
            try {
                ifdfout.addRecord(ifdfin.next());
            } catch (Exception e) {
                System.out.println(j);
                e.printStackTrace();
                return;
            }
        }
    }
    ifdfout.close();
    ifdfin.close();
    FileSystem fs = FileSystem.get(conf);
    if (inputfile.equals(outputfile)) {
        fs.delete(new Path(inputfile), true);
    }
    fs.rename(new Path(outputfile + "_delete_tmp"), new Path(outputfile));
}

From source file: fr.ens.biologie.genomique.eoulsan.data.protocols.HDFSPathDataProtocol.java

License: LGPL

@Override
public void rename(final DataFile file, final DataFile dest) throws IOException {

    if (dest == null) {
        throw new NullPointerException("dest argument is null");
    }

    if (dest.getProtocol() != this) {
        throw new IOException("the protocol of the dest is not " + getName() + " protocol: " + dest);
    }

    final Path path = getPath(file);
    final Path newPath = getPath(dest);

    final FileSystem fs = path.getFileSystem(this.conf);

    fs.rename(path, newPath);
}
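
One caveat this implementation glosses over: FileSystem.rename() cannot move a file between two different filesystems, and only the source path's FileSystem is consulted here; the boolean result is also discarded. A defensive variant of the final call, as a sketch:

final FileSystem destFs = newPath.getFileSystem(this.conf);
if (!fs.getUri().equals(destFs.getUri())) {
    throw new IOException("Cannot rename across filesystems: " + path + " -> " + newPath);
}
if (!fs.rename(path, newPath)) {
    throw new IOException("Could not rename " + path + " to " + newPath);
}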

From source file: fuse4j.hadoopfs.HdfsClientImpl.java

License: Apache License

@Override
public boolean rename(int uid, String src, String dst) {
    FileSystem dfs = null;
    try {
        dfs = getDfs(uid);
        Path srcPath = new Path(src);
        Path dstPath = new Path(dst);
        if (srcPath.equals(dstPath)) {
            //source and destination are the same path
            return false;
        }
        if (dfs.isFile(dstPath) && dfs.isFile(srcPath)) {
            //TODO: temporary fix to overwrite files
            //delete destination file if exists.
            //"HDFS-654"  fixes the problem allowing atomic rename when dst exists
            dfs.delete(dstPath);
        }
        return dfs.rename(srcPath, dstPath);
    } catch (Exception ioe) {
        // fall through to failure
        System.out.println(ioe);
    }
    return false;
}
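
As the HDFS-654 comment hints, delete-then-rename is not atomic: a crash between the two calls loses the destination without installing the source. On Hadoop releases that ship org.apache.hadoop.fs.FileContext (0.21 and later), an overwriting rename is available as a single call; a sketch, assuming the org.apache.hadoop.fs.FileContext and org.apache.hadoop.fs.Options imports:

FileContext fc = FileContext.getFileContext(dfs.getUri(), dfs.getConf());
// Replaces dstPath in one operation instead of delete() followed by rename().
fc.rename(srcPath, dstPath, Options.Rename.OVERWRITE);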

From source file: gobblin.compaction.mapreduce.avro.AvroKeyCompactorOutputCommitter.java

License: Apache License

/**
 * Commits the task, moving files to their final committed location by delegating to
 * {@link FileOutputCommitter} to perform the actual moving. First, renames the
 * files to include the count of records contained within the file and a timestamp,
 * in the form {recordCount}.{timestamp}.avro. Then, the files are moved to their
 * committed location.
 */
@Override
public void commitTask(TaskAttemptContext context) throws IOException {
    Path workPath = getWorkPath();
    FileSystem fs = workPath.getFileSystem(context.getConfiguration());

    if (fs.exists(workPath)) {
        long recordCount = getRecordCountFromCounter(context, AvroKeyDedupReducer.EVENT_COUNTER.RECORD_COUNT);
        String fileNamePrefix;
        if (recordCount == 0) {

            // recordCount == 0 indicates that it is a map-only, non-dedup job, and thus record count should
            // be obtained from mapper counter.
            fileNamePrefix = CompactionRecordCountProvider.M_OUTPUT_FILE_PREFIX;
            recordCount = getRecordCountFromCounter(context, AvroKeyMapper.EVENT_COUNTER.RECORD_COUNT);
        } else {
            fileNamePrefix = CompactionRecordCountProvider.MR_OUTPUT_FILE_PREFIX;
        }
        String fileName = CompactionRecordCountProvider.constructFileName(fileNamePrefix, recordCount);

        for (FileStatus status : fs.listStatus(workPath, new PathFilter() {
            @Override
            public boolean accept(Path path) {
                return FilenameUtils.isExtension(path.getName(), "avro");
            }
        })) {
            Path newPath = new Path(status.getPath().getParent(), fileName);
            LOG.info(String.format("Renaming %s to %s", status.getPath(), newPath));
            fs.rename(status.getPath(), newPath);
        }
    }

    super.commitTask(context);
}

From source file: gobblin.compaction.mapreduce.MRCompactor.java

License: Apache License

/**
 * Rename all the source directories for a specific dataset
 */
public static void renameSourceDirAsCompactionComplete(FileSystem fs, Dataset dataset) {
    try {
        for (Path path : dataset.getRenamePaths()) {
            Path newPath = new Path(path.getParent(),
                    path.getName() + MRCompactor.COMPACTION_RENAME_SOURCE_DIR_SUFFIX);
            LOG.info("[{}] Renaming {} to {}", dataset.getDatasetName(), path, newPath);
            fs.rename(path, newPath);
        }
    } catch (Exception e) {
        LOG.error("Rename input path failed", e);
    }
}

From source file: gobblin.runtime.job_catalog.FSJobCatalog.java

License: Apache License

/**
 * Used for shadow copying in the process of updating an existing job configuration file,
 * which requires deleting the pre-existing copy of the file and creating a new one with
 * the same name. Steps:
 *  Create a new file in /tmp.
 *  Safely delete the old one.
 *  Copy the newly created configuration file to jobConfigDir.
 *  Delete the shadow file.
 */
synchronized void materializedJobSpec(Path jobSpecPath, JobSpec jobSpec, FileSystem fs)
        throws IOException, JobSpecNotFoundException {
    Path shadowDirectoryPath = new Path("/tmp");
    Path shadowFilePath = new Path(shadowDirectoryPath, UUID.randomUUID().toString());
    /* If previously existed, should delete anyway */
    if (fs.exists(shadowFilePath)) {
        fs.delete(shadowFilePath, false);
    }

    ImmutableMap.Builder<String, String> mapBuilder = ImmutableMap.builder();
    mapBuilder.put(ImmutableFSJobCatalog.DESCRIPTION_KEY_IN_JOBSPEC, jobSpec.getDescription())
            .put(ImmutableFSJobCatalog.VERSION_KEY_IN_JOBSPEC, jobSpec.getVersion());

    if (jobSpec.getTemplateURI().isPresent()) {
        mapBuilder.put(ConfigurationKeys.JOB_TEMPLATE_PATH, jobSpec.getTemplateURI().get().toString());
    }

    Map<String, String> injectedKeys = mapBuilder.build();
    String renderedConfig = ConfigFactory.parseMap(injectedKeys).withFallback(jobSpec.getConfig()).root()
            .render(ConfigRenderOptions.defaults());
    try (DataOutputStream os = fs.create(shadowFilePath);
            Writer writer = new OutputStreamWriter(os, Charsets.UTF_8)) {
        writer.write(renderedConfig);
    }

    /* Optionally delete the old spec, then move the new one in. */
    if (fs.exists(jobSpecPath)) {
        if (!fs.delete(jobSpecPath, false)) {
            throw new IOException("Unable to delete existing job file: " + jobSpecPath);
        }
    }
    if (!fs.rename(shadowFilePath, jobSpecPath)) {
        throw new IOException("Unable to rename job file: " + shadowFilePath + " to " + jobSpecPath);
    }
}
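
A design note on the final rename: the shadow file is written to /tmp on the same FileSystem instance as jobSpecPath (not the local /tmp), which is presumably what lets the publish step be a single rename. If the two paths could ever land on different filesystems, a copy-based fallback would be needed; a sketch using FileUtil from hadoop-common (shown with the same fs on both sides for brevity):

if (!fs.rename(shadowFilePath, jobSpecPath)) {
    // copy() with deleteSource=true also works across filesystems.
    if (!FileUtil.copy(fs, shadowFilePath, fs, jobSpecPath, true, fs.getConf())) {
        throw new IOException("Unable to move " + shadowFilePath + " to " + jobSpecPath);
    }
}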