Example usage for org.apache.hadoop.fs Path makeQualified

List of usage examples for org.apache.hadoop.fs Path makeQualified

Introduction

On this page you can find example usage for org.apache.hadoop.fs.Path.makeQualified.

Prototype

@Deprecated
public Path makeQualified(FileSystem fs) 

Document

Returns a qualified path object for the FileSystem's working directory.
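
Since this overload is deprecated, here is a minimal, self-contained sketch of how it is typically called, together with the usual non-deprecated alternative on FileSystem. The Configuration and the relative path "data/input" are illustrative assumptions, not taken from the examples on this page.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MakeQualifiedSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // A relative path is resolved against the FileSystem's scheme,
        // authority and working directory (for example, "data/input" may
        // become something like "hdfs://namenode:8020/user/me/data/input").
        Path relative = new Path("data/input");

        // The deprecated form used throughout the examples below.
        Path qualifiedOld = relative.makeQualified(fs);

        // The usual non-deprecated equivalent on FileSystem.
        Path qualifiedNew = fs.makeQualified(relative);

        System.out.println(qualifiedOld);
        System.out.println(qualifiedNew);
    }
}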

Usage

From source file:org.mrgeo.utils.geotools.GeotoolsVectorUtils.java

License:Apache License

public static Bounds calculateBounds(String[] inputs, Configuration conf) throws IOException {
    log.info("Calculating bounds");

    GeotoolsVectorReader reader = null;

    Bounds bounds = null;

    FileSystem fs = HadoopFileUtils.getFileSystem(conf);

    for (String input : inputs) {
        try {
            Path p = new Path(input);
            URI uri = p.makeQualified(fs).toUri();
            reader = GeotoolsVectorUtils.open(uri);

            Bounds b = calculateBounds(reader);

            if (bounds == null) {
                bounds = b;
            } else {
                bounds.expand(b);
            }
        }
        //      catch (IOException e)
        //      {
        //        e.printStackTrace();
        //      }
        finally {
            if (reader != null) {
                reader.close();
                reader = null;
            }
        }
    }

    log.info("Bounds {}", bounds);

    return bounds;
}

From source file:org.mrgeo.vector.mrsvector.MrsVectorPyramidMetadata.java

License:Apache License

/**
 * Loading metadata from HDFS.  The objects of
 * the file are stored in a json format.  This enables the ObjectMapper
 * to parse out the values correctly.
 * 
 * @param path - the location of the metadata file
 * @return a valid MrsVectorPyramidMetadata object
 * @throws JsonGenerationException
 * @throws JsonMappingException
 * @throws IOException
 */
public static MrsVectorPyramidMetadata load(final Path path)
        throws JsonGenerationException, JsonMappingException, IOException {

    // attach to hdfs and create an input stream for the file
    FileSystem fs = HadoopFileUtils.getFileSystem(path);
    log.debug("Physically loading image metadata from " + path.toString());
    final InputStream is = HadoopFileUtils.open(path); // fs.open(path);
    try {
        // load the metadata from the input stream
        final MrsVectorPyramidMetadata metadata = load(is);

        // set the fully qualified path for the metadata file
        Path fullPath = path.makeQualified(fs);
        metadata.setPyramid(fullPath.getParent().toString());
        return metadata;
    } finally {
        is.close();
    }
}

From source file:org.smartfrog.services.hadoop.mapreduce.terasort.TeraSortJob.java

License:Apache License

@SuppressWarnings("ProhibitedExceptionDeclared")
@Override
public int run(String[] args) throws Exception {
    LOG.info("starting");
    JobConf job = (JobConf) getConf();
    Path inputDir = new Path(args[0]);
    inputDir = inputDir.makeQualified(inputDir.getFileSystem(job));
    Path partitionFile = new Path(inputDir, TeraConstants.PARTITION_FILENAME);
    URI partitionUri = new URI(partitionFile.toString() + "#" + TeraConstants.PARTITION_FILENAME);
    TeraInputFormat.setInputPaths(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    job.setJobName("TeraSort");
    job.setJarByClass(TeraSortJob.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);
    job.setInputFormat(TeraInputFormat.class);
    job.setOutputFormat(TeraOutputFormat.class);
    job.setPartitionerClass(TotalOrderPartitioner.class);
    job.setBoolean(ClusterConstants.MAPRED_DISABLE_TOOL_WARNING, true);

    TeraInputFormat.writePartitionFile(job, partitionFile);
    DistributedCache.addCacheFile(partitionUri, job);
    DistributedCache.createSymlink(job);
    job.setInt("dfs.replication", 1);
    job.setInt("mapred.submit.replication", 1);
    TeraOutputFormat.setFinalSync(job, true);
    RunningJob runningJob = JobClient.runJob(job);
    LOG.info("done");
    return 0;
}

From source file:org.smartfrog.services.hadoop.operations.utils.DfsUtils.java

License:Open Source License

/**
 * Check the dest is not under the source Credit: Apache Hadoop team;
 *
 * @param srcFS source filesystem
 * @param src   source path
 * @param dstFS dest filesystem
 * @param dst   dest path
 * @throws SmartFrogRuntimeException if there is a match.
 */

public static void assertNotDependent(FileSystem srcFS, Path src, FileSystem dstFS, Path dst)
        throws SmartFrogRuntimeException {
    if (srcFS.getUri().equals(dstFS.getUri())) {
        String srcq = src.makeQualified(srcFS).toString() + Path.SEPARATOR;
        String dstq = dst.makeQualified(dstFS).toString() + Path.SEPARATOR;
        if (dstq.startsWith(srcq)) {
            if (srcq.length() == dstq.length()) {
                throw new SmartFrogRuntimeException(ERROR_CANNOT_COPY + src + " to itself.");
            } else {
                throw new SmartFrogRuntimeException(ERROR_CANNOT_COPY + src + " to its subdirectory " + dst);
            }
        }
    }
}

From source file:org.springframework.data.hadoop.fs.HdfsResource.java

License:Apache License

/**
 * Instantiates a new hdfs resource.
 *
 * @param path the path
 * @param fs the fs
 * @param codecsFactory the codecs factory
 */
@SuppressWarnings("deprecation")
HdfsResource(Path path, FileSystem fs, CompressionCodecFactory codecsFactory) {
    Assert.notNull(path, "a valid path is required");
    Assert.notNull(fs, "non null file system required");

    this.location = path.toString();
    this.fs = fs;
    this.path = path.makeQualified(fs);

    boolean exists = false;

    try {
        exists = fs.exists(path);
    } catch (Exception ex) {
    }
    this.exists = exists;

    FileStatus status = null;
    try {
        status = fs.getFileStatus(path);
    } catch (Exception ex) {
    }
    this.status = status;
    this.codecsFactory = codecsFactory;
}

From source file:org.weikey.terasort.TeraSort.java

License:Apache License

@SuppressWarnings("deprecation")
public int run(String[] args) throws Exception {
    LOG.info("starting");
    JobConf job = (JobConf) getConf();
    SortConfig sortConfig = new SortConfig(job);
    // if (args.length >= 3) {
    // job.setNumReduceTasks(Integer.valueOf(args[2]));
    // if (args.length >= 4) {
    // sortConfig.setStartKey(Integer.valueOf(args[3]));
    // if (args.length >= 5) {
    // sortConfig.setFieldSeparator(args[4]);
    // }
    // }
    // }

    Integer numMapTasks = null;
    Integer numReduceTasks = null;

    List<String> otherArgs = new ArrayList<String>();
    boolean createLzopIndex = false;
    for (int i = 0; i < args.length; ++i) {
        try {
            if ("-m".equals(args[i])) {
                job.setNumMapTasks(Integer.parseInt(args[++i]));
            } else if ("-r".equals(args[i])) {
                job.setNumReduceTasks(Integer.parseInt(args[++i]));
            } else if ("-f".equals(args[i]) || "--ignore-case".equals(args[i])) {
                sortConfig.setIgnoreCase(true);
            } else if ("-u".equals(args[i]) || "--unique".equals(args[i])) {
                sortConfig.setUnique(true);
            } else if ("-k".equals(args[i]) || "--key".equals(args[i])) {
                String[] parts = StringUtils.split(args[++i], ",");
                sortConfig.setStartKey(Integer.valueOf(parts[0]));
                if (parts.length > 1) {
                    sortConfig.setEndKey(Integer.valueOf(parts[1]));
                }
            } else if ("-t".equals(args[i]) || "--field-separator".equals(args[i])) {
                sortConfig.setFieldSeparator(args[++i]);
            } else if ("--total-order".equals(args[i])) {
                double pcnt = Double.parseDouble(args[++i]);
                int numSamples = Integer.parseInt(args[++i]);
                int maxSplits = Integer.parseInt(args[++i]);
                if (0 >= maxSplits) {
                    maxSplits = Integer.MAX_VALUE;
                }
            } else if ("--lzop-index".equals(args[i])) {
                createLzopIndex = true;
            } else {
                otherArgs.add(args[i]);
            }
        } catch (NumberFormatException except) {
            System.out.println("ERROR: Integer expected instead of " + args[i]);
            return printUsage();
        } catch (ArrayIndexOutOfBoundsException except) {
            System.out.println("ERROR: Required parameter missing from " + args[i - 1]);
            return printUsage(); // exits
        }
    }

    // Make sure there are exactly 2 parameters left.
    if (otherArgs.size() != 2) {
        System.out.println("ERROR: Wrong number of parameters: " + otherArgs.size() + " instead of 2.");
        return printUsage();
    }

    Path inputDir = new Path(args[0]);
    inputDir = inputDir.makeQualified(inputDir.getFileSystem(job));
    Path partitionFile = new Path(inputDir, TeraInputFormat.PARTITION_FILENAME);
    URI partitionUri = new URI(partitionFile.toString() + "#" + TeraInputFormat.PARTITION_FILENAME);
    TeraInputFormat.setInputPaths(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    job.setJobName("TeraSort");
    job.setJarByClass(TeraSort.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);
    job.setInputFormat(TeraInputFormat.class);
    job.setOutputFormat(TeraOutputFormat.class);
    job.setPartitionerClass(TotalOrderPartitioner.class);
    TeraInputFormat.writePartitionFile(job, partitionFile);
    DistributedCache.addCacheFile(partitionUri, job);
    DistributedCache.createSymlink(job);
    job.setInt("dfs.replication", 1);
    TeraOutputFormat.setFinalSync(job, true);
    JobClient.runJob(job);
    LOG.info("done");
    return 0;
}

From source file:source.TeraSort.java

License:Apache License

public int run(String[] args) throws Exception {
    LOG.info("starting");
    JobConf job = (JobConf) getConf();

    Path inputDir = new Path(args[0]);
    inputDir = inputDir.makeQualified(inputDir.getFileSystem(job));
    Path partitionFile = new Path(inputDir, TeraInputFormat.PARTITION_FILENAME);
    URI partitionUri = new URI(partitionFile.toString() + "#" + TeraInputFormat.PARTITION_FILENAME);
    TeraInputFormat.setInputPaths(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    job.setJobName("TeraSort");
    job.setJarByClass(TeraSort.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);
    job.setInputFormat(TeraInputFormat.class);
    job.setOutputFormat(TeraOutputFormat.class);
    job.setPartitionerClass(TotalOrderPartitioner.class);
    TeraInputFormat.writePartitionFile(job, partitionFile);
    DistributedCache.addCacheFile(partitionUri, job);
    DistributedCache.createSymlink(job);
    job.setInt("dfs.replication", getOutputReplication(job));
    TeraOutputFormat.setFinalSync(job, true);
    JobClient.runJob(job);
    LOG.info("done");
    return 0;
}

From source file:ucsc.hadoop.mapreduce.apache.Sort.java

License:Apache License

/**
 * The main driver for sort program.
 * Invoke this method to submit the map/reduce job.
 * @throws IOException When there is communication problems with the 
 *                     job tracker.
 */
public int run(String[] args) throws Exception {

    JobConf jobConf = new JobConf(getConf(), Sort.class);
    jobConf.setJobName("sorter");

    jobConf.setMapperClass(IdentityMapper.class);
    jobConf.setReducerClass(IdentityReducer.class);

    JobClient client = new JobClient(jobConf);
    ClusterStatus cluster = client.getClusterStatus();
    int num_reduces = (int) (cluster.getMaxReduceTasks() * 0.9);
    String sort_reduces = jobConf.get("test.sort.reduces_per_host");
    if (sort_reduces != null) {
        num_reduces = cluster.getTaskTrackers() * Integer.parseInt(sort_reduces);
    }
    Class<? extends InputFormat> inputFormatClass = SequenceFileInputFormat.class;
    Class<? extends OutputFormat> outputFormatClass = SequenceFileOutputFormat.class;
    Class<? extends WritableComparable> outputKeyClass = BytesWritable.class;
    Class<? extends Writable> outputValueClass = BytesWritable.class;

    List<String> otherArgs = new ArrayList<String>();
    InputSampler.Sampler<K, V> sampler = null;

    for (int i = 0; i < args.length; ++i) {
        try {
            if ("-m".equals(args[i])) {
                jobConf.setNumMapTasks(Integer.parseInt(args[++i]));
            } else if ("-r".equals(args[i])) {
                num_reduces = Integer.parseInt(args[++i]);
            } else if ("-inFormat".equals(args[i])) {
                inputFormatClass = Class.forName(args[++i]).asSubclass(InputFormat.class);
            } else if ("-outFormat".equals(args[i])) {
                outputFormatClass = Class.forName(args[++i]).asSubclass(OutputFormat.class);
            } else if ("-outKey".equals(args[i])) {
                outputKeyClass = Class.forName(args[++i]).asSubclass(WritableComparable.class);
            } else if ("-outValue".equals(args[i])) {
                outputValueClass = Class.forName(args[++i]).asSubclass(Writable.class);
            } else if ("-totalOrder".equals(args[i])) {
                double pcnt = Double.parseDouble(args[++i]);
                int numSamples = Integer.parseInt(args[++i]);
                int maxSplits = Integer.parseInt(args[++i]);
                if (0 >= maxSplits)
                    maxSplits = Integer.MAX_VALUE;
                sampler = new InputSampler.RandomSampler<K, V>(pcnt, numSamples, maxSplits);
            } else {
                otherArgs.add(args[i]);
            }
        } catch (NumberFormatException except) {
            System.out.println("ERROR: Integer expected instead of " + args[i]);
            return printUsage();
        } catch (ArrayIndexOutOfBoundsException except) {
            System.out.println("ERROR: Required parameter missing from " + args[i - 1]);
            return printUsage(); // exits
        }
    }

    // Set user-supplied (possibly default) job configs
    jobConf.setNumReduceTasks(num_reduces);

    jobConf.setInputFormat(inputFormatClass);
    jobConf.setOutputFormat(outputFormatClass);

    jobConf.setOutputKeyClass(outputKeyClass);
    jobConf.setOutputValueClass(outputValueClass);

    // Make sure there are exactly 2 parameters left.
    if (otherArgs.size() != 2) {
        System.out.println("ERROR: Wrong number of parameters: " + otherArgs.size() + " instead of 2.");
        return printUsage();
    }
    FileInputFormat.setInputPaths(jobConf, otherArgs.get(0));
    FileOutputFormat.setOutputPath(jobConf, new Path(otherArgs.get(1)));

    if (sampler != null) {
        System.out.println("Sampling input to effect total-order sort...");
        jobConf.setPartitionerClass(TotalOrderPartitioner.class);
        Path inputDir = FileInputFormat.getInputPaths(jobConf)[0];
        inputDir = inputDir.makeQualified(inputDir.getFileSystem(jobConf));
        Path partitionFile = new Path(inputDir, "_sortPartitioning");
        TotalOrderPartitioner.setPartitionFile(jobConf, partitionFile);
        InputSampler.<K, V>writePartitionFile(jobConf, sampler);
        URI partitionUri = new URI(partitionFile.toString() + "#" + "_sortPartitioning");
        DistributedCache.addCacheFile(partitionUri, jobConf);
        DistributedCache.createSymlink(jobConf);
    }

    System.out.println("Running on " + cluster.getTaskTrackers() + " nodes to sort from "
            + FileInputFormat.getInputPaths(jobConf)[0] + " into " + FileOutputFormat.getOutputPath(jobConf)
            + " with " + num_reduces + " reduces.");

    Date startTime = new Date();
    System.out.println("Job started: " + startTime);
    jobResult = JobClient.runJob(jobConf);
    Date end_time = new Date();
    System.out.println("Job ended: " + end_time);
    System.out.println("The job took " + (end_time.getTime() - startTime.getTime()) / 1000 + " seconds.");
    return 0;
}

From source file:voldemort.hadoop.VoldemortWordCount.java

License:Apache License

private static void addDepJars(Configuration conf, Class<?>[] deps) throws IOException {
    FileSystem localFs = FileSystem.getLocal(conf);
    Set<String> depJars = new HashSet<String>();
    for (Class<?> dep : deps) {
        String tmp = findInClasspath(dep.getCanonicalName());
        if (tmp != null) {
            Path path = new Path(tmp);
            depJars.add(path.makeQualified(localFs).toString());
        }
    }

    String[] tmpjars = conf.get("tmpjars", "").split(",");
    for (String tmpjar : tmpjars) {
        if (!StringUtils.isEmpty(tmpjar)) {
            depJars.add(tmpjar.trim());
        }
    }
    conf.set("tmpjars", StringUtils.join(depJars.iterator(), ','));
}

From source file:voldemort.store.readonly.mr.azkaban.VoldemortMultiStoreBuildAndPushJob.java

License:Apache License

@Override
public void run() throws Exception {

    // Mapping of Pair [ cluster url, store name ] to List of previous node
    // directories.
    // Required for rollback...
    Multimap<Pair<String, String>, Pair<Integer, String>> previousNodeDirPerClusterStore = HashMultimap
            .create();

    // Retrieve filesystem information for checking if folder exists
    final FileSystem fs = outputDir.getFileSystem(new Configuration());

    // Step 1 ) Order the stores depending on the size of the store
    TreeMap<Long, String> storeNameSortedBySize = Maps.newTreeMap();
    for (String storeName : storeNames) {
        storeNameSortedBySize.put(sizeOfPath(fs, inputDirsPerStore.get(storeName)), storeName);
    }

    log.info("Store names along with their input file sizes - " + storeNameSortedBySize);

    // This will collect it in ascending order of size
    this.storeNames = Lists.newArrayList(storeNameSortedBySize.values());

    // Reverse it such that is in descending order of size
    Collections.reverse(this.storeNames);

    log.info("Store names in the order of which we'll run build and push - " + this.storeNames);

    // Step 2 ) Get the push version if set
    final long pushVersion = props.containsKey("push.version.timestamp")
            ? Long.parseLong(new SimpleDateFormat("yyyyMMddHHmmss").format(new Date()))
            : props.getLong("push.version", -1L);

    // Mapping of Pair [ cluster url, store name ] to Future with list of
    // node dirs
    HashMap<Pair<String, String>, Future<List<String>>> fetchDirsPerStoreCluster = Maps.newHashMap();

    // Store mapping of url to cluster metadata
    final ConcurrentHashMap<String, Cluster> urlToCluster = new ConcurrentHashMap<String, Cluster>();

    // Mapping of Pair [ cluster url, store name ] to List of node
    // directories
    final HashMap<Pair<String, String>, List<String>> nodeDirPerClusterStore = new HashMap<Pair<String, String>, List<String>>();

    // Iterate over all of them and check if they are complete
    final HashMap<Pair<String, String>, Exception> exceptions = Maps.newHashMap();

    ExecutorService executor = null;
    try {
        executor = Executors.newFixedThreadPool(props.getInt("build.push.parallel", 1));

        // Step 3 ) Start the building + pushing of all stores in parallel
        for (final String storeName : storeNames) {
            // Go over every cluster and do the build phase
            for (int index = 0; index < clusterUrls.size(); index++) {
                final String url = clusterUrls.get(index);
                fetchDirsPerStoreCluster.put(Pair.create(url, storeName),
                        executor.submit(new Callable<List<String>>() {

                            @Override
                            public List<String> call() throws Exception {

                                log.info("========= Working on build + push phase for store '" + storeName
                                        + "' and cluster '" + url + "' ==========");

                                // Create an admin
                                // client which will be
                                // used by
                                // everyone
                                AdminClient adminClient = null;

                                // Executor inside
                                // executor - your mind
                                // just
                                // exploded!
                                ExecutorService internalExecutor = null;

                                try {
                                    // Retrieve admin
                                    // client for
                                    // verification of
                                    // schema + pushing
                                    adminClient = new AdminClient(url, new AdminClientConfig());

                                    // Verify the store
                                    // exists ( If not,
                                    // add it
                                    // the
                                    // store )
                                    Pair<StoreDefinition, Cluster> metadata = verifySchema(storeName, url,
                                            inputDirsPerStore.get(storeName), adminClient);

                                    // Populate the url
                                    // to cluster
                                    // metadata
                                    urlToCluster.put(url, metadata.getSecond());

                                    // Create output
                                    // directory path
                                    URI uri = new URI(url);

                                    Path outputDirPath = new Path(outputDir + Path.SEPARATOR + storeName,
                                            uri.getHost());

                                    log.info("Running build phase for store '" + storeName + "' and url '" + url
                                            + "'. Reading from input directory '"
                                            + inputDirsPerStore.get(storeName) + "' and writing to "
                                            + outputDirPath);

                                    runBuildStore(metadata.getSecond(), metadata.getFirst(),
                                            inputDirsPerStore.get(storeName), outputDirPath);

                                    log.info("Finished running build phase for store " + storeName
                                            + " and url '" + url + "'. Written to directory " + outputDirPath);

                                    long storePushVersion = pushVersion;
                                    if (storePushVersion == -1L) {
                                        log.info("Retrieving version number for store '" + storeName
                                                + "' and cluster '" + url + "'");

                                        Map<String, Long> pushVersions = adminClient
                                                .getROMaxVersion(Lists.newArrayList(storeName));

                                        if (pushVersions == null || !pushVersions.containsKey(storeName)) {
                                            throw new RuntimeException(
                                                    "Could not retrieve version for store '" + storeName + "'");
                                        }

                                        storePushVersion = pushVersions.get(storeName);
                                        storePushVersion++;

                                        log.info("Retrieved max version number for store '" + storeName
                                                + "' and cluster '" + url + "' = " + storePushVersion);
                                    }

                                    log.info("Running push for cluster url " + url);

                                    // Used for
                                    // parallel pushing
                                    internalExecutor = Executors.newCachedThreadPool();

                                    AdminStoreSwapper swapper = new AdminStoreSwapper(metadata.getSecond(),
                                            internalExecutor, adminClient,
                                            1000 * props.getInt("timeout.seconds", 24 * 60 * 60), true, true);

                                    // Convert to
                                    // hadoop specific
                                    // path
                                    String outputDirPathString = outputDirPath.makeQualified(fs).toString();

                                    if (!fs.exists(outputDirPath)) {
                                        throw new RuntimeException("Output directory for store " + storeName
                                                + " and cluster '" + url + "' - " + outputDirPathString
                                                + " does not exist");
                                    }

                                    log.info("Pushing data to store '" + storeName + "' on cluster " + url
                                            + " from path  " + outputDirPathString + " with version "
                                            + storePushVersion);

                                    List<String> nodeDirs = swapper.invokeFetch(storeName, outputDirPathString,
                                            storePushVersion);

                                    log.info("Successfully pushed data to store '" + storeName + "' on cluster "
                                            + url + " from path  " + outputDirPathString + " with version "
                                            + storePushVersion);

                                    return nodeDirs;
                                } finally {
                                    if (internalExecutor != null) {
                                        internalExecutor.shutdownNow();
                                        internalExecutor.awaitTermination(10, TimeUnit.SECONDS);
                                    }
                                    if (adminClient != null) {
                                        adminClient.stop();
                                    }
                                }
                            }

                        }));

            }

        }

        for (final String storeName : storeNames) {
            for (int index = 0; index < clusterUrls.size(); index++) {
                Pair<String, String> key = Pair.create(clusterUrls.get(index), storeName);
                Future<List<String>> nodeDirs = fetchDirsPerStoreCluster.get(key);
                try {
                    nodeDirPerClusterStore.put(key, nodeDirs.get());
                } catch (Exception e) {
                    exceptions.put(key, e);
                }
            }
        }

    } finally {
        if (executor != null) {
            executor.shutdownNow();
            executor.awaitTermination(10, TimeUnit.SECONDS);
        }
    }

    // ===== If we got exceptions during the build + push, delete data from
    // successful
    // nodes ======
    if (!exceptions.isEmpty()) {

        log.error("Got an exception during pushes. Deleting data already pushed on successful nodes");

        for (int index = 0; index < clusterUrls.size(); index++) {
            String clusterUrl = clusterUrls.get(index);
            Cluster cluster = urlToCluster.get(clusterUrl);

            AdminClient adminClient = null;
            try {
                adminClient = new AdminClient(cluster, new AdminClientConfig());
                for (final String storeName : storeNames) {
                    // Check if the [ cluster , store name ] succeeded. We
                    // need to roll it back
                    Pair<String, String> key = Pair.create(clusterUrl, storeName);

                    if (nodeDirPerClusterStore.containsKey(key)) {
                        List<String> nodeDirs = nodeDirPerClusterStore.get(key);

                        log.info("Deleting data for successful pushes to " + clusterUrl + " and store "
                                + storeName);
                        int nodeId = 0;
                        for (String nodeDir : nodeDirs) {
                            try {
                                log.info("Deleting data ( " + nodeDir + " ) for successful pushes to '"
                                        + clusterUrl + "' and store '" + storeName + "' and node " + nodeId);
                                adminClient.failedFetchStore(nodeId, storeName, nodeDir);
                                log.info("Successfully deleted data for successful pushes to '" + clusterUrl
                                        + "' and store '" + storeName + "' and node " + nodeId);

                            } catch (Exception e) {
                                log.error("Failure while deleting data on node " + nodeId + " for store '"
                                        + storeName + "' and url '" + clusterUrl + "'");
                            }
                            nodeId++;
                        }
                    }
                }
            } finally {
                if (adminClient != null) {
                    adminClient.stop();
                }
            }
        }

        int errorNo = 1;
        for (Pair<String, String> key : exceptions.keySet()) {
            log.error("Error no " + errorNo + "] Error pushing for cluster '" + key.getFirst() + "' and store '"
                    + key.getSecond() + "' :", exceptions.get(key));
            errorNo++;
        }

        throw new VoldemortException("Exception during build + push");
    }

    // ====== Delete the temporary directory since we don't require it
    // ======
    if (!props.getBoolean("build.output.keep", false)) {
        JobConf jobConf = new JobConf();

        if (props.containsKey("hadoop.job.ugi")) {
            jobConf.set("hadoop.job.ugi", props.getString("hadoop.job.ugi"));
        }

        log.info("Deleting output directory since we have finished the pushes " + outputDir);
        HadoopUtils.deletePathIfExists(jobConf, outputDir.toString());
        log.info("Successfully deleted output directory since we have finished the pushes" + outputDir);
    }

    // ====== Time to swap the stores one node at a time ========
    try {
        for (int index = 0; index < clusterUrls.size(); index++) {
            String url = clusterUrls.get(index);
            Cluster cluster = urlToCluster.get(url);

            AdminClient adminClient = new AdminClient(cluster, new AdminClientConfig());

            log.info("Swapping all stores on cluster " + url);
            try {
                // Go over every node and swap
                for (Node node : cluster.getNodes()) {

                    log.info("Swapping all stores on cluster " + url + " and node " + node.getId());

                    // Go over every store and swap
                    for (String storeName : storeNames) {

                        Pair<String, String> key = Pair.create(url, storeName);
                        log.info("Swapping '" + storeName + "' store on cluster " + url + " and node "
                                + node.getId() + " - " + nodeDirPerClusterStore.get(key).get(node.getId()));

                        previousNodeDirPerClusterStore.put(key,
                                Pair.create(node.getId(), adminClient.swapStore(node.getId(), storeName,
                                        nodeDirPerClusterStore.get(key).get(node.getId()))));
                        log.info("Successfully swapped '" + storeName + "' store on cluster " + url
                                + " and node " + node.getId());

                    }

                }
            } finally {
                if (adminClient != null) {
                    adminClient.stop();
                }
            }
        }
    } catch (Exception e) {

        log.error("Got an exception during swaps. Rolling back data already pushed on successful nodes");

        for (Pair<String, String> clusterStoreTuple : previousNodeDirPerClusterStore.keySet()) {
            Collection<Pair<Integer, String>> nodeToPreviousDirs = previousNodeDirPerClusterStore
                    .get(clusterStoreTuple);
            String url = clusterStoreTuple.getFirst();
            Cluster cluster = urlToCluster.get(url);

            log.info("Rolling back for cluster " + url + " and store  " + clusterStoreTuple.getSecond());

            AdminClient adminClient = new AdminClient(cluster, new AdminClientConfig());
            try {
                for (Pair<Integer, String> nodeToPreviousDir : nodeToPreviousDirs) {
                    log.info("Rolling back for cluster " + url + " and store " + clusterStoreTuple.getSecond()
                            + " and node " + nodeToPreviousDir.getFirst() + " to dir "
                            + nodeToPreviousDir.getSecond());
                    adminClient.rollbackStore(nodeToPreviousDir.getFirst(), nodeToPreviousDir.getSecond(),
                            ReadOnlyUtils.getVersionId(new File(nodeToPreviousDir.getSecond())));
                    log.info("Successfully rolled back for cluster " + url + " and store "
                            + clusterStoreTuple.getSecond() + " and node " + nodeToPreviousDir.getFirst()
                            + " to dir " + nodeToPreviousDir.getSecond());

                }
            } finally {
                if (adminClient != null) {
                    adminClient.stop();
                }
            }
        }
        throw e;
    }
}