Example usage for org.apache.hadoop.fs FileSystem mkdirs

List of usage examples for org.apache.hadoop.fs FileSystem mkdirs

Introduction

This page lists example usages of org.apache.hadoop.fs.FileSystem.mkdirs.

Prototype

public boolean mkdirs(Path f) throws IOException 

Document

Call #mkdirs(Path, FsPermission) with default permission.
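
Before the project examples below, here is a minimal, self-contained sketch of the call. The default Configuration and the path /tmp/example/output are placeholder assumptions for illustration; mkdirs behaves like mkdir -p, creating any missing parent directories and returning true on success.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MkdirsExample {
    public static void main(String[] args) throws IOException {
        // Load the default Hadoop configuration from the classpath.
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // Hypothetical target directory; intermediate directories are created as needed.
        Path dir = new Path("/tmp/example/output");
        boolean created = fs.mkdirs(dir);
        System.out.println("mkdirs returned " + created + " for " + dir);
    }
}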

Usage

From source file:nl.tudelft.graphalytics.mapreducev2.MapReduceJob.java

License:Apache License

@Override
public int run(String[] args) throws Exception {
    FileSystem dfs = FileSystem.get(getConf());
    String inPath = inputPath;

    while (!isFinished()) {
        iteration++;

        // Prepare job configuration
        JobConf jobConfiguration = new JobConf(this.getConf());
        jobConfiguration.setJarByClass(this.getClass());

        jobConfiguration.setMapOutputKeyClass(getMapOutputKeyClass());
        jobConfiguration.setMapOutputValueClass(getMapOutputValueClass());

        jobConfiguration.setMapperClass(getMapperClass());
        if (getCombinerClass() != null)
            jobConfiguration.setCombinerClass(getCombinerClass());
        jobConfiguration.setReducerClass(getReducerClass());

        jobConfiguration.setOutputKeyClass(getOutputKeyClass());
        jobConfiguration.setOutputValueClass(getOutputValueClass());

        jobConfiguration.setInputFormat(getInputFormatClass());
        jobConfiguration.setOutputFormat(getOutputFormatClass());

        if (getNumMappers() != -1)
            jobConfiguration.setNumMapTasks(getNumMappers());
        if (getNumReducers() != -1)
            jobConfiguration.setNumReduceTasks(getNumReducers());

        setConfigurationParameters(jobConfiguration);

        // Set the input and output paths
        String outPath = intermediatePath + "/iteration-" + iteration;
        FileInputFormat.addInputPath(jobConfiguration, new Path(inPath));
        FileOutputFormat.setOutputPath(jobConfiguration, new Path(outPath));

        // Execute the current iteration
        RunningJob jobExecution = JobClient.runJob(jobConfiguration);
        jobExecution.waitForCompletion();

        // Remove the output of the previous job (unless it is the input graph)
        if (iteration != 1) {
            dfs.delete(new Path(inPath), true);
        }
        inPath = outPath;

        processJobOutput(jobExecution);
    }

    // Rename the last job output to the specified output path
    try {
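        // Ensure the parent of the final output path exists before renaming the last iteration's output into place.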
        dfs.mkdirs(new Path(outputPath).getParent());
        dfs.rename(new Path(inPath), new Path(outputPath));
    } catch (Exception e) {
        LOG.warn("Failed to rename MapReduce job output.", e);
    }

    return 0;
}

From source file:nthu.scopelab.tsqr.ssvd.SSVDSolver.java

License:Apache License

/**
 * Run all SSVD jobs.
 *
 * @throws IOException
 *           if an I/O error occurs.
 */
public void run() throws Exception {
    try {
        System.out.println("SSVD start!");
        FileSystem fs = FileSystem.get(conf);

        Path qPath = new Path(outputPath, "Q-job");
        Path btPath = new Path(outputPath, "Bt-job");
        Path yPath = new Path(outputPath, "Y-job"); // test phase
        Path uHatPath = new Path(outputPath, "UHat");
        Path svPath = new Path(outputPath, "Sigma");
        Path uPath = new Path(outputPath, "U");
        Path vPath = new Path(outputPath, "V");

        if (overwrite) {
            fs.delete(outputPath, true);
        }

        int[] iseed = { 0, 0, 0, 1 };
        double[] x = new double[1];
        Dlarnv.dlarnv(2, iseed, 0, 1, x, 0);
        long seed = (long) (x[0] * (double) Long.MAX_VALUE);

        long start, end;

        start = new Date().getTime();
        QJob.run(conf, inputPath, qPath.toString(), reduceSchedule, k, p, seed, mis);
        end = new Date().getTime();
        System.out.println("Q-Job done " + Long.toString(end - start));
        Logger LOG = LoggerFactory.getLogger(SSVDSolver.class);

        /*
         * Restrict the number of reducers to a reasonable value so we don't have
         * to run too many additions in the frontend when reconstructing BBt for
         * the last B' and BB' computations. The user may not realize that and
         * give a bit too many (I would be happy if that were ever the case,
         * though).
         */

        start = new Date().getTime();
        BtJob.run(conf, inputPath, btPath, qPath.toString(), k, p, outerBlockHeight,
                q <= 0 ? Math.min(1000, reduceTasks) : reduceTasks, q <= 0, reduceSchedule, mis);

        end = new Date().getTime();
        System.out.println("Bt-Job done " + Long.toString(end - start));

        // Power iterations are unnecessary for the recommendation-system application.
        /*for (int i = 0; i < q; i++) {
            Path btPathGlob = new Path(btPath, BtJob.OUTPUT_BT + "-*");
            Path aBtPath = new Path(outputPath, String.format("ABt-job-%d", i + 1));
            qPath = new Path(outputPath, String.format("ABtQ-job-%d", i + 1));
            ABtDenseOutJob.run(conf,
                    inputPath,
                    btPathGlob,
                    aBtPath, //qPath,
                    //ablockRows,
                    //minSplitSize,
                    k,
                    p,
                    //abtBlockHeight,
                    reduceTasks,
                    //broadcast
                    mis);

            ToolRunner.run(conf, new QRFirstJob(), new String[]{
                    "-input", aBtPath.toString(),
                    "-output", qPath.toString(),
                    "-mis", Integer.toString(mis),
                    "-colsize", Integer.toString(k + p),
                    "-reduceSchedule", reduceSchedule});

            btPath = new Path(outputPath, String.format("Bt-job-%d", i + 1));

            BtJob.run(conf,
                    inputPath,
                    btPath,
                    qPath.toString(),
                    k,
                    p,
                    outerBlockHeight,
                    i == q - 1 ? Math.min(1000, reduceTasks) : reduceTasks,
                    i == q - 1,
                    reduceSchedule,
                    mis);
        }*/

        cmUpperTriangDenseMatrix bbt = loadAndSumUpperTriangMatrices(fs,
                new Path(btPath, BtJob.OUTPUT_BBT + "-*"), conf);

        // convert bbt to something our eigensolver could understand
        assert bbt.numColumns() == k + p;

        double[][] bbtSquare = new double[k + p][];
        for (int i = 0; i < k + p; i++) {
            bbtSquare[i] = new double[k + p];
        }

        for (int i = 0; i < k + p; i++) {
            for (int j = i; j < k + p; j++) {
                bbtSquare[i][j] = bbtSquare[j][i] = bbt.get(i, j);
            }
        }

        svalues = new double[k + p];

        // try something else.
        EigenSolver eigenWrapper = new EigenSolver(bbtSquare);
        double[] eigenva2 = eigenWrapper.getWR();

        for (int i = 0; i < k + p; i++) {
            svalues[i] = Math.sqrt(eigenva2[i]); // sqrt?
        }
        // save/redistribute UHat
        double[][] uHat = eigenWrapper.getVL();
        //double[][] uHat = eigenWrapper.getUHat();

        fs.mkdirs(uHatPath);
        SequenceFile.Writer uHatWriter = SequenceFile.createWriter(fs, conf,
                uHatPath = new Path(uHatPath, "uhat.seq"), IntWritable.class, VectorWritable.class,
                CompressionType.BLOCK);

        int m = uHat.length;
        IntWritable iw = new IntWritable();
        VectorWritable vw = new VectorWritable();

        for (int i = 0; i < m; i++) {
            vw.set(new DenseVector(uHat[i], true));
            iw.set(i);
            uHatWriter.append(iw, vw);
        }
        uHatWriter.close();

        SequenceFile.Writer svWriter = SequenceFile.createWriter(fs, conf,
                svPath = new Path(svPath, "svalues.seq"), IntWritable.class, VectorWritable.class,
                CompressionType.BLOCK);

        vw.set(new DenseVector(svalues, true));
        svWriter.append(iw, vw);

        svWriter.close();

        start = new Date().getTime();
        UJob ujob = null;
        if (computeU) {
            ujob = new UJob();
            ujob.start(conf, new Path(btPath, BtJob.Q_MAT + "-*"), uHatPath, svPath, uPath, k, cUHalfSigma,
                    mis);
            // actually this is a map-only job anyway
        }

        VJob vjob = null;
        if (computeV) {
            vjob = new VJob();
            vjob.start(conf, new Path(btPath, BtJob.OUTPUT_BT + "-*"), uHatPath, svPath, vPath, k, reduceTasks,
                    subRowSize, cVHalfSigma, mis);
        }

        if (ujob != null) {
            ujob.waitForCompletion();
            this.uPath = uPath.toString();
        }
        System.out.println("U-Job done ");

        if (vjob != null) {
            vjob.waitForCompletion();
            this.vPath = vPath.toString();
        }
        end = new Date().getTime();
        System.out.println("U-Job+V-Job done " + (end - start));

    } catch (InterruptedException exc) {
        throw new IOException("Interrupted", exc);
    } catch (ClassNotFoundException exc) {
        throw new IOException(exc);
    }

}

From source file:org.anon.smart.d2cache.store.fileStore.hadoop.HadoopFileStoreTransaction.java

License:Open Source License

@Override
public void commit() throws CtxException {

    FileSystem hdfs = ((HadoopFileStoreConnection) _connection).getHadoopFS();

    assertion().assertNotNull(hdfs, "Hadoop FileSystem is null");

    String repo = hdfs.getWorkingDirectory().toUri().toString();

    for (Object fi : files.keySet()) {
        try {

            String filePath = (String) fi;
            String[] tmp = filePath.split("/");
            String fileName = tmp[tmp.length - 1];

            Path fldr = new Path(files.get(fi));
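            // Create the destination folder on HDFS if it does not exist yet before copying the local file into it.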
            if (!hdfs.exists(fldr))
                hdfs.mkdirs(fldr);

            hdfs.copyFromLocalFile(true, new Path(filePath), new Path(files.get(fi) + "/" + fileName));

        } catch (Exception e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        }
    }

}

From source file:org.anon.smart.d2cache.store.fileStore.hadoop.HadoopStore.java

License:Open Source License

@Override
public void create(String name, Class cls) throws CtxException {
    FileSystem fs = ((HadoopFileStoreConnection) _connection).getHadoopFS();
    String repo = ((HadoopFileStoreConnection) _connection).getHadoopConf().baseDirectory();

    String baseDir = repo + "/" + name;

    try {

        Path wDir = new Path(baseDir);
        if (!fs.exists(wDir))
            fs.mkdirs(wDir);
        fs.setWorkingDirectory(wDir);
    } catch (IOException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }

}

From source file:org.apache.accumulo.core.client.mock.MockTableOperationsTest.java

License:Apache License

private ImportTestFilesAndData prepareTestFiles() throws Throwable {
    Configuration defaultConf = new Configuration();
    Path tempFile = new Path("target/accumulo-test/import/sample.rf");
    Path failures = new Path("target/accumulo-test/failures/");
    FileSystem fs = FileSystem.get(new URI("file:///"), defaultConf);
    fs.deleteOnExit(tempFile);
    fs.deleteOnExit(failures);
    fs.delete(failures, true);
    fs.delete(tempFile, true);
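    // Recreate the failures directory and the parent directory of the test rfile.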
    fs.mkdirs(failures);
    fs.mkdirs(tempFile.getParent());
    FileSKVWriter writer = FileOperations.getInstance().newWriterBuilder()
            .forFile(tempFile.toString(), fs, defaultConf)
            .withTableConfiguration(AccumuloConfiguration.getDefaultConfiguration()).build();
    writer.startDefaultLocalityGroup();
    List<Pair<Key, Value>> keyVals = new ArrayList<>();
    for (int i = 0; i < 5; i++) {
        keyVals.add(new Pair<>(new Key("a" + i, "b" + i, "c" + i, new ColumnVisibility(""), 1000L + i),
                new Value(Integer.toString(i).getBytes())));
    }
    for (Pair<Key, Value> keyVal : keyVals) {
        writer.append(keyVal.getFirst(), keyVal.getSecond());
    }
    writer.close();
    ImportTestFilesAndData files = new ImportTestFilesAndData();
    files.failurePath = failures;
    files.importPath = tempFile.getParent();
    files.keyVals = keyVals;
    return files;
}

From source file:org.apache.accumulo.examples.mapreduce.bulk.BulkIngestExample.java

License:Apache License

@Override
public int run(String[] args) {
    Opts opts = new Opts();
    opts.parseArgs(BulkIngestExample.class.getName(), args);

    Configuration conf = getConf();
    PrintStream out = null;
    try {
        Job job = Job.getInstance(conf);
        job.setJobName("bulk ingest example");
        job.setJarByClass(this.getClass());

        job.setInputFormatClass(TextInputFormat.class);

        job.setMapperClass(MapClass.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);

        job.setReducerClass(ReduceClass.class);
        job.setOutputFormatClass(AccumuloFileOutputFormat.class);
        opts.setAccumuloConfigs(job);

        Connector connector = opts.getConnector();

        TextInputFormat.setInputPaths(job, new Path(opts.inputDir));
        AccumuloFileOutputFormat.setOutputPath(job, new Path(opts.workDir + "/files"));

        FileSystem fs = FileSystem.get(conf);
        out = new PrintStream(new BufferedOutputStream(fs.create(new Path(opts.workDir + "/splits.txt"))));

        Collection<Text> splits = connector.tableOperations().listSplits(opts.getTableName(), 100);
        for (Text split : splits)
            out.println(Base64.getEncoder().encodeToString(TextUtil.getBytes(split)));

        job.setNumReduceTasks(splits.size() + 1);
        out.close();

        job.setPartitionerClass(RangePartitioner.class);
        RangePartitioner.setSplitFile(job, opts.workDir + "/splits.txt");

        job.waitForCompletion(true);
        Path failures = new Path(opts.workDir, "failures");
        fs.delete(failures, true);
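    // Recreate an empty failures directory for the bulk import below.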
        fs.mkdirs(new Path(opts.workDir, "failures"));
        // With HDFS permissions on, we need to make sure the Accumulo user can read/move the rfiles
        FsShell fsShell = new FsShell(conf);
        fsShell.run(new String[] { "-chmod", "-R", "777", opts.workDir });
        connector.tableOperations().importDirectory(opts.getTableName(), opts.workDir + "/files",
                opts.workDir + "/failures", false);

    } catch (Exception e) {
        throw new RuntimeException(e);
    } finally {
        if (out != null)
            out.close();
    }

    return 0;
}

From source file:org.apache.accumulo.examples.simple.mapreduce.bulk.BulkIngestExample.java

License:Apache License

public int run(String[] args) {
    if (args.length != 7) {
        System.out.println("ERROR: Wrong number of parameters: " + args.length + " instead of 7.");
        return printUsage();
    }

    Configuration conf = getConf();
    PrintStream out = null;
    try {
        Job job = new Job(conf, "bulk ingest example");
        job.setJarByClass(this.getClass());

        job.setInputFormatClass(TextInputFormat.class);

        job.setMapperClass(MapClass.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);

        job.setReducerClass(ReduceClass.class);
        job.setOutputFormatClass(AccumuloFileOutputFormat.class);

        Instance instance = new ZooKeeperInstance(args[0], args[1]);
        String user = args[2];
        byte[] pass = args[3].getBytes();
        String tableName = args[4];
        String inputDir = args[5];
        String workDir = args[6];

        Connector connector = instance.getConnector(user, pass);

        TextInputFormat.setInputPaths(job, new Path(inputDir));
        AccumuloFileOutputFormat.setOutputPath(job, new Path(workDir + "/files"));

        FileSystem fs = FileSystem.get(conf);
        out = new PrintStream(new BufferedOutputStream(fs.create(new Path(workDir + "/splits.txt"))));

        Collection<Text> splits = connector.tableOperations().getSplits(tableName, 100);
        for (Text split : splits)
            out.println(new String(Base64.encodeBase64(TextUtil.getBytes(split))));

        job.setNumReduceTasks(splits.size() + 1);
        out.close();

        job.setPartitionerClass(RangePartitioner.class);
        RangePartitioner.setSplitFile(job, workDir + "/splits.txt");

        job.waitForCompletion(true);
        Path failures = new Path(workDir, "failures");
        fs.delete(failures, true);
        fs.mkdirs(new Path(workDir, "failures"));
        connector.tableOperations().importDirectory(tableName, workDir + "/files", workDir + "/failures",
                false);

    } catch (Exception e) {
        throw new RuntimeException(e);
    } finally {
        if (out != null)
            out.close();
    }

    return 0;
}

From source file:org.apache.accumulo.examples.simple.mapreduce.bulk.BulkIngestExample.java

License:Apache License

@Override
public int run(String[] args) {
    Opts opts = new Opts();
    opts.parseArgs(BulkIngestExample.class.getName(), args);

    Configuration conf = getConf();
    PrintStream out = null;
    try {
        Job job = JobUtil.getJob(conf);
        job.setJobName("bulk ingest example");
        job.setJarByClass(this.getClass());

        job.setInputFormatClass(TextInputFormat.class);

        job.setMapperClass(MapClass.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);

        job.setReducerClass(ReduceClass.class);
        job.setOutputFormatClass(AccumuloFileOutputFormat.class);
        opts.setAccumuloConfigs(job);

        Connector connector = opts.getConnector();

        TextInputFormat.setInputPaths(job, new Path(opts.inputDir));
        AccumuloFileOutputFormat.setOutputPath(job, new Path(opts.workDir + "/files"));

        FileSystem fs = FileSystem.get(conf);
        out = new PrintStream(new BufferedOutputStream(fs.create(new Path(opts.workDir + "/splits.txt"))));

        Collection<Text> splits = connector.tableOperations().listSplits(opts.getTableName(), 100);
        for (Text split : splits)
            out.println(Base64.encodeBase64String(TextUtil.getBytes(split)));

        job.setNumReduceTasks(splits.size() + 1);
        out.close();

        job.setPartitionerClass(RangePartitioner.class);
        RangePartitioner.setSplitFile(job, opts.workDir + "/splits.txt");

        job.waitForCompletion(true);
        Path failures = new Path(opts.workDir, "failures");
        fs.delete(failures, true);
        fs.mkdirs(new Path(opts.workDir, "failures"));
        connector.tableOperations().importDirectory(opts.getTableName(), opts.workDir + "/files",
                opts.workDir + "/failures", false);

    } catch (Exception e) {
        throw new RuntimeException(e);
    } finally {
        if (out != null)
            out.close();
    }

    return 0;
}

From source file:org.apache.accumulo.examples.wikisearch.ingest.WikipediaPartitionedIngester.java

License:Apache License

private int loadBulkFiles()
        throws IOException, AccumuloException, AccumuloSecurityException, TableNotFoundException {
    Configuration conf = getConf();

    Connector connector = WikipediaConfiguration.getConnector(conf);

    FileSystem fs = FileSystem.get(conf);
    String directory = WikipediaConfiguration.bulkIngestDir(conf);

    String failureDirectory = WikipediaConfiguration.bulkIngestFailureDir(conf);

    for (FileStatus status : fs.listStatus(new Path(directory))) {
        if (status.isDir() == false) {
            continue;
        }
        Path dir = status.getPath();
        Path failPath = new Path(failureDirectory + "/" + dir.getName());
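        // Create a per-table failures directory before importing the bulk files.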
        fs.mkdirs(failPath);
        connector.tableOperations().importDirectory(dir.getName(), dir.toString(), failPath.toString(), true);
    }

    return 0;
}

From source file:org.apache.accumulo.proxy.SimpleProxyBase.java

License:Apache License

@Test
public void testTableOperations() throws Exception {
    final String TABLE_TEST = makeTableName();

    client.createTable(creds, TABLE_TEST, true, TimeType.MILLIS);
    // constraints
    client.addConstraint(creds, TABLE_TEST, NumericValueConstraint.class.getName());
    assertEquals(2, client.listConstraints(creds, TABLE_TEST).size());

    UtilWaitThread.sleep(2000);

    client.updateAndFlush(creds, TABLE_TEST, mutation("row1", "cf", "cq", "123"));

    try {
        client.updateAndFlush(creds, TABLE_TEST, mutation("row1", "cf", "cq", "x"));
        fail("constraint did not fire");
    } catch (MutationsRejectedException ex) {
    }

    client.removeConstraint(creds, TABLE_TEST, 2);

    UtilWaitThread.sleep(2000);

    assertEquals(1, client.listConstraints(creds, TABLE_TEST).size());

    client.updateAndFlush(creds, TABLE_TEST, mutation("row1", "cf", "cq", "x"));
    assertScan(new String[][] { { "row1", "cf", "cq", "x" } }, TABLE_TEST);
    // splits, merge
    client.addSplits(creds, TABLE_TEST,
            new HashSet<ByteBuffer>(Arrays.asList(s2bb("a"), s2bb("m"), s2bb("z"))));
    List<ByteBuffer> splits = client.listSplits(creds, TABLE_TEST, 1);
    assertEquals(Arrays.asList(s2bb("m")), splits);
    client.mergeTablets(creds, TABLE_TEST, null, s2bb("m"));
    splits = client.listSplits(creds, TABLE_TEST, 10);
    assertEquals(Arrays.asList(s2bb("m"), s2bb("z")), splits);
    client.mergeTablets(creds, TABLE_TEST, null, null);
    splits = client.listSplits(creds, TABLE_TEST, 10);
    List<ByteBuffer> empty = Collections.emptyList();
    assertEquals(empty, splits);
    // iterators
    client.deleteTable(creds, TABLE_TEST);
    client.createTable(creds, TABLE_TEST, true, TimeType.MILLIS);
    HashMap<String, String> options = new HashMap<String, String>();
    options.put("type", "STRING");
    options.put("columns", "cf");
    IteratorSetting setting = new IteratorSetting(10, TABLE_TEST, SummingCombiner.class.getName(), options);
    client.attachIterator(creds, TABLE_TEST, setting, EnumSet.allOf(IteratorScope.class));
    for (int i = 0; i < 10; i++) {
        client.updateAndFlush(creds, TABLE_TEST, mutation("row1", "cf", "cq", "1"));
    }
    assertScan(new String[][] { { "row1", "cf", "cq", "10" } }, TABLE_TEST);
    try {
        client.checkIteratorConflicts(creds, TABLE_TEST, setting, EnumSet.allOf(IteratorScope.class));
        fail("checkIteratorConflicts did not throw an exception");
    } catch (Exception ex) {
    }
    client.deleteRows(creds, TABLE_TEST, null, null);
    client.removeIterator(creds, TABLE_TEST, "test", EnumSet.allOf(IteratorScope.class));
    String[][] expected = new String[10][];
    for (int i = 0; i < 10; i++) {
        client.updateAndFlush(creds, TABLE_TEST, mutation("row" + i, "cf", "cq", "" + i));
        expected[i] = new String[] { "row" + i, "cf", "cq", "" + i };
        client.flushTable(creds, TABLE_TEST, null, null, true);
    }
    assertScan(expected, TABLE_TEST);
    // clone
    final String TABLE_TEST2 = makeTableName();
    client.cloneTable(creds, TABLE_TEST, TABLE_TEST2, true, null, null);
    assertScan(expected, TABLE_TEST2);
    client.deleteTable(creds, TABLE_TEST2);

    // don't know how to test this, call it just for fun
    client.clearLocatorCache(creds, TABLE_TEST);

    // compact
    client.compactTable(creds, TABLE_TEST, null, null, null, true, true, null);
    assertEquals(1, countFiles(TABLE_TEST));
    assertScan(expected, TABLE_TEST);

    // get disk usage
    client.cloneTable(creds, TABLE_TEST, TABLE_TEST2, true, null, null);
    Set<String> tablesToScan = new HashSet<String>();
    tablesToScan.add(TABLE_TEST);
    tablesToScan.add(TABLE_TEST2);
    tablesToScan.add("foo");
    client.createTable(creds, "foo", true, TimeType.MILLIS);
    List<DiskUsage> diskUsage = (client.getDiskUsage(creds, tablesToScan));
    assertEquals(2, diskUsage.size());
    assertEquals(1, diskUsage.get(0).getTables().size());
    assertEquals(2, diskUsage.get(1).getTables().size());
    client.compactTable(creds, TABLE_TEST2, null, null, null, true, true, null);
    diskUsage = (client.getDiskUsage(creds, tablesToScan));
    assertEquals(3, diskUsage.size());
    assertEquals(1, diskUsage.get(0).getTables().size());
    assertEquals(1, diskUsage.get(1).getTables().size());
    assertEquals(1, diskUsage.get(2).getTables().size());
    client.deleteTable(creds, "foo");
    client.deleteTable(creds, TABLE_TEST2);

    // export/import
    File dir = tempFolder.newFolder("test");
    File destDir = tempFolder.newFolder("test_dest");
    client.offlineTable(creds, TABLE_TEST, false);
    client.exportTable(creds, TABLE_TEST, dir.getAbsolutePath());
    // copy files to a new location
    FileSystem fs = FileSystem.get(new Configuration());
    FSDataInputStream is = fs.open(new Path(dir + "/distcp.txt"));
    BufferedReader r = new BufferedReader(new InputStreamReader(is));
    while (true) {
        String line = r.readLine();
        if (line == null)
            break;
        Path srcPath = new Path(line);
        FileUtils.copyFile(new File(srcPath.toUri().getPath()), new File(destDir, srcPath.getName()));
    }
    client.deleteTable(creds, TABLE_TEST);
    client.importTable(creds, "testify", destDir.getAbsolutePath());
    assertScan(expected, "testify");
    client.deleteTable(creds, "testify");

    try {
        // ACCUMULO-1558 a second import from the same dir should fail, the first import moved the files
        client.importTable(creds, "testify2", destDir.getAbsolutePath());
        fail();
    } catch (Exception e) {
    }

    assertFalse(client.listTables(creds).contains("testify2"));

    // Locality groups
    client.createTable(creds, "test", true, TimeType.MILLIS);
    Map<String, Set<String>> groups = new HashMap<String, Set<String>>();
    groups.put("group1", Collections.singleton("cf1"));
    groups.put("group2", Collections.singleton("cf2"));
    client.setLocalityGroups(creds, "test", groups);
    assertEquals(groups, client.getLocalityGroups(creds, "test"));
    // table properties
    Map<String, String> orig = client.getTableProperties(creds, "test");
    client.setTableProperty(creds, "test", "table.split.threshold", "500M");
    Map<String, String> update = client.getTableProperties(creds, "test");
    assertEquals(update.get("table.split.threshold"), "500M");
    client.removeTableProperty(creds, "test", "table.split.threshold");
    update = client.getTableProperties(creds, "test");
    assertEquals(orig, update);
    // rename table
    Map<String, String> tables = client.tableIdMap(creds);
    client.renameTable(creds, "test", "bar");
    Map<String, String> tables2 = client.tableIdMap(creds);
    assertEquals(tables.get("test"), tables2.get("bar"));
    // table exists
    assertTrue(client.tableExists(creds, "bar"));
    assertFalse(client.tableExists(creds, "test"));
    // bulk import
    String filename = dir + "/bulk/import/rfile.rf";
    FileSKVWriter writer = FileOperations.getInstance().openWriter(filename, fs, fs.getConf(),
            DefaultConfiguration.getInstance());
    writer.startDefaultLocalityGroup();
    writer.append(new org.apache.accumulo.core.data.Key(new Text("a"), new Text("b"), new Text("c")),
            new Value("value".getBytes()));
    writer.close();
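    // Create the failure directory that importDirectory expects to exist.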
    fs.mkdirs(new Path(dir + "/bulk/fail"));
    client.importDirectory(creds, "bar", dir + "/bulk/import", dir + "/bulk/fail", true);
    String scanner = client.createScanner(creds, "bar", null);
    ScanResult more = client.nextK(scanner, 100);
    client.closeScanner(scanner);
    assertEquals(1, more.results.size());
    ByteBuffer maxRow = client.getMaxRow(creds, "bar", null, null, false, null, false);
    assertEquals(s2bb("a"), maxRow);

    assertFalse(client.testTableClassLoad(creds, "bar", "abc123", SortedKeyValueIterator.class.getName()));
    assertTrue(client.testTableClassLoad(creds, "bar", VersioningIterator.class.getName(),
            SortedKeyValueIterator.class.getName()));
}