Example usage for org.apache.hadoop.fs FileSystem getConf

List of usage examples for org.apache.hadoop.fs FileSystem getConf

Introduction

On this page you can find example usages of org.apache.hadoop.fs.FileSystem.getConf().

Prototype

@Override
public Configuration getConf()
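
Before the full examples, here is a minimal sketch of the call itself. getConf() hands back the Configuration the FileSystem instance was initialized with, so downstream APIs can reuse the same settings; the wrapper method name showGetConf is illustrative only, and fs.defaultFS is a standard Hadoop configuration key used here purely as an example.

public static void showGetConf() throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    // Reuse the Configuration the FileSystem was created with.
    Configuration conf = fs.getConf();
    System.out.println(conf.get("fs.defaultFS")); // read a setting back out
}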

Usage

From source file: org.apache.accumulo.proxy.SimpleProxyIT.java

License: Apache License

@Test
public void testTableOperations() throws Exception {
    final String TABLE_TEST = makeTableName();

    client.createTable(creds, TABLE_TEST, true, TimeType.MILLIS);
    // constraints
    client.addConstraint(creds, TABLE_TEST, NumericValueConstraint.class.getName());
    assertEquals(2, client.listConstraints(creds, TABLE_TEST).size());

    UtilWaitThread.sleep(2000);

    client.updateAndFlush(creds, TABLE_TEST, mutation("row1", "cf", "cq", "123"));

    try {
        client.updateAndFlush(creds, TABLE_TEST, mutation("row1", "cf", "cq", "x"));
        fail("constraint did not fire");
    } catch (MutationsRejectedException ex) {
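        // expected: the NumericValueConstraint rejects the non-numeric value "x"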
    }

    client.removeConstraint(creds, TABLE_TEST, 2);

    UtilWaitThread.sleep(2000);

    assertEquals(1, client.listConstraints(creds, TABLE_TEST).size());

    client.updateAndFlush(creds, TABLE_TEST, mutation("row1", "cf", "cq", "x"));
    assertScan(new String[][] { { "row1", "cf", "cq", "x" } }, TABLE_TEST);
    // splits, merge
    client.addSplits(creds, TABLE_TEST,
            new HashSet<ByteBuffer>(Arrays.asList(s2bb("a"), s2bb("m"), s2bb("z"))));
    List<ByteBuffer> splits = client.listSplits(creds, TABLE_TEST, 1);
    assertEquals(Arrays.asList(s2bb("m")), splits);
    client.mergeTablets(creds, TABLE_TEST, null, s2bb("m"));
    splits = client.listSplits(creds, TABLE_TEST, 10);
    assertEquals(Arrays.asList(s2bb("m"), s2bb("z")), splits);
    client.mergeTablets(creds, TABLE_TEST, null, null);
    splits = client.listSplits(creds, TABLE_TEST, 10);
    List<ByteBuffer> empty = Collections.emptyList();
    assertEquals(empty, splits);
    // iterators
    client.deleteTable(creds, TABLE_TEST);
    client.createTable(creds, TABLE_TEST, true, TimeType.MILLIS);
    HashMap<String, String> options = new HashMap<String, String>();
    options.put("type", "STRING");
    options.put("columns", "cf");
    IteratorSetting setting = new IteratorSetting(10, TABLE_TEST, SummingCombiner.class.getName(), options);
    client.attachIterator(creds, TABLE_TEST, setting, EnumSet.allOf(IteratorScope.class));
    for (int i = 0; i < 10; i++) {
        client.updateAndFlush(creds, TABLE_TEST, mutation("row1", "cf", "cq", "1"));
    }
    assertScan(new String[][] { { "row1", "cf", "cq", "10" } }, TABLE_TEST);
    try {
        client.checkIteratorConflicts(creds, TABLE_TEST, setting, EnumSet.allOf(IteratorScope.class));
        fail("checkIteratorConflicts did not throw an exception");
    } catch (Exception ex) {
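        // expected: the combiner is already attached, so a conflict is reported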
    }
    client.deleteRows(creds, TABLE_TEST, null, null);
    client.removeIterator(creds, TABLE_TEST, "test", EnumSet.allOf(IteratorScope.class));
    String[][] expected = new String[10][];
    for (int i = 0; i < 10; i++) {
        client.updateAndFlush(creds, TABLE_TEST, mutation("row" + i, "cf", "cq", "" + i));
        expected[i] = new String[] { "row" + i, "cf", "cq", "" + i };
        client.flushTable(creds, TABLE_TEST, null, null, true);
    }
    assertScan(expected, TABLE_TEST);
    // clone
    final String TABLE_TEST2 = makeTableName();
    client.cloneTable(creds, TABLE_TEST, TABLE_TEST2, true, null, null);
    assertScan(expected, TABLE_TEST2);
    client.deleteTable(creds, TABLE_TEST2);

    // don't know how to test this, call it just for fun
    client.clearLocatorCache(creds, TABLE_TEST);

    // compact
    client.compactTable(creds, TABLE_TEST, null, null, null, true, true);
    assertEquals(1, countFiles(TABLE_TEST));
    assertScan(expected, TABLE_TEST);

    // get disk usage
    client.cloneTable(creds, TABLE_TEST, TABLE_TEST2, true, null, null);
    Set<String> tablesToScan = new HashSet<String>();
    tablesToScan.add(TABLE_TEST);
    tablesToScan.add(TABLE_TEST2);
    tablesToScan.add("foo");
    client.createTable(creds, "foo", true, TimeType.MILLIS);
    List<DiskUsage> diskUsage = client.getDiskUsage(creds, tablesToScan);
    assertEquals(2, diskUsage.size());
    assertEquals(1, diskUsage.get(0).getTables().size());
    assertEquals(2, diskUsage.get(1).getTables().size());
    client.compactTable(creds, TABLE_TEST2, null, null, null, true, true);
    diskUsage = client.getDiskUsage(creds, tablesToScan);
    assertEquals(3, diskUsage.size());
    assertEquals(1, diskUsage.get(0).getTables().size());
    assertEquals(1, diskUsage.get(1).getTables().size());
    assertEquals(1, diskUsage.get(2).getTables().size());
    client.deleteTable(creds, "foo");
    client.deleteTable(creds, TABLE_TEST2);

    // export/import
    File dir = tempFolder.newFolder("test");
    File destDir = tempFolder.newFolder("test_dest");
    client.offlineTable(creds, TABLE_TEST, false);
    client.exportTable(creds, TABLE_TEST, dir.getAbsolutePath());
    // copy files to a new location
    FileSystem fs = FileSystem.get(new Configuration());
    FSDataInputStream is = fs.open(new Path(dir + "/distcp.txt"));
    BufferedReader r = new BufferedReader(new InputStreamReader(is));
    while (true) {
        String line = r.readLine();
        if (line == null)
            break;
        Path srcPath = new Path(line);
        FileUtils.copyFile(new File(srcPath.toUri().getPath()), new File(destDir, srcPath.getName()));
    }
    client.deleteTable(creds, TABLE_TEST);
    client.importTable(creds, "testify", destDir.getAbsolutePath());
    assertScan(expected, "testify");
    client.deleteTable(creds, "testify");

    try {
        // ACCUMULO-1558 a second import from the same dir should fail, the first import moved the files
        client.importTable(creds, "testify2", destDir.getAbsolutePath());
        fail();
    } catch (Exception e) {
    }

    assertFalse(client.listTables(creds).contains("testify2"));

    // Locality groups
    client.createTable(creds, "test", true, TimeType.MILLIS);
    Map<String, Set<String>> groups = new HashMap<String, Set<String>>();
    groups.put("group1", Collections.singleton("cf1"));
    groups.put("group2", Collections.singleton("cf2"));
    client.setLocalityGroups(creds, "test", groups);
    assertEquals(groups, client.getLocalityGroups(creds, "test"));
    // table properties
    Map<String, String> orig = client.getTableProperties(creds, "test");
    client.setTableProperty(creds, "test", "table.split.threshold", "500M");
    Map<String, String> update = client.getTableProperties(creds, "test");
    assertEquals(update.get("table.split.threshold"), "500M");
    client.removeTableProperty(creds, "test", "table.split.threshold");
    update = client.getTableProperties(creds, "test");
    assertEquals(orig, update);
    // rename table
    Map<String, String> tables = client.tableIdMap(creds);
    client.renameTable(creds, "test", "bar");
    Map<String, String> tables2 = client.tableIdMap(creds);
    assertEquals(tables.get("test"), tables2.get("bar"));
    // table exists
    assertTrue(client.tableExists(creds, "bar"));
    assertFalse(client.tableExists(creds, "test"));
    // bulk import
    String filename = dir + "/bulk/import/rfile.rf";
    FileSKVWriter writer = FileOperations.getInstance().openWriter(filename, fs, fs.getConf(),
            DefaultConfiguration.getInstance());
    writer.startDefaultLocalityGroup();
    writer.append(new org.apache.accumulo.core.data.Key(new Text("a"), new Text("b"), new Text("c")),
            new Value("value".getBytes()));
    writer.close();
    fs.mkdirs(new Path(dir + "/bulk/fail"));
    client.importDirectory(creds, "bar", dir + "/bulk/import", dir + "/bulk/fail", true);
    String scanner = client.createScanner(creds, "bar", null);
    ScanResult more = client.nextK(scanner, 100);
    client.closeScanner(scanner);
    assertEquals(1, more.results.size());
    ByteBuffer maxRow = client.getMaxRow(creds, "bar", null, null, false, null, false);
    assertEquals(s2bb("a"), maxRow);

    assertFalse(client.testTableClassLoad(creds, "bar", "abc123", SortedKeyValueIterator.class.getName()));
    assertTrue(client.testTableClassLoad(creds, "bar", VersioningIterator.class.getName(),
            SortedKeyValueIterator.class.getName()));
}

From source file: org.apache.accumulo.proxy.SimpleTest.java

License: Apache License

@Test
public void testTableOperations() throws Exception {
    final String TABLE_TEST = makeTableName();

    client.createTable(creds, TABLE_TEST, true, TimeType.MILLIS);
    // constraints
    client.addConstraint(creds, TABLE_TEST, NumericValueConstraint.class.getName());
    assertEquals(2, client.listConstraints(creds, TABLE_TEST).size());
    client.updateAndFlush(creds, TABLE_TEST, mutation("row1", "cf", "cq", "123"));

    try {
        client.updateAndFlush(creds, TABLE_TEST, mutation("row1", "cf", "cq", "x"));
        fail("constraint did not fire");
    } catch (MutationsRejectedException ex) {
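        // expected: the NumericValueConstraint rejects the non-numeric value "x"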
    }

    client.removeConstraint(creds, TABLE_TEST, 2);
    assertEquals(1, client.listConstraints(creds, TABLE_TEST).size());
    client.updateAndFlush(creds, TABLE_TEST, mutation("row1", "cf", "cq", "x"));
    String scanner = client.createScanner(creds, TABLE_TEST, null);
    ScanResult more = client.nextK(scanner, 2);
    client.closeScanner(scanner);
    assertFalse(more.isMore());
    assertEquals(1, more.getResults().size());
    assertEquals(s2bb("x"), more.getResults().get(0).value);
    // splits, merge
    client.addSplits(creds, TABLE_TEST,
            new HashSet<ByteBuffer>(Arrays.asList(s2bb("a"), s2bb("m"), s2bb("z"))));
    List<ByteBuffer> splits = client.listSplits(creds, TABLE_TEST, 1);
    assertEquals(Arrays.asList(s2bb("m")), splits);
    client.mergeTablets(creds, TABLE_TEST, null, s2bb("m"));
    splits = client.listSplits(creds, TABLE_TEST, 10);
    assertEquals(Arrays.asList(s2bb("m"), s2bb("z")), splits);
    client.mergeTablets(creds, TABLE_TEST, null, null);
    splits = client.listSplits(creds, TABLE_TEST, 10);
    List<ByteBuffer> empty = Collections.emptyList();
    assertEquals(empty, splits);
    // iterators
    client.deleteTable(creds, TABLE_TEST);
    client.createTable(creds, TABLE_TEST, true, TimeType.MILLIS);
    HashMap<String, String> options = new HashMap<String, String>();
    options.put("type", "STRING");
    options.put("columns", "cf");
    IteratorSetting setting = new IteratorSetting(10, TABLE_TEST, SummingCombiner.class.getName(), options);
    client.attachIterator(creds, TABLE_TEST, setting, EnumSet.allOf(IteratorScope.class));
    for (int i = 0; i < 10; i++) {
        client.updateAndFlush(creds, TABLE_TEST, mutation("row1", "cf", "cq", "1"));
    }
    scanner = client.createScanner(creds, TABLE_TEST, null);
    more = client.nextK(scanner, 2);
    client.closeScanner(scanner);
    assertEquals("10", new String(more.getResults().get(0).getValue()));
    try {
        client.checkIteratorConflicts(creds, TABLE_TEST, setting, EnumSet.allOf(IteratorScope.class));
        fail("checkIteratorConflicts did not throw an exception");
    } catch (Exception ex) {
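        // expected: the combiner is already attached, so a conflict is reported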
    }
    client.deleteRows(creds, TABLE_TEST, null, null);
    client.removeIterator(creds, TABLE_TEST, "test", EnumSet.allOf(IteratorScope.class));
    for (int i = 0; i < 10; i++) {
        client.updateAndFlush(creds, TABLE_TEST, mutation("row" + i, "cf", "cq", "" + i));
        client.flushTable(creds, TABLE_TEST, null, null, true);
    }
    scanner = client.createScanner(creds, TABLE_TEST, null);
    more = client.nextK(scanner, 100);
    client.closeScanner(scanner);
    assertEquals(10, more.getResults().size());
    // clone
    final String TABLE_TEST2 = makeTableName();
    client.cloneTable(creds, TABLE_TEST, TABLE_TEST2, true, null, null);
    scanner = client.createScanner(creds, TABLE_TEST2, null);
    more = client.nextK(scanner, 100);
    client.closeScanner(scanner);
    assertEquals(10, more.getResults().size());
    client.deleteTable(creds, TABLE_TEST2);

    // don't know how to test this, call it just for fun
    client.clearLocatorCache(creds, TABLE_TEST);

    // compact
    client.compactTable(creds, TABLE_TEST, null, null, null, true, true);
    assertEquals(1, countFiles(TABLE_TEST));

    // get disk usage
    client.cloneTable(creds, TABLE_TEST, TABLE_TEST2, true, null, null);
    Set<String> tablesToScan = new HashSet<String>();
    tablesToScan.add(TABLE_TEST);
    tablesToScan.add(TABLE_TEST2);
    tablesToScan.add("foo");
    client.createTable(creds, "foo", true, TimeType.MILLIS);
    List<DiskUsage> diskUsage = client.getDiskUsage(creds, tablesToScan);
    assertEquals(2, diskUsage.size());
    assertEquals(1, diskUsage.get(0).getTables().size());
    assertEquals(2, diskUsage.get(1).getTables().size());
    client.compactTable(creds, TABLE_TEST2, null, null, null, true, true);
    diskUsage = client.getDiskUsage(creds, tablesToScan);
    assertEquals(3, diskUsage.size());
    assertEquals(1, diskUsage.get(0).getTables().size());
    assertEquals(1, diskUsage.get(1).getTables().size());
    assertEquals(1, diskUsage.get(2).getTables().size());
    client.deleteTable(creds, "foo");
    client.deleteTable(creds, TABLE_TEST2);

    // export/import
    String dir = folder.getRoot() + "/test";
    String destDir = folder.getRoot() + "/test_dest";
    client.offlineTable(creds, TABLE_TEST);
    client.exportTable(creds, TABLE_TEST, dir);
    // copy files to a new location
    FileSystem fs = FileSystem.get(new Configuration());
    FSDataInputStream is = fs.open(new Path(dir + "/distcp.txt"));
    BufferedReader r = new BufferedReader(new InputStreamReader(is));
    while (true) {
        String line = r.readLine();
        if (line == null)
            break;
        Path srcPath = new Path(line);
        FileUtils.copyFile(new File(srcPath.toUri().getPath()), new File(destDir, srcPath.getName()));
    }
    client.deleteTable(creds, TABLE_TEST);
    client.importTable(creds, "testify", destDir);
    scanner = client.createScanner(creds, "testify", null);
    more = client.nextK(scanner, 100);
    client.closeScanner(scanner);
    assertEquals(10, more.results.size());

    try {
        // ACCUMULO-1558 a second import from the same dir should fail, the first import moved the files
        client.importTable(creds, "testify2", destDir);
        fail();
    } catch (Exception e) {
    }

    assertFalse(client.listTables(creds).contains("testify2"));

    // Locality groups
    client.createTable(creds, "test", true, TimeType.MILLIS);
    Map<String, Set<String>> groups = new HashMap<String, Set<String>>();
    groups.put("group1", Collections.singleton("cf1"));
    groups.put("group2", Collections.singleton("cf2"));
    client.setLocalityGroups(creds, "test", groups);
    assertEquals(groups, client.getLocalityGroups(creds, "test"));
    // table properties
    Map<String, String> orig = client.getTableProperties(creds, "test");
    client.setTableProperty(creds, "test", "table.split.threshold", "500M");
    Map<String, String> update = client.getTableProperties(creds, "test");
    for (int i = 0; i < 5; i++) {
        if (update.get("table.split.threshold").equals("500M"))
            break;
        UtilWaitThread.sleep(200);
    }
    assertEquals("500M", update.get("table.split.threshold"));
    client.removeTableProperty(creds, "test", "table.split.threshold");
    update = client.getTableProperties(creds, "test");
    assertEquals(orig, update);
    // rename table
    Map<String, String> tables = client.tableIdMap(creds);
    client.renameTable(creds, "test", "bar");
    Map<String, String> tables2 = client.tableIdMap(creds);
    assertEquals(tables.get("test"), tables2.get("bar"));
    // table exists
    assertTrue(client.tableExists(creds, "bar"));
    assertFalse(client.tableExists(creds, "test"));
    // bulk import
    String filename = dir + "/bulk/import/rfile.rf";
    FileSKVWriter writer = FileOperations.getInstance().openWriter(filename, fs, fs.getConf(),
            DefaultConfiguration.getInstance());
    writer.startDefaultLocalityGroup();
    writer.append(new org.apache.accumulo.core.data.Key(new Text("a"), new Text("b"), new Text("c")),
            new Value("value".getBytes()));
    writer.close();
    fs.mkdirs(new Path(dir + "/bulk/fail"));
    client.importDirectory(creds, "bar", dir + "/bulk/import", dir + "/bulk/fail", true);
    scanner = client.createScanner(creds, "bar", null);
    more = client.nextK(scanner, 100);
    client.closeScanner(scanner);
    assertEquals(1, more.results.size());
    ByteBuffer maxRow = client.getMaxRow(creds, "bar", null, null, false, null, false);
    assertEquals(s2bb("a"), maxRow);

    assertFalse(client.testTableClassLoad(creds, "bar", "abc123", SortedKeyValueIterator.class.getName()));
    assertTrue(client.testTableClassLoad(creds, "bar", VersioningIterator.class.getName(),
            SortedKeyValueIterator.class.getName()));
}

From source file: org.apache.accumulo.server.client.BulkImporter.java

License: Apache License

public static List<TabletLocation> findOverlappingTablets(ClientContext context, VolumeManager vm,
        TabletLocator locator, Path file, Text startRow, Text endRow) throws Exception {
    List<TabletLocation> result = new ArrayList<>();
    Collection<ByteSequence> columnFamilies = Collections.emptyList();
    String filename = file.toString();
    // log.debug(filename + " finding overlapping tablets " + startRow + " -> " + endRow);
    FileSystem fs = vm.getVolumeByPath(file).getFileSystem();
    FileSKVIterator reader = FileOperations.getInstance().newReaderBuilder().forFile(filename, fs, fs.getConf())
            .withTableConfiguration(context.getConfiguration()).seekToBeginning().build();
    try {
        Text row = startRow;
        if (row == null)
            row = new Text();
        while (true) {
            // log.debug(filename + " Seeking to row " + row);
            reader.seek(new Range(row, null), columnFamilies, false);
            if (!reader.hasTop()) {
                // log.debug(filename + " not found");
                break;
            }
            row = reader.getTopKey().getRow();
            TabletLocation tabletLocation = locator.locateTablet(context, row, false, true);
            // log.debug(filename + " found row " + row + " at location " + tabletLocation);
            result.add(tabletLocation);
            row = tabletLocation.tablet_extent.getEndRow();
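            // step just past this tablet's end row so the next seek starts at the following tablet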
            if (row != null && (endRow == null || row.compareTo(endRow) < 0)) {
                row = new Text(row);
                row.append(byte0, 0, byte0.length);
            } else
                break;
        }
    } finally {
        reader.close();
    }
    // log.debug(filename + " to be sent to " + result);
    return result;
}

From source file: org.apache.accumulo.server.client.BulkImporterTest.java

License: Apache License

@Test
public void testFindOverlappingTablets() throws Exception {
    MockTabletLocator locator = new MockTabletLocator();
    FileSystem fs = FileSystem.getLocal(CachedConfiguration.getInstance());
    ClientContext context = EasyMock.createMock(ClientContext.class);
    EasyMock.expect(context.getConfiguration()).andReturn(DefaultConfiguration.getInstance()).anyTimes();
    EasyMock.replay(context);
    String file = "target/testFile.rf";
    fs.delete(new Path(file), true);
    FileSKVWriter writer = FileOperations.getInstance().newWriterBuilder().forFile(file, fs, fs.getConf())
            .withTableConfiguration(context.getConfiguration()).build();
    writer.startDefaultLocalityGroup();
    Value empty = new Value(new byte[] {});
    writer.append(new Key("a", "cf", "cq"), empty);
    writer.append(new Key("a", "cf", "cq1"), empty);
    writer.append(new Key("a", "cf", "cq2"), empty);
    writer.append(new Key("a", "cf", "cq3"), empty);
    writer.append(new Key("a", "cf", "cq4"), empty);
    writer.append(new Key("a", "cf", "cq5"), empty);
    writer.append(new Key("d", "cf", "cq"), empty);
    writer.append(new Key("d", "cf", "cq1"), empty);
    writer.append(new Key("d", "cf", "cq2"), empty);
    writer.append(new Key("d", "cf", "cq3"), empty);
    writer.append(new Key("d", "cf", "cq4"), empty);
    writer.append(new Key("d", "cf", "cq5"), empty);
    writer.append(new Key("dd", "cf", "cq1"), empty);
    writer.append(new Key("ichabod", "cf", "cq"), empty);
    writer.append(new Key("icky", "cf", "cq1"), empty);
    writer.append(new Key("iffy", "cf", "cq2"), empty);
    writer.append(new Key("internal", "cf", "cq3"), empty);
    writer.append(new Key("is", "cf", "cq4"), empty);
    writer.append(new Key("iterator", "cf", "cq5"), empty);
    writer.append(new Key("xyzzy", "cf", "cq"), empty);
    writer.close();
    VolumeManager vm = VolumeManagerImpl.get(context.getConfiguration());
    List<TabletLocation> overlaps = BulkImporter.findOverlappingTablets(context, vm, locator, new Path(file));
    Assert.assertEquals(5, overlaps.size());
    Collections.sort(overlaps);
    Assert.assertEquals(new KeyExtent(tableId, new Text("a"), null), overlaps.get(0).tablet_extent);
    Assert.assertEquals(new KeyExtent(tableId, new Text("d"), new Text("cm")), overlaps.get(1).tablet_extent);
    Assert.assertEquals(new KeyExtent(tableId, new Text("dm"), new Text("d")), overlaps.get(2).tablet_extent);
    Assert.assertEquals(new KeyExtent(tableId, new Text("j"), new Text("i")), overlaps.get(3).tablet_extent);
    Assert.assertEquals(new KeyExtent(tableId, null, new Text("l")), overlaps.get(4).tablet_extent);

    List<TabletLocation> overlaps2 = BulkImporter.findOverlappingTablets(context, vm, locator, new Path(file),
            new KeyExtent(tableId, new Text("h"), new Text("b")));
    Assert.assertEquals(3, overlaps2.size());
    Assert.assertEquals(new KeyExtent(tableId, new Text("d"), new Text("cm")), overlaps2.get(0).tablet_extent);
    Assert.assertEquals(new KeyExtent(tableId, new Text("dm"), new Text("d")), overlaps2.get(1).tablet_extent);
    Assert.assertEquals(new KeyExtent(tableId, new Text("j"), new Text("i")), overlaps2.get(2).tablet_extent);
    Assert.assertEquals(locator.invalidated, 1);
}

From source file: org.apache.accumulo.server.fs.VolumeManagerImpl.java

License: Apache License

@Override
public FSDataOutputStream create(Path path, boolean overwrite, int bufferSize, short replication,
        long blockSize) throws IOException {
    requireNonNull(path);

    Volume v = getVolumeByPath(path);
    FileSystem fs = v.getFileSystem();
    blockSize = correctBlockSize(fs.getConf(), blockSize);
    bufferSize = correctBufferSize(fs.getConf(), bufferSize);
    return fs.create(path, overwrite, bufferSize, replication, blockSize);
}

From source file: org.apache.accumulo.server.fs.VolumeManagerImpl.java

License: Apache License

@Override
public FSDataOutputStream createSyncable(Path logPath, int bufferSize, short replication, long blockSize)
        throws IOException {
    Volume v = getVolumeByPath(logPath);
    FileSystem fs = v.getFileSystem();
    blockSize = correctBlockSize(fs.getConf(), blockSize);
    bufferSize = correctBufferSize(fs.getConf(), bufferSize);
    EnumSet<CreateFlag> set = EnumSet.of(CreateFlag.SYNC_BLOCK, CreateFlag.CREATE);
    log.debug("creating " + logPath + " with CreateFlag set: " + set);
    try {
        return fs.create(logPath, FsPermission.getDefault(), set, bufferSize, replication, blockSize, null);
    } catch (Exception ex) {
        log.debug("Exception", ex);
        return fs.create(logPath, true, bufferSize, replication, blockSize);
    }
}

From source file: org.apache.accumulo.server.fs.VolumeManagerImpl.java

License: Apache License

protected void ensureSyncIsEnabled() {
    for (Entry<String, Volume> entry : getFileSystems().entrySet()) {
        FileSystem fs = entry.getValue().getFileSystem();

        if (fs instanceof DistributedFileSystem) {
            // Avoid use of DFSConfigKeys since it's private
            final String DFS_SUPPORT_APPEND = "dfs.support.append",
                    DFS_DATANODE_SYNCONCLOSE = "dfs.datanode.synconclose";
            final String ticketMessage = "See ACCUMULO-623 and ACCUMULO-1637 for more details.";

            // If either of these parameters are configured to be false, fail.
            // This is a sign that someone is writing bad configuration.
            if (!fs.getConf().getBoolean(DFS_SUPPORT_APPEND, true)) {
                String msg = "Accumulo requires that " + DFS_SUPPORT_APPEND + " not be configured as false. "
                        + ticketMessage;
                // ACCUMULO-3651 Changed level to error and added FATAL to message for slf4j compatibility
                log.error("FATAL {}", msg);
                throw new RuntimeException(msg);
            }

            // Warn if synconclose isn't set
            if (!fs.getConf().getBoolean(DFS_DATANODE_SYNCONCLOSE, false)) {
                // Only warn once per process per volume URI
                synchronized (WARNED_ABOUT_SYNCONCLOSE) {
                    if (!WARNED_ABOUT_SYNCONCLOSE.contains(entry.getKey())) {
                        WARNED_ABOUT_SYNCONCLOSE.add(entry.getKey());
                        log.warn(DFS_DATANODE_SYNCONCLOSE
                                + " set to false in hdfs-site.xml: data loss is possible on hard system reset or power loss");
                    }
                }
            }
        }
    }
}

From source file: org.apache.accumulo.server.fs.VolumeManagerImpl.java

License: Apache License

@Override
public boolean moveToTrash(Path path) throws IOException {
    FileSystem fs = getVolumeByPath(path).getFileSystem();
    Trash trash = new Trash(fs, fs.getConf());
    return trash.moveToTrash(path);
}

From source file: org.apache.accumulo.server.init.Initialize.java

License: Apache License

private static void createMetadataFile(VolumeManager volmanager, String fileName, Tablet... tablets)
        throws IOException {
    // sort file contents in memory, then play back to the file
    TreeMap<Key, Value> sorted = new TreeMap<>();
    for (Tablet tablet : tablets) {
        createEntriesForTablet(sorted, tablet);
    }
    FileSystem fs = volmanager.getVolumeByPath(new Path(fileName)).getFileSystem();
    FileSKVWriter tabletWriter = FileOperations.getInstance().newWriterBuilder()
            .forFile(fileName, fs, fs.getConf())
            .withTableConfiguration(AccumuloConfiguration.getDefaultConfiguration()).build();
    tabletWriter.startDefaultLocalityGroup();

    for (Entry<Key, Value> entry : sorted.entrySet()) {
        tabletWriter.append(entry.getKey(), entry.getValue());
    }

    tabletWriter.close();
}

From source file: org.apache.accumulo.server.logger.LogWriter.java

License: Apache License

/**
 * @param fs
 *          The HDFS instance shared by master/tservers.
 * @param logDirectories
 *          The local directories to write the recovery logs.
 * @param instanceId
 *          The accumulo instance for which we are logging.
 */
public LogWriter(AccumuloConfiguration acuConf, FileSystem fs, Collection<String> logDirectories,
        String instanceId, int threadPoolSize, boolean archive) {
    this.acuConf = acuConf;
    this.fs = fs;
    this.roots = new ArrayList<String>(logDirectories);
    this.instanceId = instanceId;
    this.copyThreadPool = Executors.newFixedThreadPool(threadPoolSize);
    try {
        this.logArchiver = new LogArchiver(acuConf, TraceFileSystem.wrap(FileSystem.getLocal(fs.getConf())), fs,
                new ArrayList<String>(logDirectories), archive);
    } catch (IOException e1) {
        throw new RuntimeException(e1);
    }

    // Register the metrics MBean
    try {
        metrics.register();
    } catch (Exception e) {
        log.error("Exception registering MBean with MBean Server", e);
    }
}