Example usage for org.apache.hadoop.fs Path SEPARATOR

List of usage examples for org.apache.hadoop.fs Path SEPARATOR

Introduction

On this page you can find example usages of org.apache.hadoop.fs.Path.SEPARATOR.

Prototype

public static final String SEPARATOR = "/"

Document

The directory separator, a slash.

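Before the usage examples, a minimal sketch (not taken from the sources below) of the common pattern: joining path components into an HDFS-style path string with Path.SEPARATOR. The base directory and table id used here are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PathSeparatorExample {
    public static void main(String[] args) throws Exception {
        // Hypothetical path components, joined with the "/" separator constant.
        String baseDir = "/accumulo/tables";
        String tableId = "1";
        String tabletDir = baseDir + Path.SEPARATOR + tableId + Path.SEPARATOR + "default_tablet";

        // The assembled string can be wrapped in a Path and used against a FileSystem.
        Path path = new Path(tabletDir);
        FileSystem fs = FileSystem.get(new Configuration());
        System.out.println(path + " exists: " + fs.exists(path));
    }
}

When the components are already Path or String objects, the Path(parent, child) constructors are an alternative to manual concatenation with SEPARATOR.
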
Usage

From source file:org.apache.accumulo.server.util.MetadataTableUtil.java

License:Apache License

/**
 * During an upgrade from 1.6 to 1.7, we need to add the replication table
 */
public static void createReplicationTable(ClientContext context) throws IOException {
    String dir = VolumeManagerImpl.get().choose(Optional.of(ReplicationTable.ID), ServerConstants.getBaseUris())
            + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + ReplicationTable.ID
            + Constants.DEFAULT_TABLET_LOCATION;

    Mutation m = new Mutation(new Text(KeyExtent.getMetadataEntry(ReplicationTable.ID, null)));
    m.put(DIRECTORY_COLUMN.getColumnFamily(), DIRECTORY_COLUMN.getColumnQualifier(), 0,
            new Value(dir.getBytes(UTF_8)));
    m.put(TIME_COLUMN.getColumnFamily(), TIME_COLUMN.getColumnQualifier(), 0,
            new Value((TabletTime.LOGICAL_TIME_ID + "0").getBytes(UTF_8)));
    m.put(PREV_ROW_COLUMN.getColumnFamily(), PREV_ROW_COLUMN.getColumnQualifier(), 0,
            KeyExtent.encodePrevEndRow(null));
    update(getMetadataTable(context), null, m);
}

From source file:org.apache.accumulo.server.util.RandomizeVolumes.java

License:Apache License

public static int randomize(Connector c, String tableName)
        throws IOException, AccumuloSecurityException, AccumuloException, TableNotFoundException {
    final VolumeManager vm = VolumeManagerImpl.get();
    if (vm.getVolumes().size() < 2) {
        log.error("There are not enough volumes configured");
        return 1;
    }
    String tableId = c.tableOperations().tableIdMap().get(tableName);
    if (null == tableId) {
        log.error("Could not determine the table ID for table " + tableName);
        return 2;
    }
    TableState tableState = TableManager.getInstance().getTableState(tableId);
    if (TableState.OFFLINE != tableState) {
        log.info("Taking " + tableName + " offline");
        c.tableOperations().offline(tableName, true);
        log.info(tableName + " offline");
    }
    SimpleThreadPool pool = new SimpleThreadPool(50, "directory maker");
    log.info("Rewriting entries for " + tableName);
    Scanner scanner = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    DIRECTORY_COLUMN.fetch(scanner);
    scanner.setRange(TabletsSection.getRange(tableId));
    BatchWriter writer = c.createBatchWriter(MetadataTable.NAME, null);
    int count = 0;
    for (Entry<Key, Value> entry : scanner) {
        String oldLocation = entry.getValue().toString();
        String directory;
        if (oldLocation.contains(":")) {
            String[] parts = oldLocation.split(Path.SEPARATOR);
            String tableIdEntry = parts[parts.length - 2];
            if (!tableIdEntry.equals(tableId)) {
                log.error(
                        "Unexpected table id found: " + tableIdEntry + ", expected " + tableId + "; skipping");
                continue;
            }
            directory = parts[parts.length - 1];
        } else {
            directory = oldLocation.substring(Path.SEPARATOR.length());
        }
        Key key = entry.getKey();
        Mutation m = new Mutation(key.getRow());

        final String newLocation = vm.choose(Optional.of(tableId), ServerConstants.getBaseUris())
                + Path.SEPARATOR + ServerConstants.TABLE_DIR + Path.SEPARATOR + tableId + Path.SEPARATOR
                + directory;
        m.put(key.getColumnFamily(), key.getColumnQualifier(), new Value(newLocation.getBytes(UTF_8)));
        if (log.isTraceEnabled()) {
            log.trace("Replacing " + oldLocation + " with " + newLocation);
        }
        writer.addMutation(m);
        pool.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    vm.mkdirs(new Path(newLocation));
                } catch (IOException ex) {
                    // best effort: ignore failures creating the directory
                }
            }
        });
        count++;
    }
    writer.close();
    pool.shutdown();
    while (!pool.isTerminated()) {
        log.trace("Waiting for mkdir() calls to finish");
        try {
            pool.awaitTermination(5, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            break;
        }
    }
    log.info("Updated " + count + " entries for table " + tableName);
    if (TableState.OFFLINE != tableState) {
        c.tableOperations().online(tableName, true);
        log.info("table " + tableName + " back online");
    }
    return 0;
}

From source file:org.apache.accumulo.test.replication.UnusedWalDoesntCloseReplicationStatusIT.java

License:Apache License

@Test
public void test() throws Exception {
    File accumuloDir = this.getCluster().getConfig().getAccumuloDir();
    final Connector conn = getConnector();
    final String tableName = getUniqueNames(1)[0];

    conn.securityOperations().grantTablePermission("root", MetadataTable.NAME, TablePermission.WRITE);
    conn.tableOperations().create(tableName);

    final String tableId = conn.tableOperations().tableIdMap().get(tableName);
    final int numericTableId = Integer.parseInt(tableId);
    final int fakeTableId = numericTableId + 1;

    Assert.assertNotNull("Did not find table ID", tableId);

    conn.tableOperations().setProperty(tableName, Property.TABLE_REPLICATION.getKey(), "true");
    conn.tableOperations().setProperty(tableName, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
    // just sleep: use a mock peer that sleeps instead of replicating
    conn.instanceOperations().setProperty(Property.REPLICATION_PEERS.getKey() + "cluster1",
            ReplicaSystemFactory.getPeerConfigurationValue(MockReplicaSystem.class, "50000"));

    FileSystem fs = FileSystem.getLocal(new Configuration());
    File tserverWalDir = new File(accumuloDir, ServerConstants.WAL_DIR + Path.SEPARATOR + "faketserver+port");
    File tserverWal = new File(tserverWalDir, UUID.randomUUID().toString());
    fs.mkdirs(new Path(tserverWalDir.getAbsolutePath()));

    // Make a fake WAL with no data in it for our real table
    FSDataOutputStream out = fs.create(new Path(tserverWal.getAbsolutePath()));

    out.write(DfsLogger.LOG_FILE_HEADER_V3.getBytes(UTF_8));

    DataOutputStream dos = new DataOutputStream(out);
    dos.writeUTF("NullCryptoModule");

    // Fake a single update WAL that has a mutation for another table
    LogFileKey key = new LogFileKey();
    LogFileValue value = new LogFileValue();

    key.event = OPEN;
    key.tserverSession = tserverWal.getAbsolutePath();
    key.filename = tserverWal.getAbsolutePath();
    key.write(out);
    value.write(out);

    key.event = LogEvents.DEFINE_TABLET;
    key.tablet = new KeyExtent(Integer.toString(fakeTableId), null, null);
    key.seq = 1L;
    key.tid = 1;

    key.write(dos);
    value.write(dos);

    key.tablet = null;
    key.event = LogEvents.MUTATION;
    key.filename = tserverWal.getAbsolutePath();
    value.mutations = Arrays.<Mutation>asList(new ServerMutation(new Text("row")));

    key.write(dos);
    value.write(dos);

    key.event = LogEvents.COMPACTION_START;
    key.filename = accumuloDir.getAbsolutePath() + "/tables/" + fakeTableId + "/t-000001/A000001.rf";
    value.mutations = Collections.emptyList();

    key.write(dos);
    value.write(dos);

    key.event = LogEvents.COMPACTION_FINISH;
    value.mutations = Collections.emptyList();

    key.write(dos);
    value.write(dos);

    dos.close();

    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
    Mutation m = new Mutation("m");
    m.put("m", "m", "M");
    bw.addMutation(m);
    bw.close();

    log.info("State of metadata table after inserting a record");

    Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
    for (Entry<Key, Value> entry : s) {
        System.out.println(entry.getKey().toStringNoTruncate() + " " + entry.getValue());
    }

    s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    s.setRange(MetadataSchema.ReplicationSection.getRange());
    for (Entry<Key, Value> entry : s) {
        System.out.println(entry.getKey().toStringNoTruncate() + " "
                + ProtobufUtil.toString(Status.parseFrom(entry.getValue().get())));
    }

    log.info("Offline'ing table");

    conn.tableOperations().offline(tableName, true);

    // Add our fake WAL to the log column for this table
    String walUri = tserverWal.toURI().toString();
    KeyExtent extent = new KeyExtent(tableId, null, null);
    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m = new Mutation(extent.getMetadataEntry());
    m.put(MetadataSchema.TabletsSection.LogColumnFamily.NAME, new Text("localhost:12345/" + walUri),
            new Value((walUri + "|1").getBytes(UTF_8)));
    bw.addMutation(m);

    // Add a replication entry for our fake WAL
    m = new Mutation(MetadataSchema.ReplicationSection.getRowPrefix() + new Path(walUri).toString());
    m.put(MetadataSchema.ReplicationSection.COLF, new Text(tableId),
            new Value(StatusUtil.fileCreated(System.currentTimeMillis()).toByteArray()));
    bw.addMutation(m);
    bw.close();

    log.info("State of metadata after injecting WAL manually");

    s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
    for (Entry<Key, Value> entry : s) {
        log.info(entry.getKey().toStringNoTruncate() + " " + entry.getValue());
    }

    s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    s.setRange(MetadataSchema.ReplicationSection.getRange());
    for (Entry<Key, Value> entry : s) {
        log.info(entry.getKey().toStringNoTruncate() + " "
                + ProtobufUtil.toString(Status.parseFrom(entry.getValue().get())));
    }

    log.info("Bringing table online");
    conn.tableOperations().online(tableName, true);

    Assert.assertEquals(1, Iterables.size(conn.createScanner(tableName, Authorizations.EMPTY)));

    log.info("Table has performed recovery, state of metadata:");

    s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
    for (Entry<Key, Value> entry : s) {
        log.info(entry.getKey().toStringNoTruncate() + " " + entry.getValue());
    }

    s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    s.setRange(MetadataSchema.ReplicationSection.getRange());
    for (Entry<Key, Value> entry : s) {
        Status status = Status.parseFrom(entry.getValue().get());
        log.info(entry.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(status));
        Assert.assertFalse("Status record was closed and it should not be", status.getClosed());
    }
}

From source file:org.apache.accumulo.test.RewriteTabletDirectoriesIT.java

License:Apache License

@Test
public void test() throws Exception {
    Connector c = getConnector();
    c.securityOperations().grantTablePermission(c.whoami(), MetadataTable.NAME, TablePermission.WRITE);
    final String tableName = getUniqueNames(1)[0];
    c.tableOperations().create(tableName);

    // Write some data to a table and add some splits
    BatchWriter bw = c.createBatchWriter(tableName, null);
    final SortedSet<Text> splits = new TreeSet<>();
    for (String split : "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z".split(",")) {
        splits.add(new Text(split));
        Mutation m = new Mutation(new Text(split));
        m.put(new byte[] {}, new byte[] {}, new byte[] {});
        bw.addMutation(m);
    }
    bw.close();
    c.tableOperations().addSplits(tableName, splits);

    BatchScanner scanner = c.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 1);
    DIRECTORY_COLUMN.fetch(scanner);
    String tableId = c.tableOperations().tableIdMap().get(tableName);
    assertNotNull("TableID for " + tableName + " was null", tableId);
    scanner.setRanges(Collections.singletonList(TabletsSection.getRange(tableId)));
    // verify the directory entries are all on v1, make a few entries relative
    bw = c.createBatchWriter(MetadataTable.NAME, null);
    int count = 0;
    for (Entry<Key, Value> entry : scanner) {
        assertTrue("Expected " + entry.getValue() + " to contain " + v1,
                entry.getValue().toString().contains(v1.toString()));
        count++;
        if (count % 2 == 0) {
            String parts[] = entry.getValue().toString().split("/");
            Key key = entry.getKey();
            Mutation m = new Mutation(key.getRow());
            m.put(key.getColumnFamily(), key.getColumnQualifier(),
                    new Value((Path.SEPARATOR + parts[parts.length - 1]).getBytes()));
            bw.addMutation(m);
        }
    }
    bw.close();
    assertEquals(splits.size() + 1, count);

    // This should fail: only one volume
    assertEquals(1, cluster.exec(RandomizeVolumes.class, "-z", cluster.getZooKeepers(), "-i",
            c.getInstance().getInstanceName(), "-t", tableName).waitFor());

    cluster.stop();

    // add the 2nd volume
    Configuration conf = new Configuration(false);
    conf.addResource(new Path(cluster.getConfig().getConfDir().toURI().toString(), "accumulo-site.xml"));
    conf.set(Property.INSTANCE_VOLUMES.getKey(), v1.toString() + "," + v2.toString());
    BufferedOutputStream fos = new BufferedOutputStream(
            new FileOutputStream(new File(cluster.getConfig().getConfDir(), "accumulo-site.xml")));
    conf.writeXml(fos);
    fos.close();

    // initialize volume
    assertEquals(0, cluster.exec(Initialize.class, "--add-volumes").waitFor());
    cluster.start();
    c = getConnector();

    // change the directory entries
    assertEquals(0, cluster.exec(Admin.class, "randomizeVolumes", "-t", tableName).waitFor());

    // verify a more equal sharing
    int v1Count = 0, v2Count = 0;
    for (Entry<Key, Value> entry : scanner) {
        if (entry.getValue().toString().contains(v1.toString())) {
            v1Count++;
        }
        if (entry.getValue().toString().contains(v2.toString())) {
            v2Count++;
        }
    }

    log.info("Count for volume1: " + v1Count);
    log.info("Count for volume2: " + v2Count);

    assertEquals(splits.size() + 1, v1Count + v2Count);
    // a fair chooser will differ by less than count(volumes)
    assertTrue("Expected the number of files to differ between volumes by less than 10. " + v1Count + " "
            + v2Count, Math.abs(v1Count - v2Count) < 2);
    // verify we can read the old data
    count = 0;
    for (Entry<Key, Value> entry : c.createScanner(tableName, Authorizations.EMPTY)) {
        assertTrue("Found unexpected entry in table: " + entry, splits.contains(entry.getKey().getRow()));
        count++;
    }
    assertEquals(splits.size(), count);
}

From source file:org.apache.accumulo.tserver.log.DfsLogger.java

License:Apache License

/**
 * Opens a Write-Ahead Log file and writes the necessary header information and OPEN entry to the file. The file is ready to be used for ingest if this method
 * returns successfully. If an exception is thrown from this method, it is the caller's responsibility to ensure that {@link #close()} is called to prevent
 * leaking the file handle and/or syncing thread.
 *
 * @param address
 *          The address of the host using this WAL
 */
public synchronized void open(String address) throws IOException {
    String filename = UUID.randomUUID().toString();
    log.debug("Address is " + address);
    String logger = Joiner.on("+").join(address.split(":"));

    log.debug("DfsLogger.open() begin");
    VolumeManager fs = conf.getFileSystem();

    logPath = fs.choose(Optional.<String>empty(), ServerConstants.getBaseUris()) + Path.SEPARATOR
            + ServerConstants.WAL_DIR + Path.SEPARATOR + logger + Path.SEPARATOR + filename;

    metaReference = toString();
    LoggerOperation op = null;
    try {
        short replication = (short) conf.getConfiguration().getCount(Property.TSERV_WAL_REPLICATION);
        if (replication == 0)
            replication = fs.getDefaultReplication(new Path(logPath));
        long blockSize = conf.getConfiguration().getMemoryInBytes(Property.TSERV_WAL_BLOCKSIZE);
        if (blockSize == 0)
            blockSize = (long) (conf.getConfiguration().getMemoryInBytes(Property.TSERV_WALOG_MAX_SIZE) * 1.1);
        if (conf.getConfiguration().getBoolean(Property.TSERV_WAL_SYNC))
            logFile = fs.createSyncable(new Path(logPath), 0, replication, blockSize);
        else
            logFile = fs.create(new Path(logPath), true, 0, replication, blockSize);
        sync = logFile.getClass().getMethod("hsync");
        flush = logFile.getClass().getMethod("hflush");

        // Initialize the crypto operations.
        org.apache.accumulo.core.security.crypto.CryptoModule cryptoModule = org.apache.accumulo.core.security.crypto.CryptoModuleFactory
                .getCryptoModule(conf.getConfiguration().get(Property.CRYPTO_MODULE_CLASS));

        // Initialize the log file with a header and the crypto params used to set up this log file.
        logFile.write(LOG_FILE_HEADER_V3.getBytes(UTF_8));

        CryptoModuleParameters params = CryptoModuleFactory
                .createParamsObjectFromAccumuloConfiguration(conf.getConfiguration());

        NoFlushOutputStream nfos = new NoFlushOutputStream(logFile);
        params.setPlaintextOutputStream(nfos);

        // In order to bootstrap the reading of this file later, we have to record the CryptoModule that was used to encipher it here,
        // so that that crypto module can re-read its own parameters.

        logFile.writeUTF(conf.getConfiguration().get(Property.CRYPTO_MODULE_CLASS));

        params = cryptoModule.getEncryptingOutputStream(params);
        OutputStream encipheringOutputStream = params.getEncryptedOutputStream();

        // If the module just kicks back our original stream, then just use it, don't wrap it in
        // another data OutputStream.
        if (encipheringOutputStream == nfos) {
            log.debug("No enciphering, using raw output stream");
            encryptingLogFile = nfos;
        } else {
            log.debug("Enciphering found, wrapping in DataOutputStream");
            encryptingLogFile = new DataOutputStream(encipheringOutputStream);
        }

        LogFileKey key = new LogFileKey();
        key.event = OPEN;
        key.tserverSession = filename;
        key.filename = filename;
        op = logFileData(Collections.singletonList(new Pair<>(key, EMPTY)), Durability.SYNC);
    } catch (Exception ex) {
        if (logFile != null)
            logFile.close();
        logFile = null;
        encryptingLogFile = null;
        throw new IOException(ex);
    }

    syncThread = new Daemon(new LoggingRunnable(log, new LogSyncingTask()));
    syncThread.setName("Accumulo WALog thread " + toString());
    syncThread.start();
    op.await();
    log.debug("Got new write-ahead log: " + this);
}
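
The javadoc above makes cleanup on failure the caller's job. A minimal sketch of that calling pattern, using only the open(String) and close() methods referenced in this snippet; the helper method and its parameters are illustrative, not part of the original class.

static void openAndCleanUpOnFailure(DfsLogger logger, String address) throws IOException {
    try {
        logger.open(address);
    } catch (IOException e) {
        // Per the contract above, close() must be called so the file handle and
        // syncing thread are not leaked before the failure is propagated.
        try {
            logger.close();
        } catch (Exception suppressed) {
            e.addSuppressed(suppressed);
        }
        throw e;
    }
}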

From source file:org.apache.accumulo.tserver.tablet.Tablet.java

License:Apache License

private static String createTabletDirectory(VolumeManager fs, String tableId, Text endRow) {
    String lowDirectory;

    UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
    String volume = fs.choose(Optional.of(tableId), ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR
            + Path.SEPARATOR;

    while (true) {
        try {
            if (endRow == null) {
                lowDirectory = Constants.DEFAULT_TABLET_LOCATION;
                Path lowDirectoryPath = new Path(volume + "/" + tableId + "/" + lowDirectory);
                if (fs.exists(lowDirectoryPath) || fs.mkdirs(lowDirectoryPath)) {
                    FileSystem pathFs = fs.getVolumeByPath(lowDirectoryPath).getFileSystem();
                    return lowDirectoryPath.makeQualified(pathFs.getUri(), pathFs.getWorkingDirectory())
                            .toString();
                }
                log.warn("Failed to create " + lowDirectoryPath + " for unknown reason");
            } else {
                lowDirectory = "/" + Constants.GENERATED_TABLET_DIRECTORY_PREFIX + namer.getNextName();
                Path lowDirectoryPath = new Path(volume + "/" + tableId + "/" + lowDirectory);
                if (fs.exists(lowDirectoryPath))
                    throw new IllegalStateException("Dir exists when it should not " + lowDirectoryPath);
                if (fs.mkdirs(lowDirectoryPath)) {
                    FileSystem lowDirectoryFs = fs.getVolumeByPath(lowDirectoryPath).getFileSystem();
                    return lowDirectoryPath
                            .makeQualified(lowDirectoryFs.getUri(), lowDirectoryFs.getWorkingDirectory())
                            .toString();
                }
            }
        } catch (IOException e) {
            log.warn(e);
        }

        log.warn("Failed to create dir for tablet in table " + tableId + " in volume " + volume
                + " + will retry ...");
        sleepUninterruptibly(3, TimeUnit.SECONDS);

    }
}

From source file:org.apache.apex.malhar.lib.dedup.AbstractDeduper.java

License:Apache License

@Override
public void setup(OperatorContext context) {
    ((FileAccessFSImpl) managedState.getFileAccess())
            .setBasePath(context.getValue(DAG.APPLICATION_PATH) + Path.SEPARATOR + BUCKET_DIR);
    managedState.setup(context);

    if (preserveTupleOrder) {
        decisions = Maps.newLinkedHashMap();
    }
}

From source file:org.apache.apex.malhar.lib.fs.FSRecordCompactionOperator.java

License:Apache License

@Override
public void setup(Context.OperatorContext context) {
    filePath = context.getValue(DAG.APPLICATION_PATH) + Path.SEPARATOR + outputDirectoryName;
    outputFileName = outputFileNamePrefix + context.getValue(DAG.APPLICATION_ID);
    super.setup(context);
}

From source file:org.apache.apex.malhar.lib.fs.FSRecordCompactionOperator.java

License:Apache License

@Override
protected void finalizeFile(String fileName) throws IOException {
    super.finalizeFile(fileName);

    String src = filePath + Path.SEPARATOR + fileName;
    Path srcPath = new Path(src);
    long offset = fs.getFileStatus(srcPath).getLen();

    //Add finalized files to the queue
    OutputMetaData metaData = new OutputMetaData(src, fileName, offset);
    //finalizeFile is called from committed callback.
    //Tuples should be emitted only between beginWindow to endWindow. Thus using emitQueue.
    emitQueue.add(metaData);
}

From source file:org.apache.apex.malhar.lib.fs.s3.S3InitiateFileUploadOperator.java

License:Apache License

@Override
public void setup(Context.OperatorContext context) {
    outputDirectoryPath = StringUtils.removeEnd(outputDirectoryPath, Path.SEPARATOR);
    currentWindowRecoveryState = new ArrayList<>();
    windowDataManager.setup(context);
    s3Client = createClient();
}