Example usage for org.apache.hadoop.fs Path SEPARATOR

List of usage examples for org.apache.hadoop.fs Path SEPARATOR

Introduction

On this page you can find example usages of org.apache.hadoop.fs.Path.SEPARATOR.

Prototype

public static final String SEPARATOR

Document

The directory separator, a slash.
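
Before the usage listings below, here is a minimal, self-contained sketch of how Path.SEPARATOR is typically used to join path components; the class name and paths in it are made up for illustration. The Path(parent, child) constructor shown alongside produces the same result and is usually preferable, since Path inserts the separator itself.

import org.apache.hadoop.fs.Path;

public class PathSeparatorSketch {
    public static void main(String[] args) {
        String baseDir = "/tmp/spool"; // hypothetical base directory

        // Join components explicitly with the HDFS path separator ("/")
        Path byConcatenation = new Path(baseDir + Path.SEPARATOR + "file1");

        // Equivalent result; Path handles the separator for you
        Path byConstructor = new Path(baseDir, "file1");

        System.out.println(byConcatenation); // /tmp/spool/file1
        System.out.println(byConstructor);   // /tmp/spool/file1
    }
}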

Usage

From source file:org.apache.storm.hdfs.spout.TestFileLock.java

License:Apache License

@Test
public void testHeartbeat() throws Exception {
    Path file1 = new Path(filesDir + Path.SEPARATOR + "file1");
    fs.create(file1).close();

    // acquire lock on file1
    FileLock lock1 = FileLock.tryLock(fs, file1, locksDir, "spout1");
    Assert.assertNotNull(lock1);
    Assert.assertTrue(fs.exists(lock1.getLockFile()));

    ArrayList<String> lines = readTextFile(lock1.getLockFile());
    Assert.assertEquals("heartbeats appear to be missing", 1, lines.size());

    // heartbeat upon it
    lock1.heartbeat("1");
    lock1.heartbeat("2");
    lock1.heartbeat("3");

    lines = readTextFile(lock1.getLockFile());
    Assert.assertEquals("heartbeats appear to be missing", 4, lines.size());

    lock1.heartbeat("4");
    lock1.heartbeat("5");
    lock1.heartbeat("6");

    lines = readTextFile(lock1.getLockFile());
    Assert.assertEquals("heartbeats appear to be missing", 7, lines.size());

    lock1.release();
    lines = readTextFile(lock1.getLockFile());
    Assert.assertNull(lines);
    Assert.assertFalse(fs.exists(lock1.getLockFile()));
}

From source file:org.apache.storm.hdfs.spout.TestFileLock.java

License:Apache License

@Test
public void testConcurrentLocking() throws IOException, InterruptedException {
    Path file1 = new Path(filesDir + Path.SEPARATOR + "file1");
    fs.create(file1).close();

    FileLockingThread[] thds = startThreads(100, file1, locksDir);
    for (FileLockingThread thd : thds) {
        thd.join();
        if (!thd.cleanExit) {
            System.err.println(thd.getName() + " did not exit cleanly");
        }
        Assert.assertTrue(thd.cleanExit);
    }

    Path lockFile = new Path(locksDir + Path.SEPARATOR + file1.getName());
    Assert.assertFalse(fs.exists(lockFile));
}

From source file:org.apache.storm.hdfs.spout.TestFileLock.java

License:Apache License

@Test
public void testStaleLockDetection_SingleLock() throws Exception {
    final int LOCK_EXPIRY_SEC = 1;
    final int WAIT_MSEC = 1500;
    Path file1 = new Path(filesDir + Path.SEPARATOR + "file1");
    fs.create(file1).close();
    FileLock lock1 = FileLock.tryLock(fs, file1, locksDir, "spout1");
    try {
        // acquire lock on file1
        Assert.assertNotNull(lock1);
        Assert.assertTrue(fs.exists(lock1.getLockFile()));
        Thread.sleep(WAIT_MSEC); // wait for lock to expire
        HdfsUtils.Pair<Path, FileLock.LogEntry> expired = FileLock.locateOldestExpiredLock(fs, locksDir,
                LOCK_EXPIRY_SEC);
        Assert.assertNotNull(expired);

        // heartbeat, ensure it's no longer stale and read back the heartbeat data
        lock1.heartbeat("1");
        expired = FileLock.locateOldestExpiredLock(fs, locksDir, 1);
        Assert.assertNull(expired);

        FileLock.LogEntry lastEntry = lock1.getLastLogEntry();
        Assert.assertNotNull(lastEntry);
        Assert.assertEquals("1", lastEntry.fileOffset);

        // wait and check for expiry again
        Thread.sleep(WAIT_MSEC);
        expired = FileLock.locateOldestExpiredLock(fs, locksDir, LOCK_EXPIRY_SEC);
        Assert.assertNotNull(expired);
    } finally {
        lock1.release();
        fs.delete(file1, false);
    }
}

From source file:org.apache.storm.hdfs.spout.TestFileLock.java

License:Apache License

@Test
public void testStaleLockDetection_MultipleLocks() throws Exception {
    final int LOCK_EXPIRY_SEC = 1;
    final int WAIT_MSEC = 1500;
    Path file1 = new Path(filesDir + Path.SEPARATOR + "file1");
    Path file2 = new Path(filesDir + Path.SEPARATOR + "file2");
    Path file3 = new Path(filesDir + Path.SEPARATOR + "file3");

    fs.create(file1).close();
    fs.create(file2).close();
    fs.create(file3).close();

    // 1) acquire locks on file1,file2,file3
    FileLock lock1 = FileLock.tryLock(fs, file1, locksDir, "spout1");
    FileLock lock2 = FileLock.tryLock(fs, file2, locksDir, "spout2");
    FileLock lock3 = FileLock.tryLock(fs, file3, locksDir, "spout3");
    Assert.assertNotNull(lock1);
    Assert.assertNotNull(lock2);
    Assert.assertNotNull(lock3);

    try {
        HdfsUtils.Pair<Path, FileLock.LogEntry> expired = FileLock.locateOldestExpiredLock(fs, locksDir,
                LOCK_EXPIRY_SEC);
        Assert.assertNull(expired);

        // 2) wait for all 3 locks to expire, then heartbeat on 2 locks and verify the stale lock
        Thread.sleep(WAIT_MSEC);
        lock1.heartbeat("1");
        lock2.heartbeat("1");

        expired = FileLock.locateOldestExpiredLock(fs, locksDir, LOCK_EXPIRY_SEC);
        Assert.assertNotNull(expired);
        Assert.assertEquals("spout3", expired.getValue().componentID);
    } finally {
        lock1.release();
        lock2.release();
        lock3.release();
        fs.delete(file1, false);
        fs.delete(file2, false);
        fs.delete(file3, false);
    }
}

From source file:org.apache.storm.hdfs.spout.TestFileLock.java

License:Apache License

@Test
public void testLockRecovery() throws Exception {
    final int LOCK_EXPIRY_SEC = 1;
    final int WAIT_MSEC = LOCK_EXPIRY_SEC * 1000 + 500;
    Path file1 = new Path(filesDir + Path.SEPARATOR + "file1");
    Path file2 = new Path(filesDir + Path.SEPARATOR + "file2");
    Path file3 = new Path(filesDir + Path.SEPARATOR + "file3");

    fs.create(file1).close();
    fs.create(file2).close();
    fs.create(file3).close();

    // 1) acquire locks on file1,file2,file3
    FileLock lock1 = FileLock.tryLock(fs, file1, locksDir, "spout1");
    FileLock lock2 = FileLock.tryLock(fs, file2, locksDir, "spout2");
    FileLock lock3 = FileLock.tryLock(fs, file3, locksDir, "spout3");
    Assert.assertNotNull(lock1);
    Assert.assertNotNull(lock2);
    Assert.assertNotNull(lock3);

    try {
        HdfsUtils.Pair<Path, FileLock.LogEntry> expired = FileLock.locateOldestExpiredLock(fs, locksDir,
                LOCK_EXPIRY_SEC);
        Assert.assertNull(expired);

        // 2) Simulate the lock file lease expiring and getting closed by HDFS
        closeUnderlyingLockFile(lock3);

        // 3) wait for all 3 locks to expire, then heartbeat on 2 locks
        Thread.sleep(WAIT_MSEC * 2); // wait for locks to expire
        lock1.heartbeat("1");
        lock2.heartbeat("1");

        // 4) Take ownership of the stale lock
        FileLock lock3b = FileLock.acquireOldestExpiredLock(fs, locksDir, LOCK_EXPIRY_SEC, "spout1");
        Assert.assertNotNull(lock3b);
        Assert.assertEquals("Expected lock3 file", Path.getPathWithoutSchemeAndAuthority(lock3b.getLockFile()),
                lock3.getLockFile());
    } finally {
        lock1.release();
        lock2.release();
        lock3.release();
        fs.delete(file1, false);
        fs.delete(file2, false);
        try {
            fs.delete(file3, false);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}

From source file:org.apache.storm.hdfs.spout.TestProgressTracker.java

License:Apache License

@Test
public void testBasic() throws Exception {
    ProgressTracker tracker = new ProgressTracker();
    baseFolder = tempFolder.newFolder("trackertest");

    Path file = new Path(baseFolder.toString() + Path.SEPARATOR + "testHeadTrimming.txt");
    createTextFile(file, 10);

    // create reader and do some checks
    TextFileReader reader = new TextFileReader(fs, file, null);
    FileOffset pos0 = tracker.getCommitPosition();
    Assert.assertNull(pos0);

    TextFileReader.Offset currOffset = reader.getFileOffset();
    Assert.assertNotNull(currOffset);
    Assert.assertEquals(0, currOffset.charOffset);

    // read 1st line and ack
    Assert.assertNotNull(reader.next());
    TextFileReader.Offset pos1 = reader.getFileOffset();
    tracker.recordAckedOffset(pos1);

    TextFileReader.Offset pos1b = (TextFileReader.Offset) tracker.getCommitPosition();
    Assert.assertEquals(pos1, pos1b);

    // read 2nd line and ACK
    Assert.assertNotNull(reader.next());
    TextFileReader.Offset pos2 = reader.getFileOffset();
    tracker.recordAckedOffset(pos2);

    tracker.dumpState(System.err);
    TextFileReader.Offset pos2b = (TextFileReader.Offset) tracker.getCommitPosition();
    Assert.assertEquals(pos2, pos2b);

    // read lines 3..7, don't ACK .. commit pos should remain same
    Assert.assertNotNull(reader.next());//3
    TextFileReader.Offset pos3 = reader.getFileOffset();
    Assert.assertNotNull(reader.next());//4
    TextFileReader.Offset pos4 = reader.getFileOffset();
    Assert.assertNotNull(reader.next());//5
    TextFileReader.Offset pos5 = reader.getFileOffset();
    Assert.assertNotNull(reader.next());//6
    TextFileReader.Offset pos6 = reader.getFileOffset();
    Assert.assertNotNull(reader.next());//7
    TextFileReader.Offset pos7 = reader.getFileOffset();

    // now ack msg 5 and check
    tracker.recordAckedOffset(pos5);
    Assert.assertEquals(pos2, tracker.getCommitPosition()); // should remain unchanged @ 2
    tracker.recordAckedOffset(pos4);
    Assert.assertEquals(pos2, tracker.getCommitPosition()); // should remain unchanged @ 2
    tracker.recordAckedOffset(pos3);
    Assert.assertEquals(pos5, tracker.getCommitPosition()); // should be at 5

    tracker.recordAckedOffset(pos6);
    Assert.assertEquals(pos6, tracker.getCommitPosition()); // should be at 6
    tracker.recordAckedOffset(pos6); // double ack on same msg
    Assert.assertEquals(pos6, tracker.getCommitPosition()); // should still be at 6

    tracker.recordAckedOffset(pos7);
    Assert.assertEquals(pos7, tracker.getCommitPosition()); // should be at 7

    tracker.dumpState(System.err);
}

From source file:org.apache.storm.hive.bolt.HiveSetupUtil.java

License:Apache License

public static void createDbAndTable(HiveConf conf, String databaseName, String tableName, List<String> partVals,
        String[] colNames, String[] colTypes, String[] partNames, String dbLocation) throws Exception {
    IMetaStoreClient client = new HiveMetaStoreClient(conf);
    try {
        Database db = new Database();
        db.setName(databaseName);
        db.setLocationUri(dbLocation);
        client.createDatabase(db);

        Table tbl = new Table();
        tbl.setDbName(databaseName);
        tbl.setTableName(tableName);
        tbl.setTableType(TableType.MANAGED_TABLE.toString());
        StorageDescriptor sd = new StorageDescriptor();
        sd.setCols(getTableColumns(colNames, colTypes));
        sd.setNumBuckets(1);
        sd.setLocation(dbLocation + Path.SEPARATOR + tableName);
        if (partNames != null && partNames.length != 0) {
            tbl.setPartitionKeys(getPartitionKeys(partNames));
        }

        tbl.setSd(sd);

        sd.setBucketCols(new ArrayList<String>(2));
        sd.setSerdeInfo(new SerDeInfo());
        sd.getSerdeInfo().setName(tbl.getTableName());
        sd.getSerdeInfo().setParameters(new HashMap<String, String>());
        sd.getSerdeInfo().getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");

        sd.getSerdeInfo().setSerializationLib(OrcSerde.class.getName());
        sd.setInputFormat(OrcInputFormat.class.getName());
        sd.setOutputFormat(OrcOutputFormat.class.getName());

        Map<String, String> tableParams = new HashMap<String, String>();
        tbl.setParameters(tableParams);
        client.createTable(tbl);
        try {
            if (partVals != null && partVals.size() > 0) {
                addPartition(client, tbl, partVals);
            }
        } catch (AlreadyExistsException e) {
            // the partition already exists; nothing to do
        }
    } finally {
        client.close();
    }
}

From source file:org.apache.storm.hive.bolt.HiveSetupUtil.java

License:Apache License

private static void addPartition(IMetaStoreClient client, Table tbl, List<String> partValues)
        throws IOException, TException {
    Partition part = new Partition();
    part.setDbName(tbl.getDbName());
    part.setTableName(tbl.getTableName());
    StorageDescriptor sd = new StorageDescriptor(tbl.getSd());
    sd.setLocation(sd.getLocation() + Path.SEPARATOR + makePartPath(tbl.getPartitionKeys(), partValues));
    part.setSd(sd);
    part.setValues(partValues);
    client.add_partition(part);
}

From source file:org.apache.storm.hive.bolt.HiveSetupUtil.java

License:Apache License

private static String makePartPath(List<FieldSchema> partKeys, List<String> partVals) {
    if (partKeys.size() != partVals.size()) {
        throw new IllegalArgumentException(
                "Partition values:" + partVals + ", does not match the partition Keys in table :" + partKeys);
    }
    StringBuffer buff = new StringBuffer(partKeys.size() * 20);
    int i = 0;
    for (FieldSchema schema : partKeys) {
        buff.append(schema.getName());
        buff.append("=");
        buff.append(partVals.get(i));
        if (i != partKeys.size() - 1) {
            buff.append(Path.SEPARATOR);
        }
        ++i;
    }
    return buff.toString();
}

From source file:org.apache.tajo.yarn.container.WorkerContainerTask.java

License:Apache License

@Override
public ContainerLaunchContext getLaunchContext(Container container) throws IOException {
    // create a container launch context
    ContainerLaunchContext launchContext = Records.newRecord(ContainerLaunchContext.class);
    UserGroupInformation user = UserGroupInformation.getCurrentUser();
    try {
        Credentials credentials = user.getCredentials();
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        launchContext.setTokens(securityTokens);
    } catch (IOException e) {
        LOG.warn("Getting current user info failed when trying to launch the container" + e.getMessage());
    }

    FileSystem fs = FileSystem.get(appContext.getConfiguration());

    // Set the local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    String suffix = "Tajo" + "/" + appContext.getApplicationId().getId();
    Path parentPath = new Path(fs.getHomeDirectory(), suffix);

    // tar ball
    Path archivePath = new Path(parentPath, System.getenv(Constants.TAJO_ARCHIVE_PATH));
    FileStatus archiveFs = fs.getFileStatus(archivePath);
    LocalResource archiveRsrc = LocalResource.newInstance(ConverterUtils.getYarnUrlFromURI(archivePath.toUri()),
            LocalResourceType.ARCHIVE, LocalResourceVisibility.APPLICATION, archiveFs.getLen(),
            archiveFs.getModificationTime());
    localResources.put("tajo", archiveRsrc);

    Configuration tajoWorkerConf = new Configuration(false);
    tajoWorkerConf.addResource(new Path("conf", "tajo-site.xml"));
    tajoWorkerConf.set(Constants.TAJO_MASTER_UMBILICAL_RPC_ADDRESS, appContext.getMasterHost() + ":26001");
    tajoWorkerConf.set(Constants.CATALOG_ADDRESS, appContext.getMasterHost() + ":26005");
    Path dst = new Path(parentPath, container.getId() + Path.SEPARATOR + "worker-conf");
    fs.mkdirs(dst);
    Path confFile = new Path(dst, "tajo-site.xml");
    FSDataOutputStream fdos = fs.create(confFile);
    tajoWorkerConf.writeXml(fdos);
    fdos.close();
    FileStatus scFileStatus = fs.getFileStatus(dst);
    LocalResource scRsrc = LocalResource.newInstance(ConverterUtils.getYarnUrlFromURI(dst.toUri()),
            LocalResourceType.FILE, LocalResourceVisibility.APPLICATION, scFileStatus.getLen(),
            scFileStatus.getModificationTime());
    localResources.put("conf", scRsrc);
    launchContext.setLocalResources(localResources);

    // Set the environment
    setupEnv(launchContext);

    // Set the necessary command to execute on the allocated container
    Vector<CharSequence> vargs = new Vector<CharSequence>(5);

    // Set executable command
    // Set args for the shell command if any
    vargs.add("${" + Constants.TAJO_HOME + "}/bin/tajo");
    vargs.add("--config");
    vargs.add("${" + Constants.TAJO_CONF_DIR + "}");
    vargs.add("worker");
    // Add log redirect params
    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr");

    // Get final command
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }

    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());
    launchContext.setCommands(commands);
    return launchContext;
}