Example usage for org.apache.hadoop.fs Path SEPARATOR

Introduction

This page collects example usages of org.apache.hadoop.fs.Path.SEPARATOR from open-source projects.

Prototype

String SEPARATOR

Document

The directory separator, a slash.
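
As a quick orientation, here is a minimal standalone sketch (not taken from any of the projects below; the path components are made-up placeholders, and hadoop-common must be on the classpath) showing what the constant holds and how it is typically used to join path components:

import org.apache.hadoop.fs.Path;

public class SeparatorDemo {
    public static void main(String[] args) {
        // Path.SEPARATOR is the string "/"; Hadoop paths always use a forward
        // slash, regardless of the local operating system's file separator.
        String joined = "store" + Path.SEPARATOR + "FEED" + Path.SEPARATOR + "entity.xml";
        System.out.println(joined);                   // store/FEED/entity.xml
        System.out.println(new Path("/tmp", joined)); // /tmp/store/FEED/entity.xml
    }
}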

Usage

From source file:org.apache.drill.exec.work.batch.SpoolingRawBatchBuffer.java

License:Apache License

private Path getPath() {
    ExecProtos.FragmentHandle handle = context.getHandle();

    String qid = QueryIdHelper.getQueryId(handle.getQueryId());

    int majorFragmentId = handle.getMajorFragmentId();
    int minorFragmentId = handle.getMinorFragmentId();

    String fileName = Joiner.on(Path.SEPARATOR).join(getDir(), qid, majorFragmentId, minorFragmentId,
            oppositeId, bufferIndex);

    return new Path(fileName);
}
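
Joiner.on(Path.SEPARATOR).join(...) here simply builds a single slash-delimited string before wrapping it in a Path; chaining Hadoop's two-argument Path(parent, child) constructor over the same components would yield an equivalent path.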

From source file:org.apache.drill.TestDropTable.java

License:Apache License

@Test
public void testNonHomogenousDrop() throws Exception {
    test("use dfs_test.tmp");
    final String tableName = "homogenous_table";

    // create a parquet table
    test(String.format(CREATE_SIMPLE_TABLE, tableName));

    // create a json table within the same directory
    test("alter session set `store.format` = 'json'");
    final String nestedJsonTable = tableName + Path.SEPARATOR + "json_table";
    test(String.format(CREATE_SIMPLE_TABLE, BACK_TICK + nestedJsonTable + BACK_TICK));

    test("show files from " + tableName);

    boolean dropFailed = false;
    // this should fail, because the directory contains non-homogeneous files
    try {
        test(String.format(DROP_TABLE, tableName));
    } catch (UserException e) {
        Assert.assertTrue(e.getMessage().contains("VALIDATION ERROR"));
        dropFailed = true;
    }

    Assert.assertTrue("Dropping of non-homogeneous table should have failed", dropFailed);

    // drop the individual json table
    testBuilder().sqlQuery(String.format(DROP_TABLE, BACK_TICK + nestedJsonTable + BACK_TICK)).unOrdered()
            .baselineColumns("ok", "summary")
            .baselineValues(true, String.format("Table [%s] dropped", nestedJsonTable)).go();

    // Now drop should succeed
    testBuilder().sqlQuery(String.format(DROP_TABLE, tableName)).unOrdered().baselineColumns("ok", "summary")
            .baselineValues(true, String.format("Table [%s] dropped", tableName)).go();
}

From source file:org.apache.eagle.storage.hbase.tools.CoprocessorTool.java

License:Apache License

private void registerCoprocessor(String jarPath, String tableName, String localJarPath) throws IOException {
    Configuration configuration = getConf();
    try (FileSystem fs = FileSystem.get(configuration); HBaseAdmin admin = new HBaseAdmin(configuration)) {
        Path path = new Path(fs.getUri() + Path.SEPARATOR + jarPath);
        LOGGER.info("Checking path {} ... ", path.toString());
        if (!fs.exists(path)) {
            LOGGER.info("Path: {} not exist, uploading jar ...", path.toString());
            if (localJarPath == null) {
                throw new IOException(
                        "local jar path is not given, please manually upload coprocessor jar onto hdfs at "
                                + jarPath
                                + " and retry, or provide local coprocessor jar path through CLI argument and upload automatically");
            }
            LOGGER.info("Copying from local {} to {}", localJarPath, jarPath);
            fs.copyFromLocalFile(new Path(localJarPath), path);
            LOGGER.info("Succeed to copied coprocessor jar to {}", path.toString());
        } else {
            LOGGER.info("Path {} already exists", path.toString());
        }
        LOGGER.info("Checking hbase table {}", tableName);
        TableName table = TableName.valueOf(tableName);
        HTableDescriptor tableDescriptor = admin.getTableDescriptor(table);
        LOGGER.info("Table {} found", tableName);
        if (tableDescriptor.hasCoprocessor(AggregateProtocolEndPoint.class.getName())) {
            LOGGER.warn("Table '" + tableName + "' already registered coprocessor: "
                    + AggregateProtocolEndPoint.class.getName() + ", removing firstly");
            tableDescriptor.removeCoprocessor(AggregateProtocolEndPoint.class.getName());
            admin.modifyTable(table, tableDescriptor);
            tableDescriptor = admin.getTableDescriptor(table);
        }
        tableDescriptor.addCoprocessor(AggregateProtocolEndPoint.class.getName(), path,
                Coprocessor.PRIORITY_USER, new HashMap<>());
        admin.modifyTable(table, tableDescriptor);
        LOGGER.info("Succeed to enable coprocessor on table " + tableName);
    }
}

From source file:org.apache.falcon.entity.store.ConfigurationStore.java

License:Apache License

private void loadEntity(final EntityType type) throws FalconException {
    try {
        final ConcurrentHashMap<String, Entity> entityMap = dictionary.get(type);
        FileStatus[] files = fs.globStatus(new Path(storePath, type.name() + Path.SEPARATOR + "*"));
        if (files != null) {

            final ExecutorService service = Executors.newFixedThreadPool(numThreads);
            for (final FileStatus file : files) {
                service.execute(new Runnable() {
                    @Override
                    public void run() {
                        try {
                            String fileName = file.getPath().getName();
                            String encodedEntityName = fileName.substring(0, fileName.length() - 4); // drop ".xml"
                            String entityName = URLDecoder.decode(encodedEntityName, UTF_8);
                            Entity entity = restore(type, entityName);
                            LOG.info("Restored configuration {}/{}", type, entityName);
                            entityMap.put(entityName, entity);
                        } catch (IOException | FalconException e) {
                            LOG.error("Unable to restore entity of " + file.getPath().getName(), e);
                        }
                    }
                });
            }
            service.shutdown();
            if (service.awaitTermination(restoreTimeOutInMins, TimeUnit.MINUTES)) {
                LOG.info("Restored Configurations for entity type: {} ", type.name());
            } else {
                LOG.warn("Timed out while waiting for all threads to finish while restoring entities "
                        + "for type: {}", type.name());
            }
            // Checking if all entities were loaded
            if (entityMap.size() != files.length) {
                throw new FalconException("Unable to restore configurations for entity type " + type.name());
            }
            for (Entity entity : entityMap.values()) {
                onReload(entity);
            }
        }
    } catch (IOException e) {
        throw new FalconException("Unable to restore configurations", e);
    } catch (InterruptedException e) {
        throw new FalconException("Failed to restore configurations within " + restoreTimeOutInMins
                + " minutes for entity type " + type.name());
    }
}

From source file:org.apache.falcon.entity.store.ConfigurationStore.java

License:Apache License

/**
 * @param type   - Entity type that is to be stored into persistent storage
 * @param entity - entity to persist. JAXB Annotated entity will be marshalled
 *               to the persistent store. The convention used for storing the
 *               object:: PROP(config.store.uri)/{entitytype}/{entityname}.xml
 * @throws java.io.IOException If any error in accessing the storage
 * @throws FalconException
 */
private void persist(EntityType type, Entity entity) throws IOException, FalconException {
    if (!shouldPersist) {
        return;
    }
    OutputStream out = fs.create(
            new Path(storePath, type + Path.SEPARATOR + URLEncoder.encode(entity.getName(), UTF_8) + ".xml"));
    try {
        type.getMarshaller().marshal(entity, out);
        LOG.info("Persisted configuration {}/{}", type, entity.getName());
    } catch (JAXBException e) {
        LOG.error("Unable to serialize the entity object {}/{}", type, entity.getName(), e);
        throw new StoreAccessException("Unable to serialize the entity object " + type + "/" + entity.getName(),
                e);
    } finally {
        out.close();
    }
}
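
Note that the entity name is passed through URLEncoder.encode(..., UTF_8) so that characters unsafe in an HDFS file name round-trip cleanly; loadEntity above reverses this with URLDecoder.decode when restoring.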

From source file:org.apache.falcon.entity.store.ConfigurationStore.java

License:Apache License

/**
 * Archive removed configuration in the persistent store.
 *
 * @param type - Entity type to archive
 * @param name - name
 * @throws IOException If any error in accessing the storage
 */
private void archive(EntityType type, String name) throws IOException {
    if (!shouldPersist) {
        return;
    }
    Path archivePath = new Path(storePath, "archive" + Path.SEPARATOR + type);
    HadoopClientFactory.mkdirs(fs, archivePath, STORE_PERMISSION);
    fs.rename(new Path(storePath, type + Path.SEPARATOR + URLEncoder.encode(name, UTF_8) + ".xml"),
            new Path(archivePath, URLEncoder.encode(name, UTF_8) + "." + System.currentTimeMillis()));
    LOG.info("Archived configuration {}/{}", type, name);
}
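
The archived copy is renamed to the URL-encoded entity name with a System.currentTimeMillis() suffix rather than the original ".xml" extension, which is why the FeedLocationStoreTest cases below only check that an archived file name starts with the feed name.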

From source file:org.apache.falcon.entity.store.ConfigurationStore.java

License:Apache License

/**
 * @param type - Entity type to restore from persistent store
 * @param name - Name of the entity to restore.
 * @param <T>  - Actual entity object type
 * @return - De-serialized entity object restored from persistent store
 * @throws IOException     If any error in accessing the storage
 * @throws FalconException
 */
@SuppressWarnings("unchecked")
private synchronized <T extends Entity> T restore(EntityType type, String name)
        throws IOException, FalconException {

    InputStream in = fs
            .open(new Path(storePath, type + Path.SEPARATOR + URLEncoder.encode(name, UTF_8) + ".xml"));
    XMLInputFactory xif = SchemaHelper.createXmlInputFactory();
    try {
        XMLStreamReader xsr = xif.createXMLStreamReader(in);
        return (T) type.getUnmarshaller().unmarshal(xsr);
    } catch (XMLStreamException xse) {
        throw new StoreAccessException("Unable to un-marshall xml definition for " + type + "/" + name, xse);
    } catch (JAXBException e) {
        throw new StoreAccessException("Unable to un-marshall xml definition for " + type + "/" + name, e);
    } finally {
        in.close();
    }
}

From source file:org.apache.falcon.entity.store.FeedLocationStoreTest.java

License:Apache License

@Test
public void testOnUpdate() throws FalconException {
    Feed f1 = createFeed("f1");
    f1.getLocations().getLocations()
            .add(createLocation(LocationType.DATA, "/projects/cas/data/hourly/2014/09/09/09"));
    store.publish(EntityType.FEED, f1);

    Feed f2 = createFeed("f1");
    f2.getLocations().getLocations().add(createLocation(LocationType.DATA, "/projects/cas/data/monthly"));
    store.initiateUpdate(f2);
    store.update(EntityType.FEED, f2);
    store.cleanupUpdateInit();
    boolean isArchived = false;
    try {
        Path archivePath = new Path(store.getStorePath(), "archive" + Path.SEPARATOR + "FEED");
        FileStatus[] files = store.getFs().listStatus(archivePath);
        for (FileStatus f : files) {
            String name = f.getPath().getName();
            if (name.startsWith(f2.getName())) {
                isArchived = true;
                break;
            }
        }
    } catch (IOException e) {
        e.printStackTrace();
    }
    Assert.assertTrue(isArchived);
}

From source file:org.apache.falcon.entity.store.FeedLocationStoreTest.java

License:Apache License

@Test
public void testOnRemove() throws FalconException {
    int initialSize = FeedLocationStore.get().store.getSize();
    String feedName = "f1ForRemove";
    Feed f1 = createFeed(feedName);
    f1.getLocations().getLocations()
            .add(createLocation(LocationType.DATA, "/projects/cas/data/hourly/2014/09/09/09"));
    f1.getLocations().getLocations()
            .add(createLocation(LocationType.STATS, "/projects/cas/data/hourly/2014/09/09/09"));

    store.publish(EntityType.FEED, f1);
    Assert.assertEquals(FeedLocationStore.get().store.getSize() - initialSize, 4);
    store.remove(EntityType.FEED, feedName);
    boolean isArchived = false;
    try {
        Path archivePath = new Path(store.getStorePath(), "archive" + Path.SEPARATOR + "FEED");
        FileStatus[] files = store.getFs().listStatus(archivePath);
        for (FileStatus f : files) {
            String name = f.getPath().getName();
            if (name.startsWith(feedName)) {
                isArchived = true;
                break;
            }
        }
    } catch (IOException e) {
        e.printStackTrace();
    }
    Assert.assertTrue(isArchived);
    Assert.assertEquals(FeedLocationStore.get().store.getSize(), initialSize);
}

From source file:org.apache.falcon.snapshots.replication.HdfsSnapshotReplicator.java

License:Apache License

private String getStagingUri(String storageUrl, String dir) {
    storageUrl = StringUtils.removeEnd(storageUrl, Path.SEPARATOR);
    return storageUrl + Path.SEPARATOR + dir;
}