Example usage for org.apache.hadoop.fs Path SEPARATOR

List of usage examples for org.apache.hadoop.fs Path SEPARATOR

Introduction

On this page you can find example usage of org.apache.hadoop.fs Path.SEPARATOR.

Prototype

public static final String SEPARATOR

Document

The directory separator, a slash.
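
Before the full examples below, here is a minimal sketch of the typical pattern: Path.SEPARATOR (the string "/") is concatenated between a base location and a child name to build a path string. The base URI and table name used here are hypothetical and only for illustration.

import org.apache.hadoop.fs.Path;

public class PathSeparatorSketch {
    public static void main(String[] args) {
        // Hypothetical base location and table name, not taken from the examples below.
        String dbUri = "hdfs://namenode:8020/warehouse";
        String tableName = "my_table";

        // Path.SEPARATOR is "/", so this builds "hdfs://namenode:8020/warehouse/my_table".
        String tableLoc = dbUri + Path.SEPARATOR + tableName;
        System.out.println(tableLoc);

        // The Path(parent, child) constructor inserts the separator itself,
        // which is an alternative to manual concatenation.
        Path tablePath = new Path(new Path(dbUri), tableName);
        System.out.println(tablePath);
    }
}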

Usage

From source file:org.apache.hive.hcatalog.streaming.TestStreaming.java

License:Apache License

@Test
public void testTableValidation() throws Exception {
    int bucketCount = 100;

    String dbUri = "raw://" + new Path(dbFolder.newFolder().toString()).toUri().toString();
    String tbl1 = "validation1";
    String tbl2 = "validation2";

    String tableLoc = "'" + dbUri + Path.SEPARATOR + tbl1 + "'";
    String tableLoc2 = "'" + dbUri + Path.SEPARATOR + tbl2 + "'";

    runDDL(driver, "create database testBucketing3");
    runDDL(driver, "use testBucketing3");

    runDDL(driver, "create table " + tbl1 + " ( key1 string, data string ) clustered by ( key1 ) into "
            + bucketCount + " buckets  stored as orc  location " + tableLoc);

    runDDL(driver,
            "create table " + tbl2 + " ( key1 string, data string ) clustered by ( key1 ) into " + bucketCount
                    + " buckets  stored as orc  location " + tableLoc2
                    + " TBLPROPERTIES ('transactional'='false')");

    try {
        HiveEndPoint endPt = new HiveEndPoint(metaStoreURI, "testBucketing3", "validation1", null);
        endPt.newConnection(false, "UT_" + Thread.currentThread().getName());
        Assert.assertTrue("InvalidTable exception was not thrown", false);
    } catch (InvalidTable e) {
        // expecting this exception
    }
    try {
        HiveEndPoint endPt = new HiveEndPoint(metaStoreURI, "testBucketing3", "validation2", null);
        endPt.newConnection(false, "UT_" + Thread.currentThread().getName());
        Assert.assertTrue("InvalidTable exception was not thrown", false);
    } catch (InvalidTable e) {
        // expecting this exception
    }
}

From source file:org.apache.hive.hcatalog.streaming.TestStreaming.java

License:Apache License

public static Path createDbAndTable(Driver driver, String databaseName, String tableName, List<String> partVals,
        String[] colNames, String[] colTypes, String[] bucketCols, String[] partNames, String dbLocation,
        int bucketCount) throws Exception {

    String dbUri = "raw://" + new Path(dbLocation).toUri().toString();
    String tableLoc = dbUri + Path.SEPARATOR + tableName;

    runDDL(driver, "create database IF NOT EXISTS " + databaseName + " location '" + dbUri + "'");
    runDDL(driver, "use " + databaseName);
    String crtTbl = "create table " + tableName + " ( " + getTableColumnsStr(colNames, colTypes) + " )"
            + getPartitionStmtStr(partNames) + " clustered by ( " + join(bucketCols, ",") + " )" + " into "
            + bucketCount + " buckets " + " stored as orc " + " location '" + tableLoc + "'"
            + " TBLPROPERTIES ('transactional'='true') ";
    runDDL(driver, crtTbl);
    if (partNames != null && partNames.length != 0) {
        return addPartition(driver, tableName, partVals, partNames);
    }
    return new Path(tableLoc);
}

From source file:org.apache.hive.hplsql.Copy.java

License:Apache License

/**
 * Run COPY FROM LOCAL statement
 */
public Integer runFromLocal(HplsqlParser.Copy_from_local_stmtContext ctx) {
    trace(ctx, "COPY FROM LOCAL");
    initFileOptions(ctx.copy_file_option());
    HashMap<String, Pair<String, Long>> srcFiles = new HashMap<String, Pair<String, Long>>();
    String src = evalPop(ctx.copy_source(0)).toString();
    String dest = evalPop(ctx.copy_target()).toString();
    int srcItems = ctx.copy_source().size();
    for (int i = 0; i < srcItems; i++) {
        createLocalFileList(srcFiles, evalPop(ctx.copy_source(i)).toString(), null);
    }
    if (info) {
        info(ctx, "Files to copy: " + srcFiles.size() + " (" + Utils.formatSizeInBytes(srcSizeInBytes) + ")");
    }
    if (srcFiles.size() == 0) {
        exec.setHostCode(2);
        return 2;
    }
    timer.start();
    File file = new File(); // HPL/SQL file utility (org.apache.hive.hplsql.File), not java.io.File
    FileSystem fs = null;
    int succeed = 0;
    int failed = 0;
    long copiedSize = 0;
    try {
        fs = file.createFs();
        boolean multi = false;
        if (srcFiles.size() > 1) {
            multi = true;
        }
        for (Map.Entry<String, Pair<String, Long>> i : srcFiles.entrySet()) {
            try {
                Path s = new Path(i.getKey());
                Path d = null;
                if (multi) {
                    String relativePath = i.getValue().getLeft();
                    if (relativePath == null) {
                        d = new Path(dest, s.getName());
                    } else {
                        d = new Path(dest, relativePath + Path.SEPARATOR + s.getName());
                    }
                } else {
                    // Path to file is specified (can be relative), so treat target as a file name (hadoop fs -put behavior)
                    if (srcItems == 1 && i.getKey().endsWith(src)) {
                        d = new Path(dest);
                    }
                    // Source directory is specified, so treat the target as a directory 
                    else {
                        d = new Path(dest + Path.SEPARATOR + s.getName());
                    }
                }
                fs.copyFromLocalFile(delete, overwrite, s, d);
                succeed++;
                long size = i.getValue().getRight();
                copiedSize += size;
                if (info) {
                    info(ctx, "Copied: " + file.resolvePath(d) + " (" + Utils.formatSizeInBytes(size) + ")");
                }
            } catch (IOException e) {
                failed++;
                if (!ignore) {
                    throw e;
                }
            }
        }
    } catch (IOException e) {
        exec.signal(e);
        exec.setHostCode(1);
        return 1;
    } finally {
        long elapsed = timer.stop();
        if (info) {
            info(ctx,
                    "COPY completed: " + succeed + " succeed, " + failed + " failed, " + timer.format() + ", "
                            + Utils.formatSizeInBytes(copiedSize) + ", "
                            + Utils.formatBytesPerSec(copiedSize, elapsed));
        }
        if (failed == 0) {
            exec.setHostCode(0);
        } else {
            exec.setHostCode(1);
        }
        file.close();
    }
    return 0;
}

From source file:org.apache.hive.streaming.TestStreaming.java

License:Apache License

@Test
public void testStreamBucketingMatchesRegularBucketing() throws Exception {
    int bucketCount = 100;

    String dbUri = "raw://" + new Path(dbFolder.newFolder().toString()).toUri().toString();
    String tableLoc = "'" + dbUri + Path.SEPARATOR + "streamedtable" + "'";
    String tableLoc2 = "'" + dbUri + Path.SEPARATOR + "finaltable" + "'";
    String tableLoc3 = "'" + dbUri + Path.SEPARATOR + "nobucket" + "'";

    // disabling vectorization as this test yields incorrect results with vectorization
    conf.setBoolVar(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, false);
    try (IDriver driver = DriverFactory.newDriver(conf)) {
        runDDL(driver, "create database testBucketing3");
        runDDL(driver, "use testBucketing3");
        runDDL(driver,
                "create table streamedtable ( key1 string,key2 int,data string ) clustered by ( key1,key2 ) into "
                        + bucketCount + " buckets  stored as orc  location " + tableLoc
                        + " TBLPROPERTIES ('transactional'='true')");
        //  In 'nobucket' table we capture bucketid from streamedtable to work around a Hive bug that prevents joining two identically bucketed tables
        runDDL(driver, "create table nobucket ( bucketid int, key1 string,key2 int,data string ) location "
                + tableLoc3);
        runDDL(driver,
                "create table finaltable ( bucketid int, key1 string,key2 int,data string ) clustered by ( key1,key2 ) into "
                        + bucketCount + " buckets  stored as orc location " + tableLoc2
                        + " TBLPROPERTIES ('transactional'='true')");

        String[] records = new String[] { "PSFAHYLZVC,29,EPNMA", "PPPRKWAYAU,96,VUTEE", "MIAOFERCHI,3,WBDSI",
                "CEGQAZOWVN,0,WCUZL", "XWAKMNSVQF,28,YJVHU", "XBWTSAJWME,2,KDQFO", "FUVLQTAXAY,5,LDSDG",
                "QTQMDJMGJH,6,QBOMA", "EFLOTLWJWN,71,GHWPS", "PEQNAOJHCM,82,CAAFI", "MOEKQLGZCP,41,RUACR",
                "QZXMCOPTID,37,LFLWE", "EYALVWICRD,13,JEZLC", "VYWLZAYTXX,16,DMVZX", "OSALYSQIXR,47,HNZVE",
                "JGKVHKCEGQ,25,KSCJB", "WQFMMYDHET,12,DTRWA", "AJOVAYZKZQ,15,YBKFO", "YAQONWCUAU,31,QJNHZ",
                "DJBXUEUOEB,35,IYCBL" };

        StrictDelimitedInputWriter wr = StrictDelimitedInputWriter.newBuilder().withFieldDelimiter(',').build();
        HiveStreamingConnection connection = HiveStreamingConnection.newBuilder().withDatabase("testBucketing3")
                .withTable("streamedtable").withAgentInfo("UT_" + Thread.currentThread().getName())
                .withRecordWriter(wr).withHiveConf(conf).connect();

        connection.beginTransaction();

        for (String record : records) {
            connection.write(record.getBytes());
        }

        connection.commitTransaction();
        connection.close();

        ArrayList<String> res1 = queryTable(driver,
                "select row__id.bucketid, * from streamedtable order by key2");
        for (String re : res1) {
            LOG.error(re);
        }

        driver.run("insert into nobucket select row__id.bucketid,* from streamedtable");
        runDDL(driver, "insert into finaltable select * from nobucket");
        ArrayList<String> res2 = queryTable(driver,
                "select row__id.bucketid,* from finaltable where row__id.bucketid<>bucketid");
        for (String s : res2) {
            LOG.error(s);
        }
        Assert.assertTrue(res2.isEmpty());
    } finally {
        conf.unset(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED.varname);
    }
}

From source file:org.apache.hive.streaming.TestStreaming.java

License:Apache License

@Test
public void testTableValidation() throws Exception {
    int bucketCount = 100;

    String dbUri = "raw://" + new Path(dbFolder.newFolder().toString()).toUri().toString();
    String tbl1 = "validation1";
    String tbl2 = "validation2";

    String tableLoc = "'" + dbUri + Path.SEPARATOR + tbl1 + "'";
    String tableLoc2 = "'" + dbUri + Path.SEPARATOR + tbl2 + "'";

    runDDL(driver, "create database testBucketing3");
    runDDL(driver, "use testBucketing3");

    runDDL(driver,
            "create table " + tbl1 + " ( key1 string, data string ) clustered by ( key1 ) into " + bucketCount
                    + " buckets  stored as orc  location " + tableLoc
                    + " TBLPROPERTIES ('transactional'='false')");

    runDDL(driver,
            "create table " + tbl2 + " ( key1 string, data string ) clustered by ( key1 ) into " + bucketCount
                    + " buckets  stored as orc  location " + tableLoc2
                    + " TBLPROPERTIES ('transactional'='false')");

    StrictDelimitedInputWriter writer = StrictDelimitedInputWriter.newBuilder().withFieldDelimiter(',').build();
    HiveStreamingConnection connection = null;
    try {
        connection = HiveStreamingConnection.newBuilder().withDatabase("testBucketing3")
                .withTable("validation2").withAgentInfo("UT_" + Thread.currentThread().getName())
                .withRecordWriter(writer).withHiveConf(conf).connect();
        Assert.assertTrue("InvalidTable exception was not thrown", false);
    } catch (InvalidTable e) {
        // expecting this exception
    } finally {
        if (connection != null) {
            connection.close();
        }
    }
    try {
        connection = HiveStreamingConnection.newBuilder().withDatabase("testBucketing3")
                .withTable("validation2").withAgentInfo("UT_" + Thread.currentThread().getName())
                .withRecordWriter(writer).withHiveConf(conf).connect();
        Assert.assertTrue("InvalidTable exception was not thrown", false);
    } catch (InvalidTable e) {
        // expecting this exception
    } finally {
        if (connection != null) {
            connection.close();
        }
    }
}

From source file:org.apache.hive.streaming.TestStreaming.java

License:Apache License

private static Path createDbAndTable(IDriver driver, String databaseName, String tableName,
        List<String> partVals, String[] colNames, String[] colTypes, String[] bucketCols, String[] partNames,
        String dbLocation, int bucketCount) throws Exception {

    String dbUri = "raw://" + new Path(dbLocation).toUri().toString();
    String tableLoc = dbUri + Path.SEPARATOR + tableName;

    runDDL(driver, "create database IF NOT EXISTS " + databaseName + " location '" + dbUri + "'");
    runDDL(driver, "use " + databaseName);
    String crtTbl = "create table " + tableName + " ( " + getTableColumnsStr(colNames, colTypes) + " )"
            + getPartitionStmtStr(partNames) + " clustered by ( " + join(bucketCols, ",") + " )" + " into "
            + bucketCount + " buckets " + " stored as orc " + " location '" + tableLoc + "'"
            + " TBLPROPERTIES ('transactional'='true') ";
    runDDL(driver, crtTbl);
    if (partNames != null && partNames.length != 0) {
        return addPartition(driver, tableName, partVals, partNames);
    }
    return new Path(tableLoc);
}

From source file:org.apache.hive.streaming.TestStreamingDynamicPartitioning.java

License:Apache License

@Test
public void testDPStreamBucketingMatchesRegularBucketing() throws Exception {
    int bucketCount = 100;

    String dbUri = "raw://" + new Path(dbFolder.newFolder().toString()).toUri().toString();
    String tableLoc = "'" + dbUri + Path.SEPARATOR + "streamedtable" + "'";
    String tableLoc2 = "'" + dbUri + Path.SEPARATOR + "finaltable" + "'";
    String tableLoc3 = "'" + dbUri + Path.SEPARATOR + "nobucket" + "'";

    // disabling vectorization as this test yields incorrect results with vectorization
    conf.setBoolVar(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, false);
    try (IDriver driver = DriverFactory.newDriver(conf)) {
        runDDL(driver, "create database testBucketing3");
        runDDL(driver, "use testBucketing3");
        runDDL(driver, "create table streamedtable ( key1 string,key2 int,data string ) partitioned by (year "
                + "int) clustered by " + "( " + "key1,key2 ) into " + bucketCount
                + " buckets  stored as orc  location " + tableLoc + " TBLPROPERTIES ('transactional'='true')");
        //  In 'nobucket' table we capture bucketid from streamedtable to work around a Hive bug that prevents joining two identically bucketed tables
        runDDL(driver,
                "create table nobucket ( bucketid int, key1 string,key2 int,data string ) partitioned by "
                        + "(year int) location " + tableLoc3);
        runDDL(driver, "create table finaltable ( bucketid int, key1 string,key2 int,data string ) partitioned "
                + "by (year int) clustered by ( key1,key2 ) into " + bucketCount
                + " buckets  stored as orc location " + tableLoc2 + " TBLPROPERTIES ('transactional'='true')");

        String[] records = new String[] { "PSFAHYLZVC,29,EPNMA,2017", "PPPRKWAYAU,96,VUTEE,2017",
                "MIAOFERCHI,3,WBDSI,2017", "CEGQAZOWVN,0,WCUZL,2017", "XWAKMNSVQF,28,YJVHU,2017",
                "XBWTSAJWME,2,KDQFO,2017", "FUVLQTAXAY,5,LDSDG,2017", "QTQMDJMGJH,6,QBOMA,2018",
                "EFLOTLWJWN,71,GHWPS,2018", "PEQNAOJHCM,82,CAAFI,2018", "MOEKQLGZCP,41,RUACR,2018",
                "QZXMCOPTID,37,LFLWE,2018", "EYALVWICRD,13,JEZLC,2018", "VYWLZAYTXX,16,DMVZX,2018",
                "OSALYSQIXR,47,HNZVE,2018", "JGKVHKCEGQ,25,KSCJB,2018", "WQFMMYDHET,12,DTRWA,2018",
                "AJOVAYZKZQ,15,YBKFO,2018", "YAQONWCUAU,31,QJNHZ,2018", "DJBXUEUOEB,35,IYCBL,2018" };

        StrictDelimitedInputWriter wr = StrictDelimitedInputWriter.newBuilder().withFieldDelimiter(',').build();

        HiveStreamingConnection connection = HiveStreamingConnection.newBuilder().withDatabase("testBucketing3")
                .withTable("streamedtable").withAgentInfo("UT_" + Thread.currentThread().getName())
                .withRecordWriter(wr).withHiveConf(conf).connect();

        connection.beginTransaction();

        for (String record : records) {
            connection.write(record.getBytes());
        }

        connection.commitTransaction();
        connection.close();

        ArrayList<String> res1 = queryTable(driver,
                "select row__id.bucketid, * from streamedtable order by key2");
        for (String re : res1) {
            System.out.println(re);
            assertTrue(re.endsWith("2017") || re.endsWith("2018"));
        }

        driver.run("insert into nobucket partition(year) select row__id.bucketid,* from streamedtable");
        ArrayList<String> res = queryTable(driver, "select * from nobucket");
        assertEquals(records.length, res.size());
        runDDL(driver, " insert into finaltable partition(year) select * from nobucket");
        res = queryTable(driver, "select * from finaltable");
        assertEquals(records.length, res.size());
        ArrayList<String> res2 = queryTable(driver,
                "select row__id.bucketid,* from finaltable where row__id.bucketid<>bucketid");
        for (String s : res2) {
            LOG.error(s);
        }
        Assert.assertTrue(res2.isEmpty());

        res2 = queryTable(driver, "select * from finaltable where year=2018");
        assertEquals(13, res2.size());
        for (String s : res2) {
            assertTrue(s.endsWith("2018"));
        }

        res2 = queryTable(driver, "show partitions finaltable");
        assertEquals(2, res2.size());
        assertEquals("year=2017", res2.get(0));
        assertEquals("year=2018", res2.get(1));
    } finally {
        conf.unset(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED.varname);
    }
}

From source file:org.apache.hive.streaming.TestStreamingDynamicPartitioning.java

License:Apache License

@Test
public void testTableValidation() throws Exception {
    int bucketCount = 100;

    String dbUri = "raw://" + new Path(dbFolder.newFolder().toString()).toUri().toString();
    String tbl1 = "validation1";
    String tbl2 = "validation2";

    String tableLoc = "'" + dbUri + Path.SEPARATOR + tbl1 + "'";
    String tableLoc2 = "'" + dbUri + Path.SEPARATOR + tbl2 + "'";

    runDDL(driver, "create database testBucketing3");
    runDDL(driver, "use testBucketing3");

    runDDL(driver,
            "create table " + tbl1 + " ( key1 string, data string ) clustered by ( key1 ) into " + bucketCount
                    + " buckets  stored as orc  location " + tableLoc
                    + " TBLPROPERTIES ('transactional'='false')");

    runDDL(driver,
            "create table " + tbl2 + " ( key1 string, data string ) clustered by ( key1 ) into " + bucketCount
                    + " buckets  stored as orc  location " + tableLoc2
                    + " TBLPROPERTIES ('transactional'='false')");

    StrictDelimitedInputWriter wr = StrictDelimitedInputWriter.newBuilder().withFieldDelimiter(',').build();
    HiveStreamingConnection connection = null;
    try {
        connection = HiveStreamingConnection.newBuilder().withDatabase("testBucketing3")
                .withTable("validation2").withAgentInfo("UT_" + Thread.currentThread().getName())
                .withRecordWriter(wr).withHiveConf(conf).connect();
        Assert.assertTrue("InvalidTable exception was not thrown", false);
    } catch (InvalidTable e) {
        // expecting this exception
    } finally {
        if (connection != null) {
            connection.close();
        }
    }
    try {
        connection = HiveStreamingConnection.newBuilder().withDatabase("testBucketing3")
                .withTable("validation2").withAgentInfo("UT_" + Thread.currentThread().getName())
                .withRecordWriter(wr).withHiveConf(conf).connect();
        Assert.assertTrue("InvalidTable exception was not thrown", false);
    } catch (InvalidTable e) {
        // expecting this exception
    } finally {
        if (connection != null) {
            connection.close();
        }
    }
}

From source file:org.apache.hive.streaming.TestStreamingDynamicPartitioning.java

License:Apache License

private static Path createDbAndTable(IDriver driver, String databaseName, String tableName,
        List<String> partVals, String[] colNames, String[] colTypes, String[] bucketCols, String[] partNames,
        String dbLocation, int bucketCount) throws Exception {

    String dbUri = "raw://" + new Path(dbLocation).toUri().toString();
    String tableLoc = dbUri + Path.SEPARATOR + tableName;

    runDDL(driver, "create database IF NOT EXISTS " + databaseName + " location '" + dbUri + "'");
    runDDL(driver, "use " + databaseName);
    String crtTbl = "create table " + tableName + " ( " + getTableColumnsStr(colNames, colTypes) + " )"
            + getPartitionStmtStr(partNames) + " clustered by ( " + join(bucketCols, ",") + " )" + " into "
            + bucketCount + " buckets " + " stored as orc " + " location '" + tableLoc + "'"
            + " TBLPROPERTIES ('transactional'='true') ";
    runDDL(driver, crtTbl);
    if (partNames != null && partNames.length != 0 && partVals != null) {
        return addPartition(driver, tableName, partVals, partNames);
    }
    return new Path(tableLoc);
}

From source file:org.apache.hive.streaming.TestStreamingDynamicPartitioning.java

License:Apache License

private static Path createDbAndTable(IDriver driver, String databaseName, String tableName,
        List<String> partVals, String[] colNames, String[] colTypes, String[] bucketCols, String[] partNames,
        String dbLocation, int bucketCount, String partLine) throws Exception {

    String dbUri = "raw://" + new Path(dbLocation).toUri().toString();
    String tableLoc = dbUri + Path.SEPARATOR + tableName;

    runDDL(driver, "create database IF NOT EXISTS " + databaseName + " location '" + dbUri + "'");
    runDDL(driver, "use " + databaseName);
    String crtTbl = "create table " + tableName + " ( " + getTableColumnsStr(colNames, colTypes) + " )"
            + partLine + " clustered by ( " + join(bucketCols, ",") + " )" + " into " + bucketCount
            + " buckets " + " stored as orc " + " location '" + tableLoc + "'"
            + " TBLPROPERTIES ('transactional'='true') ";
    runDDL(driver, crtTbl);
    if (partNames != null && partNames.length != 0 && partVals != null) {
        return addPartition(driver, tableName, partVals, partNames);
    }
    return new Path(tableLoc);
}