Example usage for org.apache.hadoop.mapred JobConf setEnum

List of usage examples for org.apache.hadoop.mapred JobConf setEnum

Introduction

On this page you can find example usage for org.apache.hadoop.mapred JobConf setEnum.

Prototype

public <T extends Enum<T>> void setEnum(String name, T value) 

Document

Set the value of the name property to the given enum value.
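
A minimal sketch of the call itself, assuming only that hadoop-mapreduce-client-core is on the classpath; the Mode enum and the "example.mode" key are made up for illustration. setEnum stores the enum constant under the given property name, and the matching getEnum reads it back with a default:

import org.apache.hadoop.mapred.JobConf;

public class SetEnumSketch {
    // Hypothetical enum, not part of Hadoop; used only for illustration.
    enum Mode { FAST, SAFE }

    public static void main(String[] args) {
        JobConf jobConf = new JobConf();

        // Store the enum constant under the property key.
        jobConf.setEnum("example.mode", Mode.FAST);

        // Read it back; the second argument is the default used when the key is unset.
        Mode mode = jobConf.getEnum("example.mode", Mode.SAFE);
        System.out.println(mode); // prints FAST
    }
}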

Usage

From source file: com.facebook.presto.hive.parquet.ParquetTester.java

License: Apache License

public void assertRoundTrip(ObjectInspector objectInspector, Iterable<?> writeValues, Iterable<?> readValues,
        Type type) throws Exception {
    for (WriterVersion version : versions) {
        for (CompressionCodecName compressionCodecName : compressions) {
            try (TempFile tempFile = new TempFile("test", "parquet")) {
                JobConf jobConf = new JobConf();
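                // Configure the Parquet codec and writer version through enum-typed properties.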
                jobConf.setEnum(ParquetOutputFormat.COMPRESSION, compressionCodecName);
                jobConf.setBoolean(ParquetOutputFormat.ENABLE_DICTIONARY, true);
                jobConf.setEnum(ParquetOutputFormat.WRITER_VERSION, version);
                writeParquetColumn(jobConf, tempFile.getFile(), compressionCodecName, objectInspector,
                        writeValues.iterator());
                assertFileContents(jobConf, objectInspector, tempFile, readValues, type);
            }
        }
    }
}

From source file: io.prestosql.plugin.hive.parquet.ParquetTester.java

License: Apache License

void assertRoundTrip(List<ObjectInspector> objectInspectors, Iterable<?>[] writeValues,
        Iterable<?>[] readValues, List<String> columnNames, List<Type> columnTypes,
        Optional<MessageType> parquetSchema, boolean singleLevelArray) throws Exception {
    for (WriterVersion version : versions) {
        for (CompressionCodecName compressionCodecName : compressions) {
            for (ConnectorSession session : sessions) {
                try (TempFile tempFile = new TempFile("test", "parquet")) {
                    JobConf jobConf = new JobConf();
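                    // COMPRESSION, ENABLE_DICTIONARY and WRITER_VERSION are presumably the same ParquetOutputFormat keys as above, imported statically.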
                    jobConf.setEnum(COMPRESSION, compressionCodecName);
                    jobConf.setBoolean(ENABLE_DICTIONARY, true);
                    jobConf.setEnum(WRITER_VERSION, version);
                    writeParquetColumn(jobConf, tempFile.getFile(), compressionCodecName,
                            createTableProperties(columnNames, objectInspectors),
                            getStandardStructObjectInspector(columnNames, objectInspectors),
                            getIterators(writeValues), parquetSchema, singleLevelArray);
                    assertFileContents(session, tempFile.getFile(), getIterators(readValues), columnNames,
                            columnTypes);
                }
            }
        }
    }
}

From source file: io.prestosql.plugin.hive.parquet.ParquetTester.java

License: Apache License

void assertMaxReadBytes(List<ObjectInspector> objectInspectors, Iterable<?>[] writeValues,
        Iterable<?>[] readValues, List<String> columnNames, List<Type> columnTypes,
        Optional<MessageType> parquetSchema, DataSize maxReadBlockSize) throws Exception {
    WriterVersion version = PARQUET_1_0;
    CompressionCodecName compressionCodecName = UNCOMPRESSED;
    HiveClientConfig config = new HiveClientConfig().setHiveStorageFormat(HiveStorageFormat.PARQUET)
            .setUseParquetColumnNames(false).setParquetMaxReadBlockSize(maxReadBlockSize);
    ConnectorSession session = new TestingConnectorSession(
            new HiveSessionProperties(config, new OrcFileWriterConfig(), new ParquetFileWriterConfig())
                    .getSessionProperties());

    try (TempFile tempFile = new TempFile("test", "parquet")) {
        JobConf jobConf = new JobConf();
        jobConf.setEnum(COMPRESSION, compressionCodecName);
        jobConf.setBoolean(ENABLE_DICTIONARY, true);
        jobConf.setEnum(WRITER_VERSION, version);
        writeParquetColumn(jobConf, tempFile.getFile(), compressionCodecName,
                createTableProperties(columnNames, objectInspectors),
                getStandardStructObjectInspector(columnNames, objectInspectors), getIterators(writeValues),
                parquetSchema, false);

        Iterator<?>[] expectedValues = getIterators(readValues);
        try (ConnectorPageSource pageSource = getFileFormat().createFileFormatReader(session, HDFS_ENVIRONMENT,
                tempFile.getFile(), columnNames, columnTypes)) {
            assertPageSource(columnTypes, expectedValues, pageSource,
                    Optional.of(getParquetMaxReadBlockSize(session).toBytes()));
            assertFalse(stream(expectedValues).allMatch(Iterator::hasNext));
        }
    }
}