Example usage for org.apache.hadoop.fs Path toString

List of usage examples for org.apache.hadoop.fs Path toString

Introduction

On this page you can find usage examples for org.apache.hadoop.fs.Path.toString().

Prototype

@Override
public String toString()

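Path.toString() returns the path in string form, including the scheme and authority when they are present, which is why the examples below use it wherever a location must be recorded as plain text (metadata directories, log messages, data-source names). A minimal sketch of the call itself, assuming only hadoop-common on the classpath; the paths and class name here are made up for illustration:

import org.apache.hadoop.fs.Path;

public class PathToStringExample {
    public static void main(String[] args) {
        // A fully qualified path keeps its scheme and authority in the string form.
        Path qualified = new Path("hdfs://namenode:8020/warehouse/db/table");
        System.out.println(qualified.toString()); // hdfs://namenode:8020/warehouse/db/table

        // A relative path is returned as written; toString() does not resolve it
        // against a working directory or file system.
        Path relative = new Path("data/part-00000");
        System.out.println(relative.toString()); // data/part-00000

        // The parent/child constructor joins the two with a single separator.
        Path joined = new Path(new Path("/tmp/output"), "file.orc");
        System.out.println(joined.toString()); // /tmp/output/file.orc
    }
}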

Usage

From source file:com.facebook.presto.hive.metastore.file.FileHiveMetastore.java

License:Apache License

@Override
public synchronized Optional<Database> getDatabase(String databaseName) {
    requireNonNull(databaseName, "databaseName is null");

    Path databaseMetadataDirectory = getDatabaseMetadataDirectory(databaseName);
    return readSchemaFile("database", databaseMetadataDirectory, databaseCodec)
            .map(databaseMetadata -> databaseMetadata.toDatabase(databaseName,
                    databaseMetadataDirectory.toString()));
}

From source file:com.facebook.presto.hive.metastore.file.FileHiveMetastore.java

License:Apache License

@Override
public synchronized Optional<Table> getTable(String databaseName, String tableName) {
    requireNonNull(databaseName, "databaseName is null");
    requireNonNull(tableName, "tableName is null");

    Path tableMetadataDirectory = getTableMetadataDirectory(databaseName, tableName);
    return readSchemaFile("table", tableMetadataDirectory, tableCodec).map(
            tableMetadata -> tableMetadata.toTable(databaseName, tableName, tableMetadataDirectory.toString()));
}

From source file:com.facebook.presto.hive.metastore.file.FileHiveMetastore.java

License:Apache License

@Override
public synchronized Optional<Partition> getPartition(String databaseName, String tableName,
        List<String> partitionValues) {
    requireNonNull(databaseName, "databaseName is null");
    requireNonNull(tableName, "tableName is null");
    requireNonNull(partitionValues, "partitionValues is null");

    Optional<Table> tableReference = getTable(databaseName, tableName);
    if (!tableReference.isPresent()) {
        return Optional.empty();
    }
    Table table = tableReference.get();

    Path partitionDirectory = getPartitionMetadataDirectory(table, partitionValues);
    return readSchemaFile("partition", partitionDirectory, partitionCodec)
            .map(partitionMetadata -> partitionMetadata.toPartition(databaseName, tableName, partitionValues,
                    partitionDirectory.toString()));
}

From source file:com.facebook.presto.hive.metastore.glue.GlueHiveMetastore.java

License:Apache License

private static void deleteDir(HdfsContext context, HdfsEnvironment hdfsEnvironment, Path path,
        boolean recursive) {
    try {
        hdfsEnvironment.getFileSystem(context, path).delete(path, recursive);
    } catch (Exception e) {
        // don't fail if unable to delete path
        log.warn(e, "Failed to delete path: " + path.toString());
    }
}

From source file:com.facebook.presto.hive.metastore.SemiTransactionalHiveMetastore.java

License:Apache License

private void recursiveDeleteFilesAndLog(String user, Path directory, List<String> filePrefixes,
        boolean deleteEmptyDirectories, String reason) {
    RecursiveDeleteResult recursiveDeleteResult = recursiveDeleteFiles(hdfsEnvironment, user, directory,
            filePrefixes, deleteEmptyDirectories);
    if (!recursiveDeleteResult.getNotDeletedEligibleItems().isEmpty()) {
        logCleanupFailure("Error deleting directory %s for %s. Some eligible items can not be deleted: %s.",
                directory.toString(), reason, recursiveDeleteResult.getNotDeletedEligibleItems());
    } else if (deleteEmptyDirectories && !recursiveDeleteResult.isDirectoryNoLongerExists()) {
        logCleanupFailure("Error deleting directory %s for %s. Can not delete the directory.",
                directory.toString(), reason);
    }
}

From source file:com.facebook.presto.hive.metastore.SemiTransactionalHiveMetastore.java

License:Apache License

/**
 * Attempt to recursively remove eligible files and/or directories in {@code directory}.
 *
 * When {@code filePrefixes} is empty, all files (but not necessarily directories) will be
 * ineligible. To make every file eligible, include an empty string in {@code filePrefixes}.
 *
 * When {@code deleteEmptyDirectories} is true, any empty directory (including directories that
 * were originally empty, and directories that become empty after files prefixed with
 * {@code filePrefixes} are deleted) will be eligible.
 *
 * This method will not delete anything that is neither a directory nor a file.
 *
 * @param filePrefixes  prefixes of the files that should be deleted
 * @param deleteEmptyDirectories  whether empty directories should be deleted
 */
private static RecursiveDeleteResult recursiveDeleteFiles(HdfsEnvironment hdfsEnvironment, String user,
        Path directory, List<String> filePrefixes, boolean deleteEmptyDirectories) {
    FileSystem fileSystem;
    try {
        fileSystem = hdfsEnvironment.getFileSystem(user, directory);

        if (!fileSystem.exists(directory)) {
            return new RecursiveDeleteResult(true, ImmutableList.of());
        }
    } catch (IOException e) {
        ImmutableList.Builder<String> notDeletedItems = ImmutableList.builder();
        notDeletedItems.add(directory.toString() + "/**");
        return new RecursiveDeleteResult(false, notDeletedItems.build());
    }

    return doRecursiveDeleteFiles(fileSystem, directory, filePrefixes, deleteEmptyDirectories);
}
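
The eligibility rule spelled out in the Javadoc above reduces to a prefix match on the file name (see the startsWith loop in doRecursiveDeleteFiles below). A standalone sketch of just that rule, with a hypothetical helper that is not part of Presto:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class PrefixEligibilityExample {
    // A file is eligible for deletion when its name starts with any of the prefixes.
    // An empty-string prefix matches every name, so a list containing "" makes all
    // files eligible, while an empty list makes none of them eligible.
    static boolean isEligible(String fileName, List<String> filePrefixes) {
        return filePrefixes.stream().anyMatch(fileName::startsWith);
    }

    public static void main(String[] args) {
        System.out.println(isEligible("part-00000_0", Arrays.asList("part-"))); // true
        System.out.println(isEligible("part-00000_0", Arrays.asList("")));      // true
        System.out.println(isEligible("part-00000_0", Collections.emptyList())); // false
    }
}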

From source file:com.facebook.presto.hive.metastore.SemiTransactionalHiveMetastore.java

License:Apache License

private static RecursiveDeleteResult doRecursiveDeleteFiles(FileSystem fileSystem, Path directory,
        List<String> filePrefixes, boolean deleteEmptyDirectories) {
    FileStatus[] allFiles;
    try {
        allFiles = fileSystem.listStatus(directory);
    } catch (IOException e) {
        ImmutableList.Builder<String> notDeletedItems = ImmutableList.builder();
        notDeletedItems.add(directory.toString() + "/**");
        return new RecursiveDeleteResult(false, notDeletedItems.build());
    }

    boolean allDescendentsDeleted = true;
    ImmutableList.Builder<String> notDeletedEligibleItems = ImmutableList.builder();
    for (FileStatus fileStatus : allFiles) {
        if (HadoopFileStatus.isFile(fileStatus)) {
            Path filePath = fileStatus.getPath();
            String fileName = filePath.getName();
            boolean eligible = false;
            for (String filePrefix : filePrefixes) {
                if (fileName.startsWith(filePrefix)) {
                    eligible = true;
                    break;
                }
            }
            if (eligible) {
                if (!deleteIfExists(fileSystem, filePath, false)) {
                    allDescendentsDeleted = false;
                    notDeletedEligibleItems.add(filePath.toString());
                }
            } else {
                allDescendentsDeleted = false;
            }
        } else if (HadoopFileStatus.isDirectory(fileStatus)) {
            RecursiveDeleteResult subResult = doRecursiveDeleteFiles(fileSystem, fileStatus.getPath(),
                    filePrefixes, deleteEmptyDirectories);
            if (!subResult.isDirectoryNoLongerExists()) {
                allDescendentsDeleted = false;
            }
            if (!subResult.getNotDeletedEligibleItems().isEmpty()) {
                notDeletedEligibleItems.addAll(subResult.getNotDeletedEligibleItems());
            }
        } else {
            allDescendentsDeleted = false;
            notDeletedEligibleItems.add(fileStatus.getPath().toString());
        }
    }
    if (allDescendentsDeleted && deleteEmptyDirectories) {
        verify(notDeletedEligibleItems.build().isEmpty());
        if (!deleteIfExists(fileSystem, directory, false)) {
            return new RecursiveDeleteResult(false, ImmutableList.of(directory.toString() + "/"));
        }
        return new RecursiveDeleteResult(true, ImmutableList.of());
    }
    return new RecursiveDeleteResult(false, notDeletedEligibleItems.build());
}

From source file:com.facebook.presto.hive.orc.OrcPageSourceFactory.java

License:Apache License

public static OrcPageSource createOrcPageSource(MetadataReader metadataReader, HdfsEnvironment hdfsEnvironment,
        String sessionUser, Configuration configuration, Path path, long start, long length,
        List<HiveColumnHandle> columns, boolean useOrcColumnNames,
        TupleDomain<HiveColumnHandle> effectivePredicate, DateTimeZone hiveStorageTimeZone,
        TypeManager typeManager, DataSize maxMergeDistance, DataSize maxBufferSize, DataSize streamBufferSize,
        boolean orcBloomFiltersEnabled) {
    OrcDataSource orcDataSource;
    try {
        FileSystem fileSystem = hdfsEnvironment.getFileSystem(sessionUser, path, configuration);
        long size = fileSystem.getFileStatus(path).getLen();
        FSDataInputStream inputStream = fileSystem.open(path);
        orcDataSource = new HdfsOrcDataSource(path.toString(), size, maxMergeDistance, maxBufferSize,
                streamBufferSize, inputStream);
    } catch (Exception e) {
        if (nullToEmpty(e.getMessage()).trim().equals("Filesystem closed")
                || e instanceof FileNotFoundException) {
            throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, e);
        }
        throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, splitError(e, path, start, length), e);
    }

    AggregatedMemoryContext systemMemoryUsage = new AggregatedMemoryContext();
    try {
        OrcReader reader = new OrcReader(orcDataSource, metadataReader, maxMergeDistance, maxBufferSize);

        List<HiveColumnHandle> physicalColumns = getPhysicalHiveColumnHandles(columns, useOrcColumnNames,
                reader, path);
        ImmutableMap.Builder<Integer, Type> includedColumns = ImmutableMap.builder();
        ImmutableList.Builder<ColumnReference<HiveColumnHandle>> columnReferences = ImmutableList.builder();
        for (HiveColumnHandle column : physicalColumns) {
            if (column.getColumnType() == REGULAR) {
                Type type = typeManager.getType(column.getTypeSignature());
                includedColumns.put(column.getHiveColumnIndex(), type);
                columnReferences.add(new ColumnReference<>(column, column.getHiveColumnIndex(), type));
            }
        }

        OrcPredicate predicate = new TupleDomainOrcPredicate<>(effectivePredicate, columnReferences.build(),
                orcBloomFiltersEnabled);

        OrcRecordReader recordReader = reader.createRecordReader(includedColumns.build(), predicate, start,
                length, hiveStorageTimeZone, systemMemoryUsage);

        return new OrcPageSource(recordReader, orcDataSource, physicalColumns, typeManager, systemMemoryUsage);
    } catch (Exception e) {
        try {
            orcDataSource.close();
        } catch (IOException ignored) {
        }
        if (e instanceof PrestoException) {
            throw (PrestoException) e;
        }
        String message = splitError(e, path, start, length);
        if (e.getClass().getSimpleName().equals("BlockMissingException")) {
            throw new PrestoException(HIVE_MISSING_DATA, message, e);
        }
        throw new PrestoException(HIVE_CANNOT_OPEN_SPLIT, message, e);
    }
}

From source file:com.facebook.presto.hive.OrcFileWriterFactory.java

License:Apache License

@Override
public Optional<HiveFileWriter> createFileWriter(Path path, List<String> inputColumnNames,
        StorageFormat storageFormat, Properties schema, JobConf configuration, ConnectorSession session) {
    if (!HiveSessionProperties.isOrcOptimizedWriterEnabled(session)) {
        return Optional.empty();
    }

    boolean isDwrf;
    if (OrcOutputFormat.class.getName().equals(storageFormat.getOutputFormat())) {
        isDwrf = false;
    } else if (com.facebook.hive.orc.OrcOutputFormat.class.getName().equals(storageFormat.getOutputFormat())) {
        isDwrf = true;
    } else {
        return Optional.empty();
    }

    CompressionKind compression = getCompression(schema, configuration);

    // existing tables and partitions may have columns in a different order than the writer is providing, so build
    // an index to rearrange columns in the proper order
    List<String> fileColumnNames = Splitter.on(',').trimResults().omitEmptyStrings()
            .splitToList(schema.getProperty(META_TABLE_COLUMNS, ""));
    List<Type> fileColumnTypes = toHiveTypes(schema.getProperty(META_TABLE_COLUMN_TYPES, "")).stream()
            .map(hiveType -> hiveType.getType(typeManager)).collect(toList());

    int[] fileInputColumnIndexes = fileColumnNames.stream().mapToInt(inputColumnNames::indexOf).toArray();

    try {
        FileSystem fileSystem = hdfsEnvironment.getFileSystem(session.getUser(), path, configuration);
        OutputStream outputStream = fileSystem.create(path);

        Optional<Supplier<OrcDataSource>> validationInputFactory = Optional.empty();
        if (HiveSessionProperties.isOrcOptimizedWriterValidate(session)) {
            validationInputFactory = Optional.of(() -> {
                try {
                    return new HdfsOrcDataSource(new OrcDataSourceId(path.toString()),
                            fileSystem.getFileStatus(path).getLen(), getOrcMaxMergeDistance(session),
                            getOrcMaxBufferSize(session), getOrcStreamBufferSize(session), false,
                            fileSystem.open(path), stats);
                } catch (IOException e) {
                    throw new PrestoException(HIVE_WRITE_VALIDATION_FAILED, e);
                }
            });
        }

        Callable<Void> rollbackAction = () -> {
            fileSystem.delete(path, false);
            return null;
        };

        return Optional.of(new OrcFileWriter(outputStream, rollbackAction, isDwrf, fileColumnNames,
                fileColumnTypes, compression, fileInputColumnIndexes,
                ImmutableMap.<String, String>builder()
                        .put(HiveMetadata.PRESTO_VERSION_NAME, nodeVersion.toString())
                        .put(HiveMetadata.PRESTO_QUERY_ID_NAME, session.getQueryId()).build(),
                hiveStorageTimeZone, validationInputFactory));
    } catch (IOException e) {
        throw new PrestoException(HIVE_WRITER_OPEN_ERROR, "Error creating ORC file", e);
    }
}
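
The reordering comment in createFileWriter above corresponds to a plain indexOf mapping from the file's declared column order to the order the writer receives its input in; a minimal illustration with made-up column names:

import java.util.Arrays;
import java.util.List;

public class ColumnIndexMappingExample {
    public static void main(String[] args) {
        // Column order declared in the table schema (the order columns are written to the file).
        List<String> fileColumnNames = Arrays.asList("id", "name", "ds");
        // Column order the writer actually receives its input in.
        List<String> inputColumnNames = Arrays.asList("name", "ds", "id");

        // For each file column, find its position in the input; -1 would mean the column is missing.
        int[] fileInputColumnIndexes = fileColumnNames.stream()
                .mapToInt(inputColumnNames::indexOf)
                .toArray();

        System.out.println(Arrays.toString(fileInputColumnIndexes)); // [2, 0, 1]
    }
}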

From source file:com.facebook.presto.hive.parquet.HdfsParquetDataSource.java

License:Apache License

public HdfsParquetDataSource(Path path, long size, FSDataInputStream inputStream) {
    this.name = path.toString();
    this.size = size;
    this.inputStream = inputStream;
}