Example usage for org.apache.hadoop.fs Path equals

Introduction

On this page you can find usage examples for org.apache.hadoop.fs.Path.equals.

Prototype

@Override
public boolean equals(Object o)
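
For illustration, here is a minimal sketch of what this equals() actually compares (the class name PathEqualsDemo is ours, not part of the Hadoop API). Path.equals tests URI equality, so a scheme-qualified path never equals a bare one, and comparing against a plain String is always false:

import org.apache.hadoop.fs.Path;

public class PathEqualsDemo {
    public static void main(String[] args) {
        // Equal: both arguments normalize to the same URI.
        System.out.println(new Path("/tmp/data").equals(new Path("/tmp/data"))); // true

        // Not equal: equals() compares the full URI, including scheme and authority.
        System.out.println(new Path("hdfs://nn1/tmp/data").equals(new Path("/tmp/data"))); // false

        // Not equal: the parameter is Object, but any non-Path argument returns false.
        System.out.println(new Path("/tmp/data").equals("/tmp/data")); // false
    }
}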

Usage

From source file:fuse4j.hadoopfs.HdfsClientImpl.java

License:Apache License

@Override
public boolean rename(int uid, String src, String dst) {
    FileSystem dfs = null;
    try {
        dfs = getDfs(uid);
        Path srcPath = new Path(src);
        Path dstPath = new Path(dst);
        if (srcPath.equals(dstPath)) {
            //source and destination are the same path
            return false;
        }
        if (dfs.isFile(dstPath) && dfs.isFile(srcPath)) {
            //TODO: temporary fix to overwrite files
            //delete destination file if exists.
            //"HDFS-654"  fixes the problem allowing atomic rename when dst exists
            dfs.delete(dstPath);
        }
        return dfs.rename(srcPath, dstPath);
    } catch (Exception ioe) {
        // fall through to failure
        System.out.println(ioe);
    }
    return false;
}
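
The srcPath.equals(dstPath) guard above short-circuits no-op renames. Because Path.equals is URI-based, two Path objects built from the same string are always equal; a tiny check illustrating this (the class name SamePathGuard is ours):

import org.apache.hadoop.fs.Path;

public class SamePathGuard {
    public static void main(String[] args) {
        Path src = new Path("/user/alice/file.txt");
        Path dst = new Path("/user/alice/file.txt");
        // Distinct objects, equal URIs: rename() above would bail out with false here.
        System.out.println(src.equals(dst)); // true
    }
}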

From source file:gobblin.data.management.retention.dataset.DatasetBase.java

License:Open Source License

private void deleteEmptyParentDirectories(Path datasetRoot, Path parent) throws IOException {
    if (!parent.equals(datasetRoot) && this.fs.listStatus(parent).length == 0) {
        this.fs.delete(parent, false);
        deleteEmptyParentDirectories(datasetRoot, parent.getParent());
    }
}

From source file:gobblin.source.extractor.hadoop.HadoopFsHelper.java

License:Apache License

public void lsr(Path p, List<String> results) throws IOException {
    if (!this.fs.getFileStatus(p).isDirectory()) {
        results.add(p.toString());
        // A file is its own listStatus() result, so stop here to avoid adding it twice.
        return;
    }
    Path qualifiedPath = this.fs.makeQualified(p);
    for (FileStatus status : this.fs.listStatus(p)) {
        if (status.isDirectory()) {
            // Fix for hadoop issue: https://issues.apache.org/jira/browse/HADOOP-12169
            if (!qualifiedPath.equals(status.getPath())) {
                lsr(status.getPath(), results);
            }
        } else {
            results.add(status.getPath().toString());
        }
    }
}
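
The HADOOP-12169 guard above only works because makeQualified() fills in the scheme and authority, and Path.equals compares full URIs. A minimal sketch against the local filesystem (the class name QualifiedPathCheck is ours) showing why both sides must be qualified before comparing:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class QualifiedPathCheck {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        Path bare = new Path("/tmp");
        Path qualified = fs.makeQualified(bare); // e.g. file:/tmp

        // A qualified path never equals its unqualified form...
        System.out.println(bare.equals(qualified)); // false
        // ...so qualify both sides before comparing.
        System.out.println(fs.makeQualified(bare).equals(qualified)); // true
    }
}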

From source file:io.pravega.segmentstore.storage.impl.hdfs.MockFileSystem.java

License:Open Source License

@Override
public FileStatus getFileStatus(Path f) throws IOException {
    if (f.equals(root.getPath())) {
        return root;
    }
    return getFileData(f).getStatus();
}

From source file:io.prestosql.plugin.hive.AbstractTestHiveClient.java

License:Apache License

/**
 * @return query id
 */
private String insertData(SchemaTableName tableName, MaterializedResult data) throws Exception {
    Path writePath;
    Path targetPath;
    String queryId;
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        ConnectorInsertTableHandle insertTableHandle = metadata.beginInsert(session, tableHandle);
        queryId = session.getQueryId();
        writePath = getStagingPathRoot(insertTableHandle);
        targetPath = getTargetPathRoot(insertTableHandle);

        ConnectorPageSink sink = pageSinkProvider.createPageSink(transaction.getTransactionHandle(), session,
                insertTableHandle);

        // write data
        sink.appendPage(data.toPage());
        Collection<Slice> fragments = getFutureValue(sink.finish());

        // commit the insert
        metadata.finishInsert(session, insertTableHandle, fragments, ImmutableList.of());
        transaction.commit();
    }

    // check that temporary files are removed
    if (!writePath.equals(targetPath)) {
        HdfsContext context = new HdfsContext(newSession(), tableName.getSchemaName(),
                tableName.getTableName());
        FileSystem fileSystem = hdfsEnvironment.getFileSystem(context, writePath);
        assertFalse(fileSystem.exists(writePath));
    }

    return queryId;
}

From source file:io.prestosql.plugin.hive.AbstractTestHiveClient.java

License:Apache License

private void doTestTransactionDeleteInsert(HiveStorageFormat storageFormat, SchemaTableName tableName,
        Domain domainToDrop, MaterializedResult insertData, MaterializedResult expectedData,
        TransactionDeleteInsertTestTag tag, boolean expectQuerySucceed,
        Optional<ConflictTrigger> conflictTrigger) throws Exception {
    Path writePath = null;
    Path targetPath = null;

    try (Transaction transaction = newTransaction()) {
        try {
            ConnectorMetadata metadata = transaction.getMetadata();
            ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
            ConnectorSession session;
            rollbackIfEquals(tag, ROLLBACK_RIGHT_AWAY);

            // Query 1: delete
            session = newSession();
            HiveColumnHandle dsColumnHandle = (HiveColumnHandle) metadata.getColumnHandles(session, tableHandle)
                    .get("pk2");
            TupleDomain<ColumnHandle> tupleDomain = TupleDomain
                    .withColumnDomains(ImmutableMap.of(dsColumnHandle, domainToDrop));
            Constraint<ColumnHandle> constraint = new Constraint<>(tupleDomain,
                    convertToPredicate(tupleDomain));
            List<ConnectorTableLayoutResult> tableLayoutResults = metadata.getTableLayouts(session, tableHandle,
                    constraint, Optional.empty());
            ConnectorTableLayoutHandle tableLayoutHandle = getOnlyElement(tableLayoutResults).getTableLayout()
                    .getHandle();
            metadata.metadataDelete(session, tableHandle, tableLayoutHandle);
            rollbackIfEquals(tag, ROLLBACK_AFTER_DELETE);

            // Query 2: insert
            session = newSession();
            ConnectorInsertTableHandle insertTableHandle = metadata.beginInsert(session, tableHandle);
            rollbackIfEquals(tag, ROLLBACK_AFTER_BEGIN_INSERT);
            writePath = getStagingPathRoot(insertTableHandle);
            targetPath = getTargetPathRoot(insertTableHandle);
            ConnectorPageSink sink = pageSinkProvider.createPageSink(transaction.getTransactionHandle(),
                    session, insertTableHandle);
            sink.appendPage(insertData.toPage());
            rollbackIfEquals(tag, ROLLBACK_AFTER_APPEND_PAGE);
            Collection<Slice> fragments = getFutureValue(sink.finish());
            rollbackIfEquals(tag, ROLLBACK_AFTER_SINK_FINISH);
            metadata.finishInsert(session, insertTableHandle, fragments, ImmutableList.of());
            rollbackIfEquals(tag, ROLLBACK_AFTER_FINISH_INSERT);

            assertEquals(tag, COMMIT);

            if (conflictTrigger.isPresent()) {
                JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class);
                List<PartitionUpdate> partitionUpdates = fragments.stream().map(Slice::getBytes)
                        .map(partitionUpdateCodec::fromJson).collect(toList());
                conflictTrigger.get().triggerConflict(session, tableName, insertTableHandle, partitionUpdates);
            }
            transaction.commit();
            if (conflictTrigger.isPresent()) {
                assertTrue(expectQuerySucceed);
                conflictTrigger.get().verifyAndCleanup(tableName);
            }
        } catch (TestingRollbackException e) {
            transaction.rollback();
        } catch (PrestoException e) {
            assertFalse(expectQuerySucceed);
            if (conflictTrigger.isPresent()) {
                conflictTrigger.get().verifyAndCleanup(tableName);
            }
        }
    }

    // check that temporary files are removed
    if (writePath != null && !writePath.equals(targetPath)) {
        HdfsContext context = new HdfsContext(newSession(), tableName.getSchemaName(),
                tableName.getTableName());
        FileSystem fileSystem = hdfsEnvironment.getFileSystem(context, writePath);
        assertFalse(fileSystem.exists(writePath));
    }

    try (Transaction transaction = newTransaction()) {
        // verify partitions
        List<String> partitionNames = transaction.getMetastore(tableName.getSchemaName())
                .getPartitionNames(tableName.getSchemaName(), tableName.getTableName())
                .orElseThrow(() -> new AssertionError("Table does not exist: " + tableName));
        assertEqualsIgnoreOrder(partitionNames,
                expectedData.getMaterializedRows().stream()
                        .map(row -> format("pk1=%s/pk2=%s", row.getField(1), row.getField(2))).distinct()
                        .collect(toList()));

        // load the new table
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(
                metadata.getColumnHandles(session, tableHandle).values());

        // verify the data
        MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session,
                TupleDomain.all(), OptionalInt.empty(), Optional.of(storageFormat));
        assertEqualsIgnoreOrder(result.getMaterializedRows(), expectedData.getMaterializedRows());
    }
}

From source file:io.prestosql.plugin.hive.LocationHandle.java

License:Apache License

public LocationHandle(Path targetPath, Path writePath, boolean isExistingTable, WriteMode writeMode) {
    if (writeMode.isWritePathSameAsTargetPath() && !targetPath.equals(writePath)) {
        throw new IllegalArgumentException(
                format("targetPath is expected to be same as writePath for writeMode %s", writeMode));
    }
    this.targetPath = requireNonNull(targetPath, "targetPath is null");
    this.writePath = requireNonNull(writePath, "writePath is null");
    this.isExistingTable = isExistingTable;
    this.writeMode = requireNonNull(writeMode, "writeMode is null");
}

From source file:io.prestosql.plugin.hive.metastore.file.FileHiveMetastore.java

License:Apache License

@Override
public synchronized void createTable(Table table, PrincipalPrivileges principalPrivileges) {
    verifyTableNotExists(table.getDatabaseName(), table.getTableName());

    Path tableMetadataDirectory = getTableMetadataDirectory(table);

    // validate table location
    if (table.getTableType().equals(VIRTUAL_VIEW.name())) {
        checkArgument(table.getStorage().getLocation().isEmpty(), "Storage location for view must be empty");
    } else if (table.getTableType().equals(MANAGED_TABLE.name())) {
        if (!tableMetadataDirectory.equals(new Path(table.getStorage().getLocation()))) {
            throw new PrestoException(HIVE_METASTORE_ERROR,
                    "Table directory must be " + tableMetadataDirectory);
        }
    } else if (table.getTableType().equals(EXTERNAL_TABLE.name())) {
        try {
            Path externalLocation = new Path(table.getStorage().getLocation());
            FileSystem externalFileSystem = hdfsEnvironment.getFileSystem(hdfsContext, externalLocation);
            if (!externalFileSystem.isDirectory(externalLocation)) {
                throw new PrestoException(HIVE_METASTORE_ERROR, "External table location does not exist");
            }
            if (isChildDirectory(catalogDirectory, externalLocation)) {
                throw new PrestoException(HIVE_METASTORE_ERROR,
                        "External table location can not be inside the system metadata directory");
            }
        } catch (IOException e) {
            throw new PrestoException(HIVE_METASTORE_ERROR, "Could not validate external location", e);
        }
    } else {
        throw new PrestoException(NOT_SUPPORTED, "Table type not supported: " + table.getTableType());
    }

    writeSchemaFile("table", tableMetadataDirectory, tableCodec, new TableMetadata(table), false);

    for (Entry<String, Collection<HivePrivilegeInfo>> entry : principalPrivileges.getUserPrivileges().asMap()
            .entrySet()) {
        setTablePrivileges(new PrestoPrincipal(USER, entry.getKey()), table.getDatabaseName(),
                table.getTableName(), entry.getValue());
    }
    for (Entry<String, Collection<HivePrivilegeInfo>> entry : principalPrivileges.getRolePrivileges().asMap()
            .entrySet()) {
        setTablePrivileges(new PrestoPrincipal(ROLE, entry.getKey()), table.getDatabaseName(),
                table.getTableName(), entry.getValue());
    }
}

From source file:oracle.kv.hadoop.hive.table.TableHiveInputSplit.java

License:Open Source License

@Override
public boolean equals(Object obj) {
    if (!(obj instanceof TableHiveInputSplit)) {
        return false;
    }

    if (obj == this) {
        return true;
    }

    final TableHiveInputSplit obj1 = this;
    final TableHiveInputSplit obj2 = (TableHiveInputSplit) obj;

    final Path path1 = obj1.getPath();
    final Path path2 = obj2.getPath();

    if (path1 != null) {
        if (!path1.equals(path2)) {
            return false;
        }
    } else {
        if (path2 != null) {
            return false;
        }
    }
    return obj1.v2Split.equals(obj2.v2Split);
}

From source file:org.apache.accumulo.server.fs.VolumeUtil.java

License:Apache License

public static String switchVolume(String path, FileType ft, List<Pair<Path, Path>> replacements) {
    if (replacements.size() == 0) {
        log.trace("Not switching volume because there are no replacements");
        return null;
    }

    if (!path.contains(":")) {
        // ignore relative paths
        return null;
    }

    Path p = new Path(path);

    // removing slash because new Path("hdfs://nn1").equals(new Path("hdfs://nn1/")) evaluates to false
    Path volume = removeTrailingSlash(ft.getVolume(p));

    for (Pair<Path, Path> pair : replacements) {
        Path key = removeTrailingSlash(pair.getFirst());

        if (key.equals(volume)) {
            String replacement = new Path(pair.getSecond(), ft.removeVolume(p)).toString();
            log.trace("Replacing " + path + " with " + replacement);
            return replacement;
        }
    }

    log.trace("Could not find replacement for " + ft + " at " + path);

    return null;
}
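
The removeTrailingSlash() normalization exists because of the URI-equality rule quoted in the comment above: a bare authority and an explicit root path produce different URIs. A minimal demonstration (the class name TrailingSlashDemo is ours):

import org.apache.hadoop.fs.Path;

public class TrailingSlashDemo {
    public static void main(String[] args) {
        // Different URIs, so equals() is false; this is why switchVolume()
        // strips trailing slashes from both volumes before comparing them.
        System.out.println(new Path("hdfs://nn1").equals(new Path("hdfs://nn1/"))); // false
    }
}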