Example usage for org.apache.hadoop.fs Path getParent

Introduction

This page collects example usages of org.apache.hadoop.fs Path getParent.

Prototype

public Path getParent() 

Document

Returns the parent of a path or null if at root.
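As a quick orientation before the real-world examples, here is a minimal, self-contained sketch (the path and class name are illustrative, not taken from the examples below) showing that getParent walks one level up the hierarchy and returns null once the root is reached:

import org.apache.hadoop.fs.Path;

public class GetParentDemo {
    public static void main(String[] args) {
        Path p = new Path("/accumulo/tables/1/t-0001/A0000.rf");
        // climb the hierarchy; getParent() returns null at the root, which ends the loop
        while (p != null) {
            System.out.println(p);
            p = p.getParent();
        }
        // prints the full path, then each ancestor in turn, down to "/"
    }
}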

Usage

From source file: org.apache.accumulo.master.tableOps.bulkVer1.CleanUpBulkImport.java

License: Apache License

@Override
public Repo<Master> call(long tid, Master master) throws Exception {
    master.updateBulkImportStatus(source, BulkImportState.CLEANUP);
    log.debug("removing the bulkDir processing flag file in " + bulk);
    Path bulkDir = new Path(bulk);
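    // the bulk dir lives under the table directory, so this rebuilds the relative "/<tableId>/<bulk dir>" flag path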
    MetadataTableUtil.removeBulkLoadInProgressFlag(master,
            "/" + bulkDir.getParent().getName() + "/" + bulkDir.getName());
    MetadataTableUtil.addDeleteEntry(master, tableId, bulkDir.toString());
    log.debug("removing the metadata table markers for loaded files");
    Connector conn = master.getConnector();
    MetadataTableUtil.removeBulkLoadEntries(conn, tableId, tid);
    log.debug("releasing HDFS reservations for " + source + " and " + error);
    Utils.unreserveHdfsDirectory(source, tid);
    Utils.unreserveHdfsDirectory(error, tid);
    Utils.getReadLock(tableId, tid).unlock();
    log.debug("completing bulkDir import transaction " + tid);
    ZooArbitrator.cleanup(Constants.BULK_ARBITRATOR_TYPE, tid);
    master.removeBulkImportStatus(source);
    return null;
}

From source file: org.apache.accumulo.master.tableOps.bulkVer2.BulkImportMove.java

License: Apache License

/**
 * For every entry in renames, move the file from the key path to the value path
 */
private void moveFiles(String fmtTid, Path sourceDir, Path bulkDir, Master master, final VolumeManager fs,
        Map<String, String> renames) throws Exception {
    MetadataTableUtil.addBulkLoadInProgressFlag(master,
            "/" + bulkDir.getParent().getName() + "/" + bulkDir.getName());

    int workerCount = master.getConfiguration().getCount(Property.MASTER_BULK_RENAME_THREADS);
    SimpleThreadPool workers = new SimpleThreadPool(workerCount, "bulkDir move");
    List<Future<Boolean>> results = new ArrayList<>();

    for (Map.Entry<String, String> renameEntry : renames.entrySet()) {
        results.add(workers.submit(() -> {
            final Path originalPath = new Path(sourceDir, renameEntry.getKey());
            Path newPath = new Path(bulkDir, renameEntry.getValue());
            Boolean success = fs.rename(originalPath, newPath);
            if (success && log.isTraceEnabled())
                log.trace("tid {} moved {} to {}", fmtTid, originalPath, newPath);
            return success;
        }));
    }
    workers.shutdown();
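    // wait for all queued rename tasks to finish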
    while (!workers.awaitTermination(1000L, TimeUnit.MILLISECONDS)) {
    }

    for (Future<Boolean> future : results) {
        try {
            if (!future.get()) {
                throw new AcceptableThriftTableOperationException(bulkInfo.tableId.canonicalID(), null,
                        TableOperation.BULK_IMPORT, TableOperationExceptionType.OTHER,
                        "Failed to move files from " + bulkInfo.sourceDir);
            }
        } catch (ExecutionException ee) {
            throw new AcceptableThriftTableOperationException(bulkInfo.tableId.canonicalID(), null,
                    TableOperation.BULK_IMPORT, TableOperationExceptionType.OTHER, ee.getCause().getMessage());
        }
    }
}

From source file: org.apache.accumulo.master.tableOps.bulkVer2.CleanUpBulkImport.java

License: Apache License

@Override
public Repo<Master> call(long tid, Master master) throws Exception {
    log.debug("removing the bulkDir processing flag file in " + info.bulkDir);
    Path bulkDir = new Path(info.bulkDir);
    MetadataTableUtil.removeBulkLoadInProgressFlag(master,
            "/" + bulkDir.getParent().getName() + "/" + bulkDir.getName());
    MetadataTableUtil.addDeleteEntry(master, info.tableId, bulkDir.toString());
    if (info.tableState == TableState.ONLINE) {
        log.debug("removing the metadata table markers for loaded files");
        Connector conn = master.getConnector();
        MetadataTableUtil.removeBulkLoadEntries(conn, info.tableId, tid);
    }
    Utils.unreserveHdfsDirectory(info.sourceDir, tid);
    Utils.getReadLock(info.tableId, tid).unlock();
    // delete json renames and mapping files
    Path renamingFile = new Path(bulkDir, Constants.BULK_RENAME_FILE);
    Path mappingFile = new Path(bulkDir, Constants.BULK_LOAD_MAPPING);
    try {
        master.getFileSystem().delete(renamingFile);
        master.getFileSystem().delete(mappingFile);
    } catch (IOException ioe) {
        log.debug("Failed to delete renames and/or loadmap", ioe);
    }

    log.debug("completing bulkDir import transaction " + tid);
    if (info.tableState == TableState.ONLINE) {
        ZooArbitrator.cleanup(Constants.BULK_ARBITRATOR_TYPE, tid);
    }
    return null;
}

From source file: org.apache.accumulo.master.tableOps.CleanUpBulkImport.java

License: Apache License

@Override
public Repo<Master> call(long tid, Master master) throws Exception {
    master.updateBulkImportStatus(source, BulkImportState.CLEANUP);
    log.debug("removing the bulk processing flag file in " + bulk);
    Path bulkDir = new Path(bulk);
    MetadataTableUtil.removeBulkLoadInProgressFlag(master,
            "/" + bulkDir.getParent().getName() + "/" + bulkDir.getName());
    MetadataTableUtil.addDeleteEntry(master, tableId, bulkDir.toString());
    log.debug("removing the metadata table markers for loaded files");
    Connector conn = master.getConnector();
    MetadataTableUtil.removeBulkLoadEntries(conn, tableId, tid);
    log.debug("releasing HDFS reservations for " + source + " and " + error);
    Utils.unreserveHdfsDirectory(source, tid);
    Utils.unreserveHdfsDirectory(error, tid);
    Utils.getReadLock(tableId, tid).unlock();
    log.debug("completing bulk import transaction " + tid);
    ZooArbitrator.cleanup(Constants.BULK_ARBITRATOR_TYPE, tid);
    master.removeBulkImportStatus(source);
    return null;
}

From source file: org.apache.accumulo.server.fs.FileRef.java

License: Apache License

static Path extractSuffix(Path path) {
    String pstr = path.toString();
    int index = pstr.lastIndexOf(FileType.TABLE.getDirectory());
    if (index < 0)
        throw new IllegalArgumentException("Invalid table path " + pstr);

    try {
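        // a valid table path is .../<TABLE_DIR>/<tableId>/<file> or .../<TABLE_DIR>/<tableId>/<tablet dir>/<file>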
        Path parent = path.getParent().getParent();
        if (!parent.getName().equals(FileType.TABLE.getDirectory())
                && !parent.getParent().getName().equals(FileType.TABLE.getDirectory()))
            throw new IllegalArgumentException("Invalid table path " + pstr);
    } catch (NullPointerException npe) {
        throw new IllegalArgumentException("Invalid table path " + pstr);
    }

    return new Path(pstr.substring(index + FileType.TABLE.getDirectory().length() + 1));
}

From source file: org.apache.accumulo.server.fs.VolumeUtil.java

License: Apache License

private static String decommisionedTabletDir(AccumuloServerContext context, ZooLock zooLock, VolumeManager vm,
        KeyExtent extent, String metaDir) throws IOException {
    Path dir = new Path(metaDir);
    if (isActiveVolume(dir))
        return metaDir;

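    // a tablet dir is expected to look like .../<TABLE_DIR>/<tableId>/<tablet dir>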
    if (!dir.getParent().getParent().getName().equals(ServerConstants.TABLE_DIR)) {
        throw new IllegalArgumentException("Unexpected table dir " + dir);
    }

    Path newDir = new Path(vm.choose(Optional.of(extent.getTableId()), ServerConstants.getBaseUris())
            + Path.SEPARATOR + ServerConstants.TABLE_DIR + Path.SEPARATOR + dir.getParent().getName()
            + Path.SEPARATOR + dir.getName());

    log.info("Updating directory for " + extent + " from " + dir + " to " + newDir);
    if (extent.isRootTablet()) {
        // the root tablet is a special case; its files need to be copied if its dir is changed

        // this code needs to be idempotent

        FileSystem fs1 = vm.getVolumeByPath(dir).getFileSystem();
        FileSystem fs2 = vm.getVolumeByPath(newDir).getFileSystem();

        if (!same(fs1, dir, fs2, newDir)) {
            if (fs2.exists(newDir)) {
                Path newDirBackup = getBackupName(fs2, newDir);
                // never delete anything because we're dealing with the root tablet
                // one reason this dir may exist is because this method failed previously
                log.info("renaming " + newDir + " to " + newDirBackup);
                if (!fs2.rename(newDir, newDirBackup)) {
                    throw new IOException("Failed to rename " + newDir + " to " + newDirBackup);
                }
            }

            // do a lot of logging since this is the root tablet
            log.info("copying " + dir + " to " + newDir);
            if (!FileUtil.copy(fs1, dir, fs2, newDir, false, CachedConfiguration.getInstance())) {
                throw new IOException("Failed to copy " + dir + " to " + newDir);
            }

            // only set the new location in zookeeper after a successful copy
            log.info("setting root tablet location to " + newDir);
            MetadataTableUtil.setRootTabletDir(newDir.toString());

            // rename the old dir to avoid confusion when someone looks at the filesystem; it's ok if we fail
            // here and this does not happen, because the location in zookeeper is the authority
            Path dirBackup = getBackupName(fs1, dir);
            log.info("renaming " + dir + " to " + dirBackup);
            fs1.rename(dir, dirBackup);

        } else {
            log.info("setting root tablet location to " + newDir);
            MetadataTableUtil.setRootTabletDir(newDir.toString());
        }

        return newDir.toString();
    } else {
        MetadataTableUtil.updateTabletDir(extent, newDir.toString(), context, zooLock);
        return newDir.toString();
    }
}

From source file: org.apache.accumulo.server.fs.VolumeUtil.java

License: Apache License

private static Path getBackupName(FileSystem fs, Path path) {
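    // build a sibling of path named <original name>_<timestamp>_<random>.bak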
    return new Path(path.getParent(), path.getName() + "_" + System.currentTimeMillis() + "_"
            + (rand.nextInt(Integer.MAX_VALUE) + 1) + ".bak");
}

From source file: org.apache.accumulo.server.master.recovery.RecoveryPath.java

License: Apache License

public static Path getRecoveryPath(VolumeManager fs, Path walPath) throws IOException {
    if (walPath.depth() >= 3 && walPath.toUri().getScheme() != null) {
        // it's a fully qualified path
        String uuid = walPath.getName();
        // drop uuid
        walPath = walPath.getParent();
        // recovered 1.4 WALs won't have a server component
        if (!walPath.getName().equals(FileType.WAL.getDirectory())) {
            // drop server
            walPath = walPath.getParent();
        }

        if (!walPath.getName().equals(FileType.WAL.getDirectory()))
            throw new IllegalArgumentException("Bad path " + walPath);

        // drop wal
        walPath = walPath.getParent();

        walPath = new Path(walPath, FileType.RECOVERY.getDirectory());
        walPath = new Path(walPath, uuid);

        return walPath;
    }

    throw new IllegalArgumentException("Bad path " + walPath);

}

From source file: org.apache.accumulo.server.master.tableOps.BulkImport.java

License: Apache License

private String prepareBulkImport(VolumeManager fs, String dir, String tableId) throws IOException {
    Path bulkDir = createNewBulkDir(fs, tableId);

    MetadataTableUtil.addBulkLoadInProgressFlag("/" + bulkDir.getParent().getName() + "/" + bulkDir.getName());

    Path dirPath = new Path(dir);
    FileStatus[] mapFiles = fs.listStatus(dirPath);

    UniqueNameAllocator namer = UniqueNameAllocator.getInstance();

    for (FileStatus fileStatus : mapFiles) {
        String sa[] = fileStatus.getPath().getName().split("\\.");
        String extension = "";
        if (sa.length > 1) {
            extension = sa[sa.length - 1];

            if (!FileOperations.getValidExtensions().contains(extension)) {
                log.warn(fileStatus.getPath() + " does not have a valid extension, ignoring");
                continue;
            }
        } else {
            // assume it is a map file
            extension = Constants.MAPFILE_EXTENSION;
        }

        if (extension.equals(Constants.MAPFILE_EXTENSION)) {
            if (!fileStatus.isDir()) {
                log.warn(fileStatus.getPath() + " is not a map file, ignoring");
                continue;
            }

            if (fileStatus.getPath().getName().equals("_logs")) {
                log.info(
                        fileStatus.getPath() + " is probably a log directory from a map/reduce task, skipping");
                continue;
            }
            try {
                FileStatus dataStatus = fs
                        .getFileStatus(new Path(fileStatus.getPath(), MapFile.DATA_FILE_NAME));
                if (dataStatus.isDir()) {
                    log.warn(fileStatus.getPath() + " is not a map file, ignoring");
                    continue;
                }
            } catch (FileNotFoundException fnfe) {
                log.warn(fileStatus.getPath() + " is not a map file, ignoring");
                continue;
            }
        }

        String newName = "I" + namer.getNextName() + "." + extension;
        Path newPath = new Path(bulkDir, newName);
        try {
            fs.rename(fileStatus.getPath(), newPath);
            log.debug("Moved " + fileStatus.getPath() + " to " + newPath);
        } catch (IOException E1) {
            log.error("Could not move: " + fileStatus.getPath().toString() + " " + E1.getMessage());
        }
    }
    return bulkDir.toString();
}

From source file: org.apache.accumulo.server.master.tableOps.BulkImport.java

License: Apache License

@Override
public Repo<Master> call(long tid, Master master) throws Exception {
    log.debug("removing the bulk processing flag file in " + bulk);
    Path bulkDir = new Path(bulk);
    MetadataTableUtil
            .removeBulkLoadInProgressFlag("/" + bulkDir.getParent().getName() + "/" + bulkDir.getName());
    MetadataTableUtil.addDeleteEntry(tableId, "/" + bulkDir.getName());
    log.debug("removing the metadata table markers for loaded files");
    Connector conn = master.getConnector();
    MetadataTableUtil.removeBulkLoadEntries(conn, tableId, tid);
    log.debug("releasing HDFS reservations for " + source + " and " + error);
    Utils.unreserveHdfsDirectory(source, tid);
    Utils.unreserveHdfsDirectory(error, tid);
    Utils.getReadLock(tableId, tid).unlock();
    log.debug("completing bulk import transaction " + tid);
    ZooArbitrator.cleanup(Constants.BULK_ARBITRATOR_TYPE, tid);
    return null;
}