Example usage for org.apache.hadoop.hdfs.client HdfsAdmin HdfsAdmin

Introduction

On this page you can find example usages of the org.apache.hadoop.hdfs.client.HdfsAdmin constructor, HdfsAdmin(URI, Configuration).

Prototype

public HdfsAdmin(URI uri, Configuration conf) throws IOException 

Document

Create a new HdfsAdmin client.
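
For orientation, here is a minimal, self-contained sketch of constructing the client. The NameNode address hdfs://namenode:8020 is a placeholder assumption, not taken from the examples below.

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.client.HdfsAdmin;

public class HdfsAdminExample {
    public static void main(String[] args) throws IOException {
        // Placeholder NameNode URI; replace with your cluster's address.
        URI uri = URI.create("hdfs://namenode:8020");
        // Loads core-site.xml and hdfs-site.xml from the classpath, if present.
        Configuration conf = new Configuration();
        HdfsAdmin admin = new HdfsAdmin(uri, conf);
    }
}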

Usage

From source file: alluxio.underfs.hdfs.activesync.SupportedHdfsActiveSyncProvider.java

License: Apache License

/**
 * Constructor for supported Hdfs Active Sync Provider.
 *
 * @param uri the hdfs uri
 * @param conf the hdfs conf
 * @param ufsConf Alluxio UFS configuration
 */
public SupportedHdfsActiveSyncProvider(URI uri, org.apache.hadoop.conf.Configuration conf,
        UnderFileSystemConfiguration ufsConf) throws IOException {
    mHdfsAdmin = new HdfsAdmin(uri, conf);
    mChangedFiles = new ConcurrentHashMap<>();
    mActivity = new ConcurrentHashMap<>();
    mAge = new ConcurrentHashMap<>();
    ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    mReadLock = lock.readLock();
    mWriteLock = lock.writeLock();
    mExecutorService = Executors.newFixedThreadPool(
            ufsConf.getInt(PropertyKey.MASTER_UFS_ACTIVE_SYNC_THREAD_POOL_SIZE),
            ThreadFactoryUtils.build("SupportedHdfsActiveSyncProvider-%d", true));
    mPollingThread = null;
    mUfsUriList = new CopyOnWriteArrayList<>();
    mEventMissed = false;
    mTxIdMap = new ConcurrentHashMap<>();
    mCurrentTxId = SyncInfo.INVALID_TXID;
    mActiveUfsSyncMaxActivity = ufsConf.getInt(PropertyKey.MASTER_UFS_ACTIVE_SYNC_MAX_ACTIVITIES);
    mActiveUfsSyncMaxAge = ufsConf.getInt(PropertyKey.MASTER_UFS_ACTIVE_SYNC_MAX_AGE);
    mActiveUfsPollTimeoutMs = ufsConf.getMs(PropertyKey.MASTER_UFS_ACTIVE_SYNC_POLL_TIMEOUT);
    mActiveUfsSyncEventRateInterval = ufsConf.getMs(PropertyKey.MASTER_UFS_ACTIVE_SYNC_EVENT_RATE_INTERVAL);
}

From source file: hdfs.jsr203.HadoopWatchKey.java

License: Apache License

public HadoopWatchKey(HadoopWatchService watcher, HadoopPath path) throws IOException {
    assert path != null;

    this.watcher = watcher;
    this.path = path;

    URI uri = path.getFileSystem().getHDFS().getUri();
    this.dfs = new HdfsAdmin(uri, path.getFileSystem().getHDFS().getConf());
    this.stream = this.dfs.getInotifyEventStream();
}

From source file: hdfs.jsr203.HadoopWatchService.java

License: Apache License

public HadoopWatchService(HadoopFileSystem fileSystem) throws IOException {
    this.fileSystem = fileSystem;
    HdfsAdmin dfs = new HdfsAdmin(fileSystem.getHDFS().getUri(), fileSystem.getHDFS().getConf());
    stream = dfs.getInotifyEventStream();
}
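
Both snippets above stop at obtaining the DFSInotifyEventInputStream. As a hedged sketch of what a consumer might do next (the loop and the CREATE handling are illustrative, not taken from either project), events can be read with the blocking take():

import java.io.IOException;
import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
import org.apache.hadoop.hdfs.inotify.Event;
import org.apache.hadoop.hdfs.inotify.EventBatch;
import org.apache.hadoop.hdfs.inotify.MissedEventsException;

// Illustrative consumer; 'stream' is the DFSInotifyEventInputStream obtained
// via dfs.getInotifyEventStream() in the examples above.
void consumeEvents(DFSInotifyEventInputStream stream)
        throws IOException, InterruptedException, MissedEventsException {
    while (true) {
        EventBatch batch = stream.take(); // blocks until events arrive
        for (Event event : batch.getEvents()) {
            if (event.getEventType() == Event.EventType.CREATE) {
                System.out.println("Created: " + ((Event.CreateEvent) event).getPath());
            }
        }
    }
}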

From source file: org.apache.impala.common.FileSystemUtil.java

License: Apache License

/**
 * Returns true if path p1 and path p2 are in the same encryption zone in HDFS.
 * Returns false if they are in different encryption zones or if either of the paths
 * is not on HDFS.
 */
private static boolean arePathsInSameHdfsEncryptionZone(FileSystem fs, Path p1, Path p2) throws IOException {
    // Only distributed file systems have encryption zones.
    if (!isDistributedFileSystem(p1) || !isDistributedFileSystem(p2)) {
        return false;
    }
    HdfsAdmin hdfsAdmin = new HdfsAdmin(fs.getUri(), CONF);
    EncryptionZone z1 = hdfsAdmin.getEncryptionZoneForPath(p1);
    EncryptionZone z2 = hdfsAdmin.getEncryptionZoneForPath(p2);
    if (z1 == null && z2 == null) {
        return true;
    }
    if (z1 == null || z2 == null) {
        return false;
    }
    return z1.equals(z2);
}
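
Beyond getEncryptionZoneForPath, the same HdfsAdmin handle exposes listEncryptionZones(). A brief sketch of enumerating zones follows; the printing is illustrative, not part of the Impala code above.

import java.io.IOException;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;

// Illustrative: list every encryption zone visible to this client.
void printEncryptionZones(HdfsAdmin hdfsAdmin) throws IOException {
    RemoteIterator<EncryptionZone> zones = hdfsAdmin.listEncryptionZones();
    while (zones.hasNext()) {
        EncryptionZone zone = zones.next();
        System.out.println(zone.getPath() + " -> key: " + zone.getKeyName());
    }
}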

From source file: org.apache.nifi.processors.hadoop.inotify.GetHDFSEvents.java

License: Apache License

protected HdfsAdmin getHdfsAdmin() {
    try {
        // Currently HdfsAdmin is the only public API that allows access to the inotify API. Because of this we need to have super user rights in HDFS.
        return new HdfsAdmin(getFileSystem().getUri(), getFileSystem().getConf());
    } catch (IOException e) {
        getLogger().error(
                "Unable to get an instance of HDFS admin. You must be an HDFS super user to view HDFS events.");
        throw new ProcessException(e);
    }
}
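
As a complement to the blocking take() sketched earlier, the inotify stream obtained from this HdfsAdmin can also be polled with a timeout. A hedged sketch; the one-second timeout is an arbitrary choice, not from the NiFi processor.

import java.io.IOException;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.inotify.EventBatch;
import org.apache.hadoop.hdfs.inotify.MissedEventsException;

// Illustrative: poll for events, giving up after one second if none arrive.
void pollOnce(HdfsAdmin admin)
        throws IOException, InterruptedException, MissedEventsException {
    DFSInotifyEventInputStream stream = admin.getInotifyEventStream();
    EventBatch batch = stream.poll(1, TimeUnit.SECONDS);
    if (batch != null) {
        // process batch.getEvents() ...
    }
}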