Example usage for org.apache.hadoop.hdfs.protocol CacheDirectiveEntry getStats

List of usage examples for org.apache.hadoop.hdfs.protocol CacheDirectiveEntry getStats

Introduction

On this page you can find example usage for org.apache.hadoop.hdfs.protocol CacheDirectiveEntry.getStats().

Prototype

public CacheDirectiveStats getStats() 
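
Before the full examples below, here is a minimal, self-contained sketch (not taken from either source file) showing how the prototype is typically consumed: it iterates the cache directives of an assumed, already-initialized DistributedFileSystem and prints the counters that getStats() exposes.

import java.io.IOException;

import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;

public class CacheStatsExample {
    // Prints caching progress for every directive visible to the caller.
    // "dfs" is assumed to be an already-initialized DistributedFileSystem.
    static void printDirectiveStats(DistributedFileSystem dfs) throws IOException {
        // A null filter lists every directive we are allowed to see.
        RemoteIterator<CacheDirectiveEntry> it = dfs.listCacheDirectives(null);
        while (it.hasNext()) {
            CacheDirectiveEntry entry = it.next();
            CacheDirectiveStats stats = entry.getStats();
            System.out.printf("id=%d bytes %d/%d files %d/%d expired=%b%n",
                    entry.getInfo().getId(),
                    stats.getBytesCached(), stats.getBytesNeeded(),
                    stats.getFilesCached(), stats.getFilesNeeded(),
                    stats.getHasExpired());
        }
    }
}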

Source Link

Usage

From source file:com.mellanox.r4h.DistributedFileSystem.java

License:Apache License

/**
 * List cache directives. Incrementally fetches results from the server.
 *
 * @param filter
 *            Filter parameters to use when listing the directives, null to
 *            list all directives visible to us.
 * @return A RemoteIterator which returns CacheDirectiveInfo objects.
 */
public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(CacheDirectiveInfo filter) throws IOException {
    if (filter == null) {
        filter = new CacheDirectiveInfo.Builder().build();
    }
    if (filter.getPath() != null) {
        filter = new CacheDirectiveInfo.Builder(filter)
                .setPath(new Path(getPathName(fixRelativePart(filter.getPath())))).build();
    }
    final RemoteIterator<CacheDirectiveEntry> iter = dfs.listCacheDirectives(filter);
    return new RemoteIterator<CacheDirectiveEntry>() {
        @Override
        public boolean hasNext() throws IOException {
            return iter.hasNext();
        }

        @Override
        public CacheDirectiveEntry next() throws IOException {
            // Although the paths we get back from the NameNode should always be
            // absolute, we call makeQualified to add the scheme and authority of
            // this DistributedFileSystem.
            CacheDirectiveEntry desc = iter.next();
            CacheDirectiveInfo info = desc.getInfo();
            Path p = info.getPath().makeQualified(getUri(), getWorkingDirectory());
            return new CacheDirectiveEntry(new CacheDirectiveInfo.Builder(info).setPath(p).build(),
                    desc.getStats());
        }
    };
}
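
A hedged usage sketch for the method above (not part of the Mellanox source): list the directives covering one cached path and read each entry's stats. The path is hypothetical, and "fs" stands for an assumed instance of this DistributedFileSystem.

import java.io.IOException;

import com.mellanox.r4h.DistributedFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

public class ListByPathExample {
    // "fs" is an assumed DistributedFileSystem instance; "/warehouse/t1"
    // is a made-up path used purely for illustration.
    static void listForPath(DistributedFileSystem fs) throws IOException {
        CacheDirectiveInfo filter = new CacheDirectiveInfo.Builder()
                .setPath(new Path("/warehouse/t1"))
                .build();
        RemoteIterator<CacheDirectiveEntry> entries = fs.listCacheDirectives(filter);
        while (entries.hasNext()) {
            CacheDirectiveEntry entry = entries.next();
            // Paths come back fully qualified, thanks to the makeQualified
            // call inside next() above.
            System.out.println(entry.getInfo().getPath() + ": "
                    + entry.getStats().getBytesCached() + " of "
                    + entry.getStats().getBytesNeeded() + " bytes cached");
        }
    }
}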

From source file:org.apache.impala.util.HdfsCachingUtil.java

License:Apache License

/**
 * Waits on a cache directive to either complete or stop making progress. Progress is
 * checked by polling the HDFS caching stats every
 * DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS and checking whether the
 * request's "currentBytesCached" is increasing toward "bytesNeeded".
 * If "currentBytesCached" == "bytesNeeded", or if no progress is made for
 * MAX_UNCHANGED_CACHING_REFRESH_INTERVALS consecutive refresh intervals, this
 * function returns.
 */
public static void waitForDirective(long directiveId) throws ImpalaRuntimeException {
    long bytesNeeded = 0L;
    long currentBytesCached = 0L;
    CacheDirectiveEntry cacheDir = getDirective(directiveId);
    if (cacheDir == null) return;

    bytesNeeded = cacheDir.getStats().getBytesNeeded();
    currentBytesCached = cacheDir.getStats().getBytesCached();
    if (LOG.isTraceEnabled()) {
        LOG.trace(String.format("Waiting on cache directive id: %d. Bytes " + "cached (%d) / needed (%d)",
                directiveId, currentBytesCached, bytesNeeded));
    }
    // All the bytes are cached, just return.
    if (bytesNeeded == currentBytesCached) return;

    // The refresh interval is how often HDFS will update cache directive stats. We use
    // this value to determine how frequently we should poll for changes.
    long hdfsRefreshIntervalMs = getDfs().getConf().getLong(
            DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS,
            DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS_DEFAULT);
    Preconditions.checkState(hdfsRefreshIntervalMs > 0);

    // Loop until either MAX_UNCHANGED_CACHING_REFRESH_INTERVALS have passed with no
    // changes or all required data is cached.
    int unchangedCounter = 0;
    while (unchangedCounter < MAX_UNCHANGED_CACHING_REFRESH_INTERVALS) {
        long previousBytesCached = currentBytesCached;
        cacheDir = getDirective(directiveId);
        if (cacheDir == null) return;
        currentBytesCached = cacheDir.getStats().getBytesCached();
        bytesNeeded = cacheDir.getStats().getBytesNeeded();
        if (currentBytesCached == bytesNeeded) {
            if (LOG.isTraceEnabled()) {
                LOG.trace(String.format(
                        "Cache directive id: %d has completed." + "Bytes cached (%d) / needed (%d)",
                        directiveId, currentBytesCached, bytesNeeded));
            }
            return;
        }

        if (currentBytesCached == previousBytesCached) {
            ++unchangedCounter;
        } else {
            unchangedCounter = 0;
        }
        try {
            // Sleep for the refresh interval plus a little extra to ensure a
            // full interval has completed. A value of 25% of the refresh
            // interval was arbitrarily chosen.
            Thread.sleep((long) (hdfsRefreshIntervalMs * 1.25));
        } catch (InterruptedException e) {
            /* ignore */
        }
    }
    LOG.warn(String.format(
            "No changes in cached bytes in: %d(ms). All data may not "
                    + "be cached. Final stats for cache directive id: %d. Bytes cached (%d)/needed " + "(%d)",
            hdfsRefreshIntervalMs * MAX_UNCHANGED_CACHING_REFRESH_INTERVALS, directiveId, currentBytesCached,
            bytesNeeded));
}
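
The getDirective(long) helper called above is not shown on this page. As a hedged sketch only (not the actual Impala implementation), it could look roughly like this inside the same class, reusing the getDfs() helper that waitForDirective() already relies on: it lists directives with an id filter, returns the single match or null on an empty listing, and surfaces NameNode errors as an ImpalaRuntimeException.

// Illustrative sketch, not the code from org.apache.impala.util.HdfsCachingUtil.
// Looks up a single cache directive by id. Returns null if the listing is
// empty; a NameNode error (e.g. for an unknown id) is surfaced as an
// ImpalaRuntimeException.
private static CacheDirectiveEntry getDirective(long directiveId) throws ImpalaRuntimeException {
    CacheDirectiveInfo filter = new CacheDirectiveInfo.Builder().setId(directiveId).build();
    try {
        RemoteIterator<CacheDirectiveEntry> it = getDfs().listCacheDirectives(filter);
        // An id filter matches at most one directive.
        return it.hasNext() ? it.next() : null;
    } catch (IOException e) {
        throw new ImpalaRuntimeException("Error looking up cache directive: " + directiveId, e);
    }
}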