Example usage for org.apache.hadoop.hdfs.protocol CacheDirectiveEntry getInfo

List of usage examples for org.apache.hadoop.hdfs.protocol CacheDirectiveEntry getInfo

Introduction

On this page you can find example usage for org.apache.hadoop.hdfs.protocol CacheDirectiveEntry getInfo.

Prototype

public CacheDirectiveInfo getInfo() 
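
As a starting point, here is a minimal, self-contained sketch of calling getInfo() on the entries returned by DistributedFileSystem.listCacheDirectives. It assumes a reachable HDFS cluster whose NameNode is set via fs.defaultFS; the class name GetInfoExample is ours, not part of Hadoop:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

public class GetInfoExample {
    public static void main(String[] args) throws IOException {
        // Assumes fs.defaultFS in the loaded Configuration points at an HDFS NameNode.
        DistributedFileSystem dfs =
                (DistributedFileSystem) new Path("/").getFileSystem(new Configuration());
        // A null filter lists all directives visible to the caller.
        RemoteIterator<CacheDirectiveEntry> it = dfs.listCacheDirectives(null);
        while (it.hasNext()) {
            // getInfo() exposes the directive's id, path, pool and replication.
            CacheDirectiveInfo info = it.next().getInfo();
            System.out.println(info.getId() + "\t" + info.getPool() + "\t" + info.getPath());
        }
    }
}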

Usage

From source file: com.mellanox.r4h.DistributedFileSystem.java

License: Apache License

/**
 * List cache directives. Incrementally fetches results from the server.
 *
 * @param filter
 *            Filter parameters to use when listing the directives, null to
 *            list all directives visible to us.
 * @return A RemoteIterator which returns CacheDirectiveEntry objects.
 */
public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(CacheDirectiveInfo filter) throws IOException {
    if (filter == null) {
        filter = new CacheDirectiveInfo.Builder().build();
    }
    if (filter.getPath() != null) {
        filter = new CacheDirectiveInfo.Builder(filter)
                .setPath(new Path(getPathName(fixRelativePart(filter.getPath())))).build();
    }
    final RemoteIterator<CacheDirectiveEntry> iter = dfs.listCacheDirectives(filter);
    return new RemoteIterator<CacheDirectiveEntry>() {
        @Override
        public boolean hasNext() throws IOException {
            return iter.hasNext();
        }

        @Override
        public CacheDirectiveEntry next() throws IOException {
            // Although the paths we get back from the NameNode should always be
            // absolute, we call makeQualified to add the scheme and authority of
            // this DistributedFilesystem.
            CacheDirectiveEntry desc = iter.next();
            CacheDirectiveInfo info = desc.getInfo();
            Path p = info.getPath().makeQualified(getUri(), getWorkingDirectory());
            return new CacheDirectiveEntry(new CacheDirectiveInfo.Builder(info).setPath(p).build(),
                    desc.getStats());
        }
    };
}
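
A hypothetical caller of the wrapper above might iterate the returned RemoteIterator and read each entry's info. Here fs is assumed to be the DistributedFileSystem shown above, and "testPool" is an assumed pool name:

CacheDirectiveInfo filter = new CacheDirectiveInfo.Builder()
        .setPool("testPool") // assumed pool name
        .build();
RemoteIterator<CacheDirectiveEntry> it = fs.listCacheDirectives(filter);
while (it.hasNext()) {
    CacheDirectiveInfo info = it.next().getInfo();
    // next() has already qualified the path with this filesystem's
    // scheme and authority, so this prints a full URI.
    System.out.println(info.getPath());
}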

From source file: org.apache.impala.util.HdfsCachingUtil.java

License: Apache License

/**
 * Given a cache directive ID, returns the pool the directive is cached in.
 * Returns null if no outstanding cache directive matches this ID.
 */
public static String getCachePool(long directiveId) throws ImpalaRuntimeException {
    CacheDirectiveEntry entry = getDirective(directiveId);
    return entry == null ? null : entry.getInfo().getPool();
}
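
Each of the HdfsCachingUtil examples on this page calls a private getDirective(long) helper whose body is not shown here. A plausible sketch, assuming a DistributedFileSystem handle named dfs, filters server-side by directive ID:

// Sketch only: the real helper's internals are not shown on this page.
private static CacheDirectiveEntry getDirective(long directiveId) throws ImpalaRuntimeException {
    // Filtering by ID asks the NameNode for at most the one matching directive.
    CacheDirectiveInfo filter = new CacheDirectiveInfo.Builder().setId(directiveId).build();
    try {
        RemoteIterator<CacheDirectiveEntry> it = dfs.listCacheDirectives(filter);
        if (it.hasNext()) {
            return it.next();
        }
    } catch (IOException e) {
        // A RemoteException cause signals that the directive no longer exists
        // (see validateCacheParams below).
        throw new ImpalaRuntimeException(e.getMessage(), e);
    }
    return null;
}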

From source file: org.apache.impala.util.HdfsCachingUtil.java

License: Apache License

/**
 * Given a cache directive ID, returns the replication factor for the directive.
 * Returns null if no outstanding cache directives match this ID.
 */
public static Short getCacheReplication(long directiveId) throws ImpalaRuntimeException {
    CacheDirectiveEntry entry = getDirective(directiveId);
    return entry != null ? entry.getInfo().getReplication() : null;
}

From source file: org.apache.impala.util.HdfsCachingUtil.java

License: Apache License

/**
 * Returns a boolean indicating whether the given Thrift caching operation
 * would perform an update on an already existing cache directive.
 */
public static boolean isUpdateOp(THdfsCachingOp op, Map<String, String> params) throws ImpalaRuntimeException {

    Long directiveId = Long.parseLong(params.get(CACHE_DIR_ID_PROP_NAME));
    CacheDirectiveEntry entry = getDirective(directiveId);
    Preconditions.checkNotNull(entry);

    // Verify cache pool
    if (!op.getCache_pool_name().equals(entry.getInfo().getPool())) {
        return false;
    }

    // Check cache replication factor
    if ((op.isSetReplication() && op.getReplication() != entry.getInfo().getReplication())
            || (!op.isSetReplication() && entry.getInfo()
                    .getReplication() != JniCatalogConstants.HDFS_DEFAULT_CACHE_REPLICATION_FACTOR)) {
        return true;
    }
    return false;
}

From source file: org.apache.impala.util.HdfsCachingUtil.java

License: Apache License

/**
 * Validates the properties of the chosen cache pool. Throws on error.
 */
public static void validateCachePool(THdfsCachingOp op, Long directiveId, TableName table,
        HdfsPartition partition) throws ImpalaRuntimeException {

    CacheDirectiveEntry entry = getDirective(directiveId);
    Preconditions.checkNotNull(entry);

    if (!op.getCache_pool_name().equals(entry.getInfo().getPool())) {
        throw new ImpalaRuntimeException(String.format(
                "Cannot cache partition in "
                        + "pool '%s' because it is already cached in '%s'. To change the cache "
                        + "pool for this partition, first uncache using: ALTER TABLE %s.%s " + "%sSET UNCACHED",
                op.getCache_pool_name(), entry.getInfo().getPool(), table.getDb(), table,
                // Insert partition string if partition non null
                partition != null
                        ? String.format(" PARTITION(%s) ", partition.getPartitionName().replaceAll("/", ", "))
                        : ""));
    }
}

From source file: org.apache.impala.util.HdfsCachingUtil.java

License: Apache License

/**
 * Returns true if the parameter map contains a cache directive ID that exists
 * on the NameNode. If the cache directive ID does not exist, the value is
 * removed from the parameter map, a log message is issued, and false is
 * returned. Because the value is not written back to the Hive MS by this
 * method, the result is only valid until the next metadata fetch. Lastly, the
 * cache replication factor in the parameters is updated with the value read
 * from HDFS.
 */
public static boolean validateCacheParams(Map<String, String> params) {
    Long directiveId = getCacheDirectiveId(params);
    if (directiveId == null)
        return false;

    CacheDirectiveEntry entry = null;
    try {
        entry = getDirective(directiveId);
    } catch (ImpalaRuntimeException e) {
        if (e.getCause() != null && e.getCause() instanceof RemoteException) {
            // This exception signals that the cache directive no longer exists.
            LOG.error("Cache directive does not exist", e);
            params.remove(CACHE_DIR_ID_PROP_NAME);
            params.remove(CACHE_DIR_REPLICATION_PROP_NAME);
        } else {
            // This exception signals that there was a connection problem with HDFS.
            LOG.error("IO Exception, possible connectivity issues with HDFS", e);
        }
        return false;
    }
    Preconditions.checkNotNull(entry);

    // On the upgrade path the property might not exist, if it exists
    // and is different from the one from the meta store, issue a warning.
    String replicationFactor = params.get(CACHE_DIR_REPLICATION_PROP_NAME);
    if (replicationFactor != null && Short.parseShort(replicationFactor) != entry.getInfo().getReplication()) {
        LOG.info("Replication factor for entry in HDFS differs from value in Hive MS: "
                + entry.getInfo().getPath().toString() + " " + entry.getInfo().getReplication().toString()
                + " != " + params.get(CACHE_DIR_REPLICATION_PROP_NAME));
    }
    params.put(CACHE_DIR_REPLICATION_PROP_NAME, String.valueOf(entry.getInfo().getReplication()));
    return true;
}
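
As a closing usage sketch, a catalog loader might run validateCacheParams over a table's metastore parameter map before trusting its directive ID. Both msTbl (a Hive metastore Table object) and the surrounding context are assumed here; getCacheDirectiveId is the helper already referenced in the code above:

// Hypothetical caller; msTbl is an assumed Hive metastore Table object.
Map<String, String> params = msTbl.getParameters();
if (HdfsCachingUtil.validateCacheParams(params)) {
    // The directive still exists, and params now carries the replication
    // factor just read from HDFS.
    Long directiveId = HdfsCachingUtil.getCacheDirectiveId(params);
    LOG.info("Table " + msTbl.getTableName() + " is cached under directive " + directiveId);
}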