Example usage for org.apache.hadoop.fs BlockLocation getLength

List of usage examples for org.apache.hadoop.fs BlockLocation getLength

Introduction

This page collects example usages of getLength() from org.apache.hadoop.fs.BlockLocation.

Prototype

public long getLength() 

Document

Get the length of the block
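
Before the source-file examples, here is a minimal, self-contained sketch (not taken from any of the projects below) showing where getLength() typically comes from: FileSystem.getFileBlockLocations() returns the BlockLocation array for a file, and each entry reports its offset and length. The path /tmp/example.txt is a placeholder.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BlockLengthExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path path = new Path("/tmp/example.txt"); // placeholder path
        FileStatus status = fs.getFileStatus(path);
        // Block locations covering the whole file.
        BlockLocation[] blocks = fs.getFileBlockLocations(status, 0, status.getLen());
        long total = 0;
        for (BlockLocation block : blocks) {
            System.out.println("offset=" + block.getOffset() + " length=" + block.getLength());
            total += block.getLength();
        }
        // The block lengths should sum to the file length.
        System.out.println("total=" + total + " fileLength=" + status.getLen());
    }
}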

Usage

From source file: org.apache.drill.exec.store.TestAffinityCalculator.java

License: Apache License

@Test
public void testBuildRangeMap() {
    BlockLocation[] blocks = buildBlockLocations(new String[4], 256 * 1024 * 1024);
    long tA = System.nanoTime();
    ImmutableRangeMap.Builder<Long, BlockLocation> blockMapBuilder = new ImmutableRangeMap.Builder<Long, BlockLocation>();
    for (BlockLocation block : blocks) {
        long start = block.getOffset();
        long end = start + block.getLength();
        Range<Long> range = Range.closedOpen(start, end);
        blockMapBuilder = blockMapBuilder.put(range, block);
    }
    ImmutableRangeMap<Long, BlockLocation> map = blockMapBuilder.build();
    long tB = System.nanoTime();
    System.out.println(String.format("Took %f ms to build range map", (tB - tA) / 1e6));
}
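
As a follow-up to the test above (not part of the Drill source), the finished range map can be queried with RangeMap.get() to find the block that contains a given byte offset; the offset used here is arbitrary.

BlockLocation blockAtOffset = map.get(300L * 1024 * 1024); // arbitrary offset within the file
if (blockAtOffset != null) {
    System.out.println("Offset falls in block starting at " + blockAtOffset.getOffset());
}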

From source file: org.apache.ignite.internal.processors.hadoop.impl.delegate.HadoopIgfsSecondaryFileSystemDelegateImpl.java

License: Apache License

/**
 * Convert Hadoop affinity block location into IGFS affinity block location.
 *
 * @param block Hadoop affinity block location.
 * @return IGFS affinity block location.
 */
private IgfsBlockLocation convertBlockLocation(BlockLocation block) {
    try {
        String[] names = block.getNames();
        String[] hosts = block.getHosts();

        return new IgfsBlockLocationImpl(block.getOffset(), block.getLength(), Arrays.asList(names),
                Arrays.asList(hosts));
    } catch (IOException e) {
        throw handleSecondaryFsError(e, "Failed convert block location: " + block);
    }
}

From source file: org.apache.impala.catalog.HdfsTable.java

License: Apache License

/**
 * Drops and re-loads the block metadata for all partitions in 'partsByPath' whose
 * location is under the given 'dirPath'. It involves the following steps:
 * - Clear the current block metadata of the partitions.
 * - Call FileSystem.listStatus() on 'dirPath' to fetch the BlockLocations for each
 *   file under it recursively.
 * - For every valid data file, map it to a partition from 'partsByPath' (if one exists)
 *   and enumerate all its blocks and their corresponding hosts and disk IDs.
 * Requires that 'dirPath' and all paths in 'partsByPath' have consistent qualification
 * (either fully qualified or unqualified), for isDescendantPath().
 * TODO: Split this method into more logical methods for cleaner code.
 */
private void loadBlockMetadata(Path dirPath, HashMap<Path, List<HdfsPartition>> partsByPath) {
    try {
        FileSystem fs = dirPath.getFileSystem(CONF);
        // No need to load blocks for empty partitions list.
        if (partsByPath.size() == 0 || !fs.exists(dirPath))
            return;
        if (LOG.isTraceEnabled()) {
            LOG.trace("Loading block md for " + name_ + " directory " + dirPath.toString());
        }

        // Clear the state of partitions under dirPath since they are going to be updated
        // based on the current snapshot of files in the directory.
        List<HdfsPartition> dirPathPartitions = partsByPath.get(dirPath);
        if (dirPathPartitions != null) {
            // The dirPath is a partition directory. This means the path is the root of an
            // unpartitioned table, or the path of at least one partition.
            for (HdfsPartition partition : dirPathPartitions) {
                partition.setFileDescriptors(new ArrayList<FileDescriptor>());
            }
        } else {
            // The dirPath is not a partition directory. We expect it to be an ancestor of
            // partition paths (e.g., the table root). Clear all partitions whose paths are
            // a descendant of dirPath.
            for (Map.Entry<Path, List<HdfsPartition>> entry : partsByPath.entrySet()) {
                Path partDir = entry.getKey();
                if (!FileSystemUtil.isDescendantPath(partDir, dirPath))
                    continue;
                for (HdfsPartition partition : entry.getValue()) {
                    partition.setFileDescriptors(new ArrayList<FileDescriptor>());
                }
            }
        }

        // For file systems that do not support BlockLocation API, we manually synthesize
        // block location metadata based on file formats.
        if (!FileSystemUtil.supportsStorageIds(fs)) {
            synthesizeBlockMetadata(fs, dirPath, partsByPath);
            return;
        }

        int unknownDiskIdCount = 0;
        RemoteIterator<LocatedFileStatus> fileStatusIter = fs.listFiles(dirPath, true);
        while (fileStatusIter.hasNext()) {
            LocatedFileStatus fileStatus = fileStatusIter.next();
            if (!FileSystemUtil.isValidDataFile(fileStatus))
                continue;
            // Find the partition that this file belongs (if any).
            Path partPathDir = fileStatus.getPath().getParent();
            Preconditions.checkNotNull(partPathDir);

            List<HdfsPartition> partitions = partsByPath.get(partPathDir);
            // Skip if this file does not belong to any known partition.
            if (partitions == null) {
                if (LOG.isTraceEnabled()) {
                    LOG.trace("File " + fileStatus.getPath().toString() + " doesn't correspond "
                            + " to a known partition. Skipping metadata load for this file.");
                }
                continue;
            }
            String fileName = fileStatus.getPath().getName();
            FileDescriptor fd = new FileDescriptor(fileName, fileStatus.getLen(),
                    fileStatus.getModificationTime());
            BlockLocation[] locations = fileStatus.getBlockLocations();
            String partPathDirName = partPathDir.toString();
            for (BlockLocation loc : locations) {
                Set<String> cachedHosts = Sets.newHashSet(loc.getCachedHosts());
                // Enumerate all replicas of the block, adding any unknown hosts
                // to hostIndex_. We pick the network address from getNames() and
                // map it to the corresponding hostname from getHosts().
                List<BlockReplica> replicas = Lists.newArrayListWithExpectedSize(loc.getNames().length);
                for (int i = 0; i < loc.getNames().length; ++i) {
                    TNetworkAddress networkAddress = BlockReplica.parseLocation(loc.getNames()[i]);
                    replicas.add(new BlockReplica(hostIndex_.getIndex(networkAddress),
                            cachedHosts.contains(loc.getHosts()[i])));
                }
                FileBlock currentBlock = new FileBlock(loc.getOffset(), loc.getLength(), replicas);
                THdfsFileBlock tHdfsFileBlock = currentBlock.toThrift();
                fd.addThriftFileBlock(tHdfsFileBlock);
                unknownDiskIdCount += loadDiskIds(loc, tHdfsFileBlock);
            }
            if (LOG.isTraceEnabled()) {
                LOG.trace("Adding file md dir: " + partPathDirName + " file: " + fileName);
            }
            // Update the partitions' metadata that this file belongs to.
            for (HdfsPartition partition : partitions) {
                partition.getFileDescriptors().add(fd);
                numHdfsFiles_++;
                totalHdfsBytes_ += fd.getFileLength();
            }
        }
        if (unknownDiskIdCount > 0) {
            if (LOG.isWarnEnabled()) {
                LOG.warn("Unknown disk id count for filesystem " + fs + ":" + unknownDiskIdCount);
            }
        }
    } catch (IOException e) {
        throw new RuntimeException(
                "Error loading block metadata for directory " + dirPath.toString() + ": " + e.getMessage(), e);
    }
}

From source file: org.apache.solr.store.hdfs.HdfsLocalityReporter.java

License: Apache License

/**
 * Provide statistics on HDFS block locality, both in terms of bytes and block counts.
 */
@Override
public NamedList getStatistics() {
    long totalBytes = 0;
    long localBytes = 0;
    int totalCount = 0;
    int localCount = 0;

    for (Iterator<HdfsDirectory> iterator = cache.keySet().iterator(); iterator.hasNext();) {
        HdfsDirectory hdfsDirectory = iterator.next();

        if (hdfsDirectory.isClosed()) {
            iterator.remove();
        } else {
            try {
                refreshDirectory(hdfsDirectory);
                Map<FileStatus, BlockLocation[]> blockMap = cache.get(hdfsDirectory);

                // For every block in every file in this directory, count it
                for (BlockLocation[] locations : blockMap.values()) {
                    for (BlockLocation bl : locations) {
                        totalBytes += bl.getLength();
                        totalCount++;

                        if (Arrays.asList(bl.getHosts()).contains(hostname)) {
                            localBytes += bl.getLength();
                            localCount++;
                        }
                    }
                }
            } catch (IOException e) {
                logger.warn("Could not retrieve locality information for {} due to exception: {}",
                        hdfsDirectory.getHdfsDirPath(), e);
            }
        }
    }

    return createStatistics(totalBytes, localBytes, totalCount, localCount);
}

From source file: org.apache.tajo.storage.fragment.FileFragment.java

License: Apache License

public FileFragment(String tableName, Path uri, BlockLocation blockLocation) throws IOException {
    this.set(tableName, uri, blockLocation.getOffset(), blockLocation.getLength(), blockLocation.getHosts(),
            null);
}

From source file: org.springframework.data.hadoop.store.split.AbstractSplitter.java

License: Apache License

/**
 * Gets the block index.
 *
 * @param blocks the block locations
 * @param offset the offset
 * @return the block index
 * @throws IllegalArgumentException if offset is outside of blocks
 */
protected int getBlockIndex(BlockLocation[] blocks, long offset) {
    for (int i = 0; i < blocks.length; i++) {
        if ((blocks[i].getOffset() <= offset) && (offset < blocks[i].getOffset() + blocks[i].getLength())) {
            return i;
        }
    }
    BlockLocation block = blocks[blocks.length - 1];
    long length = block.getOffset() + block.getLength() - 1;
    throw new IllegalArgumentException("Offset " + offset + " is outside of file with length=" + length);
}
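
A hedged usage sketch (not from AbstractSplitter itself): the returned index is typically used to work out how much of the current block remains for the split being built; blocks and splitStart are assumed to come from the surrounding split computation.

int blockIndex = getBlockIndex(blocks, splitStart);
// Bytes remaining in the block that contains the split start.
long bytesLeftInBlock = blocks[blockIndex].getOffset() + blocks[blockIndex].getLength() - splitStart;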

From source file: org.springframework.data.hadoop.store.split.AbstractSplitterTests.java

License: Apache License

protected static BlockLocation[] findBlocks(ArrayList<BlockLocation> blocks, long start, long length) {
    final ArrayList<BlockLocation> ret = new ArrayList<BlockLocation>();
    for (BlockLocation block : blocks) {
        // Keep every block that overlaps the range [start, start + length).
        if (!((start >= (block.getOffset() + block.getLength())) || ((start + length) <= block.getOffset()))) {
            ret.add(block);
        }
    }
    return ret.toArray(new BlockLocation[0]);
}