Example usage for org.apache.hadoop.fs BlockLocation BlockLocation

List of usage examples for org.apache.hadoop.fs BlockLocation BlockLocation

Introduction

On this page you can find example usage for org.apache.hadoop.fs BlockLocation BlockLocation.

Prototype

public BlockLocation(String[] names, String[] hosts, long offset, long length) 

Document

Constructor with names, hosts, offset, and length
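
A minimal sketch of calling this constructor directly; the node names, hosts, offset, and block length below are illustrative values, not taken from any of the examples on this page:

BlockLocation location = new BlockLocation(
        new String[] { "datanode1:50010" }, // names: "hostname:port" of the data nodes holding the block
        new String[] { "datanode1" },       // hosts: hostnames of the data nodes
        0L,                                 // offset of the block within the file
        134217728L);                        // length of the block (128 MB)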

Usage

From source file:org.apache.drill.exec.store.TestAffinityCalculator.java

License:Apache License

public BlockLocation[] buildBlockLocations2(String[] hosts, long blockSize) {
    String[] names = new String[hosts.length];

    for (int i = 0; i < hosts.length; i++) {
        hosts[i] = "host" + i;
        names[i] = "host:" + port;
    }

    BlockLocation[] blockLocations = new BlockLocation[4];
    blockLocations[0] = new BlockLocation(new String[] { names[0] }, new String[] { hosts[0] }, 0, blockSize);
    blockLocations[1] = new BlockLocation(new String[] { names[1] }, new String[] { hosts[1] }, blockSize,
            blockSize);
    blockLocations[3] = new BlockLocation(new String[] { names[3] }, new String[] { hosts[3] }, blockSize * 2,
            blockSize);
    blockLocations[2] = new BlockLocation(new String[] { names[2] }, new String[] { hosts[2] }, blockSize * 3,
            blockSize);

    return blockLocations;
}

From source file:org.apache.druid.indexer.hadoop.DatasourceInputFormatTest.java

License:Apache License

@Before
public void setUp() throws Exception {
    segments1 = ImmutableList.of(
            WindowedDataSegment.of(new DataSegment("test1", Intervals.of("2000/3000"), "ver",
                    ImmutableMap.of("type", "local", "path", "/tmp/index1.zip"), ImmutableList.of("host"),
                    ImmutableList.of("visited_sum", "unique_hosts"), NoneShardSpec.instance(), 9, 2)),
            WindowedDataSegment.of(new DataSegment("test1", Intervals.of("2050/3000"), "ver",
                    ImmutableMap.of("type", "hdfs", "path", "/tmp/index2.zip"), ImmutableList.of("host"),
                    ImmutableList.of("visited_sum", "unique_hosts"), NoneShardSpec.instance(), 9, 11)),
            WindowedDataSegment.of(new DataSegment("test1", Intervals.of("2030/3000"), "ver",
                    ImmutableMap.of("type", "hdfs", "path", "/tmp/index3.zip"), ImmutableList.of("host"),
                    ImmutableList.of("visited_sum", "unique_hosts"), NoneShardSpec.instance(), 9, 4)));

    segments2 = ImmutableList.of(WindowedDataSegment.of(new DataSegment("test2", Intervals.of("2000/3000"),
            "ver", ImmutableMap.of("type", "local", "path", "/tmp/index4.zip"), ImmutableList.of("host"),
            ImmutableList.of("visited_sum", "unique_hosts"), NoneShardSpec.instance(), 9, 2)));

    Path path1 = new Path(JobHelper.getURIFromSegment(segments1.get(0).getSegment()));
    Path path2 = new Path(JobHelper.getURIFromSegment(segments1.get(1).getSegment()));
    Path path3 = new Path(JobHelper.getURIFromSegment(segments1.get(2).getSegment()));
    Path path4 = new Path(JobHelper.getURIFromSegment(segments2.get(0).getSegment()));

    // dummy locations for test
    BlockLocation[] locations1 = { new BlockLocation(null, new String[] { "s1", "s2" }, 0, 600),
            new BlockLocation(null, new String[] { "s2", "s3" }, 600, 400) };
    BlockLocation[] locations2 = { new BlockLocation(null, new String[] { "s1", "s2" }, 0, 1000),
            new BlockLocation(null, new String[] { "s1", "s3" }, 1000, 1200),
            new BlockLocation(null, new String[] { "s2", "s3" }, 2200, 1100),
            new BlockLocation(null, new String[] { "s1", "s2" }, 3300, 700) };
    BlockLocation[] locations3 = { new BlockLocation(null, new String[] { "s2", "s3" }, 0, 500) };
    BlockLocation[] locations4 = { new BlockLocation(null, new String[] { "s2", "s3" }, 0, 500) };
    this.locations = ImmutableList.of(
            new LocatedFileStatus(1000, false, 0, 0, 0, 0, null, null, null, null, path1, locations1),
            new LocatedFileStatus(4000, false, 0, 0, 0, 0, null, null, null, null, path2, locations2),
            new LocatedFileStatus(500, false, 0, 0, 0, 0, null, null, null, null, path3, locations3),
            new LocatedFileStatus(500, false, 0, 0, 0, 0, null, null, null, null, path4, locations4));

    config = populateConfiguration(new JobConf(), segments1, 0);
    context = EasyMock.createMock(JobContext.class);
    EasyMock.expect(context.getConfiguration()).andReturn(config);
    EasyMock.replay(context);
}

From source file:org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem.java

License:Apache License

/**
 * Convert IGFS affinity block location into Hadoop affinity block location.
 *
 * @param block IGFS affinity block location.
 * @return Hadoop affinity block location.
 */
private BlockLocation convert(IgfsBlockLocation block) {
    Collection<String> names = block.names();
    Collection<String> hosts = block.hosts();

    return new BlockLocation(names.toArray(new String[names.size()]) /* hostname:portNumber of data nodes */,
            hosts.toArray(new String[hosts.size()]) /* hostnames of data nodes */, block.start(),
            block.length()) {
        @Override
        public String toString() {
            try {
                return "BlockLocation [offset=" + getOffset() + ", length=" + getLength() + ", hosts="
                        + Arrays.asList(getHosts()) + ", names=" + Arrays.asList(getNames()) + ']';
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }
    };
}

From source file:org.apache.parquet.hadoop.TestInputFormat.java

License:Apache License

private void withHDFSBlockSize(long... blockSizes) {
    hdfsBlocks = new BlockLocation[blockSizes.length];
    long offset = 0;
    for (int i = 0; i < blockSizes.length; i++) {
        long blockSize = blockSizes[i];
        hdfsBlocks[i] = new BlockLocation(new String[0],
                new String[] { "foo" + i + ".datanode", "bar" + i + ".datanode" }, offset, blockSize);
        offset += blockSize;
    }
    fileStatus = new FileStatus(offset, false, 2, 50, 0, new Path("hdfs://foo.namenode:1234/bar"));
}

From source file:org.gridgain.grid.ggfs.hadoop.v1.GridGgfsHadoopFileSystem.java

License:Open Source License

/**
 * Convert GGFS affinity block location into Hadoop affinity block location.
 *
 * @param block GGFS affinity block location.
 * @return Hadoop affinity block location.
 */
private BlockLocation convert(GridGgfsBlockLocation block) {
    Collection<String> names = block.names();
    Collection<String> hosts = block.hosts();

    return new BlockLocation(names.toArray(new String[names.size()]) /* hostname:portNumber of data nodes */,
            hosts.toArray(new String[hosts.size()]) /* hostnames of data nodes */, block.start(),
            block.length()) {
        @Override
        public String toString() {
            try {
                return "BlockLocation [offset=" + getOffset() + ", length=" + getLength() + ", hosts="
                        + Arrays.asList(getHosts()) + ", names=" + Arrays.asList(getNames()) + ']';
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }
    };
}

From source file:org.springframework.data.hadoop.store.split.AbstractSplitterTests.java

License:Apache License

protected static Path mockWithFileSystem(int blockCount, long blockSize, long extraBlockSize) throws Exception {
    final ArrayList<BlockLocation> blocks = new ArrayList<BlockLocation>();
    long offset = 0;
    int i = 0;
    for (; i < blockCount; i++) {
        blocks.add(new BlockLocation(new String[] { "names" + i }, new String[] { "hosts" + i }, offset,
                blockSize));
        offset += blockSize;
    }

    // extra just means that we add a non full last block
    if (extraBlockSize > 0 && extraBlockSize < blockSize) {
        blocks.add(new BlockLocation(new String[] { "names" + i }, new String[] { "hosts" + i }, offset,
                extraBlockSize));
        offset += extraBlockSize;
    }

    FileStatus mStatus = mock(FileStatus.class);
    Path mPath = mock(Path.class);
    FileSystem mFs = mock(FileSystem.class);
    when(mStatus.getLen()).thenReturn(offset);
    when(mStatus.getBlockSize()).thenReturn(blockSize);
    when(mFs.getFileStatus(mPath)).thenReturn(mStatus);

    when(mFs.getFileBlockLocations((FileStatus) any(), anyLong(), anyLong()))
            .thenAnswer(new Answer<BlockLocation[]>() {

                @Override
                public BlockLocation[] answer(InvocationOnMock invocation) throws Throwable {
                    Object[] arguments = invocation.getArguments();
                    return findBlocks(blocks, (Long) arguments[1], (Long) arguments[2]);
                }
            });

    when(mPath.getFileSystem((Configuration) any())).thenReturn(mFs);
    return mPath;
}

From source file:org.xtreemfs.common.clients.hadoop.XtreemFSFileSystem.java

License:BSD License

@Override
public BlockLocation[] getFileBlockLocations(FileStatus file, long start, long length) throws IOException {
    if (file == null) {
        return null;
    }
    Volume xtreemfsVolume = getVolumeFromPath(file.getPath());
    String pathString = preparePath(file.getPath(), xtreemfsVolume);
    List<StripeLocation> stripeLocations = xtreemfsVolume.getStripeLocations(userCredentials, pathString, start,
            length);

    BlockLocation[] result = new BlockLocation[stripeLocations.size()];
    for (int i = 0; i < result.length; ++i) {
        result[i] = new BlockLocation(stripeLocations.get(i).getUuids(), stripeLocations.get(i).getHostnames(),
                stripeLocations.get(i).getStartSize(), stripeLocations.get(i).getLength());
    }
    return result;
}

From source file:parquet.hadoop.TestInputFormat.java

License:Apache License

private void withHDFSBlockSize(long... blockSizes) {
    hdfsBlocks = new BlockLocation[blockSizes.length];
    long offset = 0;
    for (int i = 0; i < blockSizes.length; i++) {
        long blockSize = blockSizes[i];
        hdfsBlocks[i] = new BlockLocation(new String[0],
                new String[] { "foo" + i + ".datanode", "bar" + i + ".datanode" }, offset, blockSize);
        offset += blockSize;
    }
}

From source file:tachyon.hadoop.AbstractTFS.java

License:Apache License

@Override
public BlockLocation[] getFileBlockLocations(FileStatus file, long start, long len) throws IOException {
    if (file == null) {
        return null;
    }
    if (mStatistics != null) {
        mStatistics.incrementReadOps(1);
    }

    TachyonURI path = new TachyonURI(Utils.getPathWithoutScheme(file.getPath()));
    fromHdfsToTachyon(path);
    long fileId = mTFS.getFileId(path);
    if (fileId == -1) {
        throw new FileNotFoundException("File does not exist: " + file.getPath());
    }

    List<BlockLocation> blockLocations = new ArrayList<BlockLocation>();
    List<FileBlockInfo> blocks = mTFS.getFileBlocks(fileId);
    for (int k = 0; k < blocks.size(); k++) {
        FileBlockInfo info = blocks.get(k);
        long offset = info.getOffset();
        long end = offset + info.blockInfo.getLength();
        // Check if there is any overlapping between [start, start+len] and [offset, end]
        if (end >= start && offset <= start + len) {
            ArrayList<String> names = new ArrayList<String>();
            ArrayList<String> hosts = new ArrayList<String>();
            List<NetAddress> addrs = Lists.newArrayList();
            // add the existing in-memory block locations first
            for (tachyon.thrift.BlockLocation location : info.getBlockInfo().getLocations()) {
                addrs.add(location.getWorkerAddress());
            }
            // then add under file system location
            addrs.addAll(info.getUfsLocations());
            for (NetAddress addr : addrs) {
                // Name format is "hostname:data transfer port"
                String name = addr.host + ":" + addr.dataPort;
                LOG.debug("getFileBlockLocations : adding name : '" + name + "");
                names.add(name);
                hosts.add(addr.host);
            }
            blockLocations.add(new BlockLocation(CommonUtils.toStringArray(names),
                    CommonUtils.toStringArray(hosts), offset, info.blockInfo.getLength()));
        }
    }

    BlockLocation[] ret = new BlockLocation[blockLocations.size()];
    blockLocations.toArray(ret);
    return ret;
}