Example usage for org.apache.hadoop.fs FSDataInputStream FSDataInputStream

Introduction

This page collects example usages of the org.apache.hadoop.fs.FSDataInputStream constructor FSDataInputStream(InputStream in).

Prototype

public FSDataInputStream(InputStream in) 
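
The constructor wraps an existing InputStream, but Hadoop expects the wrapped stream to also implement Seekable and PositionedReadable; that is why the examples below pass adapter streams (ByteBufferInputStream, ForwardOnlySeekable, LAFSInputStream, and so on) rather than a plain java.io.InputStream. Below is a minimal sketch of direct usage; the path /tmp/example.txt and the default-configuration FileSystem are assumptions for illustration only, not taken from the examples on this page.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FsDataInputStreamSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // FileSystem.open() already returns an FSDataInputStream, which itself
        // implements Seekable and PositionedReadable, so it is a valid argument
        // for the FSDataInputStream(InputStream) constructor.
        try (FSDataInputStream raw = fs.open(new Path("/tmp/example.txt")); // hypothetical path
             FSDataInputStream wrapped = new FSDataInputStream(raw)) {
            byte[] buf = new byte[64];
            int n = wrapped.read(0L, buf, 0, buf.length); // positioned read at offset 0
            wrapped.seek(0);                              // Seekable: rewind to the start
            System.out.println("read " + n + " bytes");
        }
    }
}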

Usage

From source file:org.commoncrawl.util.shared.RiceCoder.java

License:Apache License

public static void main(String[] args) {
    long foo = Long.MIN_VALUE;
    RiceCoder test = new RiceCoder(54, true);
    test.addItem(0);
    test.addItem(1);
    test.addItem(-1);
    test.addItem(Long.MAX_VALUE);
    test.addItem(Long.MAX_VALUE - 1);
    test.addItem(Long.MIN_VALUE + 1);
    test.addItem(Long.MIN_VALUE);

    RiceCodeReader testReader = new RiceCodeReader(54, test.nbits, ByteBuffer.wrap(test.bits), true);

    try {
        Assert.assertTrue(testReader.nextValue() == 0);
        Assert.assertTrue(testReader.nextValue() == 1);
        Assert.assertTrue(testReader.nextValue() == -1);
        Assert.assertTrue(testReader.nextValue() == Long.MAX_VALUE);
        Assert.assertTrue(testReader.nextValue() == Long.MAX_VALUE - 1);
        Assert.assertTrue(testReader.nextValue() == Long.MIN_VALUE + 1);
        Assert.assertTrue(testReader.nextValue() == Long.MIN_VALUE);

    } catch (IOException e1) {
        e1.printStackTrace();
    }

    try {
        RiceCodeReader newReader = new RiceCodeReader(54, test.nbits,
                new FSDataInputStream(new ByteBufferInputStream(ByteBuffer.wrap(test.bits))), 0, true);

        try {
            Assert.assertTrue(newReader.nextValue() == 0);
            Assert.assertTrue(newReader.nextValue() == 1);
            Assert.assertTrue(newReader.nextValue() == -1);
            Assert.assertTrue(newReader.nextValue() == Long.MAX_VALUE);
            Assert.assertTrue(newReader.nextValue() == Long.MAX_VALUE - 1);
            Assert.assertTrue(newReader.nextValue() == Long.MIN_VALUE + 1);
            Assert.assertTrue(newReader.nextValue() == Long.MIN_VALUE);

        } catch (IOException e1) {
            e1.printStackTrace();
        }

    } catch (IOException e) {
        LOG.error(CCStringUtils.stringifyException(e));
    }
}

From source file:org.gridgain.grid.ggfs.hadoop.v1.GridGgfsHadoopFileSystem.java

License:Open Source License

/** {@inheritDoc} */
@Override
public FSDataInputStream open(Path f, int bufSize) throws IOException {
    A.notNull(f, "f");

    enterBusy();

    try {
        GridGgfsPath path = convert(f);
        GridGgfsMode mode = mode(path);

        if (mode == PROXY) {
            if (secondaryFs == null) {
                assert mgmt;

                throw new IOException("Failed to open file (secondary file system is not initialized): " + f);
            }

            FSDataInputStream is = secondaryFs.open(toSecondary(f), bufSize);

            if (clientLog.isLogEnabled()) {
                // At this point we do not know file size, so we perform additional request to remote FS to get it.
                FileStatus status = secondaryFs.getFileStatus(toSecondary(f));

                long size = status != null ? status.getLen() : -1;

                long logId = GridGgfsLogger.nextId();

                clientLog.logOpen(logId, path, PROXY, bufSize, size);

                return new FSDataInputStream(new GridGgfsHadoopProxyInputStream(is, clientLog, logId));
            } else
                return is;
        } else {
            GridGgfsHadoopStreamDelegate stream = seqReadsBeforePrefetchOverride
                    ? rmtClient.open(path, seqReadsBeforePrefetch)
                    : rmtClient.open(path);

            long logId = -1;

            if (clientLog.isLogEnabled()) {
                logId = GridGgfsLogger.nextId();

                clientLog.logOpen(logId, path, mode, bufSize, stream.length());
            }

            if (LOG.isDebugEnabled())
                LOG.debug("Opening input stream [thread=" + Thread.currentThread().getName() + ", path=" + path
                        + ", bufSize=" + bufSize + ']');

            GridGgfsHadoopInputStream ggfsIn = new GridGgfsHadoopInputStream(stream, stream.length(), bufSize,
                    LOG, clientLog, logId);

            if (LOG.isDebugEnabled())
                LOG.debug("Opened input stream [path=" + path + ", delegate=" + stream + ']');

            return new FSDataInputStream(ggfsIn);
        }
    } finally {
        leaveBusy();
    }
}

From source file:org.gridgain.grid.ggfs.hadoop.v2.GridGgfsHadoopFileSystem.java

License:Open Source License

/** {@inheritDoc} */
@Override
public FSDataInputStream open(Path f, int bufSize) throws IOException {
    A.notNull(f, "f");

    enterBusy();

    try {
        GridGgfsPath path = convert(f);
        GridGgfsMode mode = modeRslvr.resolveMode(path);

        if (mode == PROXY) {
            FSDataInputStream is = secondaryFs.open(toSecondary(f), bufSize);

            if (clientLog.isLogEnabled()) {
                // At this point we do not know file size, so we perform additional request to remote FS to get it.
                FileStatus status = secondaryFs.getFileStatus(toSecondary(f));

                long size = status != null ? status.getLen() : -1;

                long logId = GridGgfsLogger.nextId();

                clientLog.logOpen(logId, path, PROXY, bufSize, size);

                return new FSDataInputStream(new GridGgfsHadoopProxyInputStream(is, clientLog, logId));
            } else
                return is;
        } else {
            GridGgfsHadoopStreamDelegate stream = seqReadsBeforePrefetchOverride
                    ? rmtClient.open(path, seqReadsBeforePrefetch)
                    : rmtClient.open(path);

            long logId = -1;

            if (clientLog.isLogEnabled()) {
                logId = GridGgfsLogger.nextId();

                clientLog.logOpen(logId, path, mode, bufSize, stream.length());
            }

            if (LOG.isDebugEnabled())
                LOG.debug("Opening input stream [thread=" + Thread.currentThread().getName() + ", path=" + path
                        + ", bufSize=" + bufSize + ']');

            GridGgfsHadoopInputStream ggfsIn = new GridGgfsHadoopInputStream(stream, stream.length(), bufSize,
                    LOG, clientLog, logId);

            if (LOG.isDebugEnabled())
                LOG.debug("Opened input stream [path=" + path + ", delegate=" + stream + ']');

            return new FSDataInputStream(ggfsIn);
        }
    } finally {
        leaveBusy();
    }
}

From source file:org.kitesdk.morphline.hadoop.rcfile.SingleStreamFileSystem.java

License:Apache License

public SingleStreamFileSystem(InputStream inputStream, Path path) throws IOException {
    this.inputStream = new FSDataInputStream(new ForwardOnlySeekable(inputStream));
    this.path = path;
    // Since this is a stream, we don't know its length, so we set it
    // to the maximum possible size.
    this.fileStatus = new FileStatus(Long.MAX_VALUE, false, 0, 0, 0, path);
}

From source file:org.lafs.hdfs.LAFS.java

License:Apache License

@Override
public FSDataInputStream open(Path path, int bufferSize) throws IOException {
    URL url = new URL(httpURI.toString() + "/uri/" + getLAFSPath(path));

    return new FSDataInputStream(new LAFSInputStream(url));
}

From source file:org.mrgeo.data.shp.SeekableHdfsInput.java

License:Apache License

public SeekableHdfsInput(Path p) throws IOException {
    src = new FSDataInputStream(HadoopFileUtils.open(p));

    //    FileSystem fs = HadoopFileUtils.getFileSystem(p);
    //    src = fs.open(p);
}

From source file:org.mrgeo.imageio.HdfsSeekableStream.java

License:Apache License

public HdfsSeekableStream(Path path) throws IOException {
    //FileSystem fs = HadoopFileUtils.getFileSystem(path);

    stream = new FSDataInputStream(HadoopFileUtils.open(path)); // fs.open(path);

    if (System.getProperty("mrgeo.profile", "false").compareToIgnoreCase("true") == 0) {
        profile = true;
        if (profile) {
            LeakChecker.instance().add(this, ExceptionUtils.getFullStackTrace(
                    new Throwable("HdfsMrsImageReader creation stack(ignore the Throwable...)")));
        }

    } else {
        profile = false;
    }

}

From source file:org.xtreemfs.common.clients.hadoop.XtreemFSFileSystem.java

License:BSD License

@Override
public FSDataInputStream open(Path path, int bufferSize) throws IOException {
    Volume xtreemfsVolume = getVolumeFromPath(path);
    final String pathString = preparePath(path, xtreemfsVolume);
    final FileHandle fileHandle = xtreemfsVolume.openFile(userCredentials, pathString,
            SYSTEM_V_FCNTL.SYSTEM_V_FCNTL_H_O_RDONLY.getNumber(), 0);
    if (Logging.isDebug()) {
        Logging.logMessage(Logging.LEVEL_DEBUG, this, "Opening file %s", pathString);
    }
    statistics.incrementReadOps(1);
    return new FSDataInputStream(new XtreemFSInputStream(userCredentials, fileHandle, pathString, useReadBuffer,
            readBufferSize, statistics));
}

From source file:ras.test.hadoop.fs.InMemoryFileSystem.java

License:Apache License

/**
 * Opens a file for reading.
 * 
 * @param path
 *            The path to the file
 * @param buffersize
 *            Ignored.
 * 
 * @throws IOException
 *             If the file is not found or the path maps to a directory.
 */
@Override
public FSDataInputStream open(Path path, int bufferSize) throws IOException {
    path = makeAbsolute(path);
    FileNode node = getFileNode(path, true);
    checkPermission(node, FsAction.READ);
    return new FSDataInputStream(node.open());
}

From source file:stargate.client.hdfs.StargateFileSystem.java

License:Open Source License

public synchronized FSDataInputStream open(URI resourceURI, int bufferSize) throws IOException {
    if (resourceURI == null) {
        throw new IllegalArgumentException("resourceURI is null");
    }

    DataObjectPath path = makeDataObjectPath(resourceURI);
    Recipe recipe = this.userInterfaceClient.getRecipe(path);
    if (recipe != null) {
        return new FSDataInputStream(new HTTPChunkInputStream(this.userInterfaceClient, recipe));
    } else {
        throw new IOException("unable to retrieve a recipe of " + path.getPath());
    }
}