Example usage for org.apache.hadoop.fs FSDataInputStream FSDataInputStream

Introduction

On this page you can find example usages of the org.apache.hadoop.fs.FSDataInputStream constructor FSDataInputStream(InputStream in).

Prototype

public FSDataInputStream(InputStream in) 
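
The wrapped stream must implement Hadoop's Seekable and PositionedReadable interfaces, otherwise the constructor rejects it; this is why the examples below either re-wrap the stream returned by FileSystem.open() (itself an FSDataInputStream) or wrap a purpose-built seekable stream. A minimal sketch of the call pattern, assuming a configured FileSystem and an existing file (the method and its arguments are hypothetical):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Minimal sketch, not taken from the projects below: re-wrapping the stream
// returned by FileSystem.open() works because FSDataInputStream itself
// implements Seekable and PositionedReadable.
public static long readFirstLong(Configuration conf, Path path) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    try (FSDataInputStream in = new FSDataInputStream(fs.open(path))) {
        in.seek(0);           // Seekable methods are available on the wrapper
        return in.readLong(); // as are the DataInputStream convenience methods
    }
}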

Usage

From source file:com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce.HoplogUtil.java

License:Apache License

// Writes intervalDurationMillis to a marker file at path, recreating the file
// only when the value it already holds differs from the requested one.
public static void exposeCleanupIntervalMillis(FileSystem fs, Path path, long intervalDurationMillis) {
    FSDataInputStream input = null;
    FSDataOutputStream output = null;
    try {
        if (fs.exists(path)) {
            input = new FSDataInputStream(fs.open(path));
            if (intervalDurationMillis == input.readLong()) {
                input.close();
                return;
            }
            input.close();
            fs.delete(path, true);
        }
        output = fs.create(path);
        output.writeLong(intervalDurationMillis);
        output.close();
    } catch (IOException e) {
        return;
    } finally {
        try {
            if (input != null) {
                input.close();
            }
            if (output != null) {
                output.close();
            }
        } catch (IOException e2) {

        }
    }
}

From source file:com.google.cloud.hadoop.fs.gcs.GoogleHadoopFileSystemBase.java

License:Open Source License

/**
 * Opens the given file for reading.
 *
 * Note:
 * This function overrides the given bufferSize value with a higher
 * number unless further overridden using configuration
 * parameter fs.gs.io.buffersize.
 *
 * @param hadoopPath File to open.
 * @param bufferSize Size of buffer to use for IO.
 * @return A readable stream.
 * @throws FileNotFoundException if the given path does not exist.
 * @throws IOException if an error occurs.
 */
@Override
public FSDataInputStream open(Path hadoopPath, int bufferSize) throws IOException {

    long startTime = System.nanoTime();
    Preconditions.checkArgument(hadoopPath != null, "hadoopPath must not be null");
    Preconditions.checkArgument(bufferSize > 0, "bufferSize must be a positive integer: %s", bufferSize);

    checkOpen();

    LOG.debug("GHFS.open: {}, bufferSize: {} (override: {})", hadoopPath, bufferSize, bufferSizeOverride);
    bufferSize = bufferSizeOverride;
    URI gcsPath = getGcsPath(hadoopPath);
    GoogleHadoopFSInputStream in = new GoogleHadoopFSInputStream(this, gcsPath, bufferSize, statistics);

    long duration = System.nanoTime() - startTime;
    increment(Counter.OPEN);
    increment(Counter.OPEN_TIME, duration);
    return new FSDataInputStream(in);
}
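
The Javadoc above notes that the supplied bufferSize is replaced by the connector's own value unless fs.gs.io.buffersize is set. A minimal sketch of supplying that override through the standard Hadoop Configuration (the bucket name and 8 MB value are illustrative only):

Configuration conf = new Configuration();
// Key named in the Javadoc above; the value here is a hypothetical choice.
conf.setInt("fs.gs.io.buffersize", 8 * 1024 * 1024);
FileSystem gcsFs = FileSystem.get(URI.create("gs://my-bucket/"), conf);
FSDataInputStream in = gcsFs.open(new Path("gs://my-bucket/data.bin"), 4096);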

From source file:com.ibm.stocator.fs.cos.COSAPIClient.java

License:Apache License

@Override
public FSDataInputStream getObject(String hostName, Path path) throws IOException {
    LOG.debug("Opening '{}' for reading.", path);
    String key = pathToKey(hostName, path);
    FileStatus fileStatus = memoryCache.getFileStatus(path.toString());
    if (fileStatus == null) {
        fileStatus = getFileStatus(hostName, path, "getObject");
    }
    if (fileStatus.isDirectory()) {
        throw new FileNotFoundException("Can't open " + path + " because it is a directory");
    }
    COSInputStream inputStream = new COSInputStream(mBucket, key, fileStatus.getLen(), mClient, readAhead,
            inputPolicy);

    return new FSDataInputStream(inputStream);
}

From source file:com.ibm.stocator.fs.swift.SwiftAPIClient.java

License:Open Source License

public FSDataInputStream getObject(String hostName, Path path) throws IOException {
    // SwiftInputStream.printStackTrace(" #### getOject hostname = " + hostName + " path= " + path);
    LOG.debug("Get object: {}", path);
    try {
        SwiftInputStream sis = new SwiftPushdownInputStream(this, hostName, path);
        return new FSDataInputStream(sis);
    } catch (IOException e) {
        LOG.error(e.getMessage());
    }
    return null;
}

From source file:com.jkoolcloud.tnt4j.streams.inputs.HdfsFileLineStreamTest.java

License:Apache License

@Test()
public void test() throws Exception {
    FileSystem fs = mock(FileSystem.class);
    HdfsFileLineStream stream = new HdfsFileLineStream();

    TestFileList files = new TestFileList(false);

    final String fileName = ("file:////" + files.get(0).getParentFile() + File.separator + files.getPrefix() // NON-NLS
            + "*.TST").replace("\\", "/"); // NON-NLS

    Map<String, String> props = new HashMap<>(2);
    props.put(StreamProperties.PROP_FILENAME, fileName);
    props.put(StreamProperties.PROP_RESTORE_STATE, "false"); // NON-NLS

    when(fs.open(any(Path.class))).thenReturn(new FSDataInputStream(new TestInputStreamStub()));
    final FileStatus fileStatusMock = mock(FileStatus.class);
    final FileStatus[] array = new FileStatus[10];
    Arrays.fill(array, fileStatusMock);
    when(fs.listStatus(any(Path.class), any(PathFilter.class))).thenReturn(array);
    when(fileStatusMock.getModificationTime()).thenReturn(1L, 2L, 3L);
    when(fileStatusMock.getPath()).thenReturn(mock(Path.class));
    when(fs.getContentSummary(any(Path.class))).thenReturn(mock(ContentSummary.class));

    Method m = FileSystem.class.getDeclaredMethod("addFileSystemForTesting", URI.class, Configuration.class, // NON-NLS
            FileSystem.class);
    m.setAccessible(true);
    m.invoke(FileSystem.class, URI.create(fileName), new Configuration(), fs);

    StreamThread st = mock(StreamThread.class);
    st.setName("HdfsFileLineStreamTestThreadName"); // NON-NLS
    stream.setOwnerThread(st);

    stream.setProperties(props.entrySet());
    stream.startStream();

    verify(fileStatusMock, atLeastOnce()).getModificationTime();
    verify(fileStatusMock, atLeastOnce()).getPath();
    verify(fs, atLeastOnce()).listStatus(any(Path.class), any(PathFilter.class));

    stream.cleanup();
}

From source file:com.kenshoo.integrations.plugins.connectors.GCSFileSystem.java

License:Apache License

// Opens the object at path by wrapping the connector's raw stream in a
// SeekableInputStream adapter; the requested buffer size (paramInt) is not used here.
@Override
public FSDataInputStream open(Path path, int paramInt) throws IOException {
    InputStream is = null;
    GCSConnector gcsConnector = connect(path);
    String objectName = PathUtil.pathToStorageObjectName(path);
    is = gcsConnector.getInpuStream(objectName);
    FSDataInputStream fsIs = new FSDataInputStream(new SeekableInputStream(is, statistics));
    return fsIs;
}

From source file:com.knewton.mapreduce.SSTableRecordReaderTest.java

License:Apache License

/**
 * Tests to see if tables can be correctly copied locally
 */
@Test
public void testCopyTablesToLocal() throws Exception {
    TaskAttemptContext context = getTaskAttemptContext(true, true, true);
    ssTableColumnRecordReader.initialize(inputSplit, context);

    doCallRealMethod().when(ssTableColumnRecordReader).copyTablesToLocal(any(FileSystem.class),
            any(FileSystem.class), any(Path.class), any(TaskAttemptContext.class));

    FileSystem remoteFS = mock(FileSystem.class);
    FileSystem localFS = mock(FileSystem.class);

    byte[] data = new byte[] { 0xA };
    FSDataInputStream fsIn = new FSDataInputStream(new MemoryDataInputStream(data));
    FSDataOutputStream fsOut = mock(FSDataOutputStream.class);

    when(remoteFS.open(any(Path.class))).thenReturn(fsIn);
    when(localFS.create(any(Path.class), anyBoolean())).thenReturn(fsOut);

    Path dataTablePath = inputSplit.getPath();
    FileStatus fileStatus = mock(FileStatus.class);
    when(fileStatus.getLen()).thenReturn(10L);
    when(fileStatus.isDirectory()).thenReturn(false);
    when(remoteFS.getFileStatus(any(Path.class))).thenReturn(fileStatus);

    ssTableColumnRecordReader.copyTablesToLocal(remoteFS, localFS, dataTablePath, context);
    verify(remoteFS).getFileStatus(dataTablePath);
    ssTableColumnRecordReader.close();
    verify(fsOut).write(any(byte[].class), eq(0), eq(data.length));
    assertEquals(2, ssTableColumnRecordReader.getComponentSize());
}

From source file:com.knewton.mapreduce.SSTableRecordReaderTest.java

License:Apache License

/**
 * Tests to see if tables can be correctly copied locally including the compression info table
 */
@Test
public void testCopyTablesToLocalWithCompressionInfo() throws Exception {
    TaskAttemptContext context = getTaskAttemptContext(true, true, true);
    ssTableColumnRecordReader.initialize(inputSplit, context);

    doCallRealMethod().when(ssTableColumnRecordReader).copyTablesToLocal(any(FileSystem.class),
            any(FileSystem.class), any(Path.class), any(TaskAttemptContext.class));

    FileSystem remoteFS = mock(FileSystem.class);
    FileSystem localFS = mock(FileSystem.class);

    byte[] data = new byte[] { 0xA };
    FSDataInputStream fsIn = new FSDataInputStream(new MemoryDataInputStream(data));
    FSDataOutputStream fsOut = mock(FSDataOutputStream.class);

    when(remoteFS.open(any(Path.class))).thenReturn(fsIn);
    when(localFS.create(any(Path.class), anyBoolean())).thenReturn(fsOut);

    Path dataTablePath = inputSplit.getPath();
    FileStatus fileStatus = mock(FileStatus.class);
    when(fileStatus.getLen()).thenReturn(10L);
    when(fileStatus.isDirectory()).thenReturn(false);
    when(remoteFS.getFileStatus(any(Path.class))).thenReturn(fileStatus);

    String str = ssTableColumnRecordReader.getDescriptor().filenameFor(Component.COMPRESSION_INFO);
    when(remoteFS.exists(new Path(str))).thenReturn(true);

    ssTableColumnRecordReader.copyTablesToLocal(remoteFS, localFS, dataTablePath, context);
    verify(remoteFS).getFileStatus(dataTablePath);
    ssTableColumnRecordReader.close();
    verify(fsOut).write(any(byte[].class), eq(0), eq(data.length));
    assertEquals(3, ssTableColumnRecordReader.getComponentSize());
}

From source file:com.knewton.mrtool.io.JsonRecordReaderTest.java

License:Apache License

/**
 * Tests if a JsonRecordReader can be initialized properly without errors.
 *
 * @throws IOException
 * @throws InterruptedException
 */
@Test
public void testInitializeJsonRecordReader() throws IOException, InterruptedException {
    JsonRecordReader<Text> rr = new JsonRecordReader<Text>() {
        @Override
        protected Class<?> getDataClass(String jsonStr) {
            return Text.class;
        }
    };

    Configuration conf = new Configuration();
    TaskAttemptContext context = new TaskAttemptContext(conf, new TaskAttemptID());
    FileSplit fileSplit = new FileSplit(new Path("recs.2013-03-20_02_52.log"), 0, recommendationBytes.length,
            new String[0]);

    new MockUp<FileSystem>() {
        @Mock
        public FSDataInputStream open(Path f) throws IOException {
            return new FSDataInputStream(new SeekableByteArrayInputStream(recommendationBytes));
        }
    };

    rr.initialize(fileSplit, context);
    assertEquals(Text.class, rr.getDataClass(null));
    rr.close();
}

From source file:com.knewton.mrtool.io.JsonRecordReaderTest.java

License:Apache License

/**
 * Tests the line reader in the record reader to see if records can be read correctly from the
 * beginning of an input stream.
 * 
 * @throws IOException
 * @throws InterruptedException
 */
@Test
public void testJsonRecordReader() throws IOException, InterruptedException {
    JsonRecordReader<Text> rr = new JsonRecordReader<Text>() {
        @Override
        protected Class<?> getDataClass(String jsonStr) {
            return Text.class;
        }
    };

    Configuration conf = new Configuration();
    TaskAttemptContext context = new TaskAttemptContext(conf, new TaskAttemptID());
    FileSplit fileSplit = new FileSplit(new Path("recs.2013-03-20_02_52.log"), 0, recommendationBytes.length,
            new String[0]);

    new MockUp<FileSystem>() {
        @Mock
        public FSDataInputStream open(Path f) throws IOException {
            return new FSDataInputStream(new SeekableByteArrayInputStream(recommendationBytes));
        }
    };
    // Initialize it to get the compression codecs
    rr.initialize(fileSplit, context);
    // close the line reader and reopen it.
    rr.close();
    LineReader lineReader = rr.initLineReader(fileSplit, conf);
    Text line = new Text();
    lineReader.readLine(line);
    assertEquals(DummyJsonRecommendations.jsonRecommendations[0], line.toString());

    line = new Text();
    lineReader.readLine(line);
    assertEquals(DummyJsonRecommendations.jsonRecommendations[1], line.toString());
    lineReader.close();
}
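
The stubs used in these tests (TestInputStreamStub, MemoryDataInputStream, SeekableByteArrayInputStream) are project-specific, but they all serve the same purpose: the FSDataInputStream constructor only accepts a stream that implements Seekable and PositionedReadable. A minimal sketch of such an in-memory stream (a hypothetical helper, not taken from any of the projects above):

import java.io.ByteArrayInputStream;
import java.io.IOException;

import org.apache.hadoop.fs.PositionedReadable;
import org.apache.hadoop.fs.Seekable;

// Hypothetical helper: an in-memory stream satisfying the contracts that the
// FSDataInputStream constructor checks for.
public class InMemorySeekableStream extends ByteArrayInputStream implements Seekable, PositionedReadable {

    public InMemorySeekableStream(byte[] data) {
        super(data);
    }

    @Override
    public void seek(long newPos) throws IOException {
        if (newPos < 0 || newPos > count) {
            throw new IOException("Seek position out of range: " + newPos);
        }
        pos = (int) newPos; // pos and count are protected fields of ByteArrayInputStream
    }

    @Override
    public long getPos() throws IOException {
        return pos;
    }

    @Override
    public boolean seekToNewSource(long targetPos) throws IOException {
        return false; // single in-memory copy, no alternate source to switch to
    }

    @Override
    public int read(long position, byte[] buffer, int offset, int length) throws IOException {
        long saved = getPos();
        seek(position);
        int n = read(buffer, offset, length);
        seek(saved);
        return n;
    }

    @Override
    public void readFully(long position, byte[] buffer, int offset, int length) throws IOException {
        if (read(position, buffer, offset, length) < length) {
            throw new IOException("Reached end of stream before reading fully");
        }
    }

    @Override
    public void readFully(long position, byte[] buffer) throws IOException {
        readFully(position, buffer, 0, buffer.length);
    }
}

Wrapping it then mirrors the mock setups above: new FSDataInputStream(new InMemorySeekableStream(bytes)).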