Usage examples for the org.apache.hadoop.fs.FSDataInputStream constructor
public FSDataInputStream(InputStream in)
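Before the examples, a minimal sketch of the wrapping contract (the path below is illustrative, not from any of the projects listed): the constructor requires the wrapped InputStream to implement both Seekable and PositionedReadable, and in current Hadoop versions throws IllegalArgumentException otherwise. Note that FileSystem.open() already returns an FSDataInputStream, which is why several examples below wrap it redundantly.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Minimal sketch; the path is illustrative.
public class FSDataInputStreamExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // fs.open() already returns an FSDataInputStream; explicit wrapping, as in
        // the examples below, is only needed around a raw stream that implements
        // Seekable and PositionedReadable. The constructor rejects anything else.
        try (FSDataInputStream in = fs.open(new Path("/tmp/example.bin"))) {
            in.seek(0);                              // seek() comes from Seekable
            byte[] buf = new byte[16];
            int n = in.read(0L, buf, 0, buf.length); // positioned read, from PositionedReadable
            System.out.println("Read " + n + " bytes");
        }
    }
}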
From source file:org.apache.hama.ml.ann.NeuralNetwork.java
License:Apache License
/**
 * Read the model meta-data from the specified location.
 *
 * @throws IOException
 */
protected void readFromModel() throws IOException {
    Preconditions.checkArgument(this.modelPath != null, "Model path has not been set.");
    Configuration conf = new Configuration();
    FSDataInputStream is = null;
    try {
        URI uri = new URI(this.modelPath);
        FileSystem fs = FileSystem.get(uri, conf);
        is = new FSDataInputStream(fs.open(new Path(modelPath)));
        this.readFields(is);
    } catch (URISyntaxException e) {
        e.printStackTrace();
    } finally {
        Closeables.close(is, false);
    }
}
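For comparison, the same pattern can be written without the URI round-trip or the explicit finally block. This is a sketch only, not Hama's code; Writable stands in for the class's readFields(DataInput) contract, and the class name is illustrative:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Writable;

// Sketch: Path accepts a URI string directly, and path.getFileSystem(conf)
// resolves the right FileSystem, so URISyntaxException handling is unnecessary;
// try-with-resources replaces the explicit finally block.
public final class ModelReader {
    public static void read(String modelPath, Writable model) throws IOException {
        Path path = new Path(modelPath);
        FileSystem fs = path.getFileSystem(new Configuration());
        try (FSDataInputStream is = fs.open(path)) { // already an FSDataInputStream
            model.readFields(is);
        }
    }
}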
From source file:org.apache.hama.ml.perception.SmallMultiLayerPerceptron.java
License:Apache License
/**
 * Read the model meta-data from the specified location.
 *
 * @throws IOException
 */
@Override
protected void readFromModel() throws IOException {
    Configuration conf = new Configuration();
    try {
        URI uri = new URI(modelPath);
        FileSystem fs = FileSystem.get(uri, conf);
        FSDataInputStream is = new FSDataInputStream(fs.open(new Path(modelPath)));
        this.readFields(is);
        if (!this.MLPType.equals(this.getClass().getName())) {
            throw new IllegalStateException(
                String.format("Model type incorrect, cannot load model '%s' for '%s'.",
                    this.MLPType, this.getClass().getName()));
        }
    } catch (URISyntaxException e) {
        e.printStackTrace();
    }
}
From source file:org.apache.hive.common.util.MockFileSystem.java
License:Apache License
@Override
public FSDataInputStream open(Path path, int i) throws IOException {
    statistics.incrementReadOps(1);
    checkAccess();
    MockFile file = findFile(path);
    if (file != null)
        return new FSDataInputStream(new MockInputStream(file));
    throw new IOException("File not found: " + path);
}
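MockInputStream itself is not shown above, but for a mock like this to pass the constructor's check it must implement Seekable and PositionedReadable. A hypothetical minimal in-memory equivalent (all names illustrative, not Hive's implementation):

import java.io.ByteArrayInputStream;
import java.io.IOException;
import org.apache.hadoop.fs.PositionedReadable;
import org.apache.hadoop.fs.Seekable;

// Hypothetical in-memory stream satisfying the FSDataInputStream contract.
public class SeekableByteArrayInputStream extends ByteArrayInputStream
        implements Seekable, PositionedReadable {

    public SeekableByteArrayInputStream(byte[] data) {
        super(data);
    }

    @Override
    public void seek(long position) throws IOException {
        if (position < 0 || position > count)
            throw new IOException("Seek out of bounds: " + position);
        pos = (int) position;
    }

    @Override
    public long getPos() {
        return pos;
    }

    @Override
    public boolean seekToNewSource(long targetPos) {
        return false; // single in-memory copy, no alternate source
    }

    @Override
    public int read(long position, byte[] buffer, int offset, int length) throws IOException {
        long saved = getPos();        // positional read must not disturb the cursor
        seek(position);
        int n = read(buffer, offset, length);
        seek(saved);
        return n;
    }

    @Override
    public void readFully(long position, byte[] buffer, int offset, int length) throws IOException {
        if (read(position, buffer, offset, length) < length)
            throw new IOException("Premature EOF");
    }

    @Override
    public void readFully(long position, byte[] buffer) throws IOException {
        readFully(position, buffer, 0, buffer.length);
    }
}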
From source file:org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem.java
License:Apache License
/** {@inheritDoc} */
@Override
public FSDataInputStream open(Path f, int bufSize) throws IOException {
    A.notNull(f, "f");

    enterBusy();

    try {
        IgfsPath path = convert(f);
        IgfsMode mode = mode(path);

        if (mode == PROXY) {
            if (secondaryFs == null) {
                assert mgmt;

                throw new IOException("Failed to open file (secondary file system is not initialized): " + f);
            }

            FSDataInputStream is = secondaryFs.open(toSecondary(f), bufSize);

            if (clientLog.isLogEnabled()) {
                // At this point we do not know file size, so we perform additional request to remote FS to get it.
                FileStatus status = secondaryFs.getFileStatus(toSecondary(f));

                long size = status != null ? status.getLen() : -1;

                long logId = IgfsLogger.nextId();

                clientLog.logOpen(logId, path, PROXY, bufSize, size);

                return new FSDataInputStream(new HadoopIgfsProxyInputStream(is, clientLog, logId));
            }
            else
                return is;
        }
        else {
            HadoopIgfsStreamDelegate stream = seqReadsBeforePrefetchOverride
                ? rmtClient.open(path, seqReadsBeforePrefetch)
                : rmtClient.open(path);

            long logId = -1;

            if (clientLog.isLogEnabled()) {
                logId = IgfsLogger.nextId();

                clientLog.logOpen(logId, path, mode, bufSize, stream.length());
            }

            if (LOG.isDebugEnabled())
                LOG.debug("Opening input stream [thread=" + Thread.currentThread().getName() + ", path=" + path
                    + ", bufSize=" + bufSize + ']');

            HadoopIgfsInputStream igfsIn =
                new HadoopIgfsInputStream(stream, stream.length(), bufSize, LOG, clientLog, logId);

            if (LOG.isDebugEnabled())
                LOG.debug("Opened input stream [path=" + path + ", delegate=" + stream + ']');

            return new FSDataInputStream(igfsIn);
        }
    }
    finally {
        leaveBusy();
    }
}
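A design note on the PROXY branch above (the same structure recurs in the v2 and IGFS variants that follow): when a request is proxied to the secondary file system, the returned stream is wrapped in a logging decorator only if client logging is enabled, and the extra getFileStatus() round-trip exists solely to record the file size in the open-log entry, since the proxied stream does not expose its length. In the non-proxy branch the length comes from the stream delegate directly, so no extra request is needed.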
From source file:org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem.java
License:Apache License
/** {@inheritDoc} */
@Override
public FSDataInputStream open(Path f, int bufSize) throws IOException {
    A.notNull(f, "f");

    enterBusy();

    try {
        IgfsPath path = convert(f);
        IgfsMode mode = modeRslvr.resolveMode(path);

        if (mode == PROXY) {
            FSDataInputStream is = secondaryFs.open(toSecondary(f), bufSize);

            if (clientLog.isLogEnabled()) {
                // At this point we do not know file size, so we perform additional request to remote FS to get it.
                FileStatus status = secondaryFs.getFileStatus(toSecondary(f));

                long size = status != null ? status.getLen() : -1;

                long logId = IgfsLogger.nextId();

                clientLog.logOpen(logId, path, PROXY, bufSize, size);

                return new FSDataInputStream(new HadoopIgfsProxyInputStream(is, clientLog, logId));
            }
            else
                return is;
        }
        else {
            HadoopIgfsStreamDelegate stream = seqReadsBeforePrefetchOverride
                ? rmtClient.open(path, seqReadsBeforePrefetch)
                : rmtClient.open(path);

            long logId = -1;

            if (clientLog.isLogEnabled()) {
                logId = IgfsLogger.nextId();

                clientLog.logOpen(logId, path, mode, bufSize, stream.length());
            }

            if (LOG.isDebugEnabled())
                LOG.debug("Opening input stream [thread=" + Thread.currentThread().getName() + ", path=" + path
                    + ", bufSize=" + bufSize + ']');

            HadoopIgfsInputStream igfsIn =
                new HadoopIgfsInputStream(stream, stream.length(), bufSize, LOG, clientLog, logId);

            if (LOG.isDebugEnabled())
                LOG.debug("Opened input stream [path=" + path + ", delegate=" + stream + ']');

            return new FSDataInputStream(igfsIn);
        }
    }
    finally {
        leaveBusy();
    }
}
From source file:org.apache.ignite.igfs.hadoop.v1.IgfsHadoopFileSystem.java
License:Apache License
/** {@inheritDoc} */
@Override
public FSDataInputStream open(Path f, int bufSize) throws IOException {
    A.notNull(f, "f");

    enterBusy();

    try {
        IgfsPath path = convert(f);
        IgfsMode mode = mode(path);

        if (mode == PROXY) {
            if (secondaryFs == null) {
                assert mgmt;

                throw new IOException("Failed to open file (secondary file system is not initialized): " + f);
            }

            FSDataInputStream is = secondaryFs.open(toSecondary(f), bufSize);

            if (clientLog.isLogEnabled()) {
                // At this point we do not know file size, so we perform additional request to remote FS to get it.
                FileStatus status = secondaryFs.getFileStatus(toSecondary(f));

                long size = status != null ? status.getLen() : -1;

                long logId = IgfsLogger.nextId();

                clientLog.logOpen(logId, path, PROXY, bufSize, size);

                return new FSDataInputStream(new IgfsHadoopProxyInputStream(is, clientLog, logId));
            }
            else
                return is;
        }
        else {
            IgfsHadoopStreamDelegate stream = seqReadsBeforePrefetchOverride
                ? rmtClient.open(path, seqReadsBeforePrefetch)
                : rmtClient.open(path);

            long logId = -1;

            if (clientLog.isLogEnabled()) {
                logId = IgfsLogger.nextId();

                clientLog.logOpen(logId, path, mode, bufSize, stream.length());
            }

            if (LOG.isDebugEnabled())
                LOG.debug("Opening input stream [thread=" + Thread.currentThread().getName() + ", path=" + path
                    + ", bufSize=" + bufSize + ']');

            IgfsHadoopInputStream igfsIn =
                new IgfsHadoopInputStream(stream, stream.length(), bufSize, LOG, clientLog, logId);

            if (LOG.isDebugEnabled())
                LOG.debug("Opened input stream [path=" + path + ", delegate=" + stream + ']');

            return new FSDataInputStream(igfsIn);
        }
    }
    finally {
        leaveBusy();
    }
}
From source file:org.apache.ignite.igfs.hadoop.v2.IgfsHadoopFileSystem.java
License:Apache License
/** {@inheritDoc} */
@Override
public FSDataInputStream open(Path f, int bufSize) throws IOException {
    A.notNull(f, "f");

    enterBusy();

    try {
        IgfsPath path = convert(f);
        IgfsMode mode = modeRslvr.resolveMode(path);

        if (mode == PROXY) {
            FSDataInputStream is = secondaryFs.open(toSecondary(f), bufSize);

            if (clientLog.isLogEnabled()) {
                // At this point we do not know file size, so we perform additional request to remote FS to get it.
                FileStatus status = secondaryFs.getFileStatus(toSecondary(f));

                long size = status != null ? status.getLen() : -1;

                long logId = IgfsLogger.nextId();

                clientLog.logOpen(logId, path, PROXY, bufSize, size);

                return new FSDataInputStream(new IgfsHadoopProxyInputStream(is, clientLog, logId));
            }
            else
                return is;
        }
        else {
            IgfsHadoopStreamDelegate stream = seqReadsBeforePrefetchOverride
                ? rmtClient.open(path, seqReadsBeforePrefetch)
                : rmtClient.open(path);

            long logId = -1;

            if (clientLog.isLogEnabled()) {
                logId = IgfsLogger.nextId();

                clientLog.logOpen(logId, path, mode, bufSize, stream.length());
            }

            if (LOG.isDebugEnabled())
                LOG.debug("Opening input stream [thread=" + Thread.currentThread().getName() + ", path=" + path
                    + ", bufSize=" + bufSize + ']');

            IgfsHadoopInputStream igfsIn =
                new IgfsHadoopInputStream(stream, stream.length(), bufSize, LOG, clientLog, logId);

            if (LOG.isDebugEnabled())
                LOG.debug("Opened input stream [path=" + path + ", delegate=" + stream + ']');

            return new FSDataInputStream(igfsIn);
        }
    }
    finally {
        leaveBusy();
    }
}
From source file:org.apache.ignite.internal.processors.hadoop.fs.GridHadoopRawLocalFileSystem.java
License:Apache License
/** {@inheritDoc} */
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
    return new FSDataInputStream(new InStream(checkExists(convert(f))));
}
From source file:org.apache.mahout.classifier.chi_rwcs.RuleBase.java
License:Apache License
/**
 * Load the rule base from a single file or a directory of files
 *
 * @throws java.io.IOException
 */
public static RuleBase load(Configuration conf, Path fuzzy_ChiCSPath) throws IOException {
    FileSystem fs = fuzzy_ChiCSPath.getFileSystem(conf);
    Path[] files;
    if (fs.getFileStatus(fuzzy_ChiCSPath).isDir()) {
        files = Chi_RWCSUtils.listOutputFiles(fs, fuzzy_ChiCSPath);
    } else {
        files = new Path[] { fuzzy_ChiCSPath };
    }

    RuleBase rb = null;
    for (Path path : files) {
        FSDataInputStream dataInput = new FSDataInputStream(fs.open(path));
        try {
            if (rb == null) {
                rb = read(dataInput);
            } else {
                rb.readFields(dataInput);
            }
        } finally {
            Closeables.closeQuietly(dataInput);
        }
    }

    return rb;
}
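A note on the finally block: Closeables.closeQuietly(Closeable) was deprecated and then removed in newer Guava releases; Closeables.close(dataInput, true), as used in the DecisionForest loader below, is the surviving equivalent.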
From source file:org.apache.mahout.classifier.df.DecisionForest.java
License:Apache License
/**
 * Load the forest from a single file or a directory of files
 *
 * @throws java.io.IOException
 */
public static DecisionForest load(Configuration conf, Path forestPath) throws IOException {
    FileSystem fs = forestPath.getFileSystem(conf);
    Path[] files;
    if (fs.getFileStatus(forestPath).isDir()) {
        files = DFUtils.listOutputFiles(fs, forestPath);
    } else {
        files = new Path[] { forestPath };
    }

    DecisionForest forest = null;
    for (Path path : files) {
        FSDataInputStream dataInput = new FSDataInputStream(fs.open(path));
        try {
            if (forest == null) {
                forest = read(dataInput);
            } else {
                forest.readFields(dataInput);
            }
        } finally {
            Closeables.close(dataInput, true);
        }
    }

    return forest;
}
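Both Mahout loaders follow the same accumulation pattern: read() constructs the object from the first file, and readFields() merges each subsequent part file into it. A minimal usage sketch of the loader defined above (the command-line argument is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.mahout.classifier.df.DecisionForest;

// Minimal usage sketch; args[0] points at a file or directory written by a
// Mahout random-forest training job.
public class LoadForest {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        DecisionForest forest = DecisionForest.load(conf, new Path(args[0]));
        System.out.println("Loaded forest: " + forest);
    }
}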