List of usage examples for org.apache.hadoop.fs FileContext getFileContext
public static FileContext getFileContext() throws UnsupportedFileSystemException
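A minimal sketch of the basic call pattern (assumes a default Hadoop configuration on the classpath; the path below is illustrative, not from the examples):

import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

public class GetFileContextExample {
    public static void main(String[] args) throws Exception {
        // Binds to the default file system (fs.defaultFS), e.g. HDFS or the local FS.
        // Throws UnsupportedFileSystemException if that file system is unavailable.
        FileContext fc = FileContext.getFileContext();

        // Illustrative path; replace with one that exists on your file system.
        Path path = new Path("/tmp/example.txt");
        System.out.println(path + " exists: " + fc.util().exists(path));
    }
}

The examples below follow the same pattern: obtain the default FileContext once, then use it for path qualification, existence checks, and stream creation.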
From source file: com.datatorrent.common.util.AsyncFSStorageAgentTest.java
License: Apache License

@Test
public void testDelete() throws IOException {
    testLoad();
    testMeta.storageAgent.delete(1, 1);
    Path appPath = new Path(testMeta.applicationPath);
    FileContext fileContext = FileContext.getFileContext();
    Assert.assertTrue("operator 2 window 1",
        fileContext.util().exists(new Path(appPath + "/" + 2 + "/" + 1)));
    Assert.assertFalse("operator 1 window 1",
        fileContext.util().exists(new Path(appPath + "/" + 1 + "/" + 1)));
}
From source file: com.datatorrent.common.util.FSStorageAgentTest.java
License: Apache License

@Test
public void testDelete() throws IOException {
    testLoad();
    testMeta.storageAgent.delete(1, 1);
    Path appPath = new Path(testMeta.applicationPath);
    FileContext fileContext = FileContext.getFileContext();
    Assert.assertTrue("operator 2 window 1",
        fileContext.util().exists(new Path(appPath + "/" + 2 + "/" + 1)));
    Assert.assertFalse("operator 1 window 1",
        fileContext.util().exists(new Path(appPath + "/" + 1 + "/" + 1)));
}
From source file: com.gpiskas.yarn.Utils.java
License: Open Source License

public static void setUpLocalResource(Path resPath, LocalResource res, YarnConfiguration conf)
        throws IOException {
    Path qPath = FileContext.getFileContext().makeQualified(resPath);
    FileStatus status = FileSystem.get(conf).getFileStatus(qPath);
    res.setResource(ConverterUtils.getYarnUrlFromPath(qPath));
    res.setSize(status.getLen());
    res.setTimestamp(status.getModificationTime());
    res.setType(LocalResourceType.FILE);
    res.setVisibility(LocalResourceVisibility.PUBLIC);
}
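A hypothetical call site for this helper (the jar path, map, and variable names are assumptions for illustration, not from the source):

// Register a jar stored on HDFS as a YARN local resource for a container.
Path jarPath = new Path("hdfs:///apps/myapp/myapp.jar");           // illustrative path
LocalResource appJar = Records.newRecord(LocalResource.class);     // empty record to fill in
Utils.setUpLocalResource(jarPath, appJar, new YarnConfiguration());
Map<String, LocalResource> localResources = new HashMap<>();
localResources.put("myapp.jar", appJar); // later passed to ContainerLaunchContext.setLocalResources(...)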
From source file: com.mellanox.r4h.TestWriteRead.java
License: Apache License

@Before
public void initJunitModeTest() throws Exception {
    LOG.info("initJunitModeTest");

    conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); // 100K block size

    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();

    mfs = cluster.getFileSystem();
    mfc = FileContext.getFileContext();

    Path rootdir = new Path(ROOT_DIR);
    mfs.mkdirs(rootdir);
}
From source file: com.mellanox.r4h.TestWriteRead.java
License: Apache License

private void initClusterModeTest() throws IOException {
    LOG = LogFactory.getLog(TestWriteRead.class);
    LOG.info("initClusterModeTest");

    conf = new Configuration();
    mfc = FileContext.getFileContext();
    mfs = FileSystem.get(conf);
}
From source file: edu.uci.ics.hyracks.yarn.common.resources.LocalResourceHelper.java
License: Apache License

private static LocalResource createLocalResourceFromPath(Configuration config, File path)
        throws IOException {
    LocalResource lr = Records.newRecord(LocalResource.class);
    URL url = ConverterUtils
            .getYarnUrlFromPath(FileContext.getFileContext().makeQualified(new Path(path.toURI())));
    lr.setResource(url);
    lr.setVisibility(LocalResourceVisibility.APPLICATION);
    lr.setTimestamp(path.lastModified());
    lr.setSize(path.length());
    return lr;
}
From source file: org.apache.apex.engine.util.CascadeStorageAgentTest.java
License: Apache License

@Test
public void testSingleIndirection() throws IOException {
    String oldAppPath = testMeta.applicationPath;
    FSStorageAgent storageAgent = new FSStorageAgent(oldAppPath, null);
    storageAgent.save("1", 1, 1);
    storageAgent.save("2", 1, 2);
    storageAgent.save("3", 2, 1);

    String newAppPath = oldAppPath + ".new";
    CascadeStorageAgent cascade = new CascadeStorageAgent(storageAgent, new FSStorageAgent(newAppPath, null));
    long[] operatorIds = cascade.getWindowIds(1);
    Assert.assertArrayEquals("Returned window ids ", operatorIds, new long[] {1L, 2L});
    operatorIds = cascade.getWindowIds(2);
    Assert.assertArrayEquals("Returned window ids ", operatorIds, new long[] {1L});

    /* save should happen to the new location */
    cascade.save("4", 1, 4);
    FileContext fileContext = FileContext.getFileContext();
    Assert.assertFalse("operator 1 window 4 file does not exist in old directory",
        fileContext.util().exists(new Path(oldAppPath + "/" + 1 + "/" + 4)));
    Assert.assertTrue("operator 1 window 4 file exists in new directory",
        fileContext.util().exists(new Path(newAppPath + "/" + 1 + "/" + 4)));

    // check for delete: a delete for an old checkpoint should be ignored
    cascade.save("5", 1, 5);
    cascade.delete(1, 2L);
    Assert.assertTrue("operator 1 window 2 file exists in old directory",
        fileContext.util().exists(new Path(oldAppPath + "/" + 1 + "/" + 2)));
    cascade.delete(1, 4L);
    Assert.assertFalse("operator 1 window 4 file deleted from new directory",
        fileContext.util().exists(new Path(newAppPath + "/" + 1 + "/" + 4)));

    /* chaining of storage agents */
    String latestAppPath = oldAppPath + ".latest";
    cascade = new CascadeStorageAgent(storageAgent, new FSStorageAgent(newAppPath, null));
    CascadeStorageAgent latest = new CascadeStorageAgent(cascade, new FSStorageAgent(latestAppPath, null));
    operatorIds = latest.getWindowIds(1);
    Assert.assertArrayEquals("Window ids ", operatorIds, new long[] {1, 2, 5});
    latest.save("6", 1, 6);
    Assert.assertFalse("operator 1 window 6 file does not exist in old directory",
        fileContext.util().exists(new Path(oldAppPath + "/" + 1 + "/" + 6)));
    Assert.assertFalse("operator 1 window 6 file does not exist in new directory",
        fileContext.util().exists(new Path(newAppPath + "/" + 1 + "/" + 6)));
    Assert.assertTrue("operator 1 window 6 file exists in latest directory",
        fileContext.util().exists(new Path(latestAppPath + "/" + 1 + "/" + 6)));
}
From source file: org.apache.apex.malhar.lib.utils.IOUtilsTest.java
License: Apache License

private void testCopyPartialHelper(int dataSize, int offset, long size) throws IOException {
    FileUtils.deleteQuietly(new File("target/IOUtilsTest"));
    File file = new File("target/IOUtilsTest/testCopyPartial/input");
    createDataFile(file, dataSize);

    FileContext fileContext = FileContext.getFileContext();
    DataInputStream inputStream = fileContext.open(new Path(file.getAbsolutePath()));

    Path output = new Path("target/IOUtilsTest/testCopyPartial/output");
    DataOutputStream outputStream = fileContext.create(output,
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
        Options.CreateOpts.CreateParent.createParent());

    if (offset == 0) {
        IOUtils.copyPartial(inputStream, size, outputStream);
    } else {
        IOUtils.copyPartial(inputStream, offset, size, outputStream);
    }
    outputStream.close();

    Assert.assertTrue("output exists", fileContext.util().exists(output));
    Assert.assertEquals("output size", size, fileContext.getFileStatus(output).getLen());
    // FileUtils.deleteQuietly(new File("target/IOUtilsTest"));
}
From source file: org.apache.drill.yarn.core.DfsFacade.java
License: Apache License

/**
 * Create a local resource definition for YARN. A local resource is one that
 * must be localized onto the remote node prior to running a command on that
 * node.
 * <p>
 * YARN uses the size and timestamp to check whether the file has changed on
 * HDFS and whether an existing localized copy, if any, can be reused.
 *
 * @param dfsPath
 *          the path (relative or absolute) to the file on the configured file
 *          system (usually HDFS).
 * @param dfsFileStatus
 *          the file status from the configured file system; supplies the
 *          resource's size and modification time.
 * @param type
 *          the YARN local resource type (file, archive, etc.).
 * @param visibility
 *          the YARN visibility of the resource.
 * @return a YARN local resource record that contains the path, size, type,
 *         visibility and so on that YARN requires.
 * @throws DfsFacadeException
 *           if the DFS path cannot be converted to a URL
 */
public LocalResource makeResource(Path dfsPath, FileStatus dfsFileStatus, LocalResourceType type,
        LocalResourceVisibility visibility) throws DfsFacadeException {
    URL destUrl;
    try {
        destUrl = ConverterUtils.getYarnUrlFromPath(FileContext.getFileContext().makeQualified(dfsPath));
    } catch (UnsupportedFileSystemException e) {
        throw new DfsFacadeException("Unable to convert dfs file to a URL: " + dfsPath.toString(), e);
    }
    LocalResource resource = LocalResource.newInstance(destUrl, type, visibility,
        dfsFileStatus.getLen(), dfsFileStatus.getModificationTime());
    return resource;
}
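A hedged sketch of a caller (the file system handle, facade instance, and archive path are assumptions for illustration, not from the source):

// Look up the file's status on DFS, then build the YARN resource record from it.
FileStatus status = fs.getFileStatus(archivePath); // fs: the configured FileSystem (assumed)
LocalResource archive = dfsFacade.makeResource(archivePath, status,
    LocalResourceType.ARCHIVE, LocalResourceVisibility.PUBLIC);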
From source file: org.apache.hawq.pxf.plugins.hdfs.SequenceFileAccessor.java
License: Apache License

@Override
public boolean openForWrite() throws Exception {
    FileSystem fs;
    Path parent;
    String fileName = inputData.getDataSource();

    conf = new Configuration();
    getCompressionCodec(inputData);
    fileName = updateFileExtension(fileName, codec);

    // construct the output stream
    file = new Path(fileName);
    fs = file.getFileSystem(conf);
    fc = FileContext.getFileContext();
    defaultKey = new LongWritable(inputData.getSegmentId());

    if (fs.exists(file)) {
        throw new IOException("file " + file + " already exists, can't write data");
    }

    parent = file.getParent();
    if (!fs.exists(parent)) {
        fs.mkdirs(parent);
        LOG.debug("Created new dir " + parent);
    }

    writer = null;
    return true;
}