Usage examples for org.apache.hadoop.fs.CreateFlag.CREATE
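CreateFlag.CREATE instructs an HDFS create call to create the target file if it does not already exist. On its own it fails with FileAlreadyExistsException when the file is present, so it is usually combined with CreateFlag.OVERWRITE (replace an existing file) or CreateFlag.APPEND (append to an existing file). Before the real-world examples below, here is a minimal sketch of the common pattern; the path is a placeholder and the local file system stands in for a cluster:

import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

public class CreateFlagSketch {
    public static void main(String[] args) throws Exception {
        // Local FileContext for illustration; against a real cluster you would use
        // FileContext.getFileContext(conf) instead.
        final FileContext fc = FileContext.getLocalFSFileContext(new Configuration());
        final Path path = fc.makeQualified(new Path("/tmp/createflag-example.txt")); // placeholder path

        // CREATE alone throws FileAlreadyExistsException if the file exists;
        // CREATE|OVERWRITE replaces any existing file.
        try (FSDataOutputStream out = fc.create(path, EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE))) {
            out.writeBytes("hello\n");
        }
    }
}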
From source file:com.ikanow.aleph2.core.shared.utils.TestJarCacheUtils.java
License:Apache License
@Test
public void test_localFilePresentAndNew() throws InterruptedException, ExecutionException,
        AccessControlException, FileAlreadyExistsException, FileNotFoundException,
        ParentNotDirectoryException, IOException {
    final FileContext localfs = FileContext.getLocalFSFileContext(new Configuration());

    final String expected_cache_name = _globals.local_cached_jar_dir().replace(File.separator, "/")
            + "test1.cache.jar";
    final Path expected_cache_path = localfs.makeQualified(new Path(expected_cache_name));

    // Just make sure we've deleted the old file
    try {
        new File(expected_cache_name).delete();
    } catch (Exception e) {
    }

    assertTrue("Remote file exists", new File(_test_file_path).exists());
    assertFalse("Local file doesn't exist", new File(expected_cache_name).exists());

    // Now create the file
    localfs.create(expected_cache_path, EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE));
    localfs.setTimes(expected_cache_path, _test_file_time + 10000, _test_file_time + 10000);

    // check something has happened:
    assertEquals(_test_file_time + 10000, localfs.getFileStatus(expected_cache_path).getModificationTime());

    // Now run the test routine
    final SharedLibraryBean library_bean = BeanTemplateUtils.build(SharedLibraryBean.class)
            .with(SharedLibraryBean::path_name, _test_file_path).with(SharedLibraryBean::_id, "test1").done()
            .get();

    final Validation<BasicMessageBean, String> ret_val_1 = JarCacheUtils.getCachedJar(
            _globals.local_cached_jar_dir(), library_bean, _mock_hdfs, "test1", new TestMessageBean()).get();

    assertEquals(expected_cache_path.toString(), ret_val_1.success());
    assertTrue("Local file still exists", new File(expected_cache_name).exists());
    assertEquals(localfs.getFileStatus(expected_cache_path).getModificationTime(), _test_file_time + 10000);
}
From source file:com.ikanow.aleph2.management_db.mongodb.services.IkanowV1SyncService_LibraryJars.java
License:Apache License
protected static void copyFile(final String binary_id, final String path, final IStorageService aleph2_fs,
        final GridFS share_fs) throws IOException {
    try (ByteArrayOutputStream out = new ByteArrayOutputStream()) {
        final GridFSDBFile file = share_fs.find(new ObjectId(binary_id));
        file.writeTo(out);

        final FileContext fs = aleph2_fs.getUnderlyingPlatformDriver(FileContext.class, Optional.empty()).get();
        final Path file_path = fs.makeQualified(new Path(path));
        try (FSDataOutputStream outer = fs.create(file_path,
                EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
                org.apache.hadoop.fs.Options.CreateOpts.createParent())) {
            outer.write(out.toByteArray());
        }
    }
}
From source file:com.ikanow.aleph2.management_db.services.DataBucketCrudService.java
License:Apache License
/** Utility to add ".DELETED" to the designated bucket
 * @param to_delete
 * @param storage_service
 * @throws Exception
 */
protected static void deleteFilePath(final DataBucketBean to_delete, final IStorageService storage_service)
        throws Exception {
    final FileContext dfs = storage_service.getUnderlyingPlatformDriver(FileContext.class, Optional.empty())
            .get();

    final String bucket_root = storage_service.getBucketRootPath() + "/" + to_delete.full_name()
            + IStorageService.BUCKET_SUFFIX;

    try (final FSDataOutputStream out = dfs.create(new Path(bucket_root + "/" + DELETE_TOUCH_FILE),
            EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE))) {
    } //(ie close after creating)
}
From source file:com.ikanow.aleph2.storage_service_hdfs.services.TestMockHdfsStorageSystem.java
License:Apache License
@Test
public void test_switching_secondaryBuffers() throws AccessControlException, FileAlreadyExistsException,
        FileNotFoundException, ParentNotDirectoryException, UnsupportedFileSystemException,
        IllegalArgumentException, IOException, InterruptedException, ExecutionException {
    // 0) Setup
    final String temp_dir = System.getProperty("java.io.tmpdir") + File.separator;

    final GlobalPropertiesBean globals = BeanTemplateUtils.build(GlobalPropertiesBean.class)
            .with(GlobalPropertiesBean::local_yarn_config_dir, temp_dir)
            .with(GlobalPropertiesBean::distributed_root_dir, temp_dir)
            .with(GlobalPropertiesBean::local_root_dir, temp_dir)
            .with(GlobalPropertiesBean::distributed_root_dir, temp_dir).done().get();

    final MockHdfsStorageService storage_service = new MockHdfsStorageService(globals);

    // Some buckets
    final DataBucketBean bucket = BeanTemplateUtils.build(DataBucketBean.class)
            .with(DataBucketBean::full_name, "/test/storage/bucket")
            .with(DataBucketBean::data_schema, BeanTemplateUtils.build(DataSchemaBean.class).done().get())
            .done().get();

    setup_bucket(storage_service, bucket, Collections.emptyList());

    final FileContext dfs = storage_service.getUnderlyingPlatformDriver(FileContext.class, Optional.empty())
            .get();
    final String bucket_root = storage_service.getBucketRootPath() + "/" + bucket.full_name();
    dfs.mkdir(new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_RAW + "test_exdir"),
            FsPermission.getDirDefault(), true);
    dfs.mkdir(new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_RAW_SECONDARY + "test1"),
            FsPermission.getDirDefault(), true);
    //(skip the current dir once just to check it doesn't cause problems)
    dfs.mkdir(new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_JSON_SECONDARY + "test2"),
            FsPermission.getDirDefault(), true);
    dfs.create(new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_JSON_SECONDARY + "test2/test2.json"),
            EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)).close();
    dfs.mkdir(new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_JSON + "test_exdir"),
            FsPermission.getDirDefault(), true);
    dfs.mkdir(new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_PROCESSED_SECONDARY + "test3"),
            FsPermission.getDirDefault(), true);
    dfs.mkdir(new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_PROCESSED + "test_exdir"),
            FsPermission.getDirDefault(), true);

    // (retire the primary, copy test2 across)
    {
        BasicMessageBean res1 = storage_service.getDataService().get()
                .switchCrudServiceToPrimaryBuffer(bucket, Optional.of("test2"), Optional.empty()).get();
        System.out.println("(res1 = " + res1.message() + ")");
        assertTrue("Request returns: " + res1.message(), res1.success());
    }
    assertTrue(doesDirExist(dfs, new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_JSON)));
    assertTrue(doesFileExist(dfs,
            new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_JSON + "test2.json")));
    assertTrue(doesDirExist(dfs, new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_RAW)));
    assertTrue(doesDirExist(dfs, new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_PROCESSED)));
    assertFalse(doesDirExist(dfs,
            new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_JSON_SECONDARY + "test2")));
    assertFalse(doesDirExist(dfs,
            new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_RAW_SECONDARY + "test2")));
    assertFalse(doesDirExist(dfs,
            new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_PROCESSED_SECONDARY + "test2")));
    assertTrue(doesDirExist(dfs, new Path(
            bucket_root + IStorageService.STORED_DATA_SUFFIX_JSON_SECONDARY + "former_current/test_exdir")));
    assertTrue(doesDirExist(dfs, new Path(
            bucket_root + IStorageService.STORED_DATA_SUFFIX_RAW_SECONDARY + "former_current/test_exdir")));
    assertTrue(doesDirExist(dfs, new Path(bucket_root
            + IStorageService.STORED_DATA_SUFFIX_PROCESSED_SECONDARY + "former_current/test_exdir")));
    {
        BasicMessageBean res2 = storage_service.getDataService().get()
                .switchCrudServiceToPrimaryBuffer(bucket, Optional.of("test3"), Optional.of("ex_primary"))
                .get();
        System.out.println("(res2 = " + res2.message() + ")");
        assertTrue("Request returns: " + res2.message(), res2.success());
    }
    assertTrue(doesDirExist(dfs,
            new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_JSON_SECONDARY + "ex_primary")));
    assertTrue(doesDirExist(dfs,
            new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_RAW_SECONDARY + "ex_primary")));
    assertTrue(doesDirExist(dfs,
            new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_PROCESSED_SECONDARY + "ex_primary")));
    assertTrue(doesFileExist(dfs, new Path(
            bucket_root + IStorageService.STORED_DATA_SUFFIX_JSON_SECONDARY + "ex_primary/test2.json")));

    // return to the primary, delete the current
    {
        BasicMessageBean res3 = storage_service.getDataService().get()
                .switchCrudServiceToPrimaryBuffer(bucket, Optional.of("former_current"), Optional.of(""))
                .get();
        System.out.println("(res3 = " + res3.message() + ")");
        assertTrue("Request returns: " + res3.message(), res3.success());
        assertTrue(doesDirExist(dfs,
                new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_JSON + "/test_exdir")));
        assertTrue(doesDirExist(dfs,
                new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_RAW + "/test_exdir")));
        assertTrue(doesDirExist(dfs,
                new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_PROCESSED + "/test_exdir")));
    }
}
From source file:com.mellanox.r4h.DFSClient.java
License:Apache License
/**
 * Call {@link #create(String, FsPermission, EnumSet, short, long, Progressable, int, ChecksumOpt)}
 * with default <code>permission</code> {@link FsPermission#getFileDefault()}.
 *
 * @param src File name
 * @param overwrite overwrite an existing file if true
 * @param replication replication factor for the file
 * @param blockSize maximum block size
 * @param progress interface for reporting client progress
 * @param buffersize underlying buffersize
 *
 * @return output stream
 */
public OutputStream create(String src, boolean overwrite, short replication, long blockSize,
        Progressable progress, int buffersize) throws IOException {
    return create(src, FsPermission.getFileDefault(),
            overwrite ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE) : EnumSet.of(CreateFlag.CREATE),
            replication, blockSize, progress, buffersize, null);
}
From source file:com.mellanox.r4h.DFSClient.java
License:Apache License
/**
 * Append to an existing file if {@link CreateFlag#APPEND} is present
 */
private DFSOutputStream primitiveAppend(String src, EnumSet<CreateFlag> flag, int buffersize,
        Progressable progress) throws IOException {
    if (flag.contains(CreateFlag.APPEND)) {
        HdfsFileStatus stat = getFileInfo(src);
        if (stat == null) { // No file to append to
            // New file needs to be created if create option is present
            if (!flag.contains(CreateFlag.CREATE)) {
                throw new FileNotFoundException(
                        "failed to append to non-existent file " + src + " on client " + clientName);
            }
            return null;
        }
        return callAppend(src, buffersize, flag, progress, null);
    }
    return null;
}
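The create-or-append decision that primitiveAppend makes by hand above is also expressible directly as a flag combination: per the CreateFlag documentation, CREATE|APPEND means "create the file if it does not exist, else append to it". A minimal sketch of that combination through FileContext, reusing the fc and path placeholders from the introductory sketch:

// Assumes fc is an initialized FileContext and path a qualified Path, as in the intro sketch.
// CREATE|APPEND: create the file if absent, otherwise append, so repeated
// calls keep extending the same file.
try (FSDataOutputStream out = fc.create(path, EnumSet.of(CreateFlag.CREATE, CreateFlag.APPEND))) {
    out.writeBytes("another line\n");
}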
From source file:com.mellanox.r4h.DistributedFileSystem.java
License:Apache License
@Override
public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufferSize,
        short replication, long blockSize, Progressable progress) throws IOException {
    return this.create(f, permission,
            overwrite ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE) : EnumSet.of(CreateFlag.CREATE),
            bufferSize, replication, blockSize, progress, null);
}
From source file:com.mellanox.r4h.DistributedFileSystem.java
License:Apache License
/**
 * Same as {@link #create(Path, FsPermission, boolean, int, short, long, Progressable)} with the
 * addition of favoredNodes, a hint to the namenode about where to place the file blocks.
 * The favored nodes hint is not persisted in HDFS, so it may be honored at creation time only.
 * With favored nodes, blocks are pinned on the datanodes to prevent the balancer from moving
 * them, although HDFS may still move blocks away from favored nodes during replication.
 * A value of null means no favored nodes for this create.
 */
public HdfsDataOutputStream create(final Path f, final FsPermission permission, final boolean overwrite,
        final int bufferSize, final short replication, final long blockSize, final Progressable progress,
        final InetSocketAddress[] favoredNodes) throws IOException {
    statistics.incrementWriteOps(1);
    Path absF = fixRelativePart(f);
    return new FileSystemLinkResolver<HdfsDataOutputStream>() {
        @Override
        public HdfsDataOutputStream doCall(final Path p) throws IOException, UnresolvedLinkException {
            final DFSOutputStream out = dfs.create(getPathName(f), permission,
                    overwrite ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
                            : EnumSet.of(CreateFlag.CREATE),
                    true, replication, blockSize, progress, bufferSize, null, favoredNodes);
            return dfs.createWrappedOutputStream(out, statistics);
        }

        @Override
        public HdfsDataOutputStream next(final FileSystem fs, final Path p) throws IOException {
            if (fs instanceof DistributedFileSystem) {
                DistributedFileSystem myDfs = (DistributedFileSystem) fs;
                return myDfs.create(p, permission, overwrite, bufferSize, replication, blockSize, progress,
                        favoredNodes);
            }
            throw new UnsupportedOperationException("Cannot create with"
                    + " favoredNodes through a symlink to a non-DistributedFileSystem: " + f + " -> " + p);
        }
    }.resolve(this, absF);
}
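A hedged sketch of how a caller might invoke the favored-nodes overload above; fs stands for an already-initialized instance of this DistributedFileSystem class, and the host names, port, path, and sizes are illustrative placeholders only:

// Hypothetical usage: pin the new file's blocks to two specific datanodes.
InetSocketAddress[] favored = new InetSocketAddress[] {
        new InetSocketAddress("datanode1.example.com", 50010),
        new InetSocketAddress("datanode2.example.com", 50010) };
try (HdfsDataOutputStream out = fs.create(new Path("/data/pinned.bin"), FsPermission.getFileDefault(),
        true /* overwrite -> CREATE|OVERWRITE */, 4096, (short) 2, 128L * 1024 * 1024,
        null /* no progress callback */, favored)) {
    out.writeBytes("pinned payload");
}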
From source file:com.mellanox.r4h.DistributedFileSystem.java
License:Apache License
/**
 * Same as create(), except fails if parent directory doesn't already exist.
 */
@Override
@SuppressWarnings("deprecation")
public FSDataOutputStream createNonRecursive(final Path f, final FsPermission permission,
        final EnumSet<CreateFlag> flag, final int bufferSize, final short replication, final long blockSize,
        final Progressable progress) throws IOException {
    statistics.incrementWriteOps(1);
    if (flag.contains(CreateFlag.OVERWRITE)) {
        flag.add(CreateFlag.CREATE);
    }
    Path absF = fixRelativePart(f);
    return new FileSystemLinkResolver<FSDataOutputStream>() {
        @Override
        public FSDataOutputStream doCall(final Path p) throws IOException, UnresolvedLinkException {
            final DFSOutputStream dfsos = dfs.create(getPathName(p), permission, flag, false, replication,
                    blockSize, progress, bufferSize, null);
            return dfs.createWrappedOutputStream(dfsos, statistics);
        }

        @Override
        public FSDataOutputStream next(final FileSystem fs, final Path p) throws IOException {
            return fs.createNonRecursive(p, permission, flag, bufferSize, replication, blockSize, progress);
        }
    }.resolve(this, absF);
}
From source file:com.mellanox.r4h.TestWriteRead.java
License:Apache License
/**
 * Common routine to do position read while the file is open for write.
 * After each iteration of write, do a read of the file from begin to end.
 * Return 0 on success, else the number of failures.
 */
private int testWriteAndRead(String fname, int loopN, int chunkSize, long readBeginPosition)
        throws IOException {
    int countOfFailures = 0;
    long byteVisibleToRead = 0;
    FSDataOutputStream out = null;

    byte[] outBuffer = new byte[BUFFER_SIZE];
    byte[] inBuffer = new byte[BUFFER_SIZE];

    for (int i = 0; i < BUFFER_SIZE; i++) {
        outBuffer[i] = (byte) (i & 0x00ff);
    }

    try {
        Path path = getFullyQualifiedPath(fname);
        long fileLengthBeforeOpen = 0;

        if (ifExists(path)) {
            if (truncateOption) {
                out = useFCOption ? mfc.create(path, EnumSet.of(CreateFlag.OVERWRITE))
                        : mfs.create(path, truncateOption);
                LOG.info("File already exists. File open with Truncate mode: " + path);
            } else {
                out = useFCOption ? mfc.create(path, EnumSet.of(CreateFlag.APPEND)) : mfs.append(path);
                fileLengthBeforeOpen = getFileLengthFromNN(path);
                LOG.info("File already exists of size " + fileLengthBeforeOpen
                        + " File open for Append mode: " + path);
            }
        } else {
            out = useFCOption ? mfc.create(path, EnumSet.of(CreateFlag.CREATE)) : mfs.create(path);
        }

        long totalByteWritten = fileLengthBeforeOpen;
        long totalByteVisible = fileLengthBeforeOpen;
        long totalByteWrittenButNotVisible = 0;

        boolean toFlush;
        for (int i = 0; i < loopN; i++) {
            toFlush = (i % 2) == 0;

            writeData(out, outBuffer, chunkSize);

            totalByteWritten += chunkSize;

            if (toFlush) {
                out.hflush();
                totalByteVisible += chunkSize + totalByteWrittenButNotVisible;
                totalByteWrittenButNotVisible = 0;
            } else {
                totalByteWrittenButNotVisible += chunkSize;
            }

            if (verboseOption) {
                LOG.info("TestReadWrite - Written " + chunkSize + ". Total written = " + totalByteWritten
                        + ". TotalByteVisible = " + totalByteVisible + " to file " + fname);
            }
            byteVisibleToRead = readData(fname, inBuffer, totalByteVisible, readBeginPosition);

            String readmsg = "Written=" + totalByteWritten + " ; Expected Visible=" + totalByteVisible
                    + " ; Got Visible=" + byteVisibleToRead + " of file " + fname;

            if (byteVisibleToRead >= totalByteVisible && byteVisibleToRead <= totalByteWritten) {
                readmsg = "pass: reader sees expected number of visible byte. " + readmsg + " [pass]";
            } else {
                countOfFailures++;
                readmsg = "fail: reader sees different number of visible byte. " + readmsg + " [fail]";
                if (abortTestOnFailure) {
                    throw new IOException(readmsg);
                }
            }
            LOG.info(readmsg);
        }

        // test the automatic flush after close
        writeData(out, outBuffer, chunkSize);
        totalByteWritten += chunkSize;
        totalByteVisible += chunkSize + totalByteWrittenButNotVisible;
        totalByteWrittenButNotVisible += 0;

        out.close();

        byteVisibleToRead = readData(fname, inBuffer, totalByteVisible, readBeginPosition);

        String readmsg2 = "Written=" + totalByteWritten + " ; Expected Visible=" + totalByteVisible
                + " ; Got Visible=" + byteVisibleToRead + " of file " + fname;
        String readmsg;

        if (byteVisibleToRead >= totalByteVisible && byteVisibleToRead <= totalByteWritten) {
            readmsg = "pass: reader sees expected number of visible byte on close. " + readmsg2 + " [pass]";
        } else {
            countOfFailures++;
            readmsg = "fail: reader sees different number of visible byte on close. " + readmsg2 + " [fail]";
            LOG.info(readmsg);
            if (abortTestOnFailure)
                throw new IOException(readmsg);
        }

        // now check if NN got the same length
        long lenFromFc = getFileLengthFromNN(path);
        if (lenFromFc != byteVisibleToRead) {
            readmsg = "fail: reader sees different number of visible byte from NN " + readmsg2 + " [fail]";
            throw new IOException(readmsg);
        }
    } catch (IOException e) {
        throw new IOException("##### Caught Exception in testAppendWriteAndRead. Close file. "
                + "Total Byte Read so far = " + byteVisibleToRead, e);
    } finally {
        if (out != null)
            out.close();
    }
    return -countOfFailures;
}