List of usage examples for org.apache.hadoop.fs.CreateFlag.OVERWRITE
CreateFlag OVERWRITE
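The examples below share one pattern: CreateFlag.OVERWRITE is almost always paired with CreateFlag.CREATE, because OVERWRITE on its own is only valid for a file that already exists. A minimal sketch of that pattern (the path is hypothetical, and a Configuration pointing at a reachable filesystem is assumed):

    import java.util.EnumSet;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CreateFlag;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.Path;

    public class OverwriteExample {
        public static void main(String[] args) throws Exception {
            // Resolves to the default filesystem named in the Configuration
            final FileContext fc = FileContext.getFileContext(new Configuration());
            final Path path = new Path("/tmp/overwrite-example.txt"); // hypothetical path

            // CREATE: make the file if it is missing; OVERWRITE: truncate it if it exists.
            // Together the call succeeds either way, which most examples below rely on.
            try (FSDataOutputStream out = fc.create(path,
                    EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE))) {
                out.writeUTF("replaced on every run");
            }
        }
    }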
From source file:com.ikanow.aleph2.management_db.services.DataBucketCrudService.java
License:Apache License
/**
 * Utility to add ".DELETED" to the designated bucket
 * @param to_delete
 * @param storage_service
 * @throws Exception
 */
protected static void deleteFilePath(final DataBucketBean to_delete, final IStorageService storage_service)
        throws Exception {
    final FileContext dfs = storage_service.getUnderlyingPlatformDriver(FileContext.class, Optional.empty())
            .get();

    final String bucket_root = storage_service.getBucketRootPath() + "/" + to_delete.full_name()
            + IStorageService.BUCKET_SUFFIX;

    try (final FSDataOutputStream out = dfs.create(new Path(bucket_root + "/" + DELETE_TOUCH_FILE),
            EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE))) {
    } //(ie close after creating)
}
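The try-with-resources block is deliberately empty: the stream is opened and immediately closed, so the call does nothing except materialize an empty DELETE_TOUCH_FILE marker. Pairing CreateFlag.CREATE with CreateFlag.OVERWRITE makes the operation idempotent: the file is created if missing, and truncated back to empty if a previous run already left one behind.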
From source file:com.ikanow.aleph2.storage_service_hdfs.services.TestMockHdfsStorageSystem.java
License:Apache License
@Test
public void test_switching_secondaryBuffers() throws AccessControlException, FileAlreadyExistsException,
        FileNotFoundException, ParentNotDirectoryException, UnsupportedFileSystemException,
        IllegalArgumentException, IOException, InterruptedException, ExecutionException {
    // 0) Setup
    final String temp_dir = System.getProperty("java.io.tmpdir") + File.separator;

    final GlobalPropertiesBean globals = BeanTemplateUtils.build(GlobalPropertiesBean.class)
            .with(GlobalPropertiesBean::local_yarn_config_dir, temp_dir)
            .with(GlobalPropertiesBean::distributed_root_dir, temp_dir)
            .with(GlobalPropertiesBean::local_root_dir, temp_dir)
            .with(GlobalPropertiesBean::distributed_root_dir, temp_dir).done().get();

    final MockHdfsStorageService storage_service = new MockHdfsStorageService(globals);

    // Some buckets
    final DataBucketBean bucket = BeanTemplateUtils.build(DataBucketBean.class)
            .with(DataBucketBean::full_name, "/test/storage/bucket")
            .with(DataBucketBean::data_schema, BeanTemplateUtils.build(DataSchemaBean.class).done().get())
            .done().get();

    setup_bucket(storage_service, bucket, Collections.emptyList());

    final FileContext dfs = storage_service.getUnderlyingPlatformDriver(FileContext.class, Optional.empty())
            .get();
    final String bucket_root = storage_service.getBucketRootPath() + "/" + bucket.full_name();

    dfs.mkdir(new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_RAW + "test_exdir"),
            FsPermission.getDirDefault(), true);
    dfs.mkdir(new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_RAW_SECONDARY + "test1"),
            FsPermission.getDirDefault(), true);
    //(skip the current dir once just to check it doesn't cause problems)
    dfs.mkdir(new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_JSON_SECONDARY + "test2"),
            FsPermission.getDirDefault(), true);
    dfs.create(new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_JSON_SECONDARY + "test2/test2.json"),
            EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)).close();
    dfs.mkdir(new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_JSON + "test_exdir"),
            FsPermission.getDirDefault(), true);
    dfs.mkdir(new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_PROCESSED_SECONDARY + "test3"),
            FsPermission.getDirDefault(), true);
    dfs.mkdir(new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_PROCESSED + "test_exdir"),
            FsPermission.getDirDefault(), true);

    // (retire the primary, copy test2 across)
    {
        BasicMessageBean res1 = storage_service.getDataService().get()
                .switchCrudServiceToPrimaryBuffer(bucket, Optional.of("test2"), Optional.empty()).get();
        System.out.println("(res1 = " + res1.message() + ")");
        assertTrue("Request returns: " + res1.message(), res1.success());
    }
    assertTrue(doesDirExist(dfs, new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_JSON)));
    assertTrue(doesFileExist(dfs,
            new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_JSON + "test2.json")));
    assertTrue(doesDirExist(dfs, new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_RAW)));
    assertTrue(doesDirExist(dfs, new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_PROCESSED)));
    assertFalse(doesDirExist(dfs,
            new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_JSON_SECONDARY + "test2")));
    assertFalse(doesDirExist(dfs,
            new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_RAW_SECONDARY + "test2")));
    assertFalse(doesDirExist(dfs,
            new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_PROCESSED_SECONDARY + "test2")));
    assertTrue(doesDirExist(dfs, new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_JSON_SECONDARY
            + "former_current/test_exdir")));
    assertTrue(doesDirExist(dfs, new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_RAW_SECONDARY
            + "former_current/test_exdir")));
    assertTrue(doesDirExist(dfs, new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_PROCESSED_SECONDARY
            + "former_current/test_exdir")));

    {
        BasicMessageBean res2 = storage_service.getDataService().get()
                .switchCrudServiceToPrimaryBuffer(bucket, Optional.of("test3"), Optional.of("ex_primary"))
                .get();
        System.out.println("(res2 = " + res2.message() + ")");
        assertTrue("Request returns: " + res2.message(), res2.success());
    }
    assertTrue(doesDirExist(dfs,
            new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_JSON_SECONDARY + "ex_primary")));
    assertTrue(doesDirExist(dfs,
            new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_RAW_SECONDARY + "ex_primary")));
    assertTrue(doesDirExist(dfs,
            new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_PROCESSED_SECONDARY + "ex_primary")));
    assertTrue(doesFileExist(dfs, new Path(
            bucket_root + IStorageService.STORED_DATA_SUFFIX_JSON_SECONDARY + "ex_primary/test2.json")));

    // return to the primary, delete the current
    {
        BasicMessageBean res3 = storage_service.getDataService().get()
                .switchCrudServiceToPrimaryBuffer(bucket, Optional.of("former_current"), Optional.of(""))
                .get();
        System.out.println("(res3 = " + res3.message() + ")");
        assertTrue("Request returns: " + res3.message(), res3.success());
        assertTrue(doesDirExist(dfs,
                new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_JSON + "/test_exdir")));
        assertTrue(doesDirExist(dfs,
                new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_RAW + "/test_exdir")));
        assertTrue(doesDirExist(dfs,
                new Path(bucket_root + IStorageService.STORED_DATA_SUFFIX_PROCESSED + "/test_exdir")));
    }
}
From source file:com.mellanox.r4h.DFSClient.java
License:Apache License
/**
 * Call {@link #create(String, FsPermission, EnumSet, short, long, Progressable, int, ChecksumOpt)} with default
 * <code>permission</code> {@link FsPermission#getFileDefault()}.
 *
 * @param src
 *            File name
 * @param overwrite
 *            overwrite an existing file if true
 * @param replication
 *            replication factor for the file
 * @param blockSize
 *            maximum block size
 * @param progress
 *            interface for reporting client progress
 * @param buffersize
 *            underlying buffersize
 *
 * @return output stream
 */
public OutputStream create(String src, boolean overwrite, short replication, long blockSize,
        Progressable progress, int buffersize) throws IOException {
    return create(src, FsPermission.getFileDefault(),
            overwrite ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE) : EnumSet.of(CreateFlag.CREATE),
            replication, blockSize, progress, buffersize, null);
}
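This overload shows the standard translation from the legacy boolean overwrite parameter to the flag-based API: true becomes EnumSet.of(CREATE, OVERWRITE), false becomes EnumSet.of(CREATE) alone, in which case the create fails if the file already exists. The same mapping recurs in the DistributedFileSystem examples below.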
From source file:com.mellanox.r4h.DistributedFileSystem.java
License:Apache License
@Override
public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufferSize,
        short replication, long blockSize, Progressable progress) throws IOException {
    return this.create(f, permission,
            overwrite ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE) : EnumSet.of(CreateFlag.CREATE),
            bufferSize, replication, blockSize, progress, null);
}
From source file:com.mellanox.r4h.DistributedFileSystem.java
License:Apache License
/**
 * Same as {@link #create(Path, FsPermission, boolean, int, short, long, Progressable)} with the addition of
 * favoredNodes that is a hint to where the namenode should place the file blocks.
 * The favored nodes hint is not persisted in HDFS. Hence it may be honored
 * at the creation time only. And with favored nodes, blocks will be pinned
 * on the datanodes to prevent balancing move the block. HDFS could move the
 * blocks during replication, to move the blocks from favored nodes. A value
 * of null means no favored nodes for this create
 */
public HdfsDataOutputStream create(final Path f, final FsPermission permission, final boolean overwrite,
        final int bufferSize, final short replication, final long blockSize, final Progressable progress,
        final InetSocketAddress[] favoredNodes) throws IOException {
    statistics.incrementWriteOps(1);
    Path absF = fixRelativePart(f);
    return new FileSystemLinkResolver<HdfsDataOutputStream>() {
        @Override
        public HdfsDataOutputStream doCall(final Path p) throws IOException, UnresolvedLinkException {
            final DFSOutputStream out = dfs.create(getPathName(f), permission,
                    overwrite ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
                            : EnumSet.of(CreateFlag.CREATE),
                    true, replication, blockSize, progress, bufferSize, null, favoredNodes);
            return dfs.createWrappedOutputStream(out, statistics);
        }

        @Override
        public HdfsDataOutputStream next(final FileSystem fs, final Path p) throws IOException {
            if (fs instanceof DistributedFileSystem) {
                DistributedFileSystem myDfs = (DistributedFileSystem) fs;
                return myDfs.create(p, permission, overwrite, bufferSize, replication, blockSize, progress,
                        favoredNodes);
            }
            throw new UnsupportedOperationException("Cannot create with"
                    + " favoredNodes through a symlink to a non-DistributedFileSystem: " + f + " -> " + p);
        }
    }.resolve(this, absF);
}
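A brief usage sketch of the favored-nodes overload above; the hostnames, port, path, and sizes are hypothetical, and an already-initialized com.mellanox.r4h.DistributedFileSystem instance 'fs' is assumed:

    InetSocketAddress[] favored = new InetSocketAddress[] {
            new InetSocketAddress("datanode1.example.com", 50010), // hypothetical datanodes
            new InetSocketAddress("datanode2.example.com", 50010)
    };
    // overwrite=true maps to EnumSet.of(CREATE, OVERWRITE) inside the method
    try (HdfsDataOutputStream out = fs.create(new Path("/data/pinned.bin"), FsPermission.getFileDefault(),
            true, 4096, (short) 3, 134217728L, null, favored)) {
        out.writeBytes("block placement favors the listed datanodes");
    }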
From source file:com.mellanox.r4h.DistributedFileSystem.java
License:Apache License
/**
 * Same as create(), except fails if parent directory doesn't already exist.
 */
@Override
@SuppressWarnings("deprecation")
public FSDataOutputStream createNonRecursive(final Path f, final FsPermission permission,
        final EnumSet<CreateFlag> flag, final int bufferSize, final short replication, final long blockSize,
        final Progressable progress) throws IOException {
    statistics.incrementWriteOps(1);
    if (flag.contains(CreateFlag.OVERWRITE)) {
        flag.add(CreateFlag.CREATE);
    }
    Path absF = fixRelativePart(f);
    return new FileSystemLinkResolver<FSDataOutputStream>() {
        @Override
        public FSDataOutputStream doCall(final Path p) throws IOException, UnresolvedLinkException {
            final DFSOutputStream dfsos = dfs.create(getPathName(p), permission, flag, false, replication,
                    blockSize, progress, bufferSize, null);
            return dfs.createWrappedOutputStream(dfsos, statistics);
        }

        @Override
        public FSDataOutputStream next(final FileSystem fs, final Path p) throws IOException {
            return fs.createNonRecursive(p, permission, flag, bufferSize, replication, blockSize, progress);
        }
    }.resolve(this, absF);
}
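The flag.add(CreateFlag.CREATE) widening is worth noting: OVERWRITE on its own is only valid for a file that already exists, so the method promotes an OVERWRITE request to CREATE|OVERWRITE to keep createNonRecursive from failing when the target file is missing. The parent directory, per the javadoc, must still exist, because the underlying create is issued with createParent=false.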
From source file:com.mellanox.r4h.TestWriteRead.java
License:Apache License
/**
 * Common routine to do position read while open the file for write.
 * After each iteration of write, do a read of the file from begin to end.
 * Return 0 on success, else number of failure.
 */
private int testWriteAndRead(String fname, int loopN, int chunkSize, long readBeginPosition)
        throws IOException {
    int countOfFailures = 0;
    long byteVisibleToRead = 0;
    FSDataOutputStream out = null;

    byte[] outBuffer = new byte[BUFFER_SIZE];
    byte[] inBuffer = new byte[BUFFER_SIZE];

    for (int i = 0; i < BUFFER_SIZE; i++) {
        outBuffer[i] = (byte) (i & 0x00ff);
    }

    try {
        Path path = getFullyQualifiedPath(fname);
        long fileLengthBeforeOpen = 0;

        if (ifExists(path)) {
            if (truncateOption) {
                out = useFCOption ? mfc.create(path, EnumSet.of(CreateFlag.OVERWRITE))
                        : mfs.create(path, truncateOption);
                LOG.info("File already exists. File open with Truncate mode: " + path);
            } else {
                out = useFCOption ? mfc.create(path, EnumSet.of(CreateFlag.APPEND)) : mfs.append(path);
                fileLengthBeforeOpen = getFileLengthFromNN(path);
                LOG.info("File already exists of size " + fileLengthBeforeOpen
                        + " File open for Append mode: " + path);
            }
        } else {
            out = useFCOption ? mfc.create(path, EnumSet.of(CreateFlag.CREATE)) : mfs.create(path);
        }

        long totalByteWritten = fileLengthBeforeOpen;
        long totalByteVisible = fileLengthBeforeOpen;
        long totalByteWrittenButNotVisible = 0;

        boolean toFlush;
        for (int i = 0; i < loopN; i++) {
            toFlush = (i % 2) == 0;

            writeData(out, outBuffer, chunkSize);

            totalByteWritten += chunkSize;

            if (toFlush) {
                out.hflush();
                totalByteVisible += chunkSize + totalByteWrittenButNotVisible;
                totalByteWrittenButNotVisible = 0;
            } else {
                totalByteWrittenButNotVisible += chunkSize;
            }

            if (verboseOption) {
                LOG.info("TestReadWrite - Written " + chunkSize + ". Total written = " + totalByteWritten
                        + ". TotalByteVisible = " + totalByteVisible + " to file " + fname);
            }

            byteVisibleToRead = readData(fname, inBuffer, totalByteVisible, readBeginPosition);

            String readmsg = "Written=" + totalByteWritten + " ; Expected Visible=" + totalByteVisible
                    + " ; Got Visible=" + byteVisibleToRead + " of file " + fname;

            if (byteVisibleToRead >= totalByteVisible && byteVisibleToRead <= totalByteWritten) {
                readmsg = "pass: reader sees expected number of visible byte. " + readmsg + " [pass]";
            } else {
                countOfFailures++;
                readmsg = "fail: reader see different number of visible byte. " + readmsg + " [fail]";
                if (abortTestOnFailure) {
                    throw new IOException(readmsg);
                }
            }
            LOG.info(readmsg);
        }

        // test the automatic flush after close
        writeData(out, outBuffer, chunkSize);
        totalByteWritten += chunkSize;
        totalByteVisible += chunkSize + totalByteWrittenButNotVisible;
        totalByteWrittenButNotVisible += 0;

        out.close();

        byteVisibleToRead = readData(fname, inBuffer, totalByteVisible, readBeginPosition);

        String readmsg2 = "Written=" + totalByteWritten + " ; Expected Visible=" + totalByteVisible
                + " ; Got Visible=" + byteVisibleToRead + " of file " + fname;
        String readmsg;

        if (byteVisibleToRead >= totalByteVisible && byteVisibleToRead <= totalByteWritten) {
            readmsg = "pass: reader sees expected number of visible byte on close. " + readmsg2 + " [pass]";
        } else {
            countOfFailures++;
            readmsg = "fail: reader sees different number of visible byte on close. " + readmsg2 + " [fail]";
            LOG.info(readmsg);
            if (abortTestOnFailure)
                throw new IOException(readmsg);
        }

        // now check if NN got the same length
        long lenFromFc = getFileLengthFromNN(path);
        if (lenFromFc != byteVisibleToRead) {
            readmsg = "fail: reader sees different number of visible byte from NN " + readmsg2 + " [fail]";
            throw new IOException(readmsg);
        }
    } catch (IOException e) {
        throw new IOException("##### Caught Exception in testAppendWriteAndRead. Close file. "
                + "Total Byte Read so far = " + byteVisibleToRead, e);
    } finally {
        if (out != null)
            out.close();
    }
    return -countOfFailures;
}
From source file:com.quantcast.qfs.hadoop.Qfs.java
License:Apache License
@Override
public FSDataOutputStream createInternal(Path path, EnumSet<CreateFlag> createFlag,
        FsPermission absolutePermission, int bufferSize, short replication, long blockSize,
        Progressable progress, ChecksumOpt checksumOpt, boolean createParent) throws IOException {
    CreateFlag.validate(createFlag);
    checkPath(path);
    if (createParent) {
        mkdir(path.getParent(), absolutePermission, createParent);
    }
    return qfsImpl.create(getUriPath(path), replication, bufferSize,
            createFlag.contains(CreateFlag.OVERWRITE), absolutePermission.toShort(),
            createFlag.contains(CreateFlag.APPEND));
}
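QFS shows the receiving side of the flag-based API: after CreateFlag.validate rejects inconsistent combinations (such as OVERWRITE together with APPEND), the EnumSet is decomposed back into plain booleans via createFlag.contains(CreateFlag.OVERWRITE) and createFlag.contains(CreateFlag.APPEND) for the underlying native client.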
From source file:com.uber.hoodie.common.file.HoodieAppendLog.java
License:Apache License
/**
 * Construct the preferred type of SequenceFile Writer.
 * @param fs The configured filesystem.
 * @param conf The configuration.
 * @param name The name of the file.
 * @param keyClass The 'key' type.
 * @param valClass The 'value' type.
 * @param bufferSize buffer size for the underlying outputstream.
 * @param replication replication factor for the file.
 * @param blockSize block size for the file.
 * @param createParent create parent directory if non-existent
 * @param compressionType The compression type.
 * @param codec The compression codec.
 * @param metadata The metadata of the file.
 * @return Returns the handle to the constructed SequenceFile Writer.
 * @throws IOException
 */
@Deprecated
public static Writer createWriter(FileSystem fs, Configuration conf, Path name, Class keyClass, Class valClass,
        int bufferSize, short replication, long blockSize, boolean createParent,
        CompressionType compressionType, CompressionCodec codec, Metadata metadata) throws IOException {
    return createWriter(FileContext.getFileContext(fs.getUri(), conf), conf, name, keyClass, valClass,
            compressionType, codec, metadata, EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
            CreateOpts.bufferSize(bufferSize),
            createParent ? CreateOpts.createParent() : CreateOpts.donotCreateParent(),
            CreateOpts.repFac(replication), CreateOpts.blockSize(blockSize));
}
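Here the flag set is hard-coded to CREATE|OVERWRITE, so constructing a writer for an existing log file silently truncates it, while everything else (buffer size, replication, block size, parent creation) travels as Options.CreateOpts varargs on the FileContext-based overload.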
From source file:ldbc.snb.datagen.serializer.UpdateEventSerializer.java
License:Open Source License
public UpdateEventSerializer(Configuration conf, String fileNamePrefix, int reducerId, int numPartitions)
        throws IOException {
    conf_ = conf;
    reducerId_ = reducerId;
    stringBuffer_ = new StringBuffer(512);
    data_ = new ArrayList<String>();
    list_ = new ArrayList<String>();
    currentEvent_ = new UpdateEvent(-1, -1, UpdateEvent.UpdateEventType.NO_EVENT, new String(""));
    numPartitions_ = numPartitions;
    stats_ = new UpdateStreamStats();
    fileNamePrefix_ = fileNamePrefix;
    try {
        streamWriter_ = new SequenceFile.Writer[numPartitions_];
        FileContext fc = FileContext.getFileContext(conf);
        for (int i = 0; i < numPartitions_; ++i) {
            Path outFile = new Path(fileNamePrefix_ + "_" + i);
            streamWriter_[i] = SequenceFile.createWriter(fc, conf, outFile, UpdateEventKey.class, Text.class,
                    CompressionType.NONE, new DefaultCodec(), new SequenceFile.Metadata(),
                    EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
                    Options.CreateOpts.checksumParam(Options.ChecksumOpt.createDisabled()));
            FileSystem fs = FileSystem.get(conf);
            Path propertiesFile = new Path(fileNamePrefix_ + ".properties");
            if (fs.exists(propertiesFile)) {
                FSDataInputStream file = fs.open(propertiesFile);
                Properties properties = new Properties();
                properties.load(file);
                stats_.minDate_ = Long
                        .parseLong(properties.getProperty("ldbc.snb.interactive.min_write_event_start_time"));
                stats_.maxDate_ = Long
                        .parseLong(properties.getProperty("ldbc.snb.interactive.max_write_event_start_time"));
                stats_.count_ = Long.parseLong(properties.getProperty("ldbc.snb.interactive.num_events"));
                file.close();
                fs.delete(propertiesFile, true);
            }
        }
    } catch (IOException e) {
        throw e;
    }
}
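Each partition's SequenceFile writer is opened with CREATE|OVERWRITE, so re-running the serializer with the same fileNamePrefix replaces any partial update streams left by an earlier run; Options.ChecksumOpt.createDisabled() additionally turns off checksums for these output files.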