Example usage for org.apache.hadoop.fs CreateFlag OVERWRITE

List of usage examples for org.apache.hadoop.fs CreateFlag OVERWRITE

Introduction

This page collects usage examples for org.apache.hadoop.fs CreateFlag OVERWRITE.

Prototype

CreateFlag OVERWRITE

Document

Truncate/overwrite a file.
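
Before the project examples below, here is a minimal self-contained sketch of how OVERWRITE is typically combined with CREATE when writing through FileContext (the path and configuration are illustrative, not taken from any of the examples):

import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;

public class OverwriteExample {
    public static void main(String[] args) throws Exception {
        FileContext fc = FileContext.getFileContext(new Configuration());
        // CREATE: create the file if it does not exist.
        // OVERWRITE: truncate the file if it already exists.
        Path target = new Path("/tmp/overwrite-example.txt"); // illustrative path
        try (FSDataOutputStream out = fc.create(target,
                EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
                Options.CreateOpts.createParent())) {
            out.writeUTF("replaced contents");
        }
    }
}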

Usage

From source file:com.aliyun.fs.oss.nat.NativeOssFileSystem.java

License:Apache License

@Override
public FSDataOutputStream createNonRecursive(Path path, FsPermission permission, EnumSet<CreateFlag> flags,
        int bufferSize, short replication, long blockSize, Progressable progress) throws IOException {
    Path parent = path.getParent();
    if (parent != null) {
        // expect this to raise an exception if there is no parent
        if (!getFileStatus(parent).isDirectory()) {
            throw new FileAlreadyExistsException("Not a directory: " + parent);
        }
    }
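    // Delegate to create(); the boolean overwrite argument is true only if the caller passed CreateFlag.OVERWRITE.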
    return create(path, permission, flags.contains(CreateFlag.OVERWRITE), bufferSize, replication, blockSize,
            progress);
}

From source file:com.datatorrent.common.util.AsyncFSStorageAgent.java

License:Apache License

public void copyToHDFS(final int operatorId, final long windowId) throws IOException {
    if (this.localBasePath == null) {
        throw new AssertionError("save() was not called before copyToHDFS");
    }
    String operatorIdStr = String.valueOf(operatorId);
    File directory = new File(localBasePath, operatorIdStr);
    String window = Long.toHexString(windowId);
    Path lPath = new Path(path + Path.SEPARATOR + operatorIdStr + Path.SEPARATOR + TMP_FILE);
    File srcFile = new File(directory, String.valueOf(windowId));
    FSDataOutputStream stream = null;
    boolean stateSaved = false;
    try {
        // Create the temporary file with the OVERWRITE flag to avoid a dangling-lease issue and an exception if the file already exists
        stream = fileContext.create(lPath, EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
                Options.CreateOpts.CreateParent.createParent());
        InputStream in = null;
        try {
            in = new FileInputStream(srcFile);
            IOUtils.copyBytes(in, stream, conf, false);
        } finally {
            IOUtils.closeStream(in);
        }
        stateSaved = true;
    } catch (Throwable t) {
        logger.debug("while saving {} {}", operatorId, window, t);
        stateSaved = false;
        throw Throwables.propagate(t);
    } finally {
        try {
            if (stream != null) {
                stream.close();
            }
        } catch (IOException ie) {
            stateSaved = false;
            throw new RuntimeException(ie);
        } finally {
            if (stateSaved) {
                fileContext.rename(lPath,
                        new Path(path + Path.SEPARATOR + operatorIdStr + Path.SEPARATOR + window),
                        Options.Rename.OVERWRITE);
            }
            FileUtil.fullyDelete(srcFile);
        }
    }
}

From source file:com.datatorrent.common.util.FSStorageAgent.java

License:Apache License

@SuppressWarnings("ThrowFromFinallyBlock")
@Override
public void save(Object object, int operatorId, long windowId) throws IOException {
    String operatorIdStr = String.valueOf(operatorId);
    Path lPath = new Path(path + Path.SEPARATOR + operatorIdStr + Path.SEPARATOR + TMP_FILE);
    String window = Long.toHexString(windowId);
    boolean stateSaved = false;
    FSDataOutputStream stream = null;
    try {
        stream = fileContext.create(lPath, EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
                Options.CreateOpts.CreateParent.createParent());
        store(stream, object);
        stateSaved = true;
    } catch (Throwable t) {
        logger.debug("while saving {} {}", operatorId, window, t);
        stateSaved = false;
        DTThrowable.rethrow(t);
    } finally {
        try {
            if (stream != null) {
                stream.close();
            }
        } catch (IOException ie) {
            stateSaved = false;
            throw new RuntimeException(ie);
        } finally {
            if (stateSaved) {
                logger.debug("Saving {}: {}", operatorId, window);
                fileContext.rename(lPath,
                        new Path(path + Path.SEPARATOR + operatorIdStr + Path.SEPARATOR + window),
                        Options.Rename.OVERWRITE);
            }
        }
    }
}

From source file:com.datatorrent.stram.FSRecoveryHandler.java

License:Apache License

@Override
public Object restore() throws IOException {
    FileContext fc = FileContext.getFileContext(fs.getUri());

    // recover from wherever it was left
    if (fc.util().exists(snapshotBackupPath)) {
        LOG.warn("Incomplete checkpoint, reverting to {}", snapshotBackupPath);
        fc.rename(snapshotBackupPath, snapshotPath, Rename.OVERWRITE);

        // combine logs (w/o append, create new file)
        Path tmpLogPath = new Path(basedir, "log.combined");
        FSDataOutputStream fsOut = fc.create(tmpLogPath, EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE));
        try {
            FSDataInputStream fsIn = fc.open(logBackupPath);
            try {
                IOUtils.copy(fsIn, fsOut);
            } finally {
                fsIn.close();
            }

            fsIn = fc.open(logPath);
            try {
                IOUtils.copy(fsIn, fsOut);
            } finally {
                fsIn.close();
            }
        } finally {
            fsOut.close();
        }

        fc.rename(tmpLogPath, logPath, Rename.OVERWRITE);
        fc.delete(logBackupPath, false);
    } else {
        // we have log backup, but no checkpoint backup
        // failure between log rotation and writing checkpoint
        if (fc.util().exists(logBackupPath)) {
            LOG.warn("Found {}, did checkpointing fail?", logBackupPath);
            fc.rename(logBackupPath, logPath, Rename.OVERWRITE);
        }
    }

    if (!fc.util().exists(snapshotPath)) {
        LOG.debug("No existing checkpoint.");
        return null;
    }

    LOG.debug("Reading checkpoint {}", snapshotPath);
    InputStream is = fc.open(snapshotPath);
    // nondeterministic class loading behavior
    // http://stackoverflow.com/questions/9110677/readresolve-not-working-an-instance-of-guavas-serializedform-appears
    final ClassLoader loader = Thread.currentThread().getContextClassLoader();
    ObjectInputStream ois = new ObjectInputStream(is) {
        @Override
        protected Class<?> resolveClass(ObjectStreamClass objectStreamClass)
                throws IOException, ClassNotFoundException {
            return Class.forName(objectStreamClass.getName(), true, loader);
        }
    };
    //ObjectInputStream ois = new ObjectInputStream(is);
    try {
        return ois.readObject();
    } catch (ClassNotFoundException cnfe) {
        throw new IOException("Failed to read checkpointed state", cnfe);
    } finally {
        ois.close();
    }
}

From source file:com.ikanow.aleph2.aleph2_rest_utils.FileUtils.java

License:Apache License

public static void writeFile(final FileContext fileContext, final InputStream input, final String path)
        throws AccessControlException, FileAlreadyExistsException, FileNotFoundException,
        ParentNotDirectoryException, UnsupportedFileSystemException, IOException {
    final Path p = new Path(path);
    try (FSDataOutputStream outer = fileContext.create(p, EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
            org.apache.hadoop.fs.Options.CreateOpts.createParent())) {
        IOUtils.copyLarge(input, outer, new byte[DEFAULT_BUFFER_SIZE]);
    }
}

From source file:com.ikanow.aleph2.core.shared.utils.DirUtils.java

License:Apache License

/** Creates a text file in the storage service
 * @param fileContext
 * @param fileNameString
 * @param sb
 */
public static void createUTF8File(FileContext fileContext, String fileNameString, StringBuffer sb) {
    if (fileContext != null && fileNameString != null) {
        try {
            Path f = new Path(fileNameString);
            FSDataOutputStream out = fileContext.create(f, EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE));
            // Encode the StringBuffer contents as UTF-8 and write them to the output stream.
            Charset charset = StandardCharsets.UTF_8;
            CharsetEncoder encoder = charset.newEncoder();

            // Wraps the resulting string; the encoder reads the characters from the CharBuffer.
            CharBuffer buffer = CharBuffer.wrap(sb.toString());

            // Write only the encoded bytes; the buffer's backing array may be longer than the encoded content.
            ByteBuffer bytes = encoder.encode(buffer);
            out.write(bytes.array(), bytes.arrayOffset() + bytes.position(), bytes.remaining());
            out.close();

        } catch (Exception e) {
            logger.error("createFolderStructure Caught Exception", e);
        }
    }

}

From source file:com.ikanow.aleph2.core.shared.utils.TestJarCacheUtils.java

License:Apache License

@Before
public void setUpDependencies()
        throws AccessControlException, FileAlreadyExistsException, FileNotFoundException,
        ParentNotDirectoryException, UnsupportedFileSystemException, IllegalArgumentException, IOException {

    final String temp_dir = System.getProperty("java.io.tmpdir") + File.separator;

    _globals = new GlobalPropertiesBean(temp_dir, temp_dir, temp_dir, temp_dir);

    _mock_hdfs = new MockStorageService(_globals);

    // Create a pretend "remote" file

    _test_file_path = temp_dir + "/test.jar";
    final Path test_file_path = new Path(_test_file_path);

    final FileContext fs = _mock_hdfs.getUnderlyingPlatformDriver(FileContext.class, Optional.empty()).get();
    fs.create(test_file_path, EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE));

    _test_file_time = fs.getFileStatus(test_file_path).getModificationTime();
}

From source file:com.ikanow.aleph2.core.shared.utils.TestJarCacheUtils.java

License:Apache License

@Test
public void test_localFilePresentButOld()
        throws InterruptedException, ExecutionException, AccessControlException, FileAlreadyExistsException,
        FileNotFoundException, ParentNotDirectoryException, IOException {

    final FileContext localfs = FileContext.getLocalFSFileContext(new Configuration());

    String java_name = _globals.local_cached_jar_dir() + File.separator + "testX.cache.jar";
    final String expected_cache_name = java_name.replace(File.separator, "/");
    final Path expected_cache_path = localfs.makeQualified(new Path(expected_cache_name));

    // Just make sure we've deleted the old file
    try {
        System.out.println("Deleted: " + new File(java_name).delete());
    } catch (Exception e) {
        fail("Misc Error: " + e);
    }

    assertTrue("Remote file exists", new File(_test_file_path).exists());
    assertFalse("Local file doesn't exist: " + java_name, new File(java_name).exists());

    // Now create the file

    localfs.create(expected_cache_path, EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE));

    localfs.setTimes(expected_cache_path, 0L, 0L);

    // check something has happened:
    assertEquals(0L, localfs.getFileStatus(expected_cache_path).getModificationTime());
    assertNotEquals(0L, _test_file_time);

    // Now run the test routine

    final SharedLibraryBean library_bean = BeanTemplateUtils.build(SharedLibraryBean.class)
            .with(SharedLibraryBean::path_name, _test_file_path).with(SharedLibraryBean::_id, "testX").done()
            .get();

    final Validation<BasicMessageBean, String> ret_val_1 = JarCacheUtils.getCachedJar(
            _globals.local_cached_jar_dir(), library_bean, _mock_hdfs, "testX", new TestMessageBean()).get();

    assertEquals(expected_cache_path.toString(), ret_val_1.success());

    assertTrue("Local file still exists", new File(expected_cache_name).exists());

    assertTrue("File time should have been updated",
            localfs.getFileStatus(expected_cache_path).getModificationTime() >= _test_file_time);
}

From source file:com.ikanow.aleph2.core.shared.utils.TestJarCacheUtils.java

License:Apache License

@Test
public void test_localFilePresentAndNew()
        throws InterruptedException, ExecutionException, AccessControlException, FileAlreadyExistsException,
        FileNotFoundException, ParentNotDirectoryException, IOException {

    final FileContext localfs = FileContext.getLocalFSFileContext(new Configuration());

    final String expected_cache_name = _globals.local_cached_jar_dir().replace(File.separator, "/")
            + "test1.cache.jar";
    final Path expected_cache_path = localfs.makeQualified(new Path(expected_cache_name));

    // Just make sure we've deleted the old file
    try {
        new File(expected_cache_name).delete();
    } catch (Exception e) {
    }

    assertTrue("Remote file exists", new File(_test_file_path).exists());
    assertFalse("Local file doesn't exist", new File(expected_cache_name).exists());

    // Now create the file

    localfs.create(expected_cache_path, EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE));

    localfs.setTimes(expected_cache_path, _test_file_time + 10000, _test_file_time + 10000);

    // check something has happened:
    assertEquals(_test_file_time + 10000, localfs.getFileStatus(expected_cache_path).getModificationTime());

    // Now run the test routine

    final SharedLibraryBean library_bean = BeanTemplateUtils.build(SharedLibraryBean.class)
            .with(SharedLibraryBean::path_name, _test_file_path).with(SharedLibraryBean::_id, "test1").done()
            .get();

    final Validation<BasicMessageBean, String> ret_val_1 = JarCacheUtils.getCachedJar(
            _globals.local_cached_jar_dir(), library_bean, _mock_hdfs, "test1", new TestMessageBean()).get();

    assertEquals(expected_cache_path.toString(), ret_val_1.success());

    assertTrue("Local file still exists", new File(expected_cache_name).exists());

    assertEquals(localfs.getFileStatus(expected_cache_path).getModificationTime(), _test_file_time + 10000);
}

From source file:com.ikanow.aleph2.management_db.mongodb.services.IkanowV1SyncService_LibraryJars.java

License:Apache License

protected static void copyFile(final String binary_id, final String path, final IStorageService aleph2_fs,
        final GridFS share_fs) throws IOException {
    try (ByteArrayOutputStream out = new ByteArrayOutputStream()) {
        final GridFSDBFile file = share_fs.find(new ObjectId(binary_id));
        file.writeTo(out);
        final FileContext fs = aleph2_fs.getUnderlyingPlatformDriver(FileContext.class, Optional.empty()).get();
        final Path file_path = fs.makeQualified(new Path(path));
        try (FSDataOutputStream outer = fs.create(file_path,
                EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
                org.apache.hadoop.fs.Options.CreateOpts.createParent())) {
            outer.write(out.toByteArray());
        }
    }
}