Example usage for org.apache.hadoop.fs FileUtil copy

Introduction

On this page you can find example usage of org.apache.hadoop.fs.FileUtil.copy.

Prototype

public static boolean copy(FileSystem srcFS, Path src, FileSystem dstFS, Path dst, boolean deleteSource,
        Configuration conf) throws IOException 

Document

Copy files between FileSystems.
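
A minimal sketch of a call, with both paths as placeholders for illustration. The method returns true when the copy succeeds, and passing true for deleteSource removes the source after a successful copy; note that several of the examples below pass an existing directory as the destination, in which case the source is copied beneath it.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;

public class FileUtilCopyExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        Path src = new Path("/tmp/source.txt"); // placeholder source path
        Path dst = new Path("/tmp/dest/source.txt"); // placeholder destination path

        // deleteSource=false keeps the original file in place
        boolean copied = FileUtil.copy(fs, src, fs, dst, false, conf);
        System.out.println("copy succeeded: " + copied);
    }
}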

Usage

From source file:org.apache.accumulo.test.proxy.SimpleProxyBase.java

License:Apache License

@Test
public void importExportTable() throws Exception {
    // Write some data
    String[][] expected = new String[10][];
    for (int i = 0; i < 10; i++) {
        client.updateAndFlush(creds, tableName, mutation("row" + i, "cf", "cq", "" + i));
        expected[i] = new String[] { "row" + i, "cf", "cq", "" + i };
        client.flushTable(creds, tableName, null, null, true);
    }
    assertScan(expected, tableName);

    // export/import
    MiniAccumuloClusterImpl cluster = SharedMiniClusterBase.getCluster();
    FileSystem fs = cluster.getFileSystem();
    Path base = cluster.getTemporaryPath();
    Path dir = new Path(base, "test");
    assertTrue(fs.mkdirs(dir));
    Path destDir = new Path(base, "test_dest");
    assertTrue(fs.mkdirs(destDir));
    client.offlineTable(creds, tableName, false);
    client.exportTable(creds, tableName, dir.toString());
    // copy files to a new location
    FSDataInputStream is = fs.open(new Path(dir, "distcp.txt"));
    try (BufferedReader r = new BufferedReader(new InputStreamReader(is, UTF_8))) {
        while (true) {
            String line = r.readLine();
            if (line == null)
                break;
            Path srcPath = new Path(line);
            FileUtil.copy(fs, srcPath, fs, destDir, false, fs.getConf());
        }
    }
    client.deleteTable(creds, tableName);
    client.importTable(creds, "testify", destDir.toString());
    assertScan(expected, "testify");
    client.deleteTable(creds, "testify");

    try {
        // ACCUMULO-1558: a second import from the same dir should fail; the first import moved the files
        client.importTable(creds, "testify2", destDir.toString());
        fail();
    } catch (Exception e) {
    }

    assertFalse(client.listTables(creds).contains("testify2"));
}

From source file:org.apache.accumulo.tserver.DirectoryDecommissioner.java

License:Apache License

public static Path checkTabletDirectory(TabletServer tserver, VolumeManager vm, KeyExtent extent, Path dir)
        throws IOException {
    if (isActiveVolume(dir))
        return dir;

    if (!dir.getParent().getParent().getName().equals(ServerConstants.TABLE_DIR)) {
        throw new IllegalArgumentException("Unexpected table dir " + dir);
    }

    Path newDir = new Path(
            vm.choose(ServerConstants.getTablesDirs()) + "/" + dir.getParent().getName() + "/" + dir.getName());

    log.info("Updating directory for " + extent + " from " + dir + " to " + newDir);
    if (extent.isRootTablet()) {
        // the root tablet is a special case; its files need to be copied if its dir is changed

        // this code needs to be idempotent

        FileSystem fs1 = vm.getFileSystemByPath(dir);
        FileSystem fs2 = vm.getFileSystemByPath(newDir);

        if (!same(fs1, dir, fs2, newDir)) {
            if (fs2.exists(newDir)) {
                Path newDirBackup = getBackupName(fs2, newDir);
                // never delete anything because we're dealing with the root tablet
                // one reason this dir may exist is because this method failed previously
                log.info("renaming " + newDir + " to " + newDirBackup);
                if (!fs2.rename(newDir, newDirBackup)) {
                    throw new IOException("Failed to rename " + newDir + " to " + newDirBackup);
                }
            }

            // do a lot of logging since this is the root tablet
            log.info("copying " + dir + " to " + newDir);
            if (!FileUtil.copy(fs1, dir, fs2, newDir, false, CachedConfiguration.getInstance())) {
                throw new IOException("Failed to copy " + dir + " to " + newDir);
            }

            // only set the new location in zookeeper after a successful copy
            log.info("setting root tablet location to " + newDir);
            MetadataTableUtil.setRootTabletDir(newDir.toString());

            // rename the old dir to avoid confusion when someone looks at the filesystem... it's ok if we fail here and this does not happen because the location in
            // zookeeper is the authority
            Path dirBackup = getBackupName(fs1, dir);
            log.info("renaming " + dir + " to " + dirBackup);
            fs1.rename(dir, dirBackup);
        } else {
            log.info("setting root tablet location to " + newDir);
            MetadataTableUtil.setRootTabletDir(newDir.toString());
        }

        return dir;
    } else {
        MetadataTableUtil.updateTabletDir(extent, newDir.toString(), SystemCredentials.get(),
                tserver.getLock());
        return newDir;
    }
}

From source file:org.apache.ambari.view.filebrowser.HdfsApi.java

License:Apache License

/**
 * Copy file
 * @param src source path
 * @param dest destination path
 * @return success
 * @throws IOException
 * @throws InterruptedException
 */
public boolean copy(final String src, final String dest) throws IOException, InterruptedException {
    return ugi.doAs(new PrivilegedExceptionAction<Boolean>() {
        public Boolean run() throws Exception {
            return FileUtil.copy(fs, new Path(src), fs, new Path(dest), false, conf);
        }
    });
}

From source file:org.apache.ambari.view.hive.utils.HdfsApi.java

License:Apache License

/**
 * Copy file
 * @param src source path
 * @param dest destination path
 * @throws java.io.IOException
 * @throws InterruptedException
 */
public synchronized void copy(final String src, final String dest) throws IOException, InterruptedException {
    boolean result = ugi.doAs(new PrivilegedExceptionAction<Boolean>() {
        public Boolean run() throws Exception {
            return FileUtil.copy(fs, new Path(src), fs, new Path(dest), false, conf);
        }
    });
    if (!result) {
        throw new ServiceFormattedException("Can't copy source file from " + src + " to " + dest);
    }
}

From source file:org.apache.ambari.view.utils.hdfs.HdfsApi.java

License:Apache License

/**
 * Copy file
 * @param src source path
 * @param dest destination path
 * @throws java.io.IOException
 * @throws InterruptedException
 */
public void copy(final String src, final String dest)
        throws IOException, InterruptedException, HdfsApiException {
    boolean result = execute(new PrivilegedExceptionAction<Boolean>() {
        public Boolean run() throws Exception {
            return FileUtil.copy(fs, new Path(src), fs, new Path(dest), false, conf);
        }
    });

    if (!result) {
        throw new HdfsApiException("HDFS010 Can't copy source file from " + src + " to " + dest);
    }
}

From source file:org.apache.drill.test.framework.TestDriver.java

License:Apache License

private static void dfsCopy(Path src, Path dest, String fsMode) throws IOException {

    FileSystem fs;
    FileSystem localFs = FileSystem.getLocal(conf);

    if (fsMode.equals(LOCALFS)) {
        fs = FileSystem.getLocal(conf);
    } else {
        fs = FileSystem.get(conf);
    }

    try {
        if (localFs.getFileStatus(src).isDirectory()) {
            for (FileStatus file : localFs.listStatus(src)) {
                Path srcChild = file.getPath();
                Path newDest = new Path(dest + "/" + srcChild.getName());
                dfsCopy(srcChild, newDest, fsMode);
            }
        } else {
            if (!fs.exists(dest.getParent())) {
                fs.mkdirs(dest.getParent());
            }
            if (!fs.exists(dest)) {
                FileUtil.copy(localFs, src, fs, dest, false, fs.getConf());
                LOG.debug("Copying file " + src + " to " + dest);
            } else {
                LOG.debug("File " + src + " already exists as " + dest);
            }
        }
    } catch (FileAlreadyExistsException e) {
        LOG.debug("File " + src + " already exists as " + dest);
    } catch (IOException e) {
        LOG.debug("Failed to copy file " + src + " to " + dest, e);
    }
}

From source file:org.apache.ignite.internal.processors.hadoop.impl.v2.HadoopV2JobResourceManager.java

License:Apache License

/**
 * Process list of resources.
 *
 * @param jobLocDir Job working directory.
 * @param files Array of {@link URI} or {@link org.apache.hadoop.fs.Path} resources to process.
 * @param download {@code true} if the resources need to be downloaded; otherwise only the classpath is processed.
 * @param extract {@code true} if archives need to be extracted.
 * @param clsPathUrls Collection to which classpath resource URLs are added.
 * @param rsrcNameProp Configuration property under which the resource names are recorded.
 * @throws IOException If failed.
 */
private void processFiles(File jobLocDir, @Nullable Object[] files, boolean download, boolean extract,
        @Nullable Collection<URL> clsPathUrls, @Nullable String rsrcNameProp) throws IOException {
    if (F.isEmptyOrNulls(files))
        return;

    Collection<String> res = new ArrayList<>();

    for (Object pathObj : files) {
        Path srcPath;

        if (pathObj instanceof URI) {
            URI uri = (URI) pathObj;

            srcPath = new Path(uri);
        } else
            srcPath = (Path) pathObj;

        String locName = srcPath.getName();

        File dstPath = new File(jobLocDir.getAbsolutePath(), locName);

        res.add(locName);

        rsrcSet.add(dstPath);

        if (clsPathUrls != null)
            clsPathUrls.add(dstPath.toURI().toURL());

        if (!download)
            continue;

        JobConf cfg = ctx.getJobConf();

        FileSystem dstFs = FileSystem.getLocal(cfg);

        FileSystem srcFs = job.fileSystem(srcPath.toUri(), cfg);

        if (extract) {
            File archivesPath = new File(jobLocDir.getAbsolutePath(), ".cached-archives");

            if (!archivesPath.exists() && !archivesPath.mkdir())
                throw new IOException(
                        "Failed to create directory " + "[path=" + archivesPath + ", jobId=" + jobId + ']');

            File archiveFile = new File(archivesPath, locName);

            FileUtil.copy(srcFs, srcPath, dstFs, new Path(archiveFile.toString()), false, cfg);

            String archiveNameLC = archiveFile.getName().toLowerCase();

            if (archiveNameLC.endsWith(".jar"))
                RunJar.unJar(archiveFile, dstPath);
            else if (archiveNameLC.endsWith(".zip"))
                FileUtil.unZip(archiveFile, dstPath);
            else if (archiveNameLC.endsWith(".tar.gz") || archiveNameLC.endsWith(".tgz")
                    || archiveNameLC.endsWith(".tar"))
                FileUtil.unTar(archiveFile, dstPath);
            else
                throw new IOException("Cannot unpack archive [path=" + srcPath + ", jobId=" + jobId + ']');
        } else
            FileUtil.copy(srcFs, srcPath, dstFs, new Path(dstPath.toString()), false, cfg);
    }

    if (!res.isEmpty() && rsrcNameProp != null)
        ctx.getJobConf().setStrings(rsrcNameProp, res.toArray(new String[res.size()]));
}

From source file:org.apache.ignite.internal.processors.hadoop.v2.GridHadoopV2JobResourceManager.java

License:Apache License

/**
 * Process list of resources.
 *
 * @param jobLocDir Job working directory.
 * @param files Array of {@link java.net.URI} or {@link org.apache.hadoop.fs.Path} resources to process.
 * @param download {@code true} if the resources need to be downloaded; otherwise only the classpath is processed.
 * @param extract {@code true} if archives need to be extracted.
 * @param clsPathUrls Collection to which classpath resource URLs are added.
 * @param rsrcNameProp Configuration property under which the resource names are recorded.
 * @throws IOException If failed.
 */
private void processFiles(File jobLocDir, @Nullable Object[] files, boolean download, boolean extract,
        @Nullable Collection<URL> clsPathUrls, @Nullable String rsrcNameProp) throws IOException {
    if (F.isEmptyOrNulls(files))
        return;

    Collection<String> res = new ArrayList<>();

    for (Object pathObj : files) {
        String locName = null;
        Path srcPath;

        if (pathObj instanceof URI) {
            URI uri = (URI) pathObj;

            locName = uri.getFragment();

            srcPath = new Path(uri);
        } else
            srcPath = (Path) pathObj;

        if (locName == null)
            locName = srcPath.getName();

        File dstPath = new File(jobLocDir.getAbsolutePath(), locName);

        res.add(locName);

        rsrcSet.add(dstPath);

        if (clsPathUrls != null)
            clsPathUrls.add(dstPath.toURI().toURL());

        if (!download)
            continue;

        JobConf cfg = ctx.getJobConf();

        FileSystem dstFs = FileSystem.getLocal(cfg);

        FileSystem srcFs = srcPath.getFileSystem(cfg);

        if (extract) {
            File archivesPath = new File(jobLocDir.getAbsolutePath(), ".cached-archives");

            if (!archivesPath.exists() && !archivesPath.mkdir())
                throw new IOException(
                        "Failed to create directory " + "[path=" + archivesPath + ", jobId=" + jobId + ']');

            File archiveFile = new File(archivesPath, locName);

            FileUtil.copy(srcFs, srcPath, dstFs, new Path(archiveFile.toString()), false, cfg);

            String archiveNameLC = archiveFile.getName().toLowerCase();

            if (archiveNameLC.endsWith(".jar"))
                RunJar.unJar(archiveFile, dstPath);
            else if (archiveNameLC.endsWith(".zip"))
                FileUtil.unZip(archiveFile, dstPath);
            else if (archiveNameLC.endsWith(".tar.gz") || archiveNameLC.endsWith(".tgz")
                    || archiveNameLC.endsWith(".tar"))
                FileUtil.unTar(archiveFile, dstPath);
            else
                throw new IOException("Cannot unpack archive [path=" + srcPath + ", jobId=" + jobId + ']');
        } else
            FileUtil.copy(srcFs, srcPath, dstFs, new Path(dstPath.toString()), false, cfg);
    }

    if (!res.isEmpty() && rsrcNameProp != null)
        ctx.getJobConf().setStrings(rsrcNameProp, res.toArray(new String[res.size()]));
}

From source file:org.apache.kylin.common.persistence.JDBCResourceDAO.java

License:Apache License

private Path writeLargeCellToHdfs(String resPath, byte[] largeColumn) throws SQLException {

    boolean isResourceExist;
    FSDataOutputStream out = null;
    Path redirectPath = bigCellHDFSPath(resPath);
    Path oldPath = new Path(redirectPath.toString() + "_old");
    try {
        isResourceExist = redirectFileSystem.exists(redirectPath);
        if (isResourceExist) {
            FileUtil.copy(redirectFileSystem, redirectPath, redirectFileSystem, oldPath, false,
                    HadoopUtil.getCurrentConfiguration());
            redirectFileSystem.delete(redirectPath, true);
            logger.debug("a copy of hdfs file {} is made", redirectPath);
        }
        out = redirectFileSystem.create(redirectPath);
        out.write(largeColumn);
        return redirectPath;
    } catch (Exception e) {
        try {
            rollbackLargeCellFromHdfs(resPath);
        } catch (Exception ex) {
            logger.error("fail to roll back resource " + resPath + " in hdfs", ex);
        }
        throw new SQLException(e);
    } finally {
        IOUtils.closeQuietly(out);
    }
}

From source file:org.apache.kylin.dict.global.GlobalDictHDFSStore.java

License:Apache License

private void migrateOldLayout() throws IOException {
    FileStatus[] sliceFiles = fileSystem.listStatus(basePath, new PathFilter() {
        @Override
        public boolean accept(Path path) {
            return path.getName().startsWith(IndexFormatV1.SLICE_PREFIX);
        }
    });
    Path indexFile = new Path(basePath, V1_INDEX_NAME);

    if (fileSystem.exists(indexFile) && sliceFiles.length > 0) { // old layout
        final long version = System.currentTimeMillis();
        Path tempDir = new Path(basePath, "tmp_" + VERSION_PREFIX + version);
        Path versionDir = getVersionDir(version);

        logger.info("Convert global dict at {} to new layout with version {}", basePath, version);

        fileSystem.mkdirs(tempDir);
        // convert to new layout
        try {
            // copy index and slice files to temp
            FileUtil.copy(fileSystem, indexFile, fileSystem, tempDir, false, conf);
            for (FileStatus sliceFile : sliceFiles) {
                FileUtil.copy(fileSystem, sliceFile.getPath(), fileSystem, tempDir, false, conf);
            }
            // rename
            fileSystem.rename(tempDir, versionDir);
            // delete index and slices files in base dir
            fileSystem.delete(indexFile, false);
            for (FileStatus sliceFile : sliceFiles) {
                fileSystem.delete(sliceFile.getPath(), true);
            }

        } finally {
            if (fileSystem.exists(tempDir)) {
                fileSystem.delete(tempDir, true);
            }
        }
    }
}