Example usage for org.apache.commons.compress.archivers.tar TarArchiveEntry isFile

Introduction

On this page you can find example usages of the isFile() method of org.apache.commons.compress.archivers.tar.TarArchiveEntry.

Prototype

public boolean isFile() 

Document

Check if this is a "normal file"
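
Before the per-project examples, here is a minimal, self-contained sketch of the common pattern: iterate the entries of a TarArchiveInputStream and process only the "normal file" entries, skipping directories and other special entry types. The archive and destination paths below are hypothetical placeholders, and the path-traversal check that the first example performs is omitted here for brevity.

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
import org.apache.commons.compress.utils.IOUtils;

public class IsFileExample {
    public static void main(String[] args) throws IOException {
        Path archive = Paths.get("example.tar"); // hypothetical input archive
        Path destination = Paths.get("extracted"); // hypothetical output directory
        try (InputStream in = Files.newInputStream(archive);
                TarArchiveInputStream tarIn = new TarArchiveInputStream(in)) {
            TarArchiveEntry entry;
            while ((entry = tarIn.getNextTarEntry()) != null) {
                if (!entry.isFile()) {
                    continue; // skip directories, symlinks and other special entries
                }
                Path target = destination.resolve(entry.getName()).normalize();
                Files.createDirectories(target.getParent());
                try (OutputStream out = Files.newOutputStream(target)) {
                    IOUtils.copy(tarIn, out); // the stream is positioned at this entry's data
                }
            }
        }
    }
}

isFile() is the counterpart to isDirectory() and isSymbolicLink(); the examples below typically branch on these three to decide how each entry should be materialized.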

Usage

From source file:com.google.cloud.tools.managedcloudsdk.install.TarGzExtractorProvider.java

@Override
public void extract(Path archive, Path destination, ProgressListener progressListener) throws IOException {

    progressListener.start("Extracting archive: " + archive.getFileName(), ProgressListener.UNKNOWN);

    String canonicalDestination = destination.toFile().getCanonicalPath();

    GzipCompressorInputStream gzipIn = new GzipCompressorInputStream(Files.newInputStream(archive));
    try (TarArchiveInputStream in = new TarArchiveInputStream(gzipIn)) {
        TarArchiveEntry entry;
        while ((entry = in.getNextTarEntry()) != null) {
            Path entryTarget = destination.resolve(entry.getName());

            String canonicalTarget = entryTarget.toFile().getCanonicalPath();
            if (!canonicalTarget.startsWith(canonicalDestination + File.separator)) {
                throw new IOException("Blocked unzipping files outside destination: " + entry.getName());
            }

            progressListener.update(1);
            logger.fine(entryTarget.toString());

            if (entry.isDirectory()) {
                if (!Files.exists(entryTarget)) {
                    Files.createDirectories(entryTarget);
                }
            } else if (entry.isFile()) {
                if (!Files.exists(entryTarget.getParent())) {
                    Files.createDirectories(entryTarget.getParent());
                }
                try (OutputStream out = new BufferedOutputStream(Files.newOutputStream(entryTarget))) {
                    IOUtils.copy(in, out);
                    PosixFileAttributeView attributeView = Files.getFileAttributeView(entryTarget,
                            PosixFileAttributeView.class);
                    if (attributeView != null) {
                        attributeView.setPermissions(PosixUtil.getPosixFilePermissions(entry.getMode()));
                    }
                }
            } else {
                // we don't know what kind of entry this is (we only process directories and files).
                logger.warning("Skipping entry (unknown type): " + entry.getName());
            }
        }
        progressListener.done();
    }
}

From source file:com.facebook.buck.util.unarchive.Untar.java

@VisibleForTesting
ImmutableSet<Path> extractArchive(Path archiveFile, ProjectFilesystem filesystem, Path filesystemRelativePath,
        Optional<Path> stripPath, ExistingFileMode existingFileMode, PatternsMatcher entriesToExclude,
        boolean writeSymlinksAfterCreatingFiles) throws IOException {

    ImmutableSet.Builder<Path> paths = ImmutableSet.builder();
    HashSet<Path> dirsToTidy = new HashSet<>();
    TreeMap<Path, Long> dirCreationTimes = new TreeMap<>();
    DirectoryCreator creator = new DirectoryCreator(filesystem);

    // On windows, we create hard links instead of symlinks. This is fine, but the
    // destination file may not exist yet, which is an error. So, just hold onto the paths until
    // all files are extracted, and /then/ try to do the links
    Map<Path, Path> windowsSymlinkMap = new HashMap<>();

    try (TarArchiveInputStream archiveStream = getArchiveInputStream(archiveFile)) {
        TarArchiveEntry entry;
        while ((entry = archiveStream.getNextTarEntry()) != null) {
            String entryName = entry.getName();
            if (entriesToExclude.matchesAny(entryName)) {
                continue;
            }
            Path destFile = Paths.get(entryName);
            Path destPath;
            if (stripPath.isPresent()) {
                if (!destFile.startsWith(stripPath.get())) {
                    continue;
                }
                destPath = filesystemRelativePath.resolve(stripPath.get().relativize(destFile)).normalize();
            } else {
                destPath = filesystemRelativePath.resolve(destFile).normalize();
            }

            if (entry.isDirectory()) {
                dirsToTidy.add(destPath);
                mkdirs(creator, destPath);
                dirCreationTimes.put(destPath, entry.getModTime().getTime());
            } else if (entry.isSymbolicLink()) {
                if (writeSymlinksAfterCreatingFiles) {
                    recordSymbolicLinkForWindows(creator, destPath, entry, windowsSymlinkMap);
                } else {
                    writeSymbolicLink(creator, destPath, entry);
                }
                paths.add(destPath);
                setAttributes(filesystem, destPath, entry);
            } else if (entry.isFile()) {
                writeFile(creator, archiveStream, destPath);
                paths.add(destPath);
                setAttributes(filesystem, destPath, entry);
            }
        }

        writeWindowsSymlinks(creator, windowsSymlinkMap);
    } catch (CompressorException e) {
        throw new IOException(String.format("Could not get decompressor for archive at %s", archiveFile), e);
    }

    setDirectoryModificationTimes(filesystem, dirCreationTimes);

    ImmutableSet<Path> filePaths = paths.build();
    if (existingFileMode == ExistingFileMode.OVERWRITE_AND_CLEAN_DIRECTORIES) {
        // Clean out directories of files that were not in the archive
        tidyDirectories(filesystem, dirsToTidy, filePaths);
    }
    return filePaths;
}

From source file:org.apache.bookkeeper.tests.integration.utils.DockerUtils.java

public static void dumpContainerLogDirToTarget(DockerClient docker, String containerId, String path) {
    final int readBlockSize = 10000;

    try (InputStream dockerStream = docker.copyArchiveFromContainerCmd(containerId, path).exec();
            TarArchiveInputStream stream = new TarArchiveInputStream(dockerStream)) {
        TarArchiveEntry entry = stream.getNextTarEntry();
        while (entry != null) {
            if (entry.isFile()) {
                File output = new File(getTargetDirectory(containerId), entry.getName().replace("/", "-"));
                try (FileOutputStream os = new FileOutputStream(output)) {
                    byte[] block = new byte[readBlockSize];
                    int read = stream.read(block, 0, readBlockSize);
                    while (read > -1) {
                        os.write(block, 0, read);
                        read = stream.read(block, 0, readBlockSize);
                    }
                }
            }
            entry = stream.getNextTarEntry();
        }
    } catch (RuntimeException | IOException e) {
        LOG.error("Error reading bk logs from container {}", containerId, e);
    }
}

From source file:org.apache.hadoop.fs.tar.TarIndex.java

/**
 * Creates an index out of a tar file.
 * The index is a map from file name to start offset within the tar.
 *
 * @param fs Underlying Hadoop FileSystem
 * @param tarPath Path to the tar file
 * @param isWrite Whether to write the index to a file
 * @param conf Hadoop configuration
 * @throws IOException
 */
public TarIndex(FileSystem fs, Path tarPath, boolean isWrite, Configuration conf) throws IOException {

    Path indexPath = getIndexPath(tarPath);
    Path altIndexP = getAltIndexPath(tarPath, conf);

    boolean readOK = false;
    readOK = readIndexFile(fs, indexPath);

    if (readOK == false)
        readOK = readIndexFile(fs, altIndexP);

    if (readOK == false) {
        FSDataInputStream is = fs.open(tarPath);
        byte[] buffer = new byte[512];

        while (true) {
            int bytesRead = is.read(buffer);
            if (bytesRead == -1)
                break;
            if (bytesRead < 512)
                throw new IOException("Could not read the full header.");

            long currOffset = is.getPos();
            TarArchiveEntry entry = new TarArchiveEntry(buffer);

            // Index only normal files. Do not support directories yet.
            if (entry.isFile()) {
                String name = entry.getName().trim();
                if (!name.equals("")) {
                    IndexEntry ie = new IndexEntry(entry.getSize(), currOffset);
                    index.put(name, ie);
                }
            }

            // Tar stores entry data in 512-byte blocks, so round up to the next block boundary.
            long nextOffset = currOffset + entry.getSize();
            if (nextOffset % 512 != 0)
                nextOffset = ((nextOffset / 512) + 1) * 512;
            is.seek(nextOffset);
        }
        is.close();

        if (isWrite) {
            boolean writeOK = writeIndex(fs, indexPath);

            if (writeOK == false && altIndexP != null)
                writeOK = writeIndex(fs, altIndexP);

            if (writeOK == false) {
                Path p = altIndexP == null ? indexPath : altIndexP;

                LOG.error("Could not create INDEX file " + p.toUri());
                if (altIndexP == null)
                    LOG.error("You can specify alternate location for index"
                            + " creation using tarfs.tmp.dir property.");

                LOG.error("Skipping writing index file.");
            }
        }
    }
}

From source file:org.apache.helix.provisioning.yarn.AppLauncher.java

/**
 * Generates the classpath after the archive file gets extracted under the 'serviceName' folder
 * @param serviceName
 * @param archiveFile
 * @return the classpath entries for the jar files under the extracted archive
 */
private String generateClasspathAfterExtraction(String serviceName, File archiveFile) {
    if (!isArchive(archiveFile.getAbsolutePath())) {
        return "./";
    }
    StringBuilder classpath = new StringBuilder();
    // put the jar files under the archive in the classpath
    try {
        final InputStream is = new FileInputStream(archiveFile);
        final TarArchiveInputStream debInputStream = (TarArchiveInputStream) new ArchiveStreamFactory()
                .createArchiveInputStream("tar", is);
        TarArchiveEntry entry = null;
        while ((entry = (TarArchiveEntry) debInputStream.getNextEntry()) != null) {
            if (entry.isFile()) {
                classpath.append(File.pathSeparatorChar);
                classpath.append("./" + serviceName + "/" + entry.getName());
            }
        }
        debInputStream.close();

    } catch (Exception e) {
        LOG.error("Unable to read archive file:" + archiveFile, e);
    }
    return classpath.toString();
}

From source file:org.apache.metron.enrichment.adapters.maxmind.MaxMindDatabase.java

/**
 * Update the database being queried to one backed by the provided HDFS file.
 * Access to the database should be guarded by read locks to avoid disruption while updates are occurring.
 * @param hdfsFile The HDFS file path to be used for new queries.
 */
default void update(String hdfsFile) {
    // If nothing is set (or it's been unset), use the defaults
    if (hdfsFile == null || hdfsFile.isEmpty()) {
        LOG.debug("Using default for {}: {}", getHdfsFileConfig(), getHdfsFileDefault());
        hdfsFile = getHdfsFileDefault();
    }

    FileSystem fs = MaxMindDbUtilities.getFileSystem();

    if (hdfsFile.endsWith(MaxMindDatabase.EXTENSION_MMDB)) {
        lockIfNecessary();
        try (BufferedInputStream is = new BufferedInputStream(fs.open(new Path(hdfsFile)))) {
            setReader(MaxMindDbUtilities.readNewDatabase(getReader(), hdfsFile, is));
        } catch (IOException e) {
            MaxMindDbUtilities.handleDatabaseIOException(hdfsFile, e);
        } finally {
            unlockIfNecessary();
        }
    } else if (hdfsFile.endsWith(MaxMindDatabase.EXTENSION_MMDB_GZ)) {
        lockIfNecessary();
        try (GZIPInputStream is = new GZIPInputStream(fs.open(new Path(hdfsFile)))) {
            setReader(MaxMindDbUtilities.readNewDatabase(getReader(), hdfsFile, is));
        } catch (IOException e) {
            MaxMindDbUtilities.handleDatabaseIOException(hdfsFile, e);
        } finally {
            unlockIfNecessary();
        }
    } else if (hdfsFile.endsWith(MaxMindDatabase.EXTENSION_TAR_GZ)) {
        lockIfNecessary();
        try (TarArchiveInputStream is = new TarArchiveInputStream(
                new GZIPInputStream(fs.open(new Path(hdfsFile))))) {
            // Need to find the mmdb entry.
            TarArchiveEntry entry = is.getNextTarEntry();
            while (entry != null) {
                if (entry.isFile() && entry.getName().endsWith(MaxMindDatabase.EXTENSION_MMDB)) {
                    try (InputStream mmdb = new BufferedInputStream(is)) { // Read directly from tarInput
                        setReader(MaxMindDbUtilities.readNewDatabase(getReader(), hdfsFile, mmdb));
                        break; // Don't care about the other entries, leave immediately
                    }
                }
                entry = is.getNextTarEntry();
            }
        } catch (IOException e) {
            MaxMindDbUtilities.handleDatabaseIOException(hdfsFile, e);
        } finally {
            unlockIfNecessary();
        }
    }
}

From source file:org.apache.pulsar.tests.DockerUtils.java

public static void dumpContainerLogDirToTarget(DockerClient docker, String containerId, String path) {
    final int READ_BLOCK_SIZE = 10000;

    try (InputStream dockerStream = docker.copyArchiveFromContainerCmd(containerId, path).exec();
            TarArchiveInputStream stream = new TarArchiveInputStream(dockerStream)) {
        TarArchiveEntry entry = stream.getNextTarEntry();
        while (entry != null) {
            if (entry.isFile()) {
                File output = new File(getTargetDirectory(containerId), entry.getName().replace("/", "-"));
                try (FileOutputStream os = new FileOutputStream(output)) {
                    byte[] block = new byte[READ_BLOCK_SIZE];
                    int read = stream.read(block, 0, READ_BLOCK_SIZE);
                    while (read > -1) {
                        os.write(block, 0, read);
                        read = stream.read(block, 0, READ_BLOCK_SIZE);
                    }
                }
            }
            entry = stream.getNextTarEntry();
        }
    } catch (RuntimeException | IOException e) {
        LOG.error("Error reading logs from container {}", containerId, e);
    }
}

From source file:org.codehaus.mojo.unix.deb.DpkgDebTool.java

private static List<UnixFsObject> process(InputStream is) throws IOException {
    TarArchiveInputStream tarInputStream = new TarArchiveInputStream(is);

    List<UnixFsObject> objects = new ArrayList<UnixFsObject>();

    TarArchiveEntry entry = (TarArchiveEntry) tarInputStream.getNextEntry();

    while (entry != null) {
        Option<UnixFileMode> mode = some(UnixFileMode.fromInt(entry.getMode()));
        FileAttributes attributes = new FileAttributes(some(entry.getUserName()), some(entry.getGroupName()),
                mode);
        RelativePath path = relativePath(entry.getName());
        LocalDateTime lastModified = LocalDateTime.fromDateFields(entry.getModTime());

        UnixFsObject object;

        if (entry.isDirectory()) {
            object = directory(path, lastModified, attributes);
        } else if (entry.isSymbolicLink()) {
            object = symlink(path, lastModified, some(entry.getUserName()), some(entry.getGroupName()),
                    entry.getLinkName());
        } else if (entry.isFile()) {
            object = regularFile(path, lastModified, entry.getSize(), attributes);
        } else {
            throw new IOException("Unsupported link type: name=" + entry.getName());
        }

        objects.add(object);

        entry = (TarArchiveEntry) tarInputStream.getNextEntry();
    }

    return objects;
}

From source file:org.eclipse.tycho.plugins.tar.TarGzArchiverTest.java

@Test
public void testCreateArchiveEntriesPresent() throws Exception {
    archiver.createArchive();
    Map<String, TarArchiveEntry> tarEntries = getTarEntries();
    assertEquals(7, tarEntries.size());
    assertThat(tarEntries.keySet(), hasItems("dir2/", "dir2/test.txt", "dir2/dir3/", "dir2/dir3/test.sh",
            "dir2/testPermissions", "dir2/testLastModified", "dir2/testOwnerAndGroupName"));
    TarArchiveEntry dirArchiveEntry = tarEntries.get("dir2/");
    assertTrue(dirArchiveEntry.isDirectory());
    TarArchiveEntry textFileEntry = tarEntries.get("dir2/test.txt");
    assertTrue(textFileEntry.isFile());
    byte[] content = getTarEntry("dir2/test.txt");
    assertEquals("hello", new String(content, "UTF-8"));
}

From source file:org.eclipse.tycho.plugins.tar.TarGzArchiverTest.java

@Test
public void testSymbolicLinkOutsideArchiveInlined() throws Exception {
    File linkTargetFile = tempFolder.newFile("linkTargetOutsideArchiveRoot");
    FileUtils.fileWrite(linkTargetFile, "testContent");
    createSymbolicLink(new File(archiveRoot, "testSymLink"), linkTargetFile.toPath());
    archiver.createArchive();
    TarArchiveEntry inlinedSymLinkEntry = getTarEntries().get("testSymLink");
    assertFalse(inlinedSymLinkEntry.isSymbolicLink());
    assertTrue(inlinedSymLinkEntry.isFile());
    String content = new String(getTarEntry("testSymLink"), "UTF-8");
    assertEquals("testContent", content);
}