Example usage for org.apache.commons.compress.archivers.tar TarArchiveEntry getName

List of usage examples for org.apache.commons.compress.archivers.tar TarArchiveEntry getName

Introduction

On this page you can find example usages of org.apache.commons.compress.archivers.tar TarArchiveEntry getName.

Prototype

public String getName() 

Document

Get this entry's name.
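
Before the project-specific examples below, here is a minimal, self-contained sketch of getName() in isolation (the archive path "example.tar" is only a placeholder, not taken from any of the examples): it opens a tar archive, iterates its entries, and prints each entry's name.

import java.io.FileInputStream;
import java.io.IOException;

import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;

public class ListTarEntryNames {
    public static void main(String[] args) throws IOException {
        // "example.tar" is a placeholder; point this at a real archive.
        try (TarArchiveInputStream tarIn = new TarArchiveInputStream(new FileInputStream("example.tar"))) {
            TarArchiveEntry entry;
            while ((entry = tarIn.getNextTarEntry()) != null) {
                // getName() returns the entry's path inside the archive, e.g. "dir/file.txt".
                System.out.println(entry.getName());
            }
        }
    }
}

The usage examples that follow show the same call used for indexing, extraction, and classpath construction.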

Usage

From source file:org.apache.hadoop.fs.tar.TarIndex.java

/**
 * Creates an index out of a tar file.
 * The index is a map from file name to start offset.
 * 
 * @param fs Underlying Hadoop FileSystem
 * @param tarPath Path to the Tar file
 * @param isWrite whether the index should be written to a file
 * @throws IOException
 */
public TarIndex(FileSystem fs, Path tarPath, boolean isWrite, Configuration conf) throws IOException {

    Path indexPath = getIndexPath(tarPath);
    Path altIndexP = getAltIndexPath(tarPath, conf);

    boolean readOK = false;
    readOK = readIndexFile(fs, indexPath);

    if (readOK == false)
        readOK = readIndexFile(fs, altIndexP);

    if (readOK == false) {
        FSDataInputStream is = fs.open(tarPath);
        byte[] buffer = new byte[512];

        while (true) {
            int bytesRead = is.read(buffer);
            if (bytesRead == -1)
                break;
            if (bytesRead < 512)
                throw new IOException("Could not read the full header.");

            long currOffset = is.getPos();
            TarArchiveEntry entry = new TarArchiveEntry(buffer);

            // Index only normal files. Do not support directories yet.
            if (entry.isFile()) {
                String name = entry.getName().trim();
                if (!name.equals("")) {
                    IndexEntry ie = new IndexEntry(entry.getSize(), currOffset);
                    index.put(name, ie);
                }
            }

            long nextOffset = currOffset + entry.getSize();
            if (nextOffset % 512 != 0)
                nextOffset = ((nextOffset / 512) + 1) * 512;
            is.seek(nextOffset);
        }
        is.close();

        if (isWrite) {
            boolean writeOK = writeIndex(fs, indexPath);

            if (writeOK == false && altIndexP != null)
                writeOK = writeIndex(fs, altIndexP);

            if (writeOK == false) {
                Path p = altIndexP == null ? indexPath : altIndexP;

                LOG.error("Could not create INDEX file " + p.toUri());
                if (altIndexP == null)
                    LOG.error("You can specify alternate location for index"
                            + " creation using tarfs.tmp.dir property.");

                LOG.error("Skipping writing index file.");
            }
        }
    }
}

From source file:org.apache.hadoop.hive.common.CompressionUtils.java

/**
 * Untar an input file into an output file.
 *
 * The output file is created in the output folder, having the same name as the input file, minus
 * the '.tar' extension.
 *
 * @param inputFileName the input .tar file
 * @param outputDirName the output directory.
 * @throws IOException
 * @throws FileNotFoundException
 *
 * @return The {@link List} of {@link File}s with the untarred content.
 * @throws ArchiveException
 */
public static List<File> unTar(final String inputFileName, final String outputDirName, boolean flatten)
        throws FileNotFoundException, IOException, ArchiveException {

    File inputFile = new File(inputFileName);
    File outputDir = new File(outputDirName);

    final List<File> untaredFiles = new LinkedList<File>();
    final InputStream is;

    if (inputFileName.endsWith(".gz")) {
        is = new GzipCompressorInputStream(new FileInputStream(inputFile));
    } else {
        is = new FileInputStream(inputFile);
    }

    final TarArchiveInputStream debInputStream = (TarArchiveInputStream) new ArchiveStreamFactory()
            .createArchiveInputStream("tar", is);
    TarArchiveEntry entry = null;
    while ((entry = (TarArchiveEntry) debInputStream.getNextEntry()) != null) {
        final File outputFile = new File(outputDir, entry.getName());
        if (entry.isDirectory()) {
            if (flatten) {
                // no sub-directories
                continue;
            }
            LOG.debug(String.format("Attempting to write output directory %s.", outputFile.getAbsolutePath()));
            if (!outputFile.exists()) {
                LOG.debug(String.format("Attempting to create output directory %s.",
                        outputFile.getAbsolutePath()));
                if (!outputFile.mkdirs()) {
                    throw new IllegalStateException(
                            String.format("Couldn't create directory %s.", outputFile.getAbsolutePath()));
                }
            }
        } else {
            final OutputStream outputFileStream;
            if (flatten) {
                File flatOutputFile = new File(outputDir, outputFile.getName());
                LOG.debug(String.format("Creating flat output file %s.", flatOutputFile.getAbsolutePath()));
                outputFileStream = new FileOutputStream(flatOutputFile);
            } else if (!outputFile.getParentFile().exists()) {
                LOG.debug(String.format("Attempting to create output directory %s.",
                        outputFile.getParentFile().getAbsoluteFile()));
                if (!outputFile.getParentFile().getAbsoluteFile().mkdirs()) {
                    throw new IllegalStateException(String.format("Couldn't create directory %s.",
                            outputFile.getParentFile().getAbsolutePath()));
                }
                LOG.debug(String.format("Creating output file %s.", outputFile.getAbsolutePath()));
                outputFileStream = new FileOutputStream(outputFile);
            } else {
                outputFileStream = new FileOutputStream(outputFile);
            }
            IOUtils.copy(debInputStream, outputFileStream);
            outputFileStream.close();
        }
        untaredFiles.add(outputFile);
    }
    debInputStream.close();

    return untaredFiles;
}

From source file:org.apache.helix.provisioning.yarn.AppLauncher.java

/**
 * Generates the classpath after the archive file gets extracted under 'serviceName' folder
 * @param serviceName
 * @param archiveFile
 * @return the generated classpath
 */
private String generateClasspathAfterExtraction(String serviceName, File archiveFile) {
    if (!isArchive(archiveFile.getAbsolutePath())) {
        return "./";
    }
    StringBuilder classpath = new StringBuilder();
    // put the jar files under the archive in the classpath
    try {
        final InputStream is = new FileInputStream(archiveFile);
        final TarArchiveInputStream debInputStream = (TarArchiveInputStream) new ArchiveStreamFactory()
                .createArchiveInputStream("tar", is);
        TarArchiveEntry entry = null;
        while ((entry = (TarArchiveEntry) debInputStream.getNextEntry()) != null) {
            if (entry.isFile()) {
                classpath.append(File.pathSeparatorChar);
                classpath.append("./" + serviceName + "/" + entry.getName());
            }
        }
        debInputStream.close();

    } catch (Exception e) {
        LOG.error("Unable to read archive file:" + archiveFile, e);
    }
    return classpath.toString();
}

From source file:org.apache.ignite.testsuites.IgniteHadoopTestSuite.java

/**
 *  Downloads and extracts an Apache product.
 *
 * @param appName Name of application for log messages.
 * @param homeVariable Pointer to home directory of the component.
 * @param downloadPath Relative download path of tar package.
 * @param destName Local directory name to install component.
 * @throws Exception If failed.
 */
private static void download(String appName, String homeVariable, String downloadPath, String destName)
        throws Exception {
    String homeVal = IgniteSystemProperties.getString(homeVariable);

    if (!F.isEmpty(homeVal) && new File(homeVal).isDirectory()) {
        X.println(homeVariable + " is set to: " + homeVal);

        return;
    }

    List<String> urls = F.asList("http://archive.apache.org/dist/", "http://apache-mirror.rbc.ru/pub/apache/",
            "http://www.eu.apache.org/dist/", "http://www.us.apache.org/dist/");

    String tmpPath = System.getProperty("java.io.tmpdir");

    X.println("tmp: " + tmpPath);

    final File install = new File(tmpPath + File.separatorChar + "__hadoop");

    final File home = new File(install, destName);

    X.println("Setting " + homeVariable + " to " + home.getAbsolutePath());

    System.setProperty(homeVariable, home.getAbsolutePath());

    final File successFile = new File(home, "__success");

    if (home.exists()) {
        if (successFile.exists()) {
            X.println(appName + " distribution already exists.");

            return;
        }

        X.println(appName + " distribution is invalid and it will be deleted.");

        if (!U.delete(home))
            throw new IOException("Failed to delete directory: " + home.getAbsolutePath());
    }

    for (String url : urls) {
        if (!(install.exists() || install.mkdirs()))
            throw new IOException("Failed to create directory: " + install.getAbsolutePath());

        URL u = new URL(url + downloadPath);

        X.println("Attempting to download from: " + u);

        try {
            URLConnection c = u.openConnection();

            c.connect();

            try (TarArchiveInputStream in = new TarArchiveInputStream(
                    new GzipCompressorInputStream(new BufferedInputStream(c.getInputStream(), 32 * 1024)))) {

                TarArchiveEntry entry;

                while ((entry = in.getNextTarEntry()) != null) {
                    File dest = new File(install, entry.getName());

                    if (entry.isDirectory()) {
                        if (!dest.mkdirs())
                            throw new IllegalStateException();
                    } else if (entry.isSymbolicLink()) {
                        // Important: in Hadoop installation there are symlinks, we need to create them:
                        Path theLinkItself = Paths.get(install.getAbsolutePath(), entry.getName());

                        Path linkTarget = Paths.get(entry.getLinkName());

                        Files.createSymbolicLink(theLinkItself, linkTarget);
                    } else {
                        File parent = dest.getParentFile();

                        if (!(parent.exists() || parent.mkdirs()))
                            throw new IllegalStateException();

                        X.print(" [" + dest);

                        try (BufferedOutputStream out = new BufferedOutputStream(
                                new FileOutputStream(dest, false), 128 * 1024)) {
                            U.copy(in, out);

                            out.flush();
                        }

                        Files.setPosixFilePermissions(dest.toPath(), modeToPermissionSet(entry.getMode()));

                        X.println("]");
                    }
                }
            }

            if (successFile.createNewFile())
                return;
        } catch (Exception e) {
            e.printStackTrace();

            U.delete(home);
        }
    }

    throw new IllegalStateException("Failed to install " + appName + ".");
}

From source file:org.apache.kylin.common.util.TarGZUtil.java

public static void uncompressTarGZ(File tarFile, File dest) throws IOException {
    dest.mkdir();
    TarArchiveInputStream tarIn = null;

    tarIn = new TarArchiveInputStream(
            new GzipCompressorInputStream(new BufferedInputStream(new FileInputStream(tarFile))));

    TarArchiveEntry tarEntry = tarIn.getNextTarEntry();
    // tarIn is a TarArchiveInputStream
    while (tarEntry != null) {// create a file with the same name as the tarEntry
        File destPath = new File(dest, tarEntry.getName());
        System.out.println("working: " + destPath.getCanonicalPath());
        if (tarEntry.isDirectory()) {
            destPath.mkdirs();
        } else {
            destPath.createNewFile();
            //byte [] btoRead = new byte[(int)tarEntry.getSize()];
            byte[] btoRead = new byte[1024];
            //FileInputStream fin 
            //  = new FileInputStream(destPath.getCanonicalPath());
            BufferedOutputStream bout = new BufferedOutputStream(new FileOutputStream(destPath));
            int len = 0;

            while ((len = tarIn.read(btoRead)) != -1) {
                bout.write(btoRead, 0, len);
            }

            bout.close();
            btoRead = null;

        }
        tarEntry = tarIn.getNextTarEntry();
    }
    tarIn.close();
}

From source file:org.apache.metron.enrichment.adapters.maxmind.MaxMindDatabase.java

/**
 * Update the database being queried to one backed by the provided HDFS file.
 * Access to the database should be guarded by read locks to avoid disruption while updates are occurring.
 * @param hdfsFile The HDFS file path to be used for new queries.
 */
default void update(String hdfsFile) {
    // If nothing is set (or it has been unset), use the defaults
    if (hdfsFile == null || hdfsFile.isEmpty()) {
        LOG.debug("Using default for {}: {}", getHdfsFileConfig(), getHdfsFileDefault());
        hdfsFile = getHdfsFileDefault();
    }

    FileSystem fs = MaxMindDbUtilities.getFileSystem();

    if (hdfsFile.endsWith(MaxMindDatabase.EXTENSION_MMDB)) {
        lockIfNecessary();
        try (BufferedInputStream is = new BufferedInputStream(fs.open(new Path(hdfsFile)))) {
            setReader(MaxMindDbUtilities.readNewDatabase(getReader(), hdfsFile, is));
        } catch (IOException e) {
            MaxMindDbUtilities.handleDatabaseIOException(hdfsFile, e);
        } finally {
            unlockIfNecessary();
        }
    } else if (hdfsFile.endsWith(MaxMindDatabase.EXTENSION_MMDB_GZ)) {
        lockIfNecessary();
        try (GZIPInputStream is = new GZIPInputStream(fs.open(new Path(hdfsFile)))) {
            setReader(MaxMindDbUtilities.readNewDatabase(getReader(), hdfsFile, is));
        } catch (IOException e) {
            MaxMindDbUtilities.handleDatabaseIOException(hdfsFile, e);
        } finally {
            unlockIfNecessary();
        }
    } else if (hdfsFile.endsWith(MaxMindDatabase.EXTENSION_TAR_GZ)) {
        lockIfNecessary();
        try (TarArchiveInputStream is = new TarArchiveInputStream(
                new GZIPInputStream(fs.open(new Path(hdfsFile))))) {
            // Need to find the mmdb entry.
            TarArchiveEntry entry = is.getNextTarEntry();
            while (entry != null) {
                if (entry.isFile() && entry.getName().endsWith(MaxMindDatabase.EXTENSION_MMDB)) {
                    try (InputStream mmdb = new BufferedInputStream(is)) { // Read directly from tarInput
                        setReader(MaxMindDbUtilities.readNewDatabase(getReader(), hdfsFile, mmdb));
                        break; // Don't care about the other entries, leave immediately
                    }
                }
                entry = is.getNextTarEntry();
            }
        } catch (IOException e) {
            MaxMindDbUtilities.handleDatabaseIOException(hdfsFile, e);
        } finally {
            unlockIfNecessary();
        }
    }
}

From source file:org.apache.nifi.cluster.flow.impl.DataFlowDaoImpl.java

private ClusterDataFlow parseDataFlow(final File file) throws IOException, JAXBException, DaoException {
    byte[] flowBytes = new byte[0];
    byte[] templateBytes = new byte[0];
    byte[] snippetBytes = new byte[0];
    byte[] clusterInfoBytes = new byte[0];
    byte[] controllerServiceBytes = new byte[0];
    byte[] reportingTaskBytes = new byte[0];

    try (final InputStream inStream = new FileInputStream(file);
            final TarArchiveInputStream tarIn = new TarArchiveInputStream(new BufferedInputStream(inStream))) {
        TarArchiveEntry tarEntry;
        while ((tarEntry = tarIn.getNextTarEntry()) != null) {
            switch (tarEntry.getName()) {
            case FLOW_XML_FILENAME:
                flowBytes = new byte[(int) tarEntry.getSize()];
                StreamUtils.fillBuffer(tarIn, flowBytes, true);
                break;
            case TEMPLATES_FILENAME:
                templateBytes = new byte[(int) tarEntry.getSize()];
                StreamUtils.fillBuffer(tarIn, templateBytes, true);
                break;
            case SNIPPETS_FILENAME:
                snippetBytes = new byte[(int) tarEntry.getSize()];
                StreamUtils.fillBuffer(tarIn, snippetBytes, true);
                break;
            case CLUSTER_INFO_FILENAME:
                clusterInfoBytes = new byte[(int) tarEntry.getSize()];
                StreamUtils.fillBuffer(tarIn, clusterInfoBytes, true);
                break;
            case CONTROLLER_SERVICES_FILENAME:
                controllerServiceBytes = new byte[(int) tarEntry.getSize()];
                StreamUtils.fillBuffer(tarIn, controllerServiceBytes, true);
                break;
            case REPORTING_TASKS_FILENAME:
                reportingTaskBytes = new byte[(int) tarEntry.getSize()];
                StreamUtils.fillBuffer(tarIn, reportingTaskBytes, true);
                break;
            default:
                throw new DaoException(
                        "Found Unexpected file in dataflow configuration: " + tarEntry.getName());
            }
        }
    }

    final ClusterMetadata clusterMetadata;
    if (clusterInfoBytes.length == 0) {
        clusterMetadata = null;
    } else {
        final Unmarshaller clusterMetadataUnmarshaller = ClusterMetadata.jaxbCtx.createUnmarshaller();
        clusterMetadata = (ClusterMetadata) clusterMetadataUnmarshaller
                .unmarshal(new ByteArrayInputStream(clusterInfoBytes));
    }

    final StandardDataFlow dataFlow = new StandardDataFlow(flowBytes, templateBytes, snippetBytes);
    dataFlow.setAutoStartProcessors(autoStart);

    return new ClusterDataFlow(dataFlow, clusterMetadata == null ? null : clusterMetadata.getPrimaryNodeId(),
            controllerServiceBytes, reportingTaskBytes);
}

From source file:org.apache.nifi.processors.hadoop.TarUnpackerSequenceFileWriter.java

@Override
protected void processInputStream(final InputStream stream, final FlowFile tarArchivedFlowFile,
        final Writer writer) throws IOException {
    try (final TarArchiveInputStream tarIn = new TarArchiveInputStream(new BufferedInputStream(stream))) {
        TarArchiveEntry tarEntry;
        while ((tarEntry = tarIn.getNextTarEntry()) != null) {
            if (tarEntry.isDirectory()) {
                continue;
            }
            final String key = tarEntry.getName();
            final long fileSize = tarEntry.getSize();
            final InputStreamWritable inStreamWritable = new InputStreamWritable(tarIn, (int) fileSize);
            writer.append(new Text(key), inStreamWritable);
            logger.debug("Appending FlowFile {} to Sequence File", new Object[] { key });
        }
    }
}

From source file:org.apache.nifi.util.FlowFileUnpackagerV1.java

@Override
public Map<String, String> unpackageFlowFile(final InputStream in, final OutputStream out) throws IOException {
    flowFilesRead++;
    final TarArchiveInputStream tarIn = new TarArchiveInputStream(in);
    final TarArchiveEntry attribEntry = tarIn.getNextTarEntry();
    if (attribEntry == null) {
        return null;
    }

    final Map<String, String> attributes;
    if (attribEntry.getName().equals(FlowFilePackagerV1.FILENAME_ATTRIBUTES)) {
        attributes = getAttributes(tarIn);
    } else {
        throw new IOException("Expected two tar entries: " + FlowFilePackagerV1.FILENAME_CONTENT + " and "
                + FlowFilePackagerV1.FILENAME_ATTRIBUTES);
    }

    final TarArchiveEntry contentEntry = tarIn.getNextTarEntry();

    if (contentEntry != null && contentEntry.getName().equals(FlowFilePackagerV1.FILENAME_CONTENT)) {
        final byte[] buffer = new byte[512 << 10];//512KB
        int bytesRead = 0;
        while ((bytesRead = tarIn.read(buffer)) != -1) { //still more data to read
            if (bytesRead > 0) {
                out.write(buffer, 0, bytesRead);
            }
        }
        out.flush();
    } else {
        throw new IOException("Expected two tar entries: " + FlowFilePackagerV1.FILENAME_CONTENT + " and "
                + FlowFilePackagerV1.FILENAME_ATTRIBUTES);
    }

    return attributes;
}

From source file:org.apache.pulsar.tests.DockerUtils.java

public static void dumpContainerLogDirToTarget(DockerClient docker, String containerId, String path) {
    final int READ_BLOCK_SIZE = 10000;

    try (InputStream dockerStream = docker.copyArchiveFromContainerCmd(containerId, path).exec();
            TarArchiveInputStream stream = new TarArchiveInputStream(dockerStream)) {
        TarArchiveEntry entry = stream.getNextTarEntry();
        while (entry != null) {
            if (entry.isFile()) {
                File output = new File(getTargetDirectory(containerId), entry.getName().replace("/", "-"));
                try (FileOutputStream os = new FileOutputStream(output)) {
                    byte[] block = new byte[READ_BLOCK_SIZE];
                    int read = stream.read(block, 0, READ_BLOCK_SIZE);
                    while (read > -1) {
                        os.write(block, 0, read);
                        read = stream.read(block, 0, READ_BLOCK_SIZE);
                    }
                }
            }
            entry = stream.getNextTarEntry();
        }
    } catch (RuntimeException | IOException e) {
        LOG.error("Error reading logs from container {}", containerId, e);
    }
}