Example usage for com.google.common.io NullOutputStream nullOutputStream

Introduction

On this page you can find example usage for com.google.common.io NullOutputStream nullOutputStream.

Prototype

public static OutputStream nullOutputStream() 

Document

Returns a new OutputStream which discards all bytes.

Note that the prototype above is the static factory from later Guava releases; the examples below predate it and construct a NullOutputStream directly (depending on the project, the old Guava class or an equivalent such as Apache Commons IO's org.apache.commons.io.output.NullOutputStream).
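
For orientation, here is a minimal sketch of how a discarding stream is obtained today. It assumes Java 11+ for OutputStream.nullOutputStream() and a current Guava on the classpath for ByteStreams.nullOutputStream(); both names come from those libraries, not from the examples below.

import java.io.OutputStream;
import java.io.PrintStream;

import com.google.common.io.ByteStreams;

public class NullSinkDemo {
    public static void main(String[] args) throws Exception {
        // Java 11+: a discarding stream from the JDK itself.
        OutputStream jdkSink = OutputStream.nullOutputStream();
        jdkSink.write("discarded".getBytes("UTF-8"));

        // Guava: the static factory that replaced the old NullOutputStream class.
        PrintStream guavaSink = new PrintStream(ByteStreams.nullOutputStream(), true, "UTF-8");
        guavaSink.println("also discarded");
    }
}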

Usage

From source file: com.google.enterprise.connector.util.diffing.OrderedSnapshotWriter.java

public OrderedSnapshotWriter(SnapshotWriter delegate) throws SnapshotWriterException {
    // The superclass is not actually used, but the Writer argument
    // cannot be null. We use a null sink.
    super(new OutputStreamWriter(new NullOutputStream()), null, null);
    this.delegate = delegate;
    this.maxWrittenSnapshot = null;
}
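
The pattern above, passing a discarding writer purely to satisfy a non-null parameter, reduces to the following sketch. It assumes Java 11+ for OutputStream.nullOutputStream(); requireWriter is a hypothetical stand-in for the superclass constructor.

import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.nio.charset.StandardCharsets;

public class PlaceholderWriterDemo {
    // Hypothetical stand-in for an API that insists on a non-null Writer.
    static void requireWriter(Writer w) {
        if (w == null) throw new NullPointerException("writer");
    }

    public static void main(String[] args) {
        Writer sink = new OutputStreamWriter(OutputStream.nullOutputStream(), StandardCharsets.UTF_8);
        requireWriter(sink); // contract satisfied; anything written is discarded
    }
}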

From source file: de.tu_berlin.dima.oligos.db.AbstractDbDerby.java

public AbstractDbDerby(boolean verbose) {
    this.out = (verbose) ? System.out : new PrintStream(new NullOutputStream());
    // set database and connection properties
    this.properties = new Properties();
    this.properties.setProperty("user", USER_NAME);
}
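
The same quiet-mode idiom in isolation, as a hedged sketch: the verbose flag and the message are illustrative, and ByteStreams.nullOutputStream() is the Guava factory from the prototype above rather than the constructor used in this example.

import java.io.PrintStream;

import com.google.common.io.ByteStreams;

public class QuietModeDemo {
    private final PrintStream out;

    public QuietModeDemo(boolean verbose) {
        // Pick the sink once; callers then print unconditionally.
        this.out = verbose ? System.out : new PrintStream(ByteStreams.nullOutputStream());
    }

    public void run() {
        out.println("connecting to database..."); // dropped entirely when quiet
    }

    public static void main(String[] args) {
        new QuietModeDemo(false).run(); // prints nothing
        new QuietModeDemo(true).run();  // prints to the console
    }
}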

From source file: org.csanchez.jenkins.plugins.kubernetes.pipeline.ContainerExecDecorator.java

@Override
public Launcher decorate(final Launcher launcher, final Node node) {
    return new Launcher.DecoratedLauncher(launcher) {
        @Override
        public Proc launch(ProcStarter starter) throws IOException {
            if (!waitUntilContainerIsReady()) {
                throw new IOException("Failed to execute shell script inside container " + "[" + containerName
                        + "] of pod [" + podName + "]." + " Timed out waiting for container to become ready!");
            }

            final CountDownLatch started = new CountDownLatch(1);
            final CountDownLatch finished = new CountDownLatch(1);
            final AtomicBoolean alive = new AtomicBoolean(false);

            PrintStream printStream = launcher.getListener().getLogger();
            OutputStream stream = printStream;
            // Do not send this command to the output when in quiet mode
            if (starter.quiet()) {
                stream = new NullOutputStream();
                printStream = new PrintStream(stream, false, StandardCharsets.UTF_8.toString());
            }

            // we need to keep the last bytes in the stream to parse the exit code as it is printed there
            // so we use a buffer
            ExitCodeOutputStream exitCodeOutputStream = new ExitCodeOutputStream();
            // send container output both to the job output and our buffer
            stream = new TeeOutputStream(exitCodeOutputStream, stream);

            String msg = "Executing shell script inside container [" + containerName + "] of pod [" + podName
                    + "]";
            LOGGER.log(Level.FINEST, msg);
            printStream.println(msg);

            watch = client.pods().inNamespace(namespace).withName(podName).inContainer(containerName)
                    .redirectingInput().writingOutput(stream).writingError(stream).withTTY()
                    .usingListener(new ExecListener() {
                        @Override
                        public void onOpen(Response response) {
                            alive.set(true);
                            started.countDown();
                        }

                        @Override
                        public void onFailure(Throwable t, Response response) {
                            alive.set(false);
                            t.printStackTrace(launcher.getListener().getLogger());
                            started.countDown();
                            LOGGER.log(Level.FINEST, "onFailure : {0}", finished);
                            if (finished.getCount() == 0) {
                                LOGGER.log(Level.WARNING,
                                        "onFailure called but latch already finished. This may be a bug in the kubernetes-plugin");
                            }
                            finished.countDown();
                        }

                        @Override
                        public void onClose(int i, String s) {
                            alive.set(false);
                            started.countDown();
                            LOGGER.log(Level.FINEST, "onClose : {0}", finished);
                            if (finished.getCount() == 0) {
                                LOGGER.log(Level.WARNING,
                                        "onClose called but latch already finished. This indicates a bug in the kubernetes-plugin");
                            }
                            finished.countDown();
                        }
                    }).exec();

            waitQuietly(started);

            if (starter.pwd() != null) {
                // We need to get into the project workspace.
                // The workspace is not known in advance, so we have to execute a cd command.
                watch.getInput().write(
                        String.format("cd \"%s\"%s", starter.pwd(), NEWLINE).getBytes(StandardCharsets.UTF_8));
            }
            doExec(watch, printStream, getCommands(starter));
            proc = new ContainerExecProc(watch, alive, finished, new Callable<Integer>() {
                @Override
                public Integer call() {
                    return exitCodeOutputStream.getExitCode();
                }
            });
            return proc;
        }

        @Override
        public void kill(Map<String, String> modelEnvVars) throws IOException, InterruptedException {
            // String cookie = modelEnvVars.get(COOKIE_VAR);
            // TODO we need to use the cookie for something
            getListener().getLogger().println("Killing process.");
            ContainerExecDecorator.this.close();
        }

        private boolean isContainerReady(Pod pod, String container) {
            if (pod == null || pod.getStatus() == null || pod.getStatus().getContainerStatuses() == null) {
                return false;
            }

            for (ContainerStatus info : pod.getStatus().getContainerStatuses()) {
                if (info.getName().equals(container) && info.getReady()) {
                    return true;
                }
            }
            return false;
        }

        private boolean waitUntilContainerIsReady() {
            int i = 0;
            int j = 10; // wait 60 seconds
            Pod pod = client.pods().inNamespace(namespace).withName(podName).get();

            if (pod == null) {
                launcher.getListener().getLogger().println("Waiting for pod [" + podName + "] to exist.");
                // wait for Pod to be running.
                for (; i < j; i++) {
                    LOGGER.log(Level.INFO, "Getting pod ({1}/{2}): {0}", new Object[] { podName, i, j });
                    pod = client.pods().inNamespace(namespace).withName(podName).get();
                    if (pod != null) {
                        break;
                    }
                    LOGGER.log(Level.INFO, "Waiting 6 seconds before checking if pod exists ({1}/{2}): {0}",
                            new Object[] { podName, i, j });
                    try {
                        Thread.sleep(6000);
                    } catch (InterruptedException e) {
                        return false;
                    }
                }
            }

            if (pod == null) {
                throw new IllegalArgumentException("Container with name:[" + containerName
                        + "] not found in pod:[" + podName + "], pod doesn't exist");
            }

            if (isContainerReady(pod, containerName)) {
                return true;
            }

            launcher.getListener().getLogger().println("Waiting for container container [" + containerName
                    + "] of pod [" + podName + "] to become ready.");
            final CountDownLatch latch = new CountDownLatch(1);
            Watcher<Pod> podWatcher = new Watcher<Pod>() {
                @Override
                public void eventReceived(Action action, Pod resource) {
                    switch (action) {
                    case MODIFIED:
                        if (isContainerReady(resource, containerName)) {
                            latch.countDown();
                        }
                        break;
                    default:
                        break;
                    }
                }

                @Override
                public void onClose(KubernetesClientException cause) {

                }
            };

            try (Watch watch = client.pods().inNamespace(namespace).withName(podName).watch(podWatcher)) {
                if (latch.await(CONTAINER_READY_TIMEOUT, TimeUnit.MINUTES)) {
                    return true;
                }
            } catch (InterruptedException e) {
                return false;
            }
            return false;
        }
    };
}
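
The notable wiring in this example is the TeeOutputStream: the container output always feeds the exit-code buffer, while the user-visible branch is swapped for a NullOutputStream in quiet mode. A minimal sketch of that wiring, assuming Apache Commons IO for TeeOutputStream and NullOutputStream; a plain byte buffer stands in for ExitCodeOutputStream.

import java.io.ByteArrayOutputStream;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;

import org.apache.commons.io.output.NullOutputStream;
import org.apache.commons.io.output.TeeOutputStream;

public class TeeToNullDemo {
    public static void main(String[] args) throws Exception {
        boolean quiet = true;

        // Always capture into a buffer (stand-in for the exit-code parser)...
        ByteArrayOutputStream capture = new ByteArrayOutputStream();
        // ...while the second branch is either the real log or a discarding sink.
        OutputStream visible = quiet ? new NullOutputStream() : System.out;

        OutputStream out = new TeeOutputStream(capture, visible);
        out.write("EXITCODE 0\n".getBytes(StandardCharsets.UTF_8));
        out.flush();

        System.out.println("captured: " + capture.toString("UTF-8").trim());
    }
}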

From source file: net.sourceforge.vaticanfetcher.model.index.file.RarTree.java

protected Map<Integer, File> doUnpack(Map<Integer, TreeNode> unpackMap, TempFileFactory tempFileFactory)
        throws IOException {
    Map<Integer, File> indexFileMap = Maps.newHashMap();
    Archive archive = null;
    try {
        archive = new Archive(archiveFile);

        /*
         * If the archive uses solid compression, all files preceding the target files must be extracted, otherwise JUnRar will throw
         * errors, such as 'crcError'. In order to save disk space, we can extract unneeded files into a NullOutputStream.
         * 
         * To find out whether the archive uses solid compression, we'll have to iterate over all file headers and check their solid flags
         * one by one. - This is necessary, because it is usually not the case that all file headers have the same solid flag! In fact, in
         * a regular solid archive the first file header is marked 'non-solid', while the remaining file headers are marked 'solid'.
         */
        boolean isSolid = isSolidRarArchive(archive);

        FileHeader fh = null;
        NullOutputStream nullOut = isSolid ? new NullOutputStream() : null;
        for (int i = 0;; i++) {
            /*
             * We can abort early if we've extracted all needed files before reaching the end of the archive.
             */
            if (unpackMap.isEmpty())
                break;

            fh = archive.nextFileHeader();
            if (fh == null)
                break; // Last entry reached
            if (fh.isDirectory())
                continue;

            /*
             * This was already reported when the tree was constructed, so we can continue silently here.
             */
            if (fh.isEncrypted())
                continue;

            /*
             * Remove entry from map so we'll know when there are no more files to extract.
             */
            TreeNode treeNode = unpackMap.remove(i);

            try {
                if (treeNode != null) {
                    File file = tempFileFactory.createTempFile(treeNode);
                    OutputStream out = new FileOutputStream(file);
                    archive.extractFile(fh, out);
                    Closeables.closeQuietly(out);
                    indexFileMap.put(i, file);
                } else if (isSolid) {
                    archive.extractFile(fh, nullOut);
                }
            } catch (OutOfMemoryError e) {
                /*
                 * Calling extractFile can throw an OutOfMemoryError. See bug #3443490.
                 */
                if (treeNode != null) // Ignore errors for entries written to NullOutputStream
                    failReporter.fail(ErrorType.OUT_OF_MEMORY, treeNode, e);
            } catch (Exception e) {
                if (treeNode != null) // Ignore errors for entries written to NullOutputStream
                    failReporter.fail(ErrorType.ARCHIVE_ENTRY, treeNode, e);
            }
        }
        return indexFileMap;
    } catch (RarException e) {
        throw new IOException(e);
    } finally {
        Closeables.closeQuietly(archive);
    }
}

From source file: net.sourceforge.docfetcher.model.index.file.RarTree.java

protected Map<Integer, File> doUnpack(Map<Integer, TreeNode> unpackMap, TempFileFactory tempFileFactory)
        throws IOException {
    Map<Integer, File> indexFileMap = Maps.newHashMap();
    Archive archive = null;/*w  ww  . ja va 2 s. com*/
    try {
        archive = new Archive(archiveFile);

        /*
         * If the archive uses solid compression, all files preceding the
         * target files must be extracted, otherwise JUnRar will throw
         * errors, such as 'crcError'. In order to save disk space, we can
         * extract unneeded files into a NullOutputStream.
         * 
         * To find out whether the archive uses solid compression, we'll
         * have to iterate over all file headers and check their solid flags
         * one by one. - This is necessary, because it is usually not the
         * case that all file headers have the same solid flag! In fact, in
         * a regular solid archive the first file header is marked
         * 'non-solid', while the remaining file headers are marked 'solid'.
         */
        boolean isSolid = isSolidRarArchive(archive);

        FileHeader fh = null;
        NullOutputStream nullOut = isSolid ? new NullOutputStream() : null;
        for (int i = 0;; i++) {
            /*
             * We can abort early if we've extracted all needed files before
             * reaching the end of the archive.
             */
            if (unpackMap.isEmpty())
                break;

            fh = archive.nextFileHeader();
            if (fh == null)
                break; // Last entry reached
            if (fh.isDirectory())
                continue;

            /*
             * This was already reported when the tree was constructed, so
             * we can continue silently here.
             */
            if (fh.isEncrypted())
                continue;

            /*
             * Remove entry from map so we'll know when there are no more
             * files to extract.
             */
            TreeNode treeNode = unpackMap.remove(i);

            try {
                if (treeNode != null) {
                    File file = tempFileFactory.createTempFile(treeNode);
                    OutputStream out = new FileOutputStream(file);
                    archive.extractFile(fh, out);
                    Closeables.closeQuietly(out);
                    indexFileMap.put(i, file);
                } else if (isSolid) {
                    archive.extractFile(fh, nullOut);
                }
            } catch (OutOfMemoryError e) {
                /*
                 * Calling extractFile can throw an OutOfMemoryError. See
                 * bug #3443490.
                 */
                if (treeNode != null) // Ignore errors for entries written to NullOutputStream
                    failReporter.fail(ErrorType.OUT_OF_MEMORY, treeNode, e);
            } catch (Exception e) {
                if (treeNode != null) // Ignore errors for entries written to NullOutputStream
                    failReporter.fail(ErrorType.ARCHIVE_ENTRY, treeNode, e);
            }
        }
        return indexFileMap;
    } catch (RarException e) {
        throw new IOException(e);
    } finally {
        Closeables.closeQuietly(archive);
    }
}

From source file: org.jclouds.cleanup.doclet.ClassDocParser.java

public Bean parseBean(ClassDoc element, ParseOptions options, boolean asSuperClass) {
    checkNotNull(element, "element");
    checkNotNull(options, "options");

    ParseOptions.NullableHandling nullableHandling = options.getNullableHandling();
    Bean superClass = null;
    if (element.superclassType() != null
            && !Objects.equal(element.superclassType().qualifiedTypeName(), "java.lang.Object")) {
        String superClassQualifiedName = element.superclassType().qualifiedTypeName();
        if (!CACHE.containsKey(superClassQualifiedName)) {
            LOG.debug("Parsing superclass " + superClassQualifiedName);
            parseBean(element.superclassType().asClassDoc(), options, true);
        }
        superClass = checkNotNull(CACHE.get(superClassQualifiedName),
                "oops failed to parse superclass " + superClassQualifiedName + " of " + element.name());
    }

    Bean bean = new Bean(superClass, element.containingPackage().toString(), element.isAbstract(), options,
            element.simpleTypeName(), getAnnotations(element),
            extractComment("Class " + element.name(), null, element));

    LOG.debug("Parsing bean " + bean);

    // Process imports
    List<String> lines = ImmutableList.of();
    try {
        if (element.position() != null && element.position().file() != null) {
            // This is actually helpful (oddly!)
            lines = ImmutableList.copyOf(
                    Strings2.toStringAndClose(new FileInputStream(element.position().file())).split("\n"));
        }
    } catch (IOException ex) {
        if (asSuperClass)
            LOG.debug("Failed to parse imports et al from superclass " + bean);
        else {
            LOG.error("Failed to parse imports et al for " + bean, ex);
            Throwables.propagate(ex);
        }
    }

    bean.addImports(Sets.filter(ImmutableSet.copyOf(lines), new Predicate<String>() {
        @Override
        public boolean apply(String input) {
            return input.trim().startsWith("import ");
        }
    }));

    // TODO this is re-indenting inner classes (sometimes unpleasantly!)
    // Process inner classes
    for (ClassDoc clazz : element.innerClasses()) {
        if (!clazz.simpleTypeName().toLowerCase().endsWith("builder")) {
            List<String> content = Lists.newArrayList();
            IndentedPrintWriter ipw = new IndentedPrintWriter(new NullOutputStream());
            ipw.println("{");
            for (int i = clazz.position().line(); ipw.currentIndent() > 0 && i < lines.size(); i++) {
                ipw.println(lines.get(i));
                content.add(lines.get(i).trim());
            }
            content.remove(content.size() - 1);
            LOG.debug("Processing inner class " + bean.getType() + "." + clazz.name());
            bean.addInnerClass(new InnerClass("public static " + (clazz.isEnum() ? "enum" : "class"),
                    clazz.simpleTypeName(), getAnnotations(clazz), extractComment(clazz), content));
        }
    }

    // Extract these bits of information from the ClassDoc data
    Map<String, String> serializedNames = Maps.newHashMap();
    Set<String> nullableFields = Sets.newHashSet();
    Map<String, MethodDoc> accessors = Maps.newHashMap();

    for (ConstructorDoc constructor : element.constructors()) {
        for (Parameter parameter : constructor.parameters()) {
            // options state field MUST be nullable
            if (nullableHandling.mustBeNullable(parameter.name())) {
                LOG.debug(nullableHandling + " marking " + bean.getType() + "." + parameter + " as nullable");
                nullableFields.add(parameter.name());
            }
            // options state field MAY be nullable
            if (nullableHandling.maybeNullable(parameter.name())) {
                for (AnnotationDesc anno : parameter.annotations()) {
                    if (Objects.equal(anno.annotationType().typeName(), "Nullable")) {
                        LOG.debug(nullableHandling + " marking " + bean.getType() + "." + parameter
                                + " as nullable (constructor annotation)");
                        nullableFields.add(parameter.name());
                    }
                }
            }
        }
    }

    // Inject/Named
    for (ConstructorDoc constructor : element.constructors()) {
        for (AnnotationDesc anno : constructor.annotations()) {
            if (Objects.equal(anno.annotationType().typeName(), "Inject")
                    || Objects.equal(anno.annotationType().typeName(), "ConstructorProperties")) {
                for (Parameter parameter : constructor.parameters()) {
                    // try to pick-up the associations with fields
                    String serializedName = getSerializedName(parameter.annotations());
                    if (serializedName != null) {
                        serializedNames.put(parameter.name(), serializedName);
                    }
                }
            }
        }
    }

    // ConstructorProperties
    for (ConstructorDoc constructor : element.constructors()) {
        Iterable<String> constructorProperties = null;
        for (AnnotationDesc anno : constructor.annotations()) {
            if (Objects.equal(anno.annotationType().typeName(), "ConstructorProperties")) {
                String stuff = anno.elementValues()[0].value().toString();
                constructorProperties = Splitter.on(",").trimResults(CharMatcher.anyOf("\t\n {}\""))
                        .split(stuff);
                break;
            }
        }

        // Try to map to actual field names...
        if (constructorProperties != null) {
            Iterator<String> it = constructorProperties.iterator();
            for (int i = 0; i < constructor.parameters().length && it.hasNext(); i++) {
                serializedNames.put(constructor.parameters()[i].name(), it.next());
            }
            break;
        }
    }

    // Look for accessor and field annotations
    for (FieldDoc field : element.fields()) {

        if (!field.isStatic()) {
            String fieldName = field.name();

            if (nullableHandling.maybeNullable(fieldName) && annotatatedAsNullable(field)) {
                LOG.debug(nullableHandling + " marking " + bean.getType() + "." + fieldName
                        + " as nullable (field annotation)");
                nullableFields.add(fieldName);
            }

            // Accessors first
            for (MethodDoc method : element.methods()) {
                if (Objects.equal(method.name(), getAccessorName(field))
                        || Objects.equal(method.name(), fieldName)) {
                    accessors.put(fieldName, method);
                    String serializedName = getSerializedName(method.annotations());
                    if (serializedName != null) {
                        LOG.debug(bean.getType() + "." + fieldName + " serialized name is " + serializedName
                                + " (getter annotation)");
                        serializedNames.put(fieldName, serializedName);
                    }
                    if (nullableHandling.maybeNullable(fieldName) && annotatatedAsNullable(method)) {
                        LOG.debug(nullableHandling + " marking " + bean.getType() + "." + fieldName
                                + " as nullable (getter annotation)");
                        nullableFields.add(fieldName);
                    }
                }
            }

            // Fields
            String serializedName = getSerializedName(field.annotations());
            if (serializedName != null) {
                LOG.debug(bean.getType() + "." + fieldName + " serialized name is " + serializedName
                        + " (field annotation)");
                serializedNames.put(fieldName, serializedName);
            }
        }
    }

    // Construct the fields
    for (FieldDoc field : element.fields()) {
        if (field.isStatic()) {
            LOG.debug("adding static field " + bean.getType() + "." + field.name());
            bean.addClassField(new ClassField(field.name(), properTypeName(field, bean.rawImports()),
                    getAnnotations(field),
                    extractComment(null, ImmutableMultimap.<String, String>of(), field)));
        } else {
            LOG.debug("adding instance field " + bean.getType() + "." + field.name());
            // Note we need to pick up any stray comments or annotations on accessors
            InstanceField instanceField = new InstanceField(field.name(), serializedNames.get(field.name()),
                    getAccessorName(field), properTypeName(field, bean.rawImports()),
                    nullableFields.contains(field.name()), getAnnotations(field), extractComment(null,
                            ImmutableMultimap.<String, String>of(), field, accessors.get(field.name())));
            bean.addInstanceField(instanceField);
        }
    }

    CACHE.put(element.qualifiedTypeName(), bean);

    return bean;
}
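
Worth noting in the inner-class loop above: the IndentedPrintWriter writes into a NullOutputStream and is used purely for its side effect, tracking brace depth via currentIndent(). A sketch of that write-only-parser idea follows; IndentedPrintWriter is jclouds-internal, so this hypothetical BraceCounter only mimics the depth bookkeeping (Java 11+ assumed for the null stream).

import java.io.OutputStream;
import java.io.PrintWriter;

public class BraceCounter extends PrintWriter {
    private int depth;

    public BraceCounter() {
        super(OutputStream.nullOutputStream()); // everything written is discarded
    }

    @Override
    public void println(String line) {
        for (char c : line.toCharArray()) {
            if (c == '{') depth++;
            else if (c == '}') depth--;
        }
        super.println(line); // keeps the PrintWriter contract, goes nowhere
    }

    public int currentIndent() {
        return depth;
    }

    public static void main(String[] args) {
        BraceCounter w = new BraceCounter();
        w.println("{");
        w.println("int x = 1; }");
        System.out.println(w.currentIndent()); // 0: the block has been closed
    }
}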

From source file: com.zimbra.cs.index.LuceneIndex.java

private IndexWriter openIndexWriter(IndexWriterConfig.OpenMode mode, boolean tryRepair) throws IOException {
    try {
        IndexWriter writer = new IndexWriter(luceneDirectory, getWriterConfig().setOpenMode(mode)) {
            /**
             * Redirect Lucene's logging to ZimbraLog.
             */
            @Override
            public void message(String message) {
                ZimbraLog.index.debug("IW: %s", message);
            }
        };
        if (ZimbraLog.index.isDebugEnabled()) {
            // Set a dummy PrintStream, otherwise Lucene suppresses logging.
            writer.setInfoStream(new PrintStream(new NullOutputStream()));
        }
        return writer;
    } catch (AssertionError e) {
        unlockIndexWriter();
        if (!tryRepair) {
            throw e;
        }
        repair(e);
        return openIndexWriter(mode, false);
    } catch (CorruptIndexException e) {
        unlockIndexWriter();
        if (!tryRepair) {
            throw e;
        }
        repair(e);
        return openIndexWriter(mode, false);
    }
}
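
As the source comment says, the dummy PrintStream exists only to switch Lucene 3.x's internal logging on: with a null infoStream the message() hook above is never invoked. The trick generalizes to any library that treats a non-null stream as "logging enabled"; a sketch assuming Java 11+.

import java.io.OutputStream;
import java.io.PrintStream;

public class DummyInfoStreamDemo {
    public static void main(String[] args) {
        // Non-null, so a library checking "infoStream != null" enables its
        // logging path; the bytes themselves go nowhere.
        PrintStream dummy = new PrintStream(OutputStream.nullOutputStream());
        dummy.println("IW: flush"); // silently dropped
        System.out.println("hook enabled, console stays clean");
    }
}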

From source file: net.sourceforge.vaticanfetcher.model.index.file.FileFactory.java

@NotNull
private FileResource unpackFromRarArchive(@NotNull IndexingConfig config, @NotNull Path originalArchivePath,
        @NotNull FileResource archiveResource, @NotNull String entryPath)
        throws ArchiveEncryptedException, DiskSpaceException, FileNotFoundException, IOException {
    Archive archive = null;
    try {
        File archiveFile = archiveResource.getFile();
        archive = new Archive(archiveFile);
        if (archive.isEncrypted())
            throw new ArchiveEncryptedException(archiveFile, originalArchivePath.getPath());

        List<FileHeader> fileHeaders = archive.getFileHeaders();

        boolean isSolid = false;
        for (FileHeader fh : fileHeaders) {
            if (fh.isSolid()) {
                isSolid = true;
                break;
            }
        }

        /*
         * For solid archives, if we want to extract a certain archive entry, we also have to extract all archive entries that preceded
         * it. Thus, it is more efficient to run through the archive twice rather than once: During the first phase, we check for any
         * matching archive entries by only looking at the file headers, and return early if there is no match. Only if there is a match,
         * we'll proceed to the second phase, where, if the archive uses solid compression, all files up to the target file will be extracted.
         */
        if (isSolid) {
            boolean match = false;
            for (FileHeader fh : fileHeaders) {
                if (fh.isEncrypted() || fh.isDirectory())
                    continue;
                String currentPath = fh.isUnicode() ? fh.getFileNameW() : fh.getFileNameString();
                currentPath = Util.toForwardSlashes(currentPath);
                assert noTrailingSlash(currentPath);

                if (entryPath.equals(currentPath)
                        || (entryPath.startsWith(currentPath + "/") && config.isArchive(currentPath))) {
                    match = true;
                    break;
                }
            }
            if (!match)
                throw new FileNotFoundException();
        }

        FileHeader fh = null;
        NullOutputStream nullOut = isSolid ? new NullOutputStream() : null;

        while (true) {
            fh = archive.nextFileHeader();
            if (fh == null)
                break; // Last entry reached
            if (fh.isEncrypted() || fh.isDirectory())
                continue;

            String currentPath = fh.isUnicode() ? fh.getFileNameW() : fh.getFileNameString();
            currentPath = Util.toForwardSlashes(currentPath);
            assert noTrailingSlash(currentPath);

            // TODO post-release-1.1: throw disk space exception
            if (entryPath.equals(currentPath)) { // Exact match
                Path cacheKey = originalArchivePath.createSubPath(currentPath);
                File unpackedFile = unpackRarEntry(config, archive, fh, entryPath);
                return unpackCache.putIfAbsent(cacheKey, unpackedFile);
            } else if (entryPath.startsWith(currentPath + "/") && config.isArchive(currentPath)) { // Partial match
                File innerArchiveFile;
                try {
                    innerArchiveFile = unpackRarEntry(config, archive, fh, currentPath);
                } finally {
                    archiveResource.dispose();
                }
                Path cacheKey = originalArchivePath.createSubPath(currentPath);
                FileResource innerArchive = unpackCache.putIfAbsent(cacheKey, innerArchiveFile);
                String remainingPath = entryPath.substring(currentPath.length() + 1);
                return unpackFromArchive(config, cacheKey, innerArchive, remainingPath);
            } else if (isSolid) { // Not a match
                archive.extractFile(fh, nullOut);
            }
        }
    } catch (RarException e) {
        throw new IOException(e);
    } finally {
        Closeables.closeQuietly(archive);
        archiveResource.dispose();
    }
    throw new FileNotFoundException();
}

From source file: net.sourceforge.docfetcher.model.index.file.FileFactory.java

(This code is identical to the FileFactory example above, apart from the package name.)