Example usage for java.io IOException addSuppressed

List of usage examples for java.io IOException addSuppressed

Introduction

This page lists usage examples for java.io IOException addSuppressed.

Prototype

public final synchronized void addSuppressed(Throwable exception) 

Document

Appends the specified exception to the exceptions that were suppressed in order to deliver this exception.
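
addSuppressed is inherited from java.lang.Throwable. It is typically called on a primary exception when a later cleanup step (such as closing a resource) also fails, so the secondary failure is recorded on the primary one instead of being lost. A minimal sketch of that pattern, independent of the examples below (the file name "data.bin" is only illustrative):

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

public class SuppressedExample {
    public static void main(String[] args) throws IOException {
        InputStream in = new FileInputStream("data.bin"); // illustrative file name
        IOException primary = null;
        try {
            in.read(); // the "real" work; may fail
        } catch (IOException e) {
            primary = e;
        } finally {
            try {
                in.close();
            } catch (IOException closeFailure) {
                if (primary != null) {
                    // record the close failure on the primary exception
                    primary.addSuppressed(closeFailure);
                } else {
                    primary = closeFailure;
                }
            }
        }
        if (primary != null) {
            // anything added via addSuppressed() is visible through getSuppressed()
            throw primary;
        }
    }
}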

Usage

From source file: org.apache.htrace.impl.PackedBufferManager.java

@Override
public void flush() throws IOException {
    SelectionKey sockKey = null;
    IOException ioe = null;
    frameBuffer.position(0);
    prequel.getBuffer().position(0);
    spans.getBuffer().position(0);
    if (LOG.isTraceEnabled()) {
        LOG.trace("Preparing to flush " + numSpans + " spans to " + conf.endpointStr);
    }
    try {
        sockKey = doConnect();
        doSend(sockKey, new ByteBuffer[] { frameBuffer, prequel.getBuffer(), spans.getBuffer() });
        ByteBuffer response = prequel.getBuffer();
        readAndValidateResponseFrame(sockKey, response, 1, METHOD_ID_WRITE_SPANS);
    } catch (IOException e) {
        // This LOG message is only at debug level because we also log these
        // exceptions at error level inside HTracedReceiver.  The logging in
        // HTracedReceiver is rate-limited to avoid overwhelming the client log
        // if htraced goes down.  The debug and trace logging is not
        // rate-limited.
        if (LOG.isDebugEnabled()) {
            LOG.debug("Got exception during flush", e);
        }
        ioe = e;
    } finally {
        if (sockKey != null) {
            sockKey.cancel();
            try {
                SocketChannel sock = (SocketChannel) sockKey.attachment();
                sock.close();
            } catch (IOException e) {
                // Attach the close failure to the primary exception so it is not lost;
                // if there was no primary exception, the close failure is dropped.
                if (ioe != null) {
                    ioe.addSuppressed(e);
                }
            }
        }
    }
    if (ioe != null) {
        throw ioe;
    }
    if (LOG.isTraceEnabled()) {
        LOG.trace("Successfully flushed " + numSpans + " spans to " + conf.endpointStr);
    }
}

From source file: org.apache.nifi.controller.repository.StandardProcessSession.java

@SuppressWarnings({ "unchecked", "rawtypes" })
private void commit(final Checkpoint checkpoint) {
    try {
        final long commitStartNanos = System.nanoTime();

        resetReadClaim();
        try {
            claimCache.flush();
        } finally {
            claimCache.reset();
        }

        final long updateProvenanceStart = System.nanoTime();
        updateProvenanceRepo(checkpoint);

        final long claimRemovalStart = System.nanoTime();
        final long updateProvenanceNanos = claimRemovalStart - updateProvenanceStart;

        /**
         * Figure out which content claims can be released. At this point,
         * we will decrement the Claimant Count for the claims via the
         * Content Repository. We do not actually destroy the content
         * because otherwise, we could remove the Original Claim and
         * crash/restart before the FlowFileRepository is updated. This will
         * result in the FlowFile being restored such that the content claim
         * points to the Original Claim -- which has already been removed!
         *
         */
        for (final Map.Entry<FlowFileRecord, StandardRepositoryRecord> entry : checkpoint.records.entrySet()) {
            final FlowFile flowFile = entry.getKey();
            final StandardRepositoryRecord record = entry.getValue();

            if (record.isMarkedForDelete()) {
                // if the working claim is not the same as the original claim, we can immediately destroy the working claim
                // because it was created in this session and is to be deleted. We don't need to wait for the FlowFile Repo to sync.
                decrementClaimCount(record.getWorkingClaim());

                if (record.getOriginalClaim() != null
                        && !record.getOriginalClaim().equals(record.getWorkingClaim())) {
                    // if working & original claim are same, don't remove twice; we only want to remove the original
                    // if it's different from the working. Otherwise, we remove two claimant counts. This causes
                    // an issue if we only updated the FlowFile attributes.
                    decrementClaimCount(record.getOriginalClaim());
                }
                final long flowFileLife = System.currentTimeMillis() - flowFile.getEntryDate();
                final Connectable connectable = context.getConnectable();
                final Object terminator = connectable instanceof ProcessorNode
                        ? ((ProcessorNode) connectable).getProcessor()
                        : connectable;
                LOG.info("{} terminated by {}; life of FlowFile = {} ms",
                        new Object[] { flowFile, terminator, flowFileLife });
            } else if (record.isWorking() && record.getWorkingClaim() != record.getOriginalClaim()) {
                // records which have been updated - remove original if exists
                decrementClaimCount(record.getOriginalClaim());
            }
        }

        final long claimRemovalFinishNanos = System.nanoTime();
        final long claimRemovalNanos = claimRemovalFinishNanos - claimRemovalStart;

        // Update the FlowFile Repository
        try {
            final Collection<StandardRepositoryRecord> repoRecords = checkpoint.records.values();
            context.getFlowFileRepository().updateRepository((Collection) repoRecords);
        } catch (final IOException ioe) {
            // if we fail to commit the session, we need to roll back
            // the checkpoints as well because none of the checkpoints
            // were ever committed.
            rollback(false, true);
            throw new ProcessException("FlowFile Repository failed to update", ioe);
        }

        final long flowFileRepoUpdateFinishNanos = System.nanoTime();
        final long flowFileRepoUpdateNanos = flowFileRepoUpdateFinishNanos - claimRemovalFinishNanos;

        updateEventRepository(checkpoint);

        final long updateEventRepositoryFinishNanos = System.nanoTime();
        final long updateEventRepositoryNanos = updateEventRepositoryFinishNanos
                - flowFileRepoUpdateFinishNanos;

        // transfer the flowfiles to the connections' queues.
        final Map<FlowFileQueue, Collection<FlowFileRecord>> recordMap = new HashMap<>();
        for (final StandardRepositoryRecord record : checkpoint.records.values()) {
            if (record.isMarkedForAbort() || record.isMarkedForDelete()) {
                continue; // these don't need to be transferred
            }
            // record.getCurrent() will return null if this record was created in this session --
            // in this case, we just ignore it, and it will be cleaned up by clearing the records map.
            if (record.getCurrent() != null) {
                Collection<FlowFileRecord> collection = recordMap.get(record.getDestination());
                if (collection == null) {
                    collection = new ArrayList<>();
                    recordMap.put(record.getDestination(), collection);
                }
                collection.add(record.getCurrent());
            }
        }

        for (final Map.Entry<FlowFileQueue, Collection<FlowFileRecord>> entry : recordMap.entrySet()) {
            entry.getKey().putAll(entry.getValue());
        }

        final long enqueueFlowFileFinishNanos = System.nanoTime();
        final long enqueueFlowFileNanos = enqueueFlowFileFinishNanos - updateEventRepositoryFinishNanos;

        // Delete any files from disk that need to be removed.
        for (final Path path : checkpoint.deleteOnCommit.values()) {
            try {
                Files.deleteIfExists(path);
            } catch (final IOException e) {
                throw new FlowFileAccessException("Unable to delete " + path.toFile().getAbsolutePath(), e);
            }
        }
        checkpoint.deleteOnCommit.clear();

        if (LOG.isInfoEnabled()) {
            final String sessionSummary = summarizeEvents(checkpoint);
            if (!sessionSummary.isEmpty()) {
                LOG.info("{} for {}, committed the following events: {}",
                        new Object[] { this, connectableDescription, sessionSummary });
            }
        }

        for (final Map.Entry<String, Long> entry : checkpoint.counters.entrySet()) {
            adjustCounter(entry.getKey(), entry.getValue(), true);
        }

        acknowledgeRecords();
        resetState();

        if (LOG.isDebugEnabled()) {
            final StringBuilder timingInfo = new StringBuilder();
            timingInfo.append("Session commit for ").append(this).append(" [").append(connectableDescription)
                    .append("]").append(" took ");

            final long commitNanos = System.nanoTime() - commitStartNanos;
            formatNanos(commitNanos, timingInfo);
            timingInfo.append("; FlowFile Repository Update took ");
            formatNanos(flowFileRepoUpdateNanos, timingInfo);
            timingInfo.append("; Claim Removal took ");
            formatNanos(claimRemovalNanos, timingInfo);
            timingInfo.append("; FlowFile Event Update took ");
            formatNanos(updateEventRepositoryNanos, timingInfo);
            timingInfo.append("; Enqueuing FlowFiles took ");
            formatNanos(enqueueFlowFileNanos, timingInfo);
            timingInfo.append("; Updating Provenance Event Repository took ");
            formatNanos(updateProvenanceNanos, timingInfo);

            LOG.debug(timingInfo.toString());
        }
    } catch (final Exception e) {
        try {
            // if we fail to commit the session, we need to roll back
            // the checkpoints as well because none of the checkpoints
            // were ever committed.
            rollback(false, true);
        } catch (final Exception e1) {
            e.addSuppressed(e1);
        }

        if (e instanceof RuntimeException) {
            throw (RuntimeException) e;
        } else {
            throw new ProcessException(e);
        }
    }
}

From source file: org.lenskit.data.dao.file.TextEntitySource.java

/**
 * Open a stream to read entities from this source.
 * @return A stream of entities.
 */
@Override
public ObjectStream<Entity> openStream() throws IOException {
    BufferedReader reader = source.openBufferedStream();
    ObjectStream<String> lines = new LineStream(reader);
    int headerLines = format.getHeaderLines();
    List<String> header = new ArrayList<>();
    while (header.size() < headerLines) {
        String line = lines.readObject();
        if (line == null) {
            IOException ex = new IOException(
                    String.format("expected %d header lines, found %d", headerLines, header.size()));
            try {
                lines.close();
            } catch (Throwable th) {
                ex.addSuppressed(th);
            }
            throw ex;
        }
        header.add(line);
    }
    LineEntityParser parser = format.makeParser(header);
    return ObjectStreams.transform(lines, parser);
}

From source file: org.neo4j.io.pagecache.impl.SingleFilePageSwapper.java

private void closeAndCollectExceptions(int channelIndex, IOException exception) throws IOException {
    if (channelIndex == channels.length) {
        if (exception != null) {
            throw exception;
        }
        return;
    }

    try {
        channels[channelIndex].close();
    } catch (IOException e) {
        if (exception == null) {
            exception = e;
        } else {
            exception.addSuppressed(e);
        }
    }
    closeAndCollectExceptions(channelIndex + 1, exception);
}
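
Since Java 7, try-with-resources performs this suppression automatically: if the try body throws and closing a resource also throws, the close failure is attached to the primary exception via addSuppressed. A minimal sketch (the file name is only illustrative):

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

public class TryWithResourcesSuppression {
    public static void main(String[] args) throws IOException {
        // If read() throws and close() also throws, the close failure
        // appears in the primary exception's getSuppressed() array.
        try (InputStream in = new FileInputStream("data.bin")) { // illustrative file name
            in.read();
        }
    }
}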