Example usage for the java.io.InterruptedIOException(String) constructor

Introduction

This page collects real-world usage examples of the java.io.InterruptedIOException(String) constructor, drawn from open-source projects.

Prototype

public InterruptedIOException(String s) 

Document

Constructs an InterruptedIOException with the specified detail message.
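
Every project in the Usage list below follows the same basic pattern: a blocking call is interrupted, and the resulting InterruptedException is translated into an InterruptedIOException so that it can propagate through a throws clause limited to IOException. The sketch below distills that pattern; the class and method names are illustrative only and are not taken from any of the projects listed under Usage. Because this constructor accepts only a message, the original exception must be chained explicitly via initCause if the cause should be preserved.

import java.io.IOException;
import java.io.InterruptedIOException;

public class InterruptedIOExceptionDemo {

    // Translate a thread interrupt during a blocking pause into an
    // InterruptedIOException, mirroring the usage examples below.
    static void pauseBetweenRetries(long millis) throws IOException {
        try {
            Thread.sleep(millis);
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers can still observe it.
            Thread.currentThread().interrupt();
            InterruptedIOException wrapped = new InterruptedIOException(
                    "interrupted while sleeping " + millis + " ms");
            // The (String) constructor takes no cause, so chain it explicitly.
            wrapped.initCause(e);
            throw wrapped;
        }
    }

    public static void main(String[] args) throws IOException {
        pauseBetweenRetries(100L);
        System.out.println("slept without interruption");
    }
}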

Usage

From source file:org.apache.http2.impl.client.AutoRetryHttpClient.java
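
An interrupt during the back-off sleep between retries is translated into an InterruptedIOException carrying the original exception's message.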

public HttpResponse execute(HttpHost target, HttpRequest request, HttpContext context) throws IOException {
    for (int c = 1;; c++) {
        HttpResponse response = backend.execute(target, request, context);
        try {
            if (retryStrategy.retryRequest(response, c, context)) {
                EntityUtils.consume(response.getEntity());
                long nextInterval = retryStrategy.getRetryInterval();
                try {
                    log.trace("Wait for " + nextInterval);
                    Thread.sleep(nextInterval);
                } catch (InterruptedException e) {
                    throw new InterruptedIOException(e.getMessage());
                }
            } else {
                return response;
            }
        } catch (RuntimeException ex) {
            try {
                EntityUtils.consume(response.getEntity());
            } catch (IOException ioex) {
                log.warn("I/O error consuming response content", ioex);
            }
            throw ex;
        }
    }
}

From source file:net.lightbody.bmp.proxy.jetty.http.nio.ByteBufferInputStream.java
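
A blocking helper waits for buffered content to arrive; an interrupt during the wait is logged and rethrown as an InterruptedIOException built from the exception's string form.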

private synchronized boolean waitForContent() throws InterruptedIOException {
    if (_buffer != null) {
        if (_buffer.hasRemaining())
            return true;

        // recycle buffer
        recycle(_buffer);
        _buffer = null;
    }

    while (!_closed && LazyList.size(_buffers) == 0) {
        try {
            this.wait(_timeout);
        } catch (InterruptedException e) {
            log.debug(e);
            throw new InterruptedIOException(e.toString());
        }
    }

    if (_closed)
        return false;

    if (LazyList.size(_buffers) == 0)
        throw new SocketTimeoutException();

    _buffer = (ByteBuffer) LazyList.get(_buffers, 0);
    _buffers = LazyList.remove(_buffers, 0);

    return true;
}

From source file:org.apache.hadoop.hbase.regionserver.compactions.StripeCompactor.java
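
An HBase stripe compaction that does not run to completion is treated as interrupted, and a detailed message naming the store and region is passed to the constructor.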

private List<Path> compactInternal(StripeMultiFileWriter mw, CompactionRequest request,
        byte[] majorRangeFromRow, byte[] majorRangeToRow) throws IOException {
    final Collection<StoreFile> filesToCompact = request.getFiles();
    final FileDetails fd = getFileDetails(filesToCompact, request.isMajor());
    this.progress = new CompactionProgress(fd.maxKeyCount);

    long smallestReadPoint = getSmallestReadPoint();
    List<StoreFileScanner> scanners = createFileScanners(filesToCompact, smallestReadPoint);

    boolean finished = false;
    InternalScanner scanner = null;
    try {
        // Get scanner to use.
        ScanType coprocScanType = ScanType.COMPACT_RETAIN_DELETES;
        scanner = preCreateCoprocScanner(request, coprocScanType, fd.earliestPutTs, scanners);
        if (scanner == null) {
            scanner = (majorRangeFromRow == null)
                    ? createScanner(store, scanners, ScanType.COMPACT_RETAIN_DELETES, smallestReadPoint,
                            fd.earliestPutTs)
                    : createScanner(store, scanners, smallestReadPoint, fd.earliestPutTs, majorRangeFromRow,
                            majorRangeToRow);
        }
        scanner = postCreateCoprocScanner(request, coprocScanType, scanner);
        if (scanner == null) {
            // NULL scanner returned from coprocessor hooks means skip normal processing.
            return new ArrayList<Path>();
        }

        // Create the writer factory for compactions.
        final boolean needMvcc = fd.maxMVCCReadpoint >= smallestReadPoint;
        final Compression.Algorithm compression = store.getFamily().getCompactionCompression();
        StripeMultiFileWriter.WriterFactory factory = new StripeMultiFileWriter.WriterFactory() {
            @Override
            public Writer createWriter() throws IOException {
                return store.createWriterInTmp(fd.maxKeyCount, compression, true, needMvcc,
                        fd.maxTagsLength > 0);
            }
        };

        // Prepare multi-writer, and perform the compaction using scanner and writer.
        // It is ok here if storeScanner is null.
        StoreScanner storeScanner = (scanner instanceof StoreScanner) ? (StoreScanner) scanner : null;
        mw.init(storeScanner, factory, store.getComparator());
        finished = performCompaction(scanner, mw, smallestReadPoint);
        if (!finished) {
            throw new InterruptedIOException("Aborting compaction of store " + store + " in region "
                    + store.getRegionInfo().getRegionNameAsString() + " because it was interrupted.");
        }
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (Throwable t) {
                // Don't fail the compaction if this fails.
                LOG.error("Failed to close scanner after compaction.", t);
            }
        }
        if (!finished) {
            for (Path leftoverFile : mw.abortWriters()) {
                try {
                    store.getFileSystem().delete(leftoverFile, false);
                } catch (Exception ex) {
                    LOG.error("Failed to delete the leftover file after an unfinished compaction.", ex);
                }
            }
        }
    }

    assert finished : "We should have exited the method on all error paths";
    List<Path> newFiles = mw.commitWriters(fd.maxSeqId, request.isMajor());
    assert !newFiles.isEmpty() : "Should have produced an empty file to preserve metadata.";
    return newFiles;
}

From source file:cn.isif.util_plus.http.client.multipart.HttpMultipart.java
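
While streaming multipart form data, a cancelled progress callback aborts the write by throwing an InterruptedIOException with a short marker message.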

private void doWriteTo(final HttpMultipartMode mode, final OutputStream out,
        MultipartEntity.CallBackInfo callBackInfo, boolean writeContent) throws IOException {

    callBackInfo.pos = 0;

    ByteArrayBuffer boundary = encode(this.charset, getBoundary());
    for (FormBodyPart part : this.parts) {
        if (!callBackInfo.doCallBack(true)) {
            throw new InterruptedIOException("cancel");
        }
        writeBytes(TWO_DASHES, out);
        callBackInfo.pos += TWO_DASHES.length();
        writeBytes(boundary, out);
        callBackInfo.pos += boundary.length();
        writeBytes(CR_LF, out);
        callBackInfo.pos += CR_LF.length();

        MinimalFieldHeader header = part.getHeader();

        switch (mode) {
        case STRICT:
            for (MinimalField field : header) {
                writeField(field, out);
                callBackInfo.pos += encode(MIME.DEFAULT_CHARSET, field.getName() + field.getBody()).length()
                        + FIELD_SEP.length() + CR_LF.length();
            }
            break;
        case BROWSER_COMPATIBLE:
            // Only write Content-Disposition
            // Use content charset
            MinimalField cd = header.getField(MIME.CONTENT_DISPOSITION);
            writeField(cd, this.charset, out);
            callBackInfo.pos += encode(this.charset, cd.getName() + cd.getBody()).length() + FIELD_SEP.length()
                    + CR_LF.length();
            String filename = part.getBody().getFilename();
            if (filename != null) {
                MinimalField ct = header.getField(MIME.CONTENT_TYPE);
                writeField(ct, this.charset, out);
                callBackInfo.pos += encode(this.charset, ct.getName() + ct.getBody()).length()
                        + FIELD_SEP.length() + CR_LF.length();
            }
            break;
        default:
            break;
        }
        writeBytes(CR_LF, out);
        callBackInfo.pos += CR_LF.length();

        if (writeContent) {
            ContentBody body = part.getBody();
            body.setCallBackInfo(callBackInfo);
            body.writeTo(out);
        }
        writeBytes(CR_LF, out);
        callBackInfo.pos += CR_LF.length();
    }
    writeBytes(TWO_DASHES, out);
    callBackInfo.pos += TWO_DASHES.length();
    writeBytes(boundary, out);
    callBackInfo.pos += boundary.length();
    writeBytes(TWO_DASHES, out);
    callBackInfo.pos += TWO_DASHES.length();
    writeBytes(CR_LF, out);
    callBackInfo.pos += CR_LF.length();
    callBackInfo.doCallBack(true);
}

From source file:com.dongfang.net.http.client.multipart.HttpMultipart.java

The doWriteTo implementation in this project is identical to the cn.isif example above, except that the InterruptedIOException is thrown with the message "stop" instead of "cancel".

From source file:org.openlaszlo.data.HTTPDataSource.java
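
This retry wrapper throws an InterruptedIOException both when the overall timeout budget is exhausted and when the maximum number of recoverable retries is exceeded, building localized messages in each case.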

public static Data getHTTPData(HttpServletRequest req, HttpServletResponse res, String surl, long since)
        throws DataSourceException, IOException {

    int tries = 1;

    // timeout msecs of time we're allowed in this routine
    // we must return or throw an exception.  0 means infinite.
    int timeout = mTimeout;
    if (req != null) {
        String timeoutParm = req.getParameter("timeout");
        if (timeoutParm != null) {
            timeout = Integer.parseInt(timeoutParm);
        }
    }

    long t1 = System.currentTimeMillis();
    long elapsed = 0;
    if (surl == null) {
        surl = getURL(req);
    }

    while (true) {
        long tout;
        if (timeout > 0) {
            tout = timeout - elapsed;
            if (tout <= 0) {
                throw new InterruptedIOException(
                        /* (non-Javadoc)
                         * @i18n.test
                         * @org-mes=p[0] + " timed out"
                         */
                        org.openlaszlo.i18n.LaszloMessages.getMessage(HTTPDataSource.class.getName(),
                                "051018-194", new Object[] { surl }));
            }
        } else {
            tout = 0;
        }

        try {
            HttpData data = getDataOnce(req, res, since, surl, 0, (int) tout);
            if (data.code >= 400) {
                data.release();
                throw new DataSourceException(errorMessage(data.code));
            }
            return data;
        } catch (HttpRecoverableException e) {
            // This type of exception should be retried.
            if (tries++ > mMaxRetries) {
                throw new InterruptedIOException(
                        /* (non-Javadoc)
                         * @i18n.test
                         * @org-mes="too many retries, exception: " + p[0]
                         */
                        org.openlaszlo.i18n.LaszloMessages.getMessage(HTTPDataSource.class.getName(),
                                "051018-217", new Object[] { e.getMessage() }));
            }
            mLogger.warn(
                    /* (non-Javadoc)
                     * @i18n.test
                     * @org-mes="retrying a recoverable exception: " + p[0]
                     */
                    org.openlaszlo.i18n.LaszloMessages.getMessage(HTTPDataSource.class.getName(), "051018-226",
                            new Object[] { e.getMessage() }));
        } catch (HttpException e) {
            throw new IOException(
                    /* (non-Javadoc)
                     * @i18n.test
                     * @org-mes="HttpException: " + p[0]
                     */
                    org.openlaszlo.i18n.LaszloMessages.getMessage(HTTPDataSource.class.getName(), "051018-235",
                            new Object[] { e.getMessage() }));
        } catch (IOException e) {

            try {
                Class<?> ssle = Class.forName("javax.net.ssl.SSLException");
                if (ssle.isAssignableFrom(e.getClass())) {
                    throw new DataSourceException(
                            /* (non-Javadoc)
                             * @i18n.test
                             * @org-mes="SSL exception: " + p[0]
                             */
                            org.openlaszlo.i18n.LaszloMessages.getMessage(HTTPDataSource.class.getName(),
                                    "051018-256", new Object[] { e.getMessage() }));
                }
            } catch (ClassNotFoundException cfne) {
            }

            throw e;
        }

        long t2 = System.currentTimeMillis();
        elapsed = (t2 - t1);
    }
}

From source file:org.apache.hadoop.hbase.procedure.ZKProcedureCoordinator.java
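
A ZooKeeper operation interrupted mid-flight is logged and converted to an InterruptedIOException; since the constructor takes only a message, the cause is not chained.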

@Override
public void sendGlobalBarrierReached(Procedure proc, List<String> nodeNames) throws IOException {
    String procName = proc.getName();
    String reachedNode = zkProc.getReachedBarrierNode(procName);
    LOG.debug("Creating reached barrier zk node:" + reachedNode);
    try {
        // create the reached znode and watch for the reached znodes
        ZKUtil.createWithParents(zkProc.getWatcher(), reachedNode);
        // loop through all the children of the acquire phase and watch for them
        for (String node : nodeNames) {
            String znode = ZKUtil.joinZNode(reachedNode, node);
            if (ZKUtil.watchAndCheckExists(zkProc.getWatcher(), znode)) {
                byte[] dataFromMember = ZKUtil.getData(zkProc.getWatcher(), znode);
                // ProtobufUtil.isPBMagicPrefix will check null
                if (dataFromMember != null && dataFromMember.length > 0) {
                    if (!ProtobufUtil.isPBMagicPrefix(dataFromMember)) {
                        String msg = "Failed to get data from finished node or data is illegally formatted: "
                                + znode;
                        LOG.error(msg);
                        throw new IOException(msg);
                    } else {
                        dataFromMember = Arrays.copyOfRange(dataFromMember, ProtobufUtil.lengthOfPBMagic(),
                                dataFromMember.length);
                        coordinator.memberFinishedBarrier(procName, node, dataFromMember);
                    }
                } else {
                    coordinator.memberFinishedBarrier(procName, node, dataFromMember);
                }
            }
        }
    } catch (KeeperException e) {
        String msg = "Failed while creating reached node:" + reachedNode;
        LOG.error(msg, e);
        throw new IOException(msg, e);
    } catch (InterruptedException e) {
        String msg = "Interrupted while creating reached node:" + reachedNode;
        LOG.error(msg, e);
        throw new InterruptedIOException(msg);
    }
}

From source file:com.reactive.hzdfs.core.DistributedFileSupportService.java
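
An interrupted countdown-latch wait aborts the distribution job before an InterruptedIOException naming the session is thrown.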

/**
 * Prepares the cluster for a file-distribution task.
 * @param sourceFile the file to be distributed
 * @param config the task configuration
 * @return the command describing the prepared session
 * @throws IOException if the cluster cannot be prepared in time or the wait is interrupted
 */
private DFSSCommand prepareCluster(File sourceFile, DFSSTaskConfig config) throws IOException {

    DFSSCommand cmd = new DFSSCommand();
    cmd.setConfig(config);
    log.info("[DFSS] New task created with sessionId => " + cmd.getSessionId() + " for file => " + sourceFile);
    cmd.setCommand(DFSSCommand.CMD_INIT_ASCII_RCVRS);
    cmd.setChunkMap(chunkMapName(sourceFile));
    cmd.setRecordMap(cmd.getChunkMap() + "-REC");
    sendMessage(cmd);

    ICountDownLatch latch = sessionLatch(cmd);
    try {
        log.info("[DFSS#" + cmd.getSessionId() + "] Preparing cluster for file distribution.. ");
        boolean b = latch.await(config.getClusterPreparationTime().getDuration(),
                config.getClusterPreparationTime().getUnit());
        if (!b) {
            cmd.setCommand(DFSSCommand.CMD_ABORT_JOB);
            sendMessage(cmd);

            throw new IOException("[" + cmd.getSessionId() + "] Unable to prepare cluster for distribution in "
                    + config + ". Job aborted!");
        }
    } catch (InterruptedException e) {
        log.error("Aborting job on being interrupted unexpectedly", e);
        cmd.setCommand(DFSSCommand.CMD_ABORT_JOB);
        sendMessage(cmd);

        throw new InterruptedIOException("[" + cmd.getSessionId() + "] InterruptedException. Job aborted!");
    } finally {
        latch.destroy();
    }
    hzService.setMapConfiguration(recordMapCfg, cmd.getRecordMap());

    log.info("[DFSS#" + cmd.getSessionId() + "] Cluster preparation complete..");
    return cmd;
}

From source file:org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.java
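
HBase's retrying RPC caller converts an interrupt during the wait between attempts into an InterruptedIOException that reports how many tries had been made.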

@Override
public T callWithRetries(RetryingCallable<T> callable, int callTimeout) throws IOException, RuntimeException {
    List<RetriesExhaustedException.ThrowableWithExtraContext> exceptions = new ArrayList<RetriesExhaustedException.ThrowableWithExtraContext>();
    this.globalStartTime = EnvironmentEdgeManager.currentTime();
    context.clear();
    for (int tries = 0;; tries++) {
        long expectedSleep;
        try {
            callable.prepare(tries != 0); // if called with false, check table status on ZK
            interceptor.intercept(context.prepare(callable, tries));
            return callable.call(getRemainingTime(callTimeout));
        } catch (PreemptiveFastFailException e) {
            throw e;
        } catch (Throwable t) {
            ExceptionUtil.rethrowIfInterrupt(t);
            if (tries > startLogErrorsCnt) {
                LOG.info("Call exception, tries=" + tries + ", retries=" + retries + ", started="
                        + (EnvironmentEdgeManager.currentTime() - this.globalStartTime) + " ms ago, "
                        + "cancelled=" + cancelled.get() + ", msg="
                        + callable.getExceptionMessageAdditionalDetail());
            }

            // translateException throws exception when should not retry: i.e. when request is bad.
            interceptor.handleFailure(context, t);
            t = translateException(t);
            callable.throwable(t, retries != 1);
            RetriesExhaustedException.ThrowableWithExtraContext qt = new RetriesExhaustedException.ThrowableWithExtraContext(
                    t, EnvironmentEdgeManager.currentTime(), toString());
            exceptions.add(qt);
            if (tries >= retries - 1) {
                throw new RetriesExhaustedException(tries, exceptions);
            }
            // If the server is dead, we need to wait a little before retrying, to give
            // the regions a chance to be reassigned elsewhere.
            // tries hasn't been bumped up yet so we use "tries + 1" to get the right pause time.
            expectedSleep = callable.sleep(pause, tries + 1);

            // If, after the planned sleep, there won't be enough time left, we stop now.
            long duration = singleCallDuration(expectedSleep);
            if (duration > callTimeout) {
                String msg = "callTimeout=" + callTimeout + ", callDuration=" + duration + ": "
                        + callable.getExceptionMessageAdditionalDetail();
                throw (SocketTimeoutException) (new SocketTimeoutException(msg).initCause(t));
            }
        } finally {
            interceptor.updateFailureInfo(context);
        }
        try {
            if (expectedSleep > 0) {
                synchronized (cancelled) {
                    if (cancelled.get())
                        return null;
                    cancelled.wait(expectedSleep);
                }
            }
            if (cancelled.get())
                return null;
        } catch (InterruptedException e) {
            throw new InterruptedIOException("Interrupted after " + tries + " tries  on " + retries);
        }
    }
}

From source file:org.apache.hadoop.hbase.mapreduce.CrossSiteTableInputFormat.java
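
RecordReader initialization declares InterruptedException, which is adapted to the IOException-only contract of createRecordReader by rethrowing its message as an InterruptedIOException.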

@Override
public RecordReader<ImmutableBytesWritable, Result> createRecordReader(InputSplit split,
        TaskAttemptContext context) throws IOException {
    TableSplit tSplit = (TableSplit) split;
    if (table == null) {
        throw new IOException("Cannot create a record reader because of a"
                + " previous error. Please look at the previous logs lines from"
                + " the task's full log for more details.");
    }
    String clusterTableName = Bytes.toString(tSplit.getTableName());
    String clusterName = CrossSiteUtil.getClusterName(clusterTableName);
    HTableInterface clusterHTableInterface = table.getClusterHTable(clusterName);
    HTable clusterHTable = null;
    if (clusterHTableInterface instanceof HTable) {
        clusterHTable = (HTable) clusterHTableInterface;
    } else {
        throw new IOException("The cluster table is not an instance of the HTable. Its class is "
                + (clusterHTableInterface == null ? "" : clusterHTableInterface.getClass().getName()));
    }
    TableRecordReader trr = new TableRecordReader();
    Scan sc = new Scan(getScan());
    sc.setStartRow(tSplit.getStartRow());
    sc.setStopRow(tSplit.getEndRow());
    trr.setScan(sc);
    trr.setHTable(clusterHTable);
    try {
        trr.initialize(tSplit, context);
    } catch (InterruptedException e) {
        throw new InterruptedIOException(e.getMessage());
    }
    return trr;
}