Example usage for java.io InterruptedIOException InterruptedIOException

Introduction

On this page you can find example usage for the java.io InterruptedIOException() constructor.

Prototype

public InterruptedIOException() 

Document

Constructs an InterruptedIOException with null as its error detail message.
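
A common idiom, used throughout the examples below, is to convert an InterruptedException into an InterruptedIOException so that interruption can propagate through an IOException-only method signature. Here is a minimal sketch of that idiom (the helper name sleepInterruptibly is hypothetical); restoring the thread's interrupt status first is a common refinement that several of the examples below omit:

import java.io.InterruptedIOException;

static void sleepInterruptibly(long millis) throws InterruptedIOException {
    try {
        Thread.sleep(millis);
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers can still observe it.
        Thread.currentThread().interrupt();
        // The no-arg constructor leaves the detail message null, so attach
        // the original InterruptedException as the cause.
        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    }
}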

Usage

From source file:org.apache.hadoop.hbase.rest.client.RemoteHTable.java

public HTableDescriptor getTableDescriptor() throws IOException {
    StringBuilder sb = new StringBuilder();
    sb.append('/');
    sb.append(Bytes.toStringBinary(name));
    sb.append('/');
    sb.append("schema");
    for (int i = 0; i < maxRetries; i++) {
        Response response = client.get(sb.toString(), Constants.MIMETYPE_PROTOBUF);
        int code = response.getCode();
        switch (code) {
        case 200:
            TableSchemaModel schema = new TableSchemaModel();
            schema.getObjectFromMessage(response.getBody());
            return schema.getTableDescriptor();
        case 509:
            try {
                Thread.sleep(sleepTime);
            } catch (InterruptedException e) {
                throw (InterruptedIOException) new InterruptedIOException().initCause(e);
            }
            break;
        default:
            throw new IOException("schema request returned " + code);
        }
    }
    throw new IOException("schema request timed out");
}

From source file:org.apache.hadoop.hbase.util.FSHDFSUtils.java

boolean recoverDFSFileLease(final DistributedFileSystem dfs, final Path p, final Configuration conf,
        final CancelableProgressable reporter) throws IOException {
    LOG.info("Recovering lease on dfs file " + p);
    long startWaiting = EnvironmentEdgeManager.currentTimeMillis();
    // Default is 15 minutes. It's huge, but the idea is that if we have a major issue, HDFS
    // usually needs 10 minutes before marking the nodes as dead. So we're putting ourselves
    // beyond that limit 'to be safe'.
    long recoveryTimeout = conf.getInt("hbase.lease.recovery.timeout", 900000) + startWaiting;
    // This setting should be a little bit above what the cluster dfs heartbeat is set to.
    long firstPause = conf.getInt("hbase.lease.recovery.first.pause", 4000);
    // This should be set to how long it'll take for us to timeout against primary datanode if it
    // is dead.  We set it to 61 seconds, 1 second more than the default READ_TIMEOUT in HDFS, the
    // default value for DFS_CLIENT_SOCKET_TIMEOUT_KEY.
    long subsequentPause = conf.getInt("hbase.lease.recovery.dfs.timeout", 61 * 1000);

    Method isFileClosedMeth = null;
    // whether we need to look for isFileClosed method
    boolean findIsFileClosedMeth = true;
    boolean recovered = false;
    // We break the loop if we succeed the lease recovery, timeout, or we throw an exception.
    for (int nbAttempt = 0; !recovered; nbAttempt++) {
        recovered = recoverLease(dfs, nbAttempt, p, startWaiting);
        if (recovered)
            break;
        checkIfCancelled(reporter);
        if (checkIfTimedout(conf, recoveryTimeout, nbAttempt, p, startWaiting))
            break;
        try {
            // On the first time through wait the short 'firstPause'.
            if (nbAttempt == 0) {
                Thread.sleep(firstPause);
            } else {
                // Cycle here until subsequentPause elapses.  While spinning, check isFileClosed if
                // available (should be in hadoop 2.0.5... not in hadoop 1 though).
                long localStartWaiting = EnvironmentEdgeManager.currentTimeMillis();
                while ((EnvironmentEdgeManager.currentTimeMillis() - localStartWaiting) < subsequentPause) {
                    Thread.sleep(conf.getInt("hbase.lease.recovery.pause", 1000));
                    if (findIsFileClosedMeth) {
                        try {
                            isFileClosedMeth = dfs.getClass().getMethod("isFileClosed",
                                    new Class[] { Path.class });
                        } catch (NoSuchMethodException nsme) {
                            LOG.debug("isFileClosed not available");
                        } finally {
                            findIsFileClosedMeth = false;
                        }
                    }
                    if (isFileClosedMeth != null && isFileClosed(dfs, isFileClosedMeth, p)) {
                        recovered = true;
                        break;
                    }
                    checkIfCancelled(reporter);
                }
            }
        } catch (InterruptedException ie) {
            InterruptedIOException iioe = new InterruptedIOException();
            iioe.initCause(ie);
            throw iioe;
        }
    }
    return recovered;
}

From source file:com.sa.npopa.samples.hbase.rest.client.RemoteHTable.java

@Override
public HTableDescriptor getTableDescriptor() throws IOException {
    StringBuilder sb = new StringBuilder();
    sb.append('/');
    sb.append(Bytes.toStringBinary(name));
    sb.append('/');
    sb.append("schema");
    for (int i = 0; i < maxRetries; i++) {
        Response response = client.get(sb.toString(), Constants.MIMETYPE_PROTOBUF);
        int code = response.getCode();
        switch (code) {
        case 200:
            TableSchemaModel schema = new TableSchemaModel();
            schema.getObjectFromMessage(response.getBody());
            return schema.getTableDescriptor();
        case 509:
            try {
                Thread.sleep(sleepTime);
            } catch (InterruptedException e) {
                throw (InterruptedIOException) new InterruptedIOException().initCause(e);
            }
            break;
        default:
            throw new IOException("schema request returned " + code);
        }
    }
    throw new IOException("schema request timed out");
}

From source file:byps.http.HIncomingStreamSync.java

@Override
public synchronized int read(byte[] b, int offs, int len) throws IOException {
    long t1 = System.currentTimeMillis();
    int bytesRead = -1;

    while (true) {

        if (ex != null)
            throw ex;
        if (closed)
            throw new IOException("Stream closed");

        if (bytesSource == NO_BYTES) {
            // no bytes received yet in write()
            if (log.isDebugEnabled())
                log.debug("NO_BYTES");
        } else if (bytesSource == FIRST_BYTES) {
            bytesRead = Math.min(firstBytes.length - readPos, len);
            if (log.isDebugEnabled())
                log.debug("FIRST_BYTES, #bytesRead=" + bytesRead);
            if (bytesRead > 0) {
                System.arraycopy(firstBytes, readPos, b, offs, bytesRead);
                readPos += bytesRead;
                break;
            }
        } else if (bytesSource == SECOND_BYTES) {
            bytesRead = Math.min(secondBytesWritePos - readPos, len);
            if (log.isDebugEnabled())
                log.debug("SECOND_BYTES, #bytesRead=" + bytesRead);
            if (bytesRead > 0) {
                System.arraycopy(secondBytes, readPos, b, offs, bytesRead);
                readPos += bytesRead;
                break;
            }
        } else if (bytesSource == FILE_BYTES) {
            if (file != null) {
                if (fis == null) {
                    fis = new FileInputStream(file.getFile());
                    fis.skip(readPos);
                }
                bytesRead = fis.read(b, offs, len);
                if (bytesRead >= 0) {
                    break;
                }
            }
        } else {
            throw new IllegalStateException("Illegal bytesSource=" + bytesSource);
        }

        if (writeClosed) {
            bytesRead = -1;
            break;
        }

        try {
            long to = getLifetimeMillis();
            if (log.isDebugEnabled())
                log.debug("wait for targetId=" + targetId + " for reading");
            this.wait(to);

            long t2 = System.currentTimeMillis();
            if (t2 - t1 >= to) {
                if (log.isDebugEnabled())
                    log.debug("timeout while waiting for targetId=" + targetId);
                throw new IOException("Timeout");
            }

            if (log.isDebugEnabled())
                log.debug("received singal, continue read");

        } catch (InterruptedException e) {
            if (log.isDebugEnabled())
                log.debug("waiting for targetId=" + targetId + " interrupted");
            throw new InterruptedIOException();
        }
    }

    // extend the lifetime
    extendLifetime();

    return bytesRead;
}

From source file:org.apache.hadoop.hbase.wal.LogRecoveredEditsOutputSink.java

private List<IOException> closeLogWriters(List<IOException> thrown) throws IOException {
    if (writersClosed) {
        return thrown;
    }
    if (thrown == null) {
        thrown = Lists.newArrayList();
    }
    try {
        for (WriterThread writerThread : writerThreads) {
            while (writerThread.isAlive()) {
                writerThread.setShouldStop(true);
                writerThread.interrupt();
                try {
                    writerThread.join(10);
                } catch (InterruptedException e) {
                    IOException iie = new InterruptedIOException();
                    iie.initCause(e);
                    throw iie;
                }
            }
        }
    } finally {
        WALSplitter.WriterAndPath wap = null;
        for (WALSplitter.SinkWriter tmpWAP : writers.values()) {
            try {
                wap = (WALSplitter.WriterAndPath) tmpWAP;
                wap.writer.close();
            } catch (IOException ioe) {
                LOG.error("Couldn't close log at {}", wap.path, ioe);
                thrown.add(ioe);
                continue;
            }
            LOG.info("Closed log " + wap.path + " (wrote " + wap.editsWritten + " edits in "
                    + (wap.nanosSpent / 1000 / 1000) + "ms)");
        }
        writersClosed = true;
    }

    return thrown;
}

From source file:com.grendelscan.commons.http.apache_overrides.client.CustomClientRequestDirector.java

@Override
public HttpResponse execute(HttpHost originalTarget, final HttpRequest request, HttpContext context)
        throws HttpException, IOException {
    HttpHost target = originalTarget;
    final HttpRoute route = determineRoute(target, request, context);

    virtualHost = (HttpHost) request.getParams().getParameter(ClientPNames.VIRTUAL_HOST);

    long timeout = ConnManagerParams.getTimeout(params);

    try {
        HttpResponse response = null;

        // See if we have a user token bound to the execution context
        Object userToken = context.getAttribute(ClientContext.USER_TOKEN);

        // Allocate connection if needed
        if (managedConn == null) {
            ClientConnectionRequest connRequest = connManager.requestConnection(route, userToken);
            if (request instanceof AbortableHttpRequest) {
                ((AbortableHttpRequest) request).setConnectionRequest(connRequest);
            }

            try {
                managedConn = connRequest.getConnection(timeout, TimeUnit.MILLISECONDS);
            } catch (InterruptedException interrupted) {
                InterruptedIOException iox = new InterruptedIOException();
                iox.initCause(interrupted);
                throw iox;
            }

            if (HttpConnectionParams.isStaleCheckingEnabled(params)) {
                // validate connection
                if (managedConn.isOpen()) {
                    LOGGER.debug("Stale connection check");
                    if (managedConn.isStale()) {
                        LOGGER.debug("Stale connection detected");
                        managedConn.close();
                    }
                }
            }
        }

        if (request instanceof AbortableHttpRequest) {
            ((AbortableHttpRequest) request).setReleaseTrigger(managedConn);
        }

        // Reopen connection if needed
        if (!managedConn.isOpen()) {
            managedConn.open(route, context, params);
        } else {
            managedConn.setSocketTimeout(HttpConnectionParams.getSoTimeout(params));
        }

        try {
            establishRoute(route, context);
        } catch (TunnelRefusedException ex) {
            LOGGER.debug(ex.getMessage());
            response = ex.getResponse();
        }

        // Use virtual host if set
        target = virtualHost;

        if (target == null) {
            target = route.getTargetHost();
        }

        HttpHost proxy = route.getProxyHost();

        // Populate the execution context
        context.setAttribute(ExecutionContext.HTTP_TARGET_HOST, target);
        context.setAttribute(ExecutionContext.HTTP_PROXY_HOST, proxy);
        context.setAttribute(ExecutionContext.HTTP_CONNECTION, managedConn);
        context.setAttribute(ClientContext.TARGET_AUTH_STATE, targetAuthState);
        context.setAttribute(ClientContext.PROXY_AUTH_STATE, proxyAuthState);

        // Run request protocol interceptors
        requestExec.preProcess(request, httpProcessor, context);

        try {
            response = requestExec.execute(request, managedConn, context);
        } catch (IOException ex) {
            LOGGER.debug("Closing connection after request failure.");
            managedConn.close();
            throw ex;
        }

        if (response == null) {
            return null;
        }

        // Run response protocol interceptors
        response.setParams(params);
        requestExec.postProcess(response, httpProcessor, context);

        // The connection is in or can be brought to a re-usable state.
        boolean reuse = reuseStrategy.keepAlive(response, context);
        if (reuse) {
            // Set the idle duration of this connection
            long duration = keepAliveStrategy.getKeepAliveDuration(response, context);
            managedConn.setIdleDuration(duration, TimeUnit.MILLISECONDS);

            if (duration >= 0) {
                LOGGER.trace("Connection can be kept alive for " + duration + " ms");
            } else {
                LOGGER.trace("Connection can be kept alive indefinitely");
            }
        }

        if ((managedConn != null) && (userToken == null)) {
            userToken = userTokenHandler.getUserToken(context);
            context.setAttribute(ClientContext.USER_TOKEN, userToken);
            if (userToken != null) {
                managedConn.setState(userToken);
            }
        }

        // check for entity, release connection if possible
        if ((response.getEntity() == null) || !response.getEntity().isStreaming()) {
            // connection not needed and (assumed to be) in re-usable state
            if (reuse) {
                managedConn.markReusable();
            }
            releaseConnection();
        } else {
            // install an auto-release entity
            HttpEntity entity = response.getEntity();
            entity = new BasicManagedEntity(entity, managedConn, reuse);
            response.setEntity(entity);
        }

        return response;

    } catch (HttpException ex) {
        abortConnection();
        throw ex;
    } catch (IOException ex) {
        abortConnection();
        throw ex;
    } catch (RuntimeException ex) {
        abortConnection();
        throw ex;
    }
}

From source file:org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster.java

private static boolean waitForServerDown(int port, long timeout) throws IOException {
    long start = System.currentTimeMillis();
    while (true) {
        try {
            Socket sock = new Socket("localhost", port);
            try {
                OutputStream outstream = sock.getOutputStream();
                outstream.write("stat".getBytes());
                outstream.flush();
            } finally {
                sock.close();
            }
        } catch (IOException e) {
            return true;
        }

        if (System.currentTimeMillis() > start + timeout) {
            break;
        }
        try {
            Thread.sleep(250);
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        }
    }
    return false;
}

From source file:cn.edu.zjnu.acm.judge.core.Judger.java

private boolean compile(RunRecord runRecord) throws IOException {
    String source = runRecord.getSource();
    if (StringUtils.isEmptyOrWhitespace(source)) {
        return false;
    }
    Path work = runRecord.getWorkDirectory();
    final String main = "Main";
    Files.createDirectories(work);
    Path sourceFile = work.resolve(main + "." + runRecord.getLanguage().getSourceExtension());
    Files.copy(new ByteArrayInputStream(source.getBytes(Platform.getCharset())), sourceFile,
            StandardCopyOption.REPLACE_EXISTING);

    String compileCommand = runRecord.getLanguage().getCompileCommand();
    log.debug("Compile Command: {}", compileCommand); //
    if (StringUtils.isEmptyOrWhitespace(compileCommand)) {
        return true;
    }
    assert compileCommand != null;
    Path compileInfo = work.resolve("compileinfo.txt");
    Process process = ProcessCreationHelper.execute(new ProcessBuilder(compileCommand.split("\\s+"))
            .directory(work.toFile()).redirectOutput(compileInfo.toFile()).redirectErrorStream(true)::start);
    process.getInputStream().close();
    try {
        process.waitFor(45, TimeUnit.SECONDS);
    } catch (InterruptedException ex) {
        throw new InterruptedIOException();
    }
    String errorInfo;
    if (process.isAlive()) {
        process.destroy();
        try {
            process.waitFor();
        } catch (InterruptedException ex) {
            throw new InterruptedIOException();
        }
        errorInfo = "Compile timeout\nOutput:\n" + collectLines(compileInfo);
    } else {
        errorInfo = collectLines(compileInfo);
    }
    log.debug("errorInfo = {}", errorInfo);
    Path executable = work.resolve(main + "." + runRecord.getLanguage().getExecutableExtension());
    log.debug("executable = {}", executable);
    boolean compileOK = Files.exists(executable);
    if (!compileOK) {
        submissionMapper.updateResult(runRecord.getSubmissionId(), ResultType.COMPILE_ERROR, 0, 0);
        submissionMapper.saveCompileInfo(runRecord.getSubmissionId(), errorInfo);
        updateSubmissionStatus(runRecord);
    }
    return compileOK;
}

From source file:org.apache.hadoop.hbase.catalog.MetaReader.java

/**
 * Gets all of the regions of the specified table.
 * @param catalogTracker
 * @param tableName
 * @param excludeOfflinedSplitParents If true, do not include offlined split
 * parents in the return.
 * @return Ordered list of {@link HRegionInfo}.
 * @throws IOException
 */
public static List<HRegionInfo> getTableRegions(CatalogTracker catalogTracker, TableName tableName,
        final boolean excludeOfflinedSplitParents) throws IOException {
    List<Pair<HRegionInfo, ServerName>> result = null;
    try {
        result = getTableRegionsAndLocations(catalogTracker, tableName, excludeOfflinedSplitParents);
    } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    }
    return getListOfHRegionInfos(result);
}

From source file:org.apache.hadoop.hbase.wal.WALKey.java

/**
 * Wait for sequence number to be assigned & return the assigned value.
 * @param maxWaitForSeqId maximum time to wait in milliseconds for sequenceid
 * @return long the new assigned sequence number
 * @throws IOException
 */
public long getSequenceId(final long maxWaitForSeqId) throws IOException {
    // TODO: This implementation waiting on a latch is problematic because if a higher level
    // determines we should stop or abort, there is no global list of all these blocked WALKeys
    // waiting on a sequence id; they can't be cancelled... interrupted. See getNextSequenceId.
    //
    // UPDATE: I think we can remove the timeout now we are stamping all walkeys with sequenceid,
    // even those that have failed (previously we were not... so they would just hang out...).
    // St.Ack 20150910
    try {
        if (maxWaitForSeqId < 0) {
            this.seqNumAssignedLatch.await();
        } else if (!this.seqNumAssignedLatch.await(maxWaitForSeqId, TimeUnit.MILLISECONDS)) {
            throw new TimeoutIOException("Failed to get sequenceid after " + maxWaitForSeqId
                    + "ms; WAL system stuck or has gone away?");
        }
    } catch (InterruptedException ie) {
        LOG.warn("Thread interrupted waiting for next log sequence number");
        InterruptedIOException iie = new InterruptedIOException();
        iie.initCause(ie);
        throw iie;
    }
    return this.logSeqNum;
}