Example usage for the java.io.InterruptedIOException(String) constructor

List of usage examples for the java.io.InterruptedIOException(String) constructor, collected from open-source projects

Introduction

On this page you can find example usage for the java.io.InterruptedIOException(String) constructor.

Prototype

public InterruptedIOException(String s) 

Document

Constructs an InterruptedIOException with the specified detail message.
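
Most of the usages below follow the same idiom: a blocking call catches InterruptedException and rethrows it as an InterruptedIOException, so callers only have to handle IOException and its subtypes. The following is a minimal sketch of that pattern; the BlockingReader class and its waitForData() helper are hypothetical and not taken from any of the projects listed below.

import java.io.IOException;
import java.io.InterruptedIOException;

public class BlockingReader {

    /**
     * Waits for data and converts a thread interrupt into an
     * InterruptedIOException, the pattern used throughout the examples below.
     */
    public int readBlocking() throws IOException {
        try {
            // Hypothetical blocking helper that may throw InterruptedException.
            return waitForData();
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers can still observe it.
            Thread.currentThread().interrupt();
            InterruptedIOException iioe =
                    new InterruptedIOException("Interrupted while waiting for data");
            iioe.initCause(e);
            throw iioe;
        }
    }

    private int waitForData() throws InterruptedException {
        // Placeholder body for the sketch.
        Thread.sleep(10);
        return 0;
    }
}

Some of the examples below pass only a detail message to the constructor, while others (HLogFactory and DFSOutputStream, for instance) also attach the original InterruptedException via initCause.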

Usage

From source file:com.sshtools.j2ssh.io.DynamicBuffer.java

/**
 * Reads a single byte from the buffer, blocking until data is available.
 *
 * @return the next byte read, or -1 if the buffer is closed and no data is available
 *
 * @throws IOException if an I/O error occurs
 * @throws InterruptedIOException if the blocking operation is interrupted
 */
protected synchronized int read() throws IOException {
    try {
        block();
    } catch (InterruptedException ex) {
        throw new InterruptedIOException("The blocking operation was interrupted");
    }

    if (closed && available() <= 0) {
        return -1;
    }

    return (int) buf[readpos++];

}

From source file:org.apache.hadoop.hbase.regionserver.wal.HLogFactory.java

public static HLog.Reader createReader(final FileSystem fs, final Path path, Configuration conf,
        CancelableProgressable reporter, boolean allowCustom) throws IOException {
    if (allowCustom && (logReaderClass == null)) {
        logReaderClass = conf.getClass("hbase.regionserver.hlog.reader.impl", ProtobufLogReader.class,
                Reader.class);
    }
    Class<? extends Reader> lrClass = allowCustom ? logReaderClass : ProtobufLogReader.class;

    try {
        // A hlog file could be under recovery, so it may take several
        // tries to get it open. Instead of claiming it is corrupted, retry
        // to open it up to 5 minutes by default.
        long startWaiting = EnvironmentEdgeManager.currentTimeMillis();
        long openTimeout = conf.getInt("hbase.hlog.open.timeout", 300000) + startWaiting;
        int nbAttempt = 0;
        while (true) {
            try {
                if (lrClass != ProtobufLogReader.class) {
                    // User is overriding the WAL reader, let them.
                    HLog.Reader reader = lrClass.newInstance();
                    reader.init(fs, path, conf, null);
                    return reader;
                } else {
                    FSDataInputStream stream = fs.open(path);
                    // Note that zero-length file will fail to read PB magic, and attempt to create
                    // a non-PB reader and fail the same way existing code expects it to. If we get
                    // rid of the old reader entirely, we need to handle 0-size files differently from
                    // merely non-PB files.
                    byte[] magic = new byte[ProtobufLogReader.PB_WAL_MAGIC.length];
                    boolean isPbWal = (stream.read(magic) == magic.length)
                            && Arrays.equals(magic, ProtobufLogReader.PB_WAL_MAGIC);
                    HLog.Reader reader = isPbWal ? new ProtobufLogReader() : new SequenceFileLogReader();
                    reader.init(fs, path, conf, stream);
                    return reader;
                }
            } catch (IOException e) {
                String msg = e.getMessage();
                if (msg != null && (msg.contains("Cannot obtain block length")
                        || msg.contains("Could not obtain the last block")
                        || msg.matches("Blocklist for [^ ]* has changed.*"))) {
                    if (++nbAttempt == 1) {
                        LOG.warn("Lease should have recovered. This is not expected. Will retry", e);
                    }
                    if (reporter != null && !reporter.progress()) {
                        throw new InterruptedIOException("Operation is cancelled");
                    }
                    if (nbAttempt > 2 && openTimeout < EnvironmentEdgeManager.currentTimeMillis()) {
                        LOG.error("Can't open after " + nbAttempt + " attempts and "
                                + (EnvironmentEdgeManager.currentTimeMillis() - startWaiting) + "ms " + " for "
                                + path);
                    } else {
                        try {
                            Thread.sleep(nbAttempt < 3 ? 500 : 1000);
                            continue; // retry
                        } catch (InterruptedException ie) {
                            InterruptedIOException iioe = new InterruptedIOException();
                            iioe.initCause(ie);
                            throw iioe;
                        }
                    }
                }
                throw e;
            }
        }
    } catch (IOException ie) {
        throw ie;
    } catch (Exception e) {
        throw new IOException("Cannot get log reader", e);
    }
}

From source file:com.sshtools.j2ssh.connection.ChannelInputStream.java

/**
 * Reads up to len bytes of channel data into the supplied buffer.
 *
 * @param b the destination buffer
 * @param off the offset in the buffer at which to start storing data
 * @param len the maximum number of bytes to read
 *
 * @return the number of bytes read, or -1 if the end of the stream is reached
 *
 * @throws IOException if an I/O error occurs
 * @throws InterruptedIOException if the thread is interrupted whilst waiting for channel data
 */
public int read(byte[] b, int off, int len) throws IOException {
    try {
        block();

        int actual = available();

        if (actual > len) {
            actual = len;
        }

        if (actual > 0) {
            System.arraycopy(msgdata, currentPos, b, off, actual);
            currentPos += actual;
        }

        return actual;
    } catch (MessageStoreEOFException mse) {
        return -1;
    } catch (InterruptedException ex) {
        throw new InterruptedIOException("The thread was interrupted whilst waiting for channel data");
    }
}

From source file:org.apache.cxf.transport.http.asyncclient.SharedInputBuffer.java

protected void waitForData(int waitPos) throws IOException {
    this.lock.lock();
    try {
        try {
            while (true) {
                if (this.waitingBuffer != null && this.waitingBuffer.position() > waitPos) {
                    return;
                }
                if (super.hasData()) {
                    return;
                }
                if (this.endOfStream) {
                    return;
                }
                if (this.shutdown) {
                    throw new InterruptedIOException("Input operation aborted");
                }
                if (this.ioctrl != null) {
                    this.ioctrl.requestInput();
                }
                this.condition.await();
            }
        } catch (InterruptedException ex) {
            throw new IOException("Interrupted while waiting for more data");
        }
    } finally {
        this.lock.unlock();
    }
}

From source file:org.apache.hadoop.hbase.client.RpcRetryingCaller.java

/**
 * Retries if invocation fails.
 * @param callTimeout Timeout for this call
 * @param callable The {@link RetryingCallable} to run.
 * @return an object of type T
 * @throws IOException if a remote or network exception occurs
 * @throws RuntimeException other unspecified error
 */
public T callWithRetries(RetryingCallable<T> callable, int callTimeout) throws IOException, RuntimeException {
    List<RetriesExhaustedException.ThrowableWithExtraContext> exceptions = new ArrayList<RetriesExhaustedException.ThrowableWithExtraContext>();
    this.globalStartTime = EnvironmentEdgeManager.currentTimeMillis();
    for (int tries = 0;; tries++) {
        long expectedSleep;
        try {
            callable.prepare(tries != 0); // if called with false, check table status on ZK
            return callable.call(getRemainingTime(callTimeout));
        } catch (Throwable t) {
            ExceptionUtil.rethrowIfInterrupt(t);
            if (LOG.isTraceEnabled()) {
                LOG.trace(
                        "Call exception, tries=" + tries + ", retries=" + retries + ", retryTime="
                                + (EnvironmentEdgeManager.currentTimeMillis() - this.globalStartTime) + "ms",
                        t);
            }
            // translateException throws exception when should not retry: i.e. when request is bad.
            t = translateException(t);
            callable.throwable(t, retries != 1);
            RetriesExhaustedException.ThrowableWithExtraContext qt = new RetriesExhaustedException.ThrowableWithExtraContext(
                    t, EnvironmentEdgeManager.currentTimeMillis(), toString());
            exceptions.add(qt);
            if (tries >= retries - 1) {
                throw new RetriesExhaustedException(tries, exceptions);
            }
            // If the server is dead, we need to wait a little before retrying, to give
            // a chance to the regions to be reassigned to other servers.
            // tries hasn't been bumped up yet so we use "tries + 1" to get right pause time
            expectedSleep = callable.sleep(pause, tries + 1);

            // If, after the planned sleep, there won't be enough time left, we stop now.
            long duration = singleCallDuration(expectedSleep);
            if (duration > callTimeout) {
                String msg = "callTimeout=" + callTimeout + ", callDuration=" + duration + ": "
                        + callable.getExceptionMessageAdditionalDetail();
                throw (SocketTimeoutException) (new SocketTimeoutException(msg).initCause(t));
            }
        }
        try {
            Thread.sleep(expectedSleep);
        } catch (InterruptedException e) {
            throw new InterruptedIOException("Interrupted after " + tries + " tries  on " + retries);
        }
    }
}

From source file:com.marklogic.contentpump.utilities.TransformHelper.java

/**
 * Builds the transform insert query used for import of all file types except archive.
 *
 * @param conf the Hadoop job configuration
 * @param query the AdhocQuery to populate
 * @param moduleUri URI of the transform module
 * @param functionNs namespace of the transform function
 * @param functionName name of the transform function
 * @param functionParam parameters passed to the transform function
 * @param uri the document URI
 * @param value the document content
 * @param type the content type name
 * @param cOptions the content creation options
 * @return the populated query
 * @throws InterruptedIOException if the content type is MIXED or UNKNOWN
 * @throws UnsupportedEncodingException if the configured encoding is not supported
 */
public static AdhocQuery getTransformInsertQry(Configuration conf, AdhocQuery query, String moduleUri,
        String functionNs, String functionName, String functionParam, String uri, Object value, String type,
        ContentCreateOptions cOptions) throws InterruptedIOException, UnsupportedEncodingException {
    HashMap<String, String> optionsMap = new HashMap<String, String>();

    query.setNewStringVariable("URI", uri);
    ContentType contentType = ContentType.valueOf(type);
    if (contentType == ContentType.MIXED) {
        // get type from mimetype map
        contentType = ContentType.forName(getTypeFromMap(uri));
    }

    switch (contentType) {
    case BINARY:
        query.setNewVariable("CONTENT", ValueType.XS_BASE64_BINARY,
                Base64.encodeBytes(((BytesWritable) value).getBytes(), 0, ((BytesWritable) value).getLength()));
        optionsMap.put("value-type", ValueType.XS_BASE64_BINARY.toString());
        break;

    case TEXT:
        if (value instanceof BytesWritable) {
            // in MIXED type, value is byteswritable
            String encoding = cOptions.getEncoding();
            query.setNewStringVariable("CONTENT", new String(((BytesWritable) value).getBytes(), 0,
                    ((BytesWritable) value).getLength(), encoding));
        } else {
            // must be text or xml
            query.setNewStringVariable("CONTENT", ((Text) value).toString());
        }
        optionsMap.put("value-type", ValueType.TEXT.toString());
        break;
    case JSON:
    case XML:
        if (value instanceof BytesWritable) {
            // in MIXED type, value is byteswritable
            String encoding = cOptions.getEncoding();
            query.setNewStringVariable("CONTENT", new String(((BytesWritable) value).getBytes(), 0,
                    ((BytesWritable) value).getLength(), encoding));
        } else if (value instanceof RDFWritable) {
            //RDFWritable's value is Text
            query.setNewStringVariable("CONTENT", ((RDFWritable) value).getValue().toString());
        } else if (value instanceof ContentWithFileNameWritable) {
            query.setNewStringVariable("CONTENT", ((ContentWithFileNameWritable) value).getValue().toString());
        } else {
            // must be text or xml
            query.setNewStringVariable("CONTENT", ((Text) value).toString());
        }
        optionsMap.put("value-type", ValueType.XS_STRING.toString());
        break;
    case MIXED:
    case UNKNOWN:
        throw new InterruptedIOException("Unexpected:" + contentType);
    default:
        throw new UnsupportedOperationException("invalid type:" + contentType);
    }
    String namespace = cOptions.getNamespace();
    if (namespace != null) {
        optionsMap.put("namespace", namespace);
    }
    String lang = cOptions.getLanguage();
    if (lang != null) {
        optionsMap.put("language", "default-language=" + lang);
    }
    ContentPermission[] perms = cOptions.getPermissions();
    StringBuilder rolesReadList = new StringBuilder();
    StringBuilder rolesExeList = new StringBuilder();
    StringBuilder rolesUpdateList = new StringBuilder();
    StringBuilder rolesInsertList = new StringBuilder();
    if (perms != null && perms.length > 0) {
        for (ContentPermission cp : perms) {
            String roleName = cp.getRole();
            if (roleName == null || roleName.isEmpty()) {
                LOG.error("Illegal role name: " + roleName);
                continue;
            }
            ContentCapability cc = cp.getCapability();
            if (cc.equals(ContentCapability.READ)) {
                if (rolesReadList.length() != 0) {
                    rolesReadList.append(",");
                }
                rolesReadList.append(roleName);
            } else if (cc.equals(ContentCapability.EXECUTE)) {
                if (rolesExeList.length() != 0) {
                    rolesExeList.append(",");
                }
                rolesExeList.append(roleName);
            } else if (cc.equals(ContentCapability.INSERT)) {
                if (rolesInsertList.length() != 0) {
                    rolesInsertList.append(",");
                }
                rolesInsertList.append(roleName);
            } else if (cc.equals(ContentCapability.UPDATE)) {
                if (rolesUpdateList.length() != 0) {
                    rolesUpdateList.append(",");
                }
                rolesUpdateList.append(roleName);
            }
        }
    }
    optionsMap.put("roles-read", rolesReadList.toString());
    optionsMap.put("roles-execute", rolesExeList.toString());
    optionsMap.put("roles-update", rolesUpdateList.toString());
    optionsMap.put("roles-insert", rolesInsertList.toString());

    String[] collections = cOptions.getCollections();
    StringBuilder sb = new StringBuilder();
    if (collections != null || value instanceof ContentWithFileNameWritable) {
        if (collections != null) {
            for (int i = 0; i < collections.length; i++) {
                if (i != 0)
                    sb.append(",");
                sb.append(collections[i].trim());
            }
        }

        if (value instanceof ContentWithFileNameWritable) {
            if (collections != null)
                sb.append(",");
            sb.append(((ContentWithFileNameWritable) value).getFileName());
        }

        optionsMap.put("collections", sb.toString());
    }

    optionsMap.put("quality", String.valueOf(cOptions.getQuality()));
    DocumentRepairLevel repairLevel = cOptions.getRepairLevel();
    if (!DocumentRepairLevel.DEFAULT.equals(repairLevel)) {
        optionsMap.put("xml-repair-level", "repair-" + repairLevel);
    }

    String optionElem = mapToElement(optionsMap);
    query.setNewVariable("INSERT-OPTIONS", ValueType.ELEMENT, optionElem);
    return query;
}

From source file:org.apache.hadoop.hdfs.DFSOutputStream.java

/** Use {@link ByteArrayManager} to create buffer for non-heartbeat packets.*/
protected DFSPacket createPacket(int packetSize, int chunksPerPkt, long offsetInBlock, long seqno,
        boolean lastPacketInBlock) throws InterruptedIOException {
    final byte[] buf;
    final int bufferSize = PacketHeader.PKT_MAX_HEADER_LEN + packetSize;

    try {
        buf = byteArrayManager.newByteArray(bufferSize);
    } catch (InterruptedException ie) {
        final InterruptedIOException iioe = new InterruptedIOException("seqno=" + seqno);
        iioe.initCause(ie);
        throw iioe;
    }

    return new DFSPacket(buf, chunksPerPkt, offsetInBlock, seqno, checksum.getChecksumSize(),
            lastPacketInBlock);
}

From source file:org.apache.hadoop.hbase.catalog.MetaEditor.java

/**
 * Execute the passed <code>mutations</code> against <code>hbase:meta</code> table.
 * @param ct CatalogTracker on whose back we will ride the edit.
 * @param mutations Puts and Deletes to execute on hbase:meta
 * @throws IOException
 */
public static void mutateMetaTable(final CatalogTracker ct, final List<Mutation> mutations) throws IOException {
    HTable t = MetaReader.getMetaHTable(ct);
    try {
        t.batch(mutations);
    } catch (InterruptedException e) {
        InterruptedIOException ie = new InterruptedIOException(e.getMessage());
        ie.initCause(e);
        throw ie;
    } finally {
        t.close();
    }
}

From source file:org.apache.hadoop.hbase.util.ModifyRegionUtils.java

/**
 * Create new set of regions on the specified file-system.
 * NOTE: that you should add the regions to hbase:meta after this operation.
 *
 * @param exec Thread Pool Executor
 * @param conf {@link Configuration}
 * @param rootDir Root directory for HBase instance
 * @param tableDir table directory
 * @param hTableDescriptor description of the table
 * @param newRegions {@link HRegionInfo} that describes the regions to create
 * @param task {@link RegionFillTask} custom code to populate region after creation
 * @return the list of regions created, or null if newRegions is null
 * @throws IOException
 */
public static List<HRegionInfo> createRegions(final ThreadPoolExecutor exec, final Configuration conf,
        final Path rootDir, final Path tableDir, final HTableDescriptor hTableDescriptor,
        final HRegionInfo[] newRegions, final RegionFillTask task) throws IOException {
    if (newRegions == null)
        return null;
    int regionNumber = newRegions.length;
    CompletionService<HRegionInfo> completionService = new ExecutorCompletionService<HRegionInfo>(exec);
    List<HRegionInfo> regionInfos = new ArrayList<HRegionInfo>();
    for (final HRegionInfo newRegion : newRegions) {
        completionService.submit(new Callable<HRegionInfo>() {
            @Override
            public HRegionInfo call() throws IOException {
                return createRegion(conf, rootDir, tableDir, hTableDescriptor, newRegion, task);
            }
        });
    }
    try {
        // wait for all regions to finish creation
        for (int i = 0; i < regionNumber; i++) {
            Future<HRegionInfo> future = completionService.take();
            HRegionInfo regionInfo = future.get();
            regionInfos.add(regionInfo);
        }
    } catch (InterruptedException e) {
        LOG.error("Caught " + e + " during region creation");
        throw new InterruptedIOException(e.getMessage());
    } catch (ExecutionException e) {
        throw new IOException(e);
    }
    return regionInfos;
}

From source file:com.sshtools.j2ssh.io.DynamicBuffer.java

/**
 * Reads up to len bytes from the buffer into the supplied array, blocking until data is available.
 *
 * @param data the destination buffer
 * @param offset the offset in the buffer at which to start storing data
 * @param len the maximum number of bytes to read
 *
 * @return the number of bytes read, or -1 if the buffer is closed and no data is available
 *
 * @throws IOException if an I/O error occurs
 * @throws InterruptedIOException if the blocking operation is interrupted
 */
protected synchronized int read(byte[] data, int offset, int len) throws IOException {
    try {
        block();
    } catch (InterruptedException ex) {
        throw new InterruptedIOException("The blocking operation was interrupted");
    }

    if (closed && available() <= 0) {
        return -1;
    }

    int read = (len > (writepos - readpos)) ? (writepos - readpos) : len;
    System.arraycopy(buf, readpos, data, offset, read);
    readpos += read;

    return read;

}