Example usage for the java.io.InterruptedIOException(String) constructor

Introduction

On this page you can find example usage for the java.io.InterruptedIOException(String) constructor, collected from open-source projects.

Prototype

public InterruptedIOException(String s) 

Document

Constructs an InterruptedIOException with the specified detail message.
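
The recurring pattern in the examples below is to translate an InterruptedException thrown by a blocking call into an InterruptedIOException, so that the interruption can propagate through a method whose signature only allows IOException. A minimal sketch of that pattern, using an illustrative InterruptAwareWait class and CountDownLatch field that are not taken from any of the sources below:

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.concurrent.CountDownLatch;

public class InterruptAwareWait {
    private final CountDownLatch latch = new CountDownLatch(1);

    // Translate InterruptedException into InterruptedIOException so it fits
    // an IOException-only signature, preserving the interrupt flag and cause.
    public void waitForResource() throws IOException {
        try {
            latch.await(); // any blocking call that throws InterruptedException
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the interrupt status for callers
            InterruptedIOException ie = new InterruptedIOException(e.getMessage());
            ie.initCause(e); // keep the original exception and its stack trace
            throw ie;
        }
    }
}

Restoring the interrupt status before rethrowing keeps the interruption visible to calling code; several of the examples below omit this step.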

Usage

From source file:org.apache.hadoop.hbase.master.handler.CreateTableHandler.java

public CreateTableHandler prepare() throws NotAllMetaRegionsOnlineException, TableExistsException, IOException {
    int timeout = conf.getInt("hbase.client.catalog.timeout", 10000);
    // Need hbase:meta availability to create a table
    try {
        if (catalogTracker.waitForMeta(timeout) == null) {
            throw new NotAllMetaRegionsOnlineException();
        }
    } catch (InterruptedException e) {
        LOG.warn("Interrupted waiting for meta availability", e);
        InterruptedIOException ie = new InterruptedIOException(e.getMessage());
        ie.initCause(e);
        throw ie;
    }

    // Acquire the table write lock, blocking. Make sure that it is released.
    this.tableLock.acquire();
    boolean success = false;
    try {
        TableName tableName = this.hTableDescriptor.getTableName();
        if (MetaReader.tableExists(catalogTracker, tableName)) {
            throw new TableExistsException(tableName);
        }

        checkAndSetEnablingTable(assignmentManager, tableName);
        success = true;
    } finally {
        if (!success) {
            releaseTableLock();
        }
    }
    return this;
}
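
This example attaches the original InterruptedException as the cause via initCause(e), so the full stack trace of the interruption survives the translation; several of the later examples pass only e.getMessage() and lose it.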

From source file:com.dongfang.net.http.client.multipart.content.StringBody.java

public void writeTo(final OutputStream out) throws IOException {
    if (out == null) {
        throw new IllegalArgumentException("Output stream may not be null");
    }
    InputStream in = new ByteArrayInputStream(this.content);
    byte[] tmp = new byte[4096];
    int l;
    while ((l = in.read(tmp)) != -1) {
        out.write(tmp, 0, l);
        callBackInfo.pos += l;
        if (!callBackInfo.doCallBack(false)) {
            throw new InterruptedIOException("stop");
        }
    }
    out.flush();
}
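
Here the InterruptedIOException does not come from a thread interrupt at all: the upload callback returns false to request cancellation, and the exception is used as the signal that aborts the copy loop. InterruptedIOException also has a public bytesTransferred field, which code like this could use to report how many bytes were written before the stop.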

From source file:org.apache.manifoldcf.connectorcommon.common.InterruptibleSocketFactory.java

protected Socket fireOffThread(InetAddress address, int port, InetAddress localHost, int localPort)
        throws IOException {
    SocketCreateThread thread = new SocketCreateThread(wrappedFactory, address, port, localHost, localPort);
    thread.start();
    try {
        // Wait for thread to complete for only a certain amount of time!
        thread.join(connectTimeoutMilliseconds);
        // If join() times out, then the thread is going to still be alive.
        if (thread.isAlive()) {
            // Kill the thread - not that this will necessarily work, but we need to try
            thread.interrupt();
            throw new ConnectTimeoutException("Secure connection timed out");
        }
        // The thread terminated.  Throw an error if there is one, otherwise return the result.
        Throwable t = thread.getException();
        if (t != null) {
            if (t instanceof java.net.SocketTimeoutException)
                throw (java.net.SocketTimeoutException) t;
            else if (t instanceof ConnectTimeoutException)
                throw (ConnectTimeoutException) t;
            else if (t instanceof InterruptedIOException)
                throw (InterruptedIOException) t;
            else if (t instanceof IOException)
                throw (IOException) t;
            else if (t instanceof Error)
                throw (Error) t;
            else if (t instanceof RuntimeException)
                throw (RuntimeException) t;
            throw new Error("Received an unexpected exception: " + t.getMessage(), t);
        }
        return thread.getResult();
    } catch (InterruptedException e) {
        throw new InterruptedIOException("Interrupted: " + e.getMessage());
    }

}
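
The pattern here is to run the blocking socket creation on a helper thread and join(connectTimeoutMilliseconds): if the thread is still alive after the join, the connect has timed out and a ConnectTimeoutException is thrown. The InterruptedIOException is reserved for the case where the waiting thread itself is interrupted.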

From source file:org.apache.hadoop.hbase.util.MultiHConnection.java

/**
 * Randomly pick a connection and process the batch of actions for a given table
 * @param actions the actions
 * @param tableName table name
 * @param results the results array
 * @param callback the callback to invoke as each action completes, or null
 * @throws IOException
 */
@SuppressWarnings("deprecation")
public <R> void processBatchCallback(List<? extends Row> actions, TableName tableName, Object[] results,
        Batch.Callback<R> callback) throws IOException {
    // Currently used by RegionStateStore
    // A deprecated method is used because multiple threads accessing RegionStateStore each do a
    // single put and HTable is not thread safe. An alternative would be to create an HTable
    // instance for each put, but that is not very efficient.
    // See HBASE-11610 for more details.
    try {
        hConnections[ThreadLocalRandom.current().nextInt(noOfConnections)].processBatchCallback(actions,
                tableName, this.batchPool, results, callback);
    } catch (InterruptedException e) {
        throw new InterruptedIOException(e.getMessage());
    }
}

From source file:com.sshtools.j2ssh.connection.ChannelInputStream.java

/**
 * Reads the next byte of data from this channel input stream.
 *
 * @return the next byte of data, or -1 if the end of the stream has been reached
 *
 * @throws java.io.IOException if an I/O error occurs
 * @throws InterruptedIOException if the thread is interrupted while waiting for channel data
 */
public int read() throws java.io.IOException {
    try {
        block();

        return msgdata[currentPos++] & 0xFF;
    } catch (MessageStoreEOFException mse) {
        return -1;
    } catch (InterruptedException ex) {
        throw new InterruptedIOException("The thread was interrupted whilst waiting for channel data");
    }
}
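
This read() implementation keeps the InputStream contract: end of stream (signalled by MessageStoreEOFException) maps to -1, while an interrupt while blocked waiting for data surfaces as InterruptedIOException. Note that only a descriptive message is attached here; the original InterruptedException is not set as the cause.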

From source file:org.apache.hadoop.hbase.snapshot.SnapshotManifestV1.java

static List<SnapshotRegionManifest> loadRegionManifests(final Configuration conf, final Executor executor,
        final FileSystem fs, final Path snapshotDir, final SnapshotDescription desc) throws IOException {
    FileStatus[] regions = FSUtils.listStatus(fs, snapshotDir, new FSUtils.RegionDirFilter(fs));
    if (regions == null) {
        LOG.info("No regions under directory:" + snapshotDir);
        return null;
    }

    final ExecutorCompletionService<SnapshotRegionManifest> completionService = new ExecutorCompletionService<SnapshotRegionManifest>(
            executor);
    for (final FileStatus region : regions) {
        completionService.submit(new Callable<SnapshotRegionManifest>() {
            @Override
            public SnapshotRegionManifest call() throws IOException {
                HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, region.getPath());
                return buildManifestFromDisk(conf, fs, snapshotDir, hri);
            }
        });
    }

    ArrayList<SnapshotRegionManifest> regionsManifest = new ArrayList<SnapshotRegionManifest>(regions.length);
    try {
        for (int i = 0; i < regions.length; ++i) {
            regionsManifest.add(completionService.take().get());
        }
    } catch (InterruptedException e) {
        throw new InterruptedIOException(e.getMessage());
    } catch (ExecutionException e) {
        IOException ex = new IOException();
        ex.initCause(e.getCause());
        throw ex;
    }
    return regionsManifest;
}
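
This is the usual ExecutorCompletionService drain loop: an InterruptedException from take() is translated into InterruptedIOException, while an ExecutionException is unwrapped so that the thrown IOException carries the worker's original failure as its cause. SnapshotManifestV2 below uses the same pattern for its manifest files.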

From source file:org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor.java

/**
 * Do a minor/major compaction on an explicit set of storefiles from a Store.
 */
public List<Path> compact(final CompactionRequest request, CompactionThroughputController throughputController)
        throws IOException {
    FileDetails fd = getFileDetails(request.getFiles(), request.isAllFiles());
    this.progress = new CompactionProgress(fd.maxKeyCount);

    // Find the smallest read point across all the Scanners.
    long smallestReadPoint = getSmallestReadPoint();

    List<StoreFileScanner> scanners;
    Collection<StoreFile> readersToClose;
    if (this.conf.getBoolean("hbase.regionserver.compaction.private.readers", true)) {
        // Clone all StoreFiles, so we'll do the compaction on an independent copy of the
        // StoreFiles, HFiles, and their readers
        readersToClose = new ArrayList<StoreFile>(request.getFiles().size());
        for (StoreFile f : request.getFiles()) {
            readersToClose.add(new StoreFile(f));
        }
        scanners = createFileScanners(readersToClose, smallestReadPoint,
                store.throttleCompaction(request.getSize()));
    } else {
        readersToClose = Collections.emptyList();
        scanners = createFileScanners(request.getFiles(), smallestReadPoint,
                store.throttleCompaction(request.getSize()));
    }

    StoreFile.Writer writer = null;
    List<Path> newFiles = new ArrayList<Path>();
    boolean cleanSeqId = false;
    IOException e = null;
    try {
        InternalScanner scanner = null;
        try {
            /* Include deletes, unless we are doing a compaction of all files */
            ScanType scanType = request.isRetainDeleteMarkers() ? ScanType.COMPACT_RETAIN_DELETES
                    : ScanType.COMPACT_DROP_DELETES;
            scanner = preCreateCoprocScanner(request, scanType, fd.earliestPutTs, scanners);
            if (scanner == null) {
                scanner = createScanner(store, scanners, scanType, smallestReadPoint, fd.earliestPutTs);
            }
            scanner = postCreateCoprocScanner(request, scanType, scanner);
            if (scanner == null) {
                // NULL scanner returned from coprocessor hooks means skip normal processing.
                return newFiles;
            }
            // Create the writer even if there are no KVs (an empty store file is also OK),
            // because we need to record the max seq id for the store file; see HBASE-6059
            if (fd.minSeqIdToKeep > 0) {
                smallestReadPoint = Math.min(fd.minSeqIdToKeep, smallestReadPoint);
                cleanSeqId = true;
            }

            writer = createTmpWriter(fd, store.throttleCompaction(request.getSize()));
            boolean finished = performCompaction(fd, scanner, writer, smallestReadPoint, cleanSeqId,
                    throughputController, request.isAllFiles());

            if (!finished) {
                writer.close();
                store.getFileSystem().delete(writer.getPath(), false);
                writer = null;
                throw new InterruptedIOException("Aborting compaction of store " + store + " in region "
                        + store.getRegionInfo().getRegionNameAsString() + " because it was interrupted.");
            }
        } finally {
            if (scanner != null) {
                scanner.close();
            }
        }
    } catch (IOException ioe) {
        e = ioe;
        // Throw the exception
        throw ioe;
    } finally {
        try {
            if (writer != null) {
                if (e != null) {
                    writer.close();
                } else {
                    writer.appendMetadata(fd.maxSeqId, request.isAllFiles());
                    writer.close();
                    newFiles.add(writer.getPath());
                }
            }
        } finally {
            for (StoreFile f : readersToClose) {
                try {
                    f.closeReader(true);
                } catch (IOException ioe) {
                    LOG.warn("Exception closing " + f, ioe);
                }
            }
        }
    }
    return newFiles;
}
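
In this compactor, InterruptedIOException marks an aborted compaction: when performCompaction returns false, the partially written temporary file is closed and deleted before the exception is thrown, so no orphan file is left behind.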

From source file:org.eclipse.emf.mwe.internal.core.debug.communication.PackageReceiver.java

private void checkForException(final AbstractPackage packet) throws InterruptedIOException {
    if (packet != null) {
        return;
    }
    if ((exception != null) && !(exception instanceof IOException)) {
        // print stack trace if it is not an IOException
        logger.error(exception.getMessage(), exception);
    }
    if (interrupt) {
        throw new InterruptedIOException("packet receiver is going to close");
    }
    if (exception != null) {
        throw new InterruptedIOException(exception.getMessage());
    }
    throw new InterruptedIOException("timeout reading a packet");
}

From source file:org.apache.hadoop.hbase.ipc.NettyRpcServer.java

public NettyRpcServer(Server server, String name, List<BlockingServiceAndInterface> services,
        InetSocketAddress bindAddress, Configuration conf, RpcScheduler scheduler) throws IOException {
    super(server, name, services, bindAddress, conf, scheduler);
    this.bindAddress = bindAddress;
    EventLoopGroup eventLoopGroup;
    Class<? extends ServerChannel> channelClass;
    if (server instanceof HRegionServer) {
        NettyEventLoopGroupConfig config = ((HRegionServer) server).getEventLoopGroupConfig();
        eventLoopGroup = config.group();
        channelClass = config.serverChannelClass();
    } else {
        eventLoopGroup = new NioEventLoopGroup(0,
                new DefaultThreadFactory("NettyRpcServer", true, Thread.MAX_PRIORITY));
        channelClass = NioServerSocketChannel.class;
    }
    ServerBootstrap bootstrap = new ServerBootstrap().group(eventLoopGroup).channel(channelClass)
            .childOption(ChannelOption.TCP_NODELAY, tcpNoDelay)
            .childOption(ChannelOption.SO_KEEPALIVE, tcpKeepAlive)
            .childHandler(new ChannelInitializer<Channel>() {

                @Override
                protected void initChannel(Channel ch) throws Exception {
                    ChannelPipeline pipeline = ch.pipeline();
                    FixedLengthFrameDecoder preambleDecoder = new FixedLengthFrameDecoder(6);
                    preambleDecoder.setSingleDecode(true);
                    pipeline.addLast("preambleDecoder", preambleDecoder);
                    pipeline.addLast("preambleHandler", new NettyRpcServerPreambleHandler(NettyRpcServer.this));
                    pipeline.addLast("frameDecoder",
                            new LengthFieldBasedFrameDecoder(maxRequestSize, 0, 4, 0, 4, true));
                    pipeline.addLast("decoder", new NettyRpcServerRequestDecoder(allChannels, metrics));
                    pipeline.addLast("encoder", new NettyRpcServerResponseEncoder(metrics));
                }
            });
    try {
        serverChannel = bootstrap.bind(this.bindAddress).sync().channel();
        LOG.info("NettyRpcServer bind to address=" + serverChannel.localAddress());
    } catch (InterruptedException e) {
        throw new InterruptedIOException(e.getMessage());
    }
    initReconfigurable(conf);
    this.scheduler.init(new RpcSchedulerContext(this));
}

From source file:org.apache.hadoop.hbase.snapshot.SnapshotManifestV2.java

static List<SnapshotRegionManifest> loadRegionManifests(final Configuration conf, final Executor executor,
        final FileSystem fs, final Path snapshotDir, final SnapshotDescription desc) throws IOException {
    FileStatus[] manifestFiles = FSUtils.listStatus(fs, snapshotDir, new PathFilter() {
        @Override
        public boolean accept(Path path) {
            return path.getName().startsWith(SNAPSHOT_MANIFEST_PREFIX);
        }
    });

    if (manifestFiles == null || manifestFiles.length == 0)
        return null;

    final ExecutorCompletionService<SnapshotRegionManifest> completionService = new ExecutorCompletionService<SnapshotRegionManifest>(
            executor);
    for (final FileStatus st : manifestFiles) {
        completionService.submit(new Callable<SnapshotRegionManifest>() {
            @Override
            public SnapshotRegionManifest call() throws IOException {
                FSDataInputStream stream = fs.open(st.getPath());
                try {
                    return SnapshotRegionManifest.parseFrom(stream);
                } finally {
                    stream.close();
                }
            }
        });
    }

    ArrayList<SnapshotRegionManifest> regionsManifest = new ArrayList<SnapshotRegionManifest>(
            manifestFiles.length);
    try {
        for (int i = 0; i < manifestFiles.length; ++i) {
            regionsManifest.add(completionService.take().get());
        }
    } catch (InterruptedException e) {
        throw new InterruptedIOException(e.getMessage());
    } catch (ExecutionException e) {
        IOException ex = new IOException();
        ex.initCause(e.getCause());
        throw ex;
    }
    return regionsManifest;
}