Example usage for java.io.InterruptedIOException: the InterruptedIOException() constructor

List of usage examples for the java.io.InterruptedIOException() no-argument constructor

Introduction

This page collects real-world usage examples of the java.io.InterruptedIOException() constructor.

Prototype

public InterruptedIOException() 

Document

Constructs an InterruptedIOException with null as its error detail message.
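Because this constructor takes no message, callers usually attach the original InterruptedException as the cause via initCause and restore the thread's interrupt status before rethrowing. Below is a minimal, self-contained sketch of that idiom, as it appears throughout the examples on this page (the sleepChecked method name is hypothetical, used only for illustration):

import java.io.InterruptedIOException;

public class InterruptedIOExceptionIdiom {

    // Converts an InterruptedException raised while sleeping into an
    // InterruptedIOException, preserving the cause and the interrupt flag.
    static void sleepChecked(long millis) throws InterruptedIOException {
        try {
            Thread.sleep(millis);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the interrupt status
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        }
    }

    public static void main(String[] args) throws InterruptedIOException {
        sleepChecked(100);
        System.out.println("slept without interruption");
    }
}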

Usage

From source file:com.sa.npopa.samples.hbase.rest.client.RemoteHTable.java

@Override
public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, byte[] value, Delete delete)
        throws IOException {
    Put put = new Put(row);
    // the column whose value is checked
    put.add(new KeyValue(row, family, qualifier, value));
    CellSetModel model = buildModelFromPut(put);
    StringBuilder sb = new StringBuilder();
    sb.append('/');
    sb.append(Bytes.toStringBinary(name));
    sb.append('/');
    sb.append(Bytes.toStringBinary(row));
    sb.append("?check=delete");

    for (int i = 0; i < maxRetries; i++) {
        Response response = client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF,
                model.createProtobufOutput());
        int code = response.getCode();
        switch (code) {
        case 200:
            return true;
        case 304: // NOT-MODIFIED
            return false;
        case 509:
            try {
                Thread.sleep(sleepTime);
            } catch (final InterruptedException e) {
                throw (InterruptedIOException) new InterruptedIOException().initCause(e);
            }
            break;
        default:
            throw new IOException("checkAndDelete request failed with " + code);
        }
    }
    throw new IOException("checkAndDelete request timed out");
}

From source file:org.apache.hadoop.hbase.util.FSUtils.java

/**
 * Sets version of file system
 *
 * @param fs filesystem object
 * @param rootdir hbase root directory
 * @param version version to set
 * @param wait time to wait between retries
 * @param retries number of times to retry before throwing an IOException
 * @throws IOException e
 */
public static void setVersion(FileSystem fs, Path rootdir, String version, int wait, int retries)
        throws IOException {
    Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
    while (true) {
        try {
            FSDataOutputStream s = fs.create(versionFile);
            s.write(toVersionByteArray(version));
            s.close();
            LOG.debug("Created version file at " + rootdir.toString() + " with version=" + version);
            return;
        } catch (IOException e) {
            if (retries > 0) {
                LOG.warn("Unable to create version file at " + rootdir.toString() + ", retrying", e);
                fs.delete(versionFile, false);
                try {
                    if (wait > 0) {
                        Thread.sleep(wait);
                    }
                } catch (InterruptedException ie) {
                    throw (InterruptedIOException) new InterruptedIOException().initCause(ie);
                }
                retries--;
            } else {
                throw e;
            }
        }
    }
}

From source file:org.apache.hadoop.hbase.master.MasterFileSystem.java

private NavigableMap<HRegionInfo, Result> getServerUserRegions(ServerName serverName) throws IOException {
    if (!this.master.isStopped()) {
        try {
            this.master.getCatalogTracker().waitForMeta();
            return MetaReader.getServerUserRegions(this.master.getCatalogTracker(), serverName);
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        }
    }
    return null;
}

From source file:org.lockss.util.ObjectSerializerTester.java

/**
 * <p>Tests that an {@link InterruptedIOException} is thrown when
 * I/O fails because of an underlying
 * {@link InterruptedIOException}.</p>
 * @throws Exception if an unexpected or unhandled problem arises.
 */
public void testThrowsInterruptedIOException() throws Exception {

    // Make a sample object
    final ExtMapBean obj = makeSample_ExtMapBean();

    // Define variant actions
    DoSomething[] actions = new DoSomething[] {
            // With a Writer
            new DoSomething() {
                public void doSomething(ObjectSerializer serializer) throws Exception {
                    serializer.serialize(new StringWriter() {
                        public void write(char[] cbuf, int off, int len) {
                            throw new RuntimeException(new InterruptedIOException());
                        }
                    }, obj);
                }
            },
            // With an OutputStream
            new DoSomething() {
                public void doSomething(ObjectSerializer serializer) throws Exception {
                    serializer.serialize(new ByteArrayOutputStream() {
                        public synchronized void write(byte[] b, int off, int len) {
                            throw new RuntimeException(new InterruptedIOException());
                        }
                    }, obj);
                }
            },
            // With a Reader
            new DoSomething() {
                public void doSomething(ObjectSerializer serializer) throws Exception {
                    serializer.deserialize(new StringReader("") {
                        public int read(char[] cbuf, int off, int len) throws IOException {
                            throw new InterruptedIOException();
                        }
                    });
                }
            },
            // With an InputStream
            new DoSomething() {
                public void doSomething(ObjectSerializer serializer) throws Exception {
                    serializer.deserialize(new StringInputStream("") {
                        public int read(byte[] b, int off, int len) throws IOException {
                            throw new InterruptedIOException();
                        }
                    });
                }
            }, };

    // For each variant action...
    for (int action = 0; action < actions.length; ++action) {
        logger.debug("Begin with action " + action);

        // For each type of serializer...
        ObjectSerializer[] serializers = getObjectSerializers_ExtMapBean();
        for (int serializer = 0; serializer < serializers.length; ++serializer) {
            logger.debug("Begin with deserializer " + serializer);

            try {
                // Perform variant action
                actions[action].doSomething(serializers[serializer]);
                fail("Should have thrown InterruptedIOException (" + action + "," + serializer + ")");
            } catch (InterruptedIOException ignore) {
                // success
            } catch (RuntimeException re) {
                if (re.getCause() != null && re.getCause() instanceof InterruptedIOException) {
                    fail("Should have thrown InterruptedIOException (" + action + "," + serializer + ")");
                } else {
                    // success
                }
            }
        }
    }

}

From source file:org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.java

private IOException convertInterruptedExceptionToIOException(final InterruptedException ie) {
    Thread.currentThread().interrupt();
    IOException ioe = new InterruptedIOException();
    ioe.initCause(ie);
    return ioe;
}
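A converter like the one above is typically invoked from a catch block so that interruption surfaces to callers as an IOException subtype. A minimal sketch under that assumption (the class name and the waitForSignal method are hypothetical, not part of AbstractFSWAL):

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.concurrent.CountDownLatch;

class InterruptConversionSketch {

    // Same conversion pattern as above: mark the thread interrupted,
    // then wrap the InterruptedException in an InterruptedIOException.
    private static IOException convertInterruptedExceptionToIOException(InterruptedException ie) {
        Thread.currentThread().interrupt();
        IOException ioe = new InterruptedIOException();
        ioe.initCause(ie);
        return ioe;
    }

    // Hypothetical caller: blocks on a latch and rethrows interruption as an IOException.
    static void waitForSignal(CountDownLatch latch) throws IOException {
        try {
            latch.await();
        } catch (InterruptedException ie) {
            throw convertInterruptedExceptionToIOException(ie);
        }
    }
}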

From source file:org.apache.hadoop.hbase.master.SplitLogManager.java

/**
 * It removes stale recovering regions under /hbase/recovering-regions/[encoded region name]
 * during master initialization phase.
 * @param failedServers A set of known failed servers
 * @throws KeeperException
 */
void removeStaleRecoveringRegionsFromZK(final Set<ServerName> failedServers)
        throws KeeperException, InterruptedIOException {

    if (!this.distributedLogReplay) {
        // remove any regions in recovery from ZK; this could happen when we turn the feature on
        // and later turn it off
        ZKUtil.deleteChildrenRecursively(watcher, watcher.recoveringRegionsZNode);
        // this method is only used in distributedLogReplay mode while the master is initializing
        return;
    }

    Set<String> knownFailedServers = new HashSet<String>();
    if (failedServers != null) {
        for (ServerName tmpServerName : failedServers) {
            knownFailedServers.add(tmpServerName.getServerName());
        }
    }

    this.recoveringRegionLock.lock();
    try {
        List<String> tasks = ZKUtil.listChildrenNoWatch(watcher, watcher.splitLogZNode);
        if (tasks != null) {
            for (String t : tasks) {
                byte[] data;
                try {
                    data = ZKUtil.getData(this.watcher, ZKUtil.joinZNode(watcher.splitLogZNode, t));
                } catch (InterruptedException e) {
                    throw new InterruptedIOException();
                }
                if (data != null) {
                    SplitLogTask slt = null;
                    try {
                        slt = SplitLogTask.parseFrom(data);
                    } catch (DeserializationException e) {
                        LOG.warn("Failed parse data for znode " + t, e);
                    }
                    if (slt != null && slt.isDone()) {
                        continue;
                    }
                }
                // decode the file name
                t = ZKSplitLog.getFileName(t);
                ServerName serverName = HLogUtil.getServerNameFromHLogDirectoryName(new Path(t));
                if (serverName != null) {
                    knownFailedServers.add(serverName.getServerName());
                } else {
                    LOG.warn("Found invalid WAL log file name:" + t);
                }
            }
        }

        // remove recovering regions that don't have any RS associated with them
        List<String> regions = ZKUtil.listChildrenNoWatch(watcher, watcher.recoveringRegionsZNode);
        if (regions != null) {
            for (String region : regions) {
                String nodePath = ZKUtil.joinZNode(watcher.recoveringRegionsZNode, region);
                List<String> regionFailedServers = ZKUtil.listChildrenNoWatch(watcher, nodePath);
                if (regionFailedServers == null || regionFailedServers.isEmpty()) {
                    ZKUtil.deleteNode(watcher, nodePath);
                    continue;
                }
                boolean needMoreRecovery = false;
                for (String tmpFailedServer : regionFailedServers) {
                    if (knownFailedServers.contains(tmpFailedServer)) {
                        needMoreRecovery = true;
                        break;
                    }
                }
                if (!needMoreRecovery) {
                    ZKUtil.deleteNodeRecursively(watcher, nodePath);
                }
            }
        }
    } finally {
        this.recoveringRegionLock.unlock();
    }
}

From source file:org.apache.hadoop.hbase.master.assignment.SplitTableRegionProcedure.java

/**
 * Create Split directory
 * @param env MasterProcedureEnv
 * @throws IOException
 */
private Pair<Integer, Integer> splitStoreFiles(final MasterProcedureEnv env, final HRegionFileSystem regionFs)
        throws IOException {
    final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
    final Configuration conf = env.getMasterConfiguration();
    // The following code sets up a thread pool executor with as many slots as
    // there are files to split. It then fires up everything, waits for
    // completion and finally checks for any exception
    //
    // Note: splitStoreFiles creates daughter region dirs under the parent splits dir
    // Nothing to unroll here on failure; re-running createSplitsDir will
    // clean this up.
    int nbFiles = 0;
    final Map<String, Collection<StoreFileInfo>> files = new HashMap<String, Collection<StoreFileInfo>>(
            regionFs.getFamilies().size());
    for (String family : regionFs.getFamilies()) {
        Collection<StoreFileInfo> sfis = regionFs.getStoreFiles(family);
        if (sfis == null)
            continue;
        Collection<StoreFileInfo> filteredSfis = null;
        for (StoreFileInfo sfi : sfis) {
            // Filter. There is a lag cleaning up compacted reference files. They get cleared
            // after a delay in case outstanding Scanners still have references. Because of this,
            // the listing of the Store content may have straggler reference files. Skip these.
            // It should be safe to skip references at this point because we checked above with
            // the region if it thinks it is splittable and if we are here, it thinks it is
            // splittable.
            if (sfi.isReference()) {
                LOG.info("Skipping split of " + sfi + "; presuming ready for archiving.");
                continue;
            }
            if (filteredSfis == null) {
                filteredSfis = new ArrayList<StoreFileInfo>(sfis.size());
                files.put(family, filteredSfis);
            }
            filteredSfis.add(sfi);
            nbFiles++;
        }
    }
    if (nbFiles == 0) {
        // no files need to be split.
        return new Pair<Integer, Integer>(0, 0);
    }
    // Max #threads is the smaller of the number of storefiles and the configured default max.
    int maxThreads = Math.min(
            conf.getInt(HConstants.REGION_SPLIT_THREADS_MAX,
                    conf.getInt(HStore.BLOCKING_STOREFILES_KEY, HStore.DEFAULT_BLOCKING_STOREFILE_COUNT)),
            nbFiles);
    LOG.info("pid=" + getProcId() + " splitting " + nbFiles + " storefiles, region="
            + getParentRegion().getShortNameToLog() + ", threads=" + maxThreads);
    final ExecutorService threadPool = Executors.newFixedThreadPool(maxThreads,
            Threads.getNamedThreadFactory("StoreFileSplitter-%1$d"));
    final List<Future<Pair<Path, Path>>> futures = new ArrayList<Future<Pair<Path, Path>>>(nbFiles);

    // Split each store file.
    final TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
    for (Map.Entry<String, Collection<StoreFileInfo>> e : files.entrySet()) {
        byte[] familyName = Bytes.toBytes(e.getKey());
        final ColumnFamilyDescriptor hcd = htd.getColumnFamily(familyName);
        final Collection<StoreFileInfo> storeFiles = e.getValue();
        if (storeFiles != null && storeFiles.size() > 0) {
            final CacheConfig cacheConf = new CacheConfig(conf, hcd);
            for (StoreFileInfo storeFileInfo : storeFiles) {
                StoreFileSplitter sfs = new StoreFileSplitter(regionFs, familyName, new HStoreFile(
                        mfs.getFileSystem(), storeFileInfo, conf, cacheConf, hcd.getBloomFilterType(), true));
                futures.add(threadPool.submit(sfs));
            }
        }
    }
    // Shutdown the pool
    threadPool.shutdown();

    // Wait for all the tasks to finish
    long fileSplitTimeout = conf.getLong("hbase.master.fileSplitTimeout", 30000);
    try {
        boolean stillRunning = !threadPool.awaitTermination(fileSplitTimeout, TimeUnit.MILLISECONDS);
        if (stillRunning) {
            threadPool.shutdownNow();
            // wait for the thread pool to shut down completely.
            while (!threadPool.isTerminated()) {
                Thread.sleep(50);
            }
            throw new IOException(
                    "Took too long to split the" + " files and create the references, aborting split");
        }
    } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    }

    int daughterA = 0;
    int daughterB = 0;
    // Look for any exception
    for (Future<Pair<Path, Path>> future : futures) {
        try {
            Pair<Path, Path> p = future.get();
            daughterA += p.getFirst() != null ? 1 : 0;
            daughterB += p.getSecond() != null ? 1 : 0;
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        } catch (ExecutionException e) {
            throw new IOException(e);
        }
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("pid=" + getProcId() + " split storefiles for region " + getParentRegion().getShortNameToLog()
                + " Daughter A: " + daughterA + " storefiles, Daughter B: " + daughterB + " storefiles.");
    }
    return new Pair<Integer, Integer>(daughterA, daughterB);
}

From source file:org.apache.hadoop.hbase.util.FSUtils.java

/**
 * Checks that a cluster ID file exists in the HBase root directory
 * @param fs the root directory FileSystem
 * @param rootdir the HBase root directory in HDFS
 * @param wait how long to wait between retries
 * @return <code>true</code> if the file exists, otherwise <code>false</code>
 * @throws IOException if checking the FileSystem fails
 */
public static boolean checkClusterIdExists(FileSystem fs, Path rootdir, int wait) throws IOException {
    while (true) {
        try {
            Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
            return fs.exists(filePath);
        } catch (IOException ioe) {
            if (wait > 0) {
                LOG.warn("Unable to check cluster ID file in " + rootdir.toString() + ", retrying in " + wait
                        + "msec: " + StringUtils.stringifyException(ioe));
                try {
                    Thread.sleep(wait);
                } catch (InterruptedException e) {
                    throw (InterruptedIOException) new InterruptedIOException().initCause(e);
                }
            } else {
                throw ioe;
            }
        }
    }
}

From source file:org.apache.hadoop.hbase.coordination.ZKSplitLogManagerCoordination.java

/**
 * Create znodes /hbase/recovering-regions/[region_ids...]/[failed region server names ...] for
 * all regions of the passed-in region server
 * @param serverName the name of a region server
 * @param userRegions user regions assigned on the region server
 */
@Override
public void markRegionsRecovering(final ServerName serverName, Set<HRegionInfo> userRegions)
        throws IOException, InterruptedIOException {
    this.lastRecoveringNodeCreationTime = EnvironmentEdgeManager.currentTime();
    for (HRegionInfo region : userRegions) {
        String regionEncodeName = region.getEncodedName();
        long retries = this.zkretries;

        do {
            String nodePath = ZKUtil.joinZNode(watcher.recoveringRegionsZNode, regionEncodeName);
            long lastRecordedFlushedSequenceId = -1;
            try {
                long lastSequenceId = this.details.getMaster().getServerManager()
                        .getLastFlushedSequenceId(regionEncodeName.getBytes()).getLastFlushedSequenceId();

                /*
                 * znode layout: .../region_id[last known flushed sequence id]/failed server[last known
                 * flushed sequence id for the server]
                 */
                byte[] data = ZKUtil.getData(this.watcher, nodePath);
                if (data == null) {
                    ZKUtil.createSetData(this.watcher, nodePath, ZKUtil.positionToByteArray(lastSequenceId));
                } else {
                    lastRecordedFlushedSequenceId = ZKSplitLog.parseLastFlushedSequenceIdFrom(data);
                    if (lastRecordedFlushedSequenceId < lastSequenceId) {
                        // update last flushed sequence id in the region level
                        ZKUtil.setData(this.watcher, nodePath, ZKUtil.positionToByteArray(lastSequenceId));
                    }
                }
                // go one level deeper with server name
                nodePath = ZKUtil.joinZNode(nodePath, serverName.getServerName());
                if (lastSequenceId <= lastRecordedFlushedSequenceId) {
                    // the newly assigned RS failed even before any flush to the region
                    lastSequenceId = lastRecordedFlushedSequenceId;
                }
                ZKUtil.createSetData(this.watcher, nodePath,
                        ZKUtil.regionSequenceIdsToByteArray(lastSequenceId, null));
                if (LOG.isDebugEnabled()) {
                    LOG.debug(
                            "Marked " + regionEncodeName + " recovering from " + serverName + ": " + nodePath);
                }
                // break retry loop
                break;
            } catch (KeeperException e) {
                // ignore ZooKeeper exceptions inside retry loop
                if (retries <= 1) {
                    throw new IOException(e);
                }
                // wait a little bit for retry
                try {
                    Thread.sleep(20);
                } catch (InterruptedException e1) {
                    throw new InterruptedIOException();
                }
            } catch (InterruptedException e) {
                throw new InterruptedIOException();
            }
        } while ((--retries) > 0);
    }
}

From source file:org.apache.hadoop.hbase.regionserver.StoreScanner.java

/**
 * Seek storefiles in parallel to optimize IO latency as much as possible
 * @param scanners the list of {@link KeyValueScanner}s to be read from
 * @param kv the KeyValue on which the operation is being requested
 * @throws IOException
 */
private void parallelSeek(final List<? extends KeyValueScanner> scanners, final Cell kv) throws IOException {
    if (scanners.isEmpty())
        return;
    int storeFileScannerCount = scanners.size();
    CountDownLatch latch = new CountDownLatch(storeFileScannerCount);
    List<ParallelSeekHandler> handlers = new ArrayList<ParallelSeekHandler>(storeFileScannerCount);
    for (KeyValueScanner scanner : scanners) {
        if (scanner instanceof StoreFileScanner) {
            ParallelSeekHandler seekHandler = new ParallelSeekHandler(scanner, kv, this.readPt, latch);
            executor.submit(seekHandler);
            handlers.add(seekHandler);
        } else {
            scanner.seek(kv);
            latch.countDown();
        }
    }

    try {
        latch.await();
    } catch (InterruptedException ie) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(ie);
    }

    for (ParallelSeekHandler handler : handlers) {
        if (handler.getErr() != null) {
            throw new IOException(handler.getErr());
        }
    }
}