Example usage for java.io InterruptedIOException InterruptedIOException

Introduction

On this page you can find example usage for the java.io InterruptedIOException no-argument constructor, InterruptedIOException().

Prototype

public InterruptedIOException() 

Document

Constructs an InterruptedIOException with null as its error detail message.
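
As the examples below show, the no-argument constructor is typically used to convert an InterruptedException caught during blocking work into an InterruptedIOException, attaching the original exception as the cause via initCause. A minimal sketch of that idiom (the class and method names here are illustrative, not taken from the examples below):

import java.io.IOException;
import java.io.InterruptedIOException;

public class InterruptedIOExceptionSketch {
    /**
     * Waits for a worker thread to finish, translating thread interruption
     * into an InterruptedIOException so callers only need to handle IOException.
     */
    static void waitFor(Thread worker) throws IOException {
        try {
            worker.join();
        } catch (InterruptedException e) {
            // restore the interrupt flag, then rethrow as an I/O exception,
            // keeping the original InterruptedException as the cause
            Thread.currentThread().interrupt();
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        }
    }
}

Because initCause returns Throwable, the result is cast back to InterruptedIOException before rethrowing, as most of the examples below do.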

Usage

From source file: org.apache.hadoop.hbase.client.HTable.java

/**
 * {@inheritDoc}
 */
@Override
public Result[] get(List<Get> gets) throws IOException {
    if (gets.size() == 1) {
        return new Result[] { get(gets.get(0)) };
    }
    try {
        Object[] r1 = batch((List) gets);

        // translate.
        Result[] results = new Result[r1.length];
        int i = 0;
        for (Object o : r1) {
            // batch ensures if there is a failure we get an exception instead
            results[i++] = (Result) o;
        }

        return results;
    } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    }
}

From source file: org.apache.hadoop.hbase.coordination.ZKSplitLogManagerCoordination.java

/**
 * Sets the recovery mode based on outstanding split log tasks from before, or on the current
 * configuration setting.
 * @param isForInitialization
 * @throws IOException
 */
@Override
public void setRecoveryMode(boolean isForInitialization) throws IOException {
    synchronized (this) {
        if (this.isDrainingDone) {
            // when there are no outstanding split log tasks after master startup, the recovery mode
            // is already up to date
            return;
        }
    }
    if (this.watcher == null) {
        // when watcher is null (testing code), recovery mode can only be LOG_SPLITTING
        synchronized (this) {
            this.isDrainingDone = true;
            this.recoveryMode = RecoveryMode.LOG_SPLITTING;
        }
        return;
    }
    boolean hasSplitLogTask = false;
    boolean hasRecoveringRegions = false;
    RecoveryMode previousRecoveryMode = RecoveryMode.UNKNOWN;
    RecoveryMode recoveryModeInConfig = (isDistributedLogReplay(conf)) ? RecoveryMode.LOG_REPLAY
            : RecoveryMode.LOG_SPLITTING;

    // Firstly check if there are outstanding recovering regions
    try {
        List<String> regions = ZKUtil.listChildrenNoWatch(watcher, watcher.recoveringRegionsZNode);
        if (regions != null && !regions.isEmpty()) {
            hasRecoveringRegions = true;
            previousRecoveryMode = RecoveryMode.LOG_REPLAY;
        }
        if (previousRecoveryMode == RecoveryMode.UNKNOWN) {
            // Secondly check if there are outstanding split log task
            List<String> tasks = listSplitLogTasks();
            if (!tasks.isEmpty()) {
                hasSplitLogTask = true;
                if (isForInitialization) {
                    // during initialization, try to get recovery mode from splitlogtask
                    int listSize = tasks.size();
                    for (int i = 0; i < listSize; i++) {
                        String task = tasks.get(i);
                        try {
                            byte[] data = ZKUtil.getData(this.watcher,
                                    ZKUtil.joinZNode(watcher.splitLogZNode, task));
                            if (data == null)
                                continue;
                            SplitLogTask slt = SplitLogTask.parseFrom(data);
                            previousRecoveryMode = slt.getMode();
                            if (previousRecoveryMode == RecoveryMode.UNKNOWN) {
                                // created by old code base where we don't set recovery mode in splitlogtask
                                // we can safely set to LOG_SPLITTING because we're in master initialization code
                                // before SSH is enabled & there is no outstanding recovering regions
                                previousRecoveryMode = RecoveryMode.LOG_SPLITTING;
                            }
                            break;
                        } catch (DeserializationException e) {
                            LOG.warn("Failed parse data for znode " + task, e);
                        } catch (InterruptedException e) {
                            throw new InterruptedIOException();
                        }
                    }
                }
            }
        }
    } catch (KeeperException e) {
        throw new IOException(e);
    }

    synchronized (this) {
        if (this.isDrainingDone) {
            return;
        }
        if (!hasSplitLogTask && !hasRecoveringRegions) {
            this.isDrainingDone = true;
            this.recoveryMode = recoveryModeInConfig;
            return;
        } else if (!isForInitialization) {
            // splitlogtask hasn't drained yet, keep existing recovery mode
            return;
        }

        if (previousRecoveryMode != RecoveryMode.UNKNOWN) {
            this.isDrainingDone = (previousRecoveryMode == recoveryModeInConfig);
            this.recoveryMode = previousRecoveryMode;
        } else {
            this.recoveryMode = recoveryModeInConfig;
        }
    }
}

From source file: org.apache.hadoop.hbase.client.HTable.java

/**
 * {@inheritDoc}
 */
@Override
public void delete(final List<Delete> deletes) throws IOException {
    Object[] results = new Object[deletes.size()];
    try {
        batch(deletes, results);
    } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    } finally {
        // mutate list so that it is empty for complete success, or contains only failed records
        // results are returned in the same order as the requests in list
        // walk the list backwards, so we can remove from list without impacting the indexes of earlier members
        for (int i = results.length - 1; i >= 0; i--) {
            // if result is not null, it succeeded
            if (results[i] instanceof Result) {
                deletes.remove(i);
            }
        }
    }
}

From source file: org.apache.hadoop.hbase.regionserver.HRegionFileSystem.java

/**
 * Creates a directory. Assumes the user has already checked for this directory existence.
 * @param dir
 * @return the result of fs.mkdirs(). In case underlying fs throws an IOException, it checks
 *         whether the directory exists or not, and returns true if it exists.
 * @throws IOException
 */
boolean createDir(Path dir) throws IOException {
    int i = 0;
    IOException lastIOE = null;
    do {
        try {
            return fs.mkdirs(dir);
        } catch (IOException ioe) {
            lastIOE = ioe;
            if (fs.exists(dir))
                return true; // directory is present
            try {
                sleepBeforeRetry("Create Directory", i + 1);
            } catch (InterruptedException e) {
                throw (InterruptedIOException) new InterruptedIOException().initCause(e);
            }
        }
    } while (++i <= hdfsClientRetriesNumber);
    throw new IOException("Exception in createDir", lastIOE);
}

From source file: org.apache.hadoop.hdfs.server.namenode.Standby.java

/**
 * Creates the image upload thread and waits for the upload to complete.
 */
private void uploadImage(long txid) throws IOException {
    final long start = AvatarNode.now();
    LOG.info("Standby: Checkpointing - Upload fsimage to remote namenode.");
    checkpointStatus("Image upload started");

    imageUploader = new ImageUploader(txid);
    imageUploader.start();

    // wait for the upload to complete   
    while (running && !imageUploader.done && AvatarNode.now() - start < MAX_CHECKPOINT_UPLOAD_TIMEOUT) {
        try {
            imageUploader.join(3000);
        } catch (InterruptedException ie) {
            LOG.error("Reveived interruption when uploading image for txid: " + txid);
            Thread.currentThread().interrupt();
            throw (IOException) new InterruptedIOException().initCause(ie);
        }
    }
    if (!running || !imageUploader.succeeded) {
        InjectionHandler.processEvent(InjectionEvent.STANDBY_UPLOAD_FAIL);
        throw new IOException(
                "Standby: Checkpointing - Image upload failed (time= " + (AvatarNode.now() - start) + " ms).",
                imageUploader.error);
    }
    imageUploader = null;
    LOG.info("Standby: Checkpointing - Upload fsimage to remote namenode DONE.");
    checkpointStatus("Image upload completed");
}

From source file: org.apache.hadoop.hbase.regionserver.HRegion.java

private long initializeRegionStores(final CancelableProgressable reporter, MonitoredTask status)
        throws IOException, UnsupportedEncodingException {
    // Load in all the HStores.

    long maxSeqId = -1;
    // initialized to -1 so that we pick up MemstoreTS from column families
    long maxMemstoreTS = -1;

    if (!htableDescriptor.getFamilies().isEmpty()) {
        // initialize the thread pool for opening stores in parallel.
        ThreadPoolExecutor storeOpenerThreadPool = getStoreOpenAndCloseThreadPool(
                "StoreOpener-" + this.getRegionInfo().getShortNameToLog());
        CompletionService<HStore> completionService = new ExecutorCompletionService<HStore>(
                storeOpenerThreadPool);

        // initialize each store in parallel
        for (final HColumnDescriptor family : htableDescriptor.getFamilies()) {
            status.setStatus("Instantiating store for column family " + family);
            completionService.submit(new Callable<HStore>() {
                @Override
                public HStore call() throws IOException {
                    return instantiateHStore(family);
                }
            });
        }
        boolean allStoresOpened = false;
        try {
            for (int i = 0; i < htableDescriptor.getFamilies().size(); i++) {
                Future<HStore> future = completionService.take();
                HStore store = future.get();
                this.stores.put(store.getColumnFamilyName().getBytes(), store);

                long storeMaxSequenceId = store.getMaxSequenceId();
                maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), storeMaxSequenceId);
                if (maxSeqId == -1 || storeMaxSequenceId > maxSeqId) {
                    maxSeqId = storeMaxSequenceId;
                }
                long maxStoreMemstoreTS = store.getMaxMemstoreTS();
                if (maxStoreMemstoreTS > maxMemstoreTS) {
                    maxMemstoreTS = maxStoreMemstoreTS;
                }
            }
            allStoresOpened = true;
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        } catch (ExecutionException e) {
            throw new IOException(e.getCause());
        } finally {
            storeOpenerThreadPool.shutdownNow();
            if (!allStoresOpened) {
                // something went wrong, close all opened stores
                LOG.error("Could not initialize all stores for the region=" + this);
                for (Store store : this.stores.values()) {
                    try {
                        store.close();
                    } catch (IOException e) {
                        LOG.warn(e.getMessage());
                    }
                }
            }
        }
    }
    mvcc.initialize(maxMemstoreTS + 1);
    // Recover any edits if available.
    maxSeqId = Math.max(maxSeqId,
            replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
    return maxSeqId;
}

From source file: org.apache.hadoop.hbase.regionserver.HRegionFileSystem.java

/**
 * Renames a directory. Assumes the user has already checked for this directory existence.
 * @param srcpath
 * @param dstPath
 * @return true if rename is successful.
 * @throws IOException
 */
boolean rename(Path srcpath, Path dstPath) throws IOException {
    IOException lastIOE = null;
    int i = 0;
    do {
        try {
            return fs.rename(srcpath, dstPath);
        } catch (IOException ioe) {
            lastIOE = ioe;
            if (!fs.exists(srcpath) && fs.exists(dstPath))
                return true; // successful move
            // dir is not there, retry after some time.
            try {
                sleepBeforeRetry("Rename Directory", i + 1);
            } catch (InterruptedException e) {
                throw (InterruptedIOException) new InterruptedIOException().initCause(e);
            }
        }
    } while (++i <= hdfsClientRetriesNumber);

    throw new IOException("Exception in rename", lastIOE);
}

From source file: org.apache.hadoop.hbase.regionserver.HRegionFileSystem.java

/**
 * Deletes a directory. Assumes the user has already checked for this directory existence.
 * @param dir
 * @return true if the directory is deleted.
 * @throws IOException
 */
boolean deleteDir(Path dir) throws IOException {
    IOException lastIOE = null;
    int i = 0;
    do {
        try {
            return fs.delete(dir, true);
        } catch (IOException ioe) {
            lastIOE = ioe;
            if (!fs.exists(dir))
                return true;
            // dir is there, retry deleting after some time.
            try {
                sleepBeforeRetry("Delete Directory", i + 1);
            } catch (InterruptedException e) {
                throw (InterruptedIOException) new InterruptedIOException().initCause(e);
            }
        }
    } while (++i <= hdfsClientRetriesNumber);

    throw new IOException("Exception in DeleteDir", lastIOE);
}

From source file: org.apache.hadoop.hbase.regionserver.HRegionFileSystem.java

/**
 * Creates a directory for a filesystem and configuration object. Assumes the user has already
 * checked for this directory existence.
 * @param fs
 * @param conf
 * @param dir
 * @return the result of fs.mkdirs(). In case underlying fs throws an IOException, it checks
 *         whether the directory exists or not, and returns true if it exists.
 * @throws IOException
 */
private static boolean createDirOnFileSystem(FileSystem fs, Configuration conf, Path dir) throws IOException {
    int i = 0;
    IOException lastIOE = null;
    int hdfsClientRetriesNumber = conf.getInt("hdfs.client.retries.number", DEFAULT_HDFS_CLIENT_RETRIES_NUMBER);
    int baseSleepBeforeRetries = conf.getInt("hdfs.client.sleep.before.retries",
            DEFAULT_BASE_SLEEP_BEFORE_RETRIES);
    do {
        try {
            return fs.mkdirs(dir);
        } catch (IOException ioe) {
            lastIOE = ioe;
            if (fs.exists(dir))
                return true; // directory is present
            try {
                sleepBeforeRetry("Create Directory", i + 1, baseSleepBeforeRetries, hdfsClientRetriesNumber);
            } catch (InterruptedException e) {
                throw (InterruptedIOException) new InterruptedIOException().initCause(e);
            }
        }
    } while (++i <= hdfsClientRetriesNumber);

    throw new IOException("Exception in createDir", lastIOE);
}

From source file: org.apache.hadoop.hdfs.server.namenode.Standby.java

/**
 * Checks the status of image validation during checkpoint.
 * @throws IOException
 */
private void checkImageValidation() throws IOException {
    try {
        imageValidator.join();
    } catch (InterruptedException ie) {
        throw (IOException) new InterruptedIOException().initCause(ie);
    }
    if (!imageValidator.succeeded) {
        throw new IOException("Image file validation failed", imageValidator.error);
    }
}