Example usage for the java.io.InterruptedIOException constructor InterruptedIOException()

Introduction

On this page you can find example usage for the java.io.InterruptedIOException no-argument constructor, InterruptedIOException().

Prototype

public InterruptedIOException() 

Document

Constructs an InterruptedIOException with null as its error detail message.
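
A quick sketch of the pattern shared by most of the examples below: a blocking call throws InterruptedException, and the code wraps it in an InterruptedIOException so that callers declaring only IOException still see the interruption. The waitFor helper and worker thread here are hypothetical and not taken from any example on this page.

import java.io.IOException;
import java.io.InterruptedIOException;

public class InterruptedIOExceptionSketch {

    // Hypothetical helper: waits for a worker thread doing I/O-bound work.
    static void waitFor(Thread worker) throws IOException {
        try {
            worker.join();
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers can still observe it.
            Thread.currentThread().interrupt();
            // The no-arg constructor leaves the detail message null,
            // so attach the original InterruptedException as the cause.
            InterruptedIOException iioe = new InterruptedIOException();
            iioe.initCause(e);
            throw iioe;
        }
    }
}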

Usage

From source file:org.apache.hadoop.hbase.util.LoadTestTool.java

/**
 * When NUM_TABLES is specified, the function starts multiple worker threads
 * which individually start a LoadTestTool instance to load a table. Each
 * table name is in the format <tn>_<index>. For example, with "-tn test -num_tables 2",
 * the table names will be "test_1" and "test_2".
 *
 * @throws IOException
 */
private int parallelLoadTables() throws IOException {
    // create new command args
    String tableName = cmd.getOptionValue(OPT_TABLE_NAME, DEFAULT_TABLE_NAME);
    String[] newArgs = null;
    if (!cmd.hasOption(LoadTestTool.OPT_TABLE_NAME)) {
        newArgs = new String[cmdLineArgs.length + 2];
        newArgs[0] = "-" + LoadTestTool.OPT_TABLE_NAME;
        newArgs[1] = LoadTestTool.DEFAULT_TABLE_NAME;
        for (int i = 0; i < cmdLineArgs.length; i++) {
            newArgs[i + 2] = cmdLineArgs[i];
        }
    } else {
        newArgs = cmdLineArgs;
    }

    int tableNameValueIndex = -1;
    for (int j = 0; j < newArgs.length; j++) {
        if (newArgs[j].endsWith(OPT_TABLE_NAME)) {
            tableNameValueIndex = j + 1;
        } else if (newArgs[j].endsWith(NUM_TABLES)) {
            // change NUM_TABLES to 1 so that each worker loads one table
            newArgs[j + 1] = "1";
        }
    }

    // starting to load multiple tables
    List<WorkerThread> workers = new ArrayList<WorkerThread>();
    for (int i = 0; i < numTables; i++) {
        String[] workerArgs = newArgs.clone();
        workerArgs[tableNameValueIndex] = tableName + "_" + (i + 1);
        WorkerThread worker = new WorkerThread(i, workerArgs);
        workers.add(worker);
        LOG.info(worker + " starting");
        worker.start();
    }

    // wait for all workers to finish
    LOG.info("Waiting for worker threads to finish");
    for (WorkerThread t : workers) {
        try {
            t.join();
        } catch (InterruptedException ie) {
            IOException iie = new InterruptedIOException();
            iie.initCause(ie);
            throw iie;
        }
        checkForErrors();
    }

    return EXIT_SUCCESS;
}

From source file:org.apache.hadoop.hbase.coordination.ZKSplitLogManagerCoordination.java

/**
 * ZooKeeper implementation of
 * {@link SplitLogManagerCoordination#removeStaleRecoveringRegions(Set)}
 */
@Override
public void removeStaleRecoveringRegions(final Set<String> knownFailedServers)
        throws IOException, InterruptedIOException {

    try {
        List<String> tasks = ZKUtil.listChildrenNoWatch(watcher, watcher.splitLogZNode);
        if (tasks != null) {
            int listSize = tasks.size();
            for (int i = 0; i < listSize; i++) {
                String t = tasks.get(i);
                byte[] data;
                try {
                    data = ZKUtil.getData(this.watcher, ZKUtil.joinZNode(watcher.splitLogZNode, t));
                } catch (InterruptedException e) {
                    throw new InterruptedIOException();
                }
                if (data != null) {
                    SplitLogTask slt = null;
                    try {
                        slt = SplitLogTask.parseFrom(data);
                    } catch (DeserializationException e) {
                        LOG.warn("Failed parse data for znode " + t, e);
                    }
                    if (slt != null && slt.isDone()) {
                        continue;
                    }
                }
                // decode the file name
                t = ZKSplitLog.getFileName(t);
                ServerName serverName = DefaultWALProvider.getServerNameFromWALDirectoryName(new Path(t));
                if (serverName != null) {
                    knownFailedServers.add(serverName.getServerName());
                } else {
                    LOG.warn("Found invalid WAL log file name:" + t);
                }
            }
        }

        // remove recovering regions that don't have any RS associated with them
        List<String> regions = ZKUtil.listChildrenNoWatch(watcher, watcher.recoveringRegionsZNode);
        if (regions != null) {
            int listSize = regions.size();
            for (int i = 0; i < listSize; i++) {
                String nodePath = ZKUtil.joinZNode(watcher.recoveringRegionsZNode, regions.get(i));
                List<String> regionFailedServers = ZKUtil.listChildrenNoWatch(watcher, nodePath);
                if (regionFailedServers == null || regionFailedServers.isEmpty()) {
                    ZKUtil.deleteNode(watcher, nodePath);
                    continue;
                }
                boolean needMoreRecovery = false;
                int tmpFailedServerSize = regionFailedServers.size();
                for (int j = 0; j < tmpFailedServerSize; j++) {
                    if (knownFailedServers.contains(regionFailedServers.get(j))) {
                        needMoreRecovery = true;
                        break;
                    }
                }
                if (!needMoreRecovery) {
                    ZKUtil.deleteNodeRecursively(watcher, nodePath);
                }
            }
        }
    } catch (KeeperException e) {
        throw new IOException(e);
    }
}

From source file:org.apache.hadoop.hbase.client.crosssite.CrossSiteHTable.java

/**
 * {@inheritDoc}
 */
@Override
public void put(List<Put> puts) throws InterruptedIOException, RetriesExhaustedWithDetailsException {
    Map<String, List<Put>> tableMap = new HashMap<String, List<Put>>();
    ClusterLocator clusterLocator = cachedZKInfo.clusterLocator;
    for (Put put : puts) {
        validatePut(put);
        String clusterName = null;
        try {
            clusterName = clusterLocator.getClusterName(put.getRow());
        } catch (IOException e) {
            LOG.error("Fail to get cluster name", e);
        }
        List<Put> ps = tableMap.get(clusterName);
        if (ps == null) {
            ps = new ArrayList<Put>();
            tableMap.put(clusterName, ps);
        }
        ps.add(put);
    }
    Map<String, Future<Void>> futures = new HashMap<String, Future<Void>>();
    for (final Entry<String, List<Put>> entry : tableMap.entrySet()) {
        futures.put(entry.getKey(), pool.submit(new Callable<Void>() {

            @Override
            public Void call() throws Exception {
                try {
                    getClusterHTable(entry.getKey()).put(entry.getValue());
                } catch (IOException e) {
                    // need to clear the cached HTable if the connection is refused
                    clearCachedTable(entry.getKey());
                    throw e;
                }
                return null;
            }
        }));
    }
    boolean hasError = false;
    for (Entry<String, Future<Void>> result : futures.entrySet()) {
        try {
            result.getValue().get();
        } catch (Exception e) {
            hasError = true;
            LOG.error(e);
        }
    }
    if (hasError) {
        throw new InterruptedIOException();
    }
}

From source file:org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure.java

/**
 * If hbase:meta is not assigned already, assign.
 * @throws IOException
 */
private void verifyAndAssignMetaWithRetries(final MasterProcedureEnv env) throws IOException {
    MasterServices services = env.getMasterServices();
    int iTimes = services.getConfiguration().getInt(KEY_RETRIES_ON_META, DEFAULT_RETRIES_ON_META);
    // Just reuse same time as we have for short wait on meta. Adding another config is overkill.
    long waitTime = services.getConfiguration().getLong(KEY_SHORT_WAIT_ON_META, DEFAULT_SHORT_WAIT_ON_META);
    int iFlag = 0;
    while (true) {
        try {
            verifyAndAssignMeta(env);
            break;
        } catch (KeeperException e) {
            services.abort("In server shutdown processing, assigning meta", e);
            throw new IOException("Aborting", e);
        } catch (Exception e) {
            if (iFlag >= iTimes) {
                services.abort("verifyAndAssignMeta failed after" + iTimes + " retries, aborting", e);
                throw new IOException("Aborting", e);
            }
            try {
                Thread.sleep(waitTime);
            } catch (InterruptedException e1) {
                LOG.warn("Interrupted when is the thread sleep", e1);
                Thread.currentThread().interrupt();
                throw (InterruptedIOException) new InterruptedIOException().initCause(e1);
            }
            iFlag++;
        }
    }
}

From source file:org.apache.hadoop.hbase.regionserver.HStore.java

@Override
public ImmutableCollection<StoreFile> close() throws IOException {
    this.lock.writeLock().lock();
    try {
        // Clear so metrics don't find them.
        ImmutableCollection<StoreFile> result = storeEngine.getStoreFileManager().clearFiles();

        if (!result.isEmpty()) {
            // initialize the thread pool for closing store files in parallel.
            ThreadPoolExecutor storeFileCloserThreadPool = this.region
                    .getStoreFileOpenAndCloseThreadPool("StoreFileCloserThread-" + this.getColumnFamilyName());

            // close each store file in parallel
            CompletionService<Void> completionService = new ExecutorCompletionService<Void>(
                    storeFileCloserThreadPool);
            for (final StoreFile f : result) {
                completionService.submit(new Callable<Void>() {
                    @Override
                    public Void call() throws IOException {
                        f.closeReader(true);
                        return null;
                    }
                });
            }

            IOException ioe = null;
            try {
                for (int i = 0; i < result.size(); i++) {
                    try {
                        Future<Void> future = completionService.take();
                        future.get();
                    } catch (InterruptedException e) {
                        if (ioe == null) {
                            ioe = new InterruptedIOException();
                            ioe.initCause(e);
                        }
                    } catch (ExecutionException e) {
                        if (ioe == null)
                            ioe = new IOException(e.getCause());
                    }
                }
            } finally {
                storeFileCloserThreadPool.shutdownNow();
            }
            if (ioe != null)
                throw ioe;
        }
        LOG.info("Closed " + this);
        return result;
    } finally {
        this.lock.writeLock().unlock();
    }
}

From source file:org.apache.hadoop.hbase.regionserver.IndexSplitTransaction.java

private void splitStoreFiles(final Map<byte[], List<StoreFile>> hstoreFilesToSplit) throws IOException {
    if (hstoreFilesToSplit == null) {
        // Could be null because close didn't succeed -- for now consider it fatal
        throw new IOException("Close returned empty list of StoreFiles");
    }
    // The following code sets up a thread pool executor with as many slots as
    // there are files to split. It then fires up everything, waits for
    // completion and finally checks for any exceptions.
    int nbFiles = hstoreFilesToSplit.size();
    if (nbFiles == 0) {
        // no file needs to be split.
        return;
    }
    ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
    builder.setNameFormat("StoreFileSplitter-%1$d");
    ThreadFactory factory = builder.build();
    ThreadPoolExecutor threadPool = (ThreadPoolExecutor) Executors.newFixedThreadPool(nbFiles, factory);
    List<Future<Void>> futures = new ArrayList<Future<Void>>(nbFiles);

    // Split each store file.
    for (Map.Entry<byte[], List<StoreFile>> entry : hstoreFilesToSplit.entrySet()) {
        for (StoreFile sf : entry.getValue()) {
            StoreFileSplitter sfs = new StoreFileSplitter(entry.getKey(), sf);
            futures.add(threadPool.submit(sfs));
        }
    }
    // Shutdown the pool
    threadPool.shutdown();

    // Wait for all the tasks to finish
    try {
        boolean stillRunning = !threadPool.awaitTermination(this.fileSplitTimeout, TimeUnit.MILLISECONDS);
        if (stillRunning) {
            threadPool.shutdownNow();
            // wait for the thread to shutdown completely.
            while (!threadPool.isTerminated()) {
                Thread.sleep(50);
            }
            throw new IOException(
                    "Took too long to split the" + " files and create the references, aborting split");
        }
    } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    }

    // Look for any exception
    for (Future<Void> future : futures) {
        try {
            future.get();
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        } catch (ExecutionException e) {
            throw new IOException(e);
        }
    }
}

From source file:org.apache.hadoop.hbase.util.FSUtils.java

/**
 * Writes a new unique identifier for this cluster to the "hbase.id" file
 * in the HBase root directory
 * @param fs the root directory FileSystem
 * @param rootdir the path to the HBase root directory
 * @param clusterId the unique identifier to store
 * @param wait how long (in milliseconds) to wait between retries
 * @throws IOException if writing to the FileSystem fails and no wait value is configured
 */
public static void setClusterId(FileSystem fs, Path rootdir, ClusterId clusterId, int wait) throws IOException {
    while (true) {
        try {
            Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
            FSDataOutputStream s = fs.create(filePath);
            try {
                s.write(clusterId.toByteArray());
            } finally {
                s.close();
            }
            if (LOG.isDebugEnabled()) {
                LOG.debug("Created cluster ID file at " + filePath.toString() + " with ID: " + clusterId);
            }
            return;
        } catch (IOException ioe) {
            if (wait > 0) {
                LOG.warn("Unable to create cluster ID file in " + rootdir.toString() + ", retrying in " + wait
                        + "msec: " + StringUtils.stringifyException(ioe));
                try {
                    Thread.sleep(wait);
                } catch (InterruptedException e) {
                    throw (InterruptedIOException) new InterruptedIOException().initCause(e);
                }
            } else {
                throw ioe;
            }
        }
    }
}

From source file:org.apache.hadoop.hbase.tool.LoadIncrementalHFiles.java

/**
 * @param table the table to load into
 * @param pool the ExecutorService
 * @param queue the queue for LoadQueueItem
 * @param startEndKeys start and end keys
 * @return A map that groups LQI by likely bulk load region targets and Set of missing hfiles.
 */
private Pair<Multimap<ByteBuffer, LoadQueueItem>, Set<String>> groupOrSplitPhase(final Table table,
        ExecutorService pool, Deque<LoadQueueItem> queue, final Pair<byte[][], byte[][]> startEndKeys)
        throws IOException {
    // <region start key, LQI> needs to be synchronized only within the scope of this
    // phase because of the puts that happen in futures.
    Multimap<ByteBuffer, LoadQueueItem> rgs = HashMultimap.create();
    final Multimap<ByteBuffer, LoadQueueItem> regionGroups = Multimaps.synchronizedMultimap(rgs);
    Set<String> missingHFiles = new HashSet<>();
    Pair<Multimap<ByteBuffer, LoadQueueItem>, Set<String>> pair = new Pair<>(regionGroups, missingHFiles);

    // drain LQIs and figure out bulk load groups
    Set<Future<Pair<List<LoadQueueItem>, String>>> splittingFutures = new HashSet<>();
    while (!queue.isEmpty()) {
        final LoadQueueItem item = queue.remove();

        final Callable<Pair<List<LoadQueueItem>, String>> call = new Callable<Pair<List<LoadQueueItem>, String>>() {
            @Override
            public Pair<List<LoadQueueItem>, String> call() throws Exception {
                Pair<List<LoadQueueItem>, String> splits = groupOrSplit(regionGroups, item, table,
                        startEndKeys);
                return splits;
            }
        };
        splittingFutures.add(pool.submit(call));
    }
    // get all the results. All grouping and splitting must finish before
    // we can attempt the atomic loads.
    for (Future<Pair<List<LoadQueueItem>, String>> lqis : splittingFutures) {
        try {
            Pair<List<LoadQueueItem>, String> splits = lqis.get();
            if (splits != null) {
                if (splits.getFirst() != null) {
                    queue.addAll(splits.getFirst());
                } else {
                    missingHFiles.add(splits.getSecond());
                }
            }
        } catch (ExecutionException e1) {
            Throwable t = e1.getCause();
            if (t instanceof IOException) {
                LOG.error("IOException during splitting", e1);
                throw (IOException) t; // would have been thrown if not parallelized,
            }
            LOG.error("Unexpected execution exception during splitting", e1);
            throw new IllegalStateException(t);
        } catch (InterruptedException e1) {
            LOG.error("Unexpected interrupted exception during splitting", e1);
            throw (InterruptedIOException) new InterruptedIOException().initCause(e1);
        }
    }
    return pair;
}

From source file:org.apache.hadoop.hbase.util.FSUtils.java

/**
 * If DFS, check safe mode and if so, wait until we clear it.
 * @param conf configuration
 * @param wait Sleep between retries
 * @throws IOException e
 */
public static void waitOnSafeMode(final Configuration conf, final long wait) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    if (!(fs instanceof DistributedFileSystem))
        return;
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    // Make sure dfs is not in safe mode
    while (isInSafeMode(dfs)) {
        LOG.info("Waiting for dfs to exit safe mode...");
        try {
            Thread.sleep(wait);
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        }
    }
}

From source file:org.apache.hadoop.hbase.regionserver.HStore.java

/**
 * Write out current snapshot.  Presumes {@link #snapshot()} has been called
 * previously.
 * @param logCacheFlushId flush sequence number
 * @param snapshot
 * @param status
 * @return The path name of the tmp file to which the store was flushed
 * @throws IOException
 */
protected List<Path> flushCache(final long logCacheFlushId, MemStoreSnapshot snapshot, MonitoredTask status)
        throws IOException {
    // If an exception happens flushing, we let it out without clearing
    // the memstore snapshot.  The old snapshot will be returned when we say
    // 'snapshot', the next time flush comes around.
    // Retry after catching an exception while flushing; otherwise the server
    // will abort itself.
    StoreFlusher flusher = storeEngine.getStoreFlusher();
    IOException lastException = null;
    for (int i = 0; i < flushRetriesNumber; i++) {
        try {
            List<Path> pathNames = flusher.flushSnapshot(snapshot, logCacheFlushId, status);
            Path lastPathName = null;
            try {
                for (Path pathName : pathNames) {
                    lastPathName = pathName;
                    validateStoreFile(pathName);
                }
                return pathNames;
            } catch (Exception e) {
                LOG.warn("Failed validating store file " + lastPathName + ", retrying num=" + i, e);
                if (e instanceof IOException) {
                    lastException = (IOException) e;
                } else {
                    lastException = new IOException(e);
                }
            }
        } catch (IOException e) {
            LOG.warn("Failed flushing store file, retrying num=" + i, e);
            lastException = e;
        }
        if (lastException != null && i < (flushRetriesNumber - 1)) {
            try {
                Thread.sleep(pauseTime);
            } catch (InterruptedException e) {
                IOException iie = new InterruptedIOException();
                iie.initCause(e);
                throw iie;
            }
        }
    }
    throw lastException;
}