Example usage for java.io InterruptedIOException InterruptedIOException

Introduction

On this page you can find example usages of java.io.InterruptedIOException's no-argument constructor, InterruptedIOException().

Prototype

public InterruptedIOException() 

Document

Constructs an InterruptedIOException with null as its error detail message.
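
Because this no-argument form carries neither a detail message nor a cause, the examples below typically re-assert the calling thread's interrupt status and then attach the original InterruptedException via Throwable.initCause(Throwable) before rethrowing. The following minimal sketch shows that idiom in isolation; the class and method names are illustrative only and do not come from any of the projects listed below.

import java.io.InterruptedIOException;

public final class InterruptAwareWait {

    /**
     * Waits for a worker thread to finish. If the calling thread is interrupted,
     * re-assert the interrupt flag and convert the InterruptedException into an
     * InterruptedIOException, so that IO-oriented callers still see an IOException
     * subtype while the original cause is preserved via initCause.
     */
    static void awaitWorker(Thread worker, long timeoutMillis) throws InterruptedIOException {
        try {
            worker.join(timeoutMillis);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        }
    }
}

The cast is needed because initCause is declared to return Throwable. Several of the HBase and Hive examples below use exactly this shape; others simply throw the bare new InterruptedIOException() once an interrupt or a cancellation is observed.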

Usage

From source file: org.marketcetera.util.except.ExceptUtilsTest.java

@Test
public void wrap() {
    wrapHelper(new CloneNotSupportedException(), false);
    wrapHelper(new InterruptedException(), true);
    wrapHelper(new InterruptedIOException(), true);
    wrapHelper(new ClosedByInterruptException(), true);
    wrapHelper(new FileLockInterruptionException(), true);
    wrapHelper(new InterruptedNamingException(), true);
    wrapHelper(new I18NInterruptedException(), true);
    wrapHelper(new I18NInterruptedRuntimeException(), true);
}

From source file: org.apache.hadoop.hbase.master.handler.ServerShutdownHandler.java

@Override
public void process() throws IOException {
    boolean hasLogReplayWork = false;
    final ServerName serverName = this.serverName;
    try {

        // We don't want a worker thread in the MetaServerShutdownHandler
        // executor pool to block while waiting for hbase:meta to become available.
        // Otherwise, it could run into the following issue:
        // 1. The current MetaServerShutdownHandler instance for RS1 waits for the hbase:meta
        //    to come online.
        // 2. The newly assigned hbase:meta region server RS2 was shutdown right after
        //    it opens the hbase:meta region. So the MetaServerShutdownHandler
        //    instance for RS1 will still be blocked.
        // 3. The new instance of MetaServerShutdownHandler for RS2 is queued.
        // 4. The newly assigned hbase:meta region server RS3 was shutdown right after
        //    it opens the hbase:meta region. So the MetaServerShutdownHandler
        //    instance for RS1 and RS2 will still be blocked.
        // 5. The new instance of MetaServerShutdownHandler for RS3 is queued.
        // 6. Repeat until we run out of MetaServerShutdownHandler worker threads
        // The solution here is to resubmit a ServerShutdownHandler request to process
        // user regions on that server so that MetaServerShutdownHandler
        // executor pool is always available.
        //
        // If AssignmentManager hasn't finished rebuilding user regions,
        // we are not ready to assign dead regions either. So we re-queue up
        // the dead server for further processing too.
        AssignmentManager am = services.getAssignmentManager();
        if (isCarryingMeta() // hbase:meta
                || !am.isFailoverCleanupDone()) {
            this.services.getServerManager().processDeadServer(serverName, this.shouldSplitHlog);
            return;
        }

        // Wait on meta to come online; we need it to progress.
        // TODO: Best way to hold strictly here?  We should build this retry logic
        // into the MetaReader operations themselves.
        // TODO: Is the reading of hbase:meta necessary when the Master has state of
        // cluster in its head?  It should be possible to do without reading hbase:meta
        // in all but one case. On split, the RS updates the hbase:meta
        // table and THEN informs the master of the split via zk nodes in
        // 'unassigned' dir.  Currently the RS puts ephemeral nodes into zk so if
        // the regionserver dies, these nodes do not stick around and this server
        // shutdown processing does fixup (see the fixupDaughters method below).
        // If we wanted to skip the hbase:meta scan, we'd have to change at least the
        // final SPLIT message to be permanent in zk so in here we'd know a SPLIT
        // completed (zk is updated after edits to hbase:meta have gone in).  See
        // {@link SplitTransaction}.  We'd also have to figure out another way of
        // doing the below hbase:meta daughters fixup.
        NavigableMap<HRegionInfo, Result> hris = null;
        while (!this.server.isStopped()) {
            try {
                this.server.getCatalogTracker().waitForMeta();
                // Skip getting user regions if the server is stopped.
                if (!this.server.isStopped()) {
                    hris = MetaReader.getServerUserRegions(this.server.getCatalogTracker(), this.serverName);
                }
                break;
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw (InterruptedIOException) new InterruptedIOException().initCause(e);
            } catch (IOException ioe) {
                LOG.info("Received exception accessing hbase:meta during server shutdown of " + serverName
                        + ", retrying hbase:meta read", ioe);
            }
        }
        if (this.server.isStopped()) {
            throw new IOException("Server is stopped");
        }

        try {
            if (this.shouldSplitHlog) {
                LOG.info("Splitting logs for " + serverName + " before assignment.");
                if (this.distributedLogReplay) {
                    LOG.info("Mark regions in recovery before assignment.");
                    Set<ServerName> serverNames = new HashSet<ServerName>();
                    serverNames.add(serverName);
                    this.services.getMasterFileSystem().prepareLogReplay(serverNames);
                } else {
                    this.services.getMasterFileSystem().splitLog(serverName);
                }
                am.getRegionStates().logSplit(serverName);
            } else {
                LOG.info("Skipping log splitting for " + serverName);
            }
        } catch (IOException ioe) {
            resubmit(serverName, ioe);
        }

        // Clean out anything in regions in transition.  Being conservative and
        // doing after log splitting.  Could do some states before -- OPENING?
        // OFFLINE? -- and then others after like CLOSING that depend on log
        // splitting.
        List<HRegionInfo> regionsInTransition = am.processServerShutdown(serverName);
        LOG.info("Reassigning " + ((hris == null) ? 0 : hris.size()) + " region(s) that "
                + (serverName == null ? "null" : serverName) + " was carrying (and "
                + regionsInTransition.size() + " region(s) that were opening on this server)");

        List<HRegionInfo> toAssignRegions = new ArrayList<HRegionInfo>();
        toAssignRegions.addAll(regionsInTransition);

        // Iterate regions that were on this server and assign them
        if (hris != null) {
            RegionStates regionStates = am.getRegionStates();
            for (Map.Entry<HRegionInfo, Result> e : hris.entrySet()) {
                HRegionInfo hri = e.getKey();
                if (regionsInTransition.contains(hri)) {
                    continue;
                }
                String encodedName = hri.getEncodedName();
                Lock lock = am.acquireRegionLock(encodedName);
                try {
                    RegionState rit = regionStates.getRegionTransitionState(hri);
                    if (processDeadRegion(hri, e.getValue(), am, server.getCatalogTracker())) {
                        ServerName addressFromAM = regionStates.getRegionServerOfRegion(hri);
                        if (addressFromAM != null && !addressFromAM.equals(this.serverName)) {
                            // If this region is in transition on the dead server, it must be
                            // opening or pending_open, which should have been covered by AM#processServerShutdown
                            LOG.info("Skip assigning region " + hri.getRegionNameAsString()
                                    + " because it has been opened in " + addressFromAM.getServerName());
                            continue;
                        }
                        if (rit != null) {
                            if (rit.getServerName() != null && !rit.isOnServer(serverName)) {
                                // Skip regions that are in transition on other server
                                LOG.info("Skip assigning region in transition on other server" + rit);
                                continue;
                            }
                            try {
                                //clean zk node
                                LOG.info("Reassigning region with rs = " + rit
                                        + " and deleting zk node if exists");
                                ZKAssign.deleteNodeFailSilent(services.getZooKeeper(), hri);
                                regionStates.updateRegionState(hri, State.OFFLINE);
                            } catch (KeeperException ke) {
                                this.server.abort("Unexpected ZK exception deleting unassigned node " + hri,
                                        ke);
                                return;
                            }
                        } else if (regionStates.isRegionInState(hri, State.SPLITTING_NEW, State.MERGING_NEW)) {
                            regionStates.regionOffline(hri);
                        }
                        toAssignRegions.add(hri);
                    } else if (rit != null) {
                        if (rit.isPendingCloseOrClosing() && am.getTableStateManager().isTableState(
                                hri.getTable(), ZooKeeperProtos.Table.State.DISABLED,
                                ZooKeeperProtos.Table.State.DISABLING)) {
                            // If the table was partially disabled and the RS went down, we should clear the RIT
                            // and remove the node for the region.
                            // The rit that we use may be stale in case the table was in DISABLING state
                            // but even though we did assign, we will not be clearing the znode in CLOSING state.
                            // Doing this will have no harm. See HBASE-5927
                            regionStates.updateRegionState(hri, State.OFFLINE);
                            am.deleteClosingOrClosedNode(hri, rit.getServerName());
                            am.offlineDisabledRegion(hri);
                        } else {
                            LOG.warn("THIS SHOULD NOT HAPPEN: unexpected region in transition " + rit
                                    + " not to be assigned by SSH of server " + serverName);
                        }
                    }
                } finally {
                    lock.unlock();
                }
            }
        }

        try {
            am.assign(toAssignRegions);
        } catch (InterruptedException ie) {
            LOG.error("Caught " + ie + " during round-robin assignment");
            throw (InterruptedIOException) new InterruptedIOException().initCause(ie);
        }

        if (this.shouldSplitHlog && this.distributedLogReplay) {
            // wait for region assignment to complete
            for (HRegionInfo hri : toAssignRegions) {
                try {
                    if (!am.waitOnRegionToClearRegionsInTransition(hri, regionAssignmentWaitTimeout)) {
                        // The wait here is to avoid log replay hitting the current dead server and incurring
                        // an RPC timeout when replay happens before region assignment completes.
                        LOG.warn("Region " + hri.getEncodedName() + " didn't complete assignment in time");
                    }
                } catch (InterruptedException ie) {
                    throw new InterruptedIOException(
                            "Caught " + ie + " during waitOnRegionToClearRegionsInTransition");
                }
            }
            // submit logReplay work
            this.services.getExecutorService().submit(
                    new LogReplayHandler(this.server, this.services, this.deadServers, this.serverName));
            hasLogReplayWork = true;
        }
    } finally {
        this.deadServers.finish(serverName);
    }

    if (!hasLogReplayWork) {
        LOG.info("Finished processing of shutdown of " + serverName);
    }
}

From source file: com.asakusafw.testdriver.DirectIoUtil.java

static <T> DataModelSinkFactory dump(Configuration configuration, DataModelDefinition<T> definition,
        Class<? extends DataFormat<?>> formatClass, File destination) throws IOException {
    DataFormat<? super T> format = newDataFormat(configuration, formatClass);
    checkDataType(definition, format);
    org.apache.hadoop.fs.Path path = new org.apache.hadoop.fs.Path(destination.toURI());
    HadoopFileFormat<? super T> hFormat = HadoopDataSourceUtil.toHadoopFileFormat(configuration, format);
    return new DataModelSinkFactory() {
        @Override
        public <S> DataModelSink createSink(DataModelDefinition<S> def, TestContext context)
                throws IOException {
            try {
                return new DirectOutputSink<>(definition, hFormat, path);
            } catch (InterruptedException e) {
                throw (IOException) new InterruptedIOException().initCause(e);
            }
        }
    };
}

From source file: org.apache.hadoop.hbase.client.RpcRetryingCallerWithReadReplicas.java

/**
 * <p>
 * Algo:
 * - we put the query into the execution pool.
 * - after x ms, if we don't have a result, we add the queries for the secondary replicas
 * - we take the first answer
 * - when done, we cancel what's left. Cancelling means:
 * - removing from the pool if the actual call was not started
 * - interrupting the call if it has started
 * Client side, we need to take into account
 * - a call is not executed immediately after being put into the pool
 * - a call is a thread. Let's not multiply the number of thread by the number of replicas.
 * Server side, if we can cancel when it's still in the handler pool, it's much better, as a call
 * can take some i/o.
 * </p>
 * Globally, the number of retries, timeout and so on still applies, but it's per replica,
 * not global. We continue until all retries are done, or all timeouts are exceeded.
 */
public synchronized Result call()
        throws DoNotRetryIOException, InterruptedIOException, RetriesExhaustedException {
    boolean isTargetReplicaSpecified = (get.getReplicaId() >= 0);

    RegionLocations rl = getRegionLocations(true,
            (isTargetReplicaSpecified ? get.getReplicaId() : RegionReplicaUtil.DEFAULT_REPLICA_ID), cConnection,
            tableName, get.getRow());
    ResultBoundedCompletionService<Result> cs = new ResultBoundedCompletionService<Result>(
            this.rpcRetryingCallerFactory, pool, rl.size());

    if (isTargetReplicaSpecified) {
        addCallsForReplica(cs, rl, get.getReplicaId(), get.getReplicaId());
    } else {
        addCallsForReplica(cs, rl, 0, 0);
        try {
            // wait for the timeout to see whether the primary responds back
            Future<Result> f = cs.poll(timeBeforeReplicas, TimeUnit.MICROSECONDS); // Yes, microseconds
            if (f != null) {
                return f.get(); //great we got a response
            }
        } catch (ExecutionException e) {
            throwEnrichedException(e, retries);
        } catch (CancellationException e) {
            throw new InterruptedIOException();
        } catch (InterruptedException e) {
            throw new InterruptedIOException();
        }

        // submit calls for all of the secondaries at once
        addCallsForReplica(cs, rl, 1, rl.size() - 1);
    }

    try {
        try {
            Future<Result> f = cs.take();
            return f.get();
        } catch (ExecutionException e) {
            throwEnrichedException(e, retries);
        }
    } catch (CancellationException e) {
        throw new InterruptedIOException();
    } catch (InterruptedException e) {
        throw new InterruptedIOException();
    } finally {
        // We get there because we were interrupted or because one or more of the
        // calls succeeded or failed. In all case, we stop all our tasks.
        cs.cancelAll();
    }

    return null; // unreachable
}
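
The Javadoc above describes a "first answer wins" race across replicas driven by a completion service. The sketch below is not taken from the HBase source; it is a minimal, self-contained illustration (with hypothetical class and method names) of how interruption and cancellation during that wait can be translated into InterruptedIOException, here additionally attaching the cause, which the snippet above omits. It assumes the CompletionService already has all replica calls submitted to it.

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.concurrent.CancellationException;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

final class FirstAnswerWins {

    /**
     * Polls for up to primaryWaitMicros, then blocks on the next completed call;
     * interruption and cancellation are converted into InterruptedIOException,
     * while a failed call surfaces as a plain IOException wrapping its cause.
     */
    static <T> T firstResult(CompletionService<T> calls, long primaryWaitMicros) throws IOException {
        try {
            Future<T> f = calls.poll(primaryWaitMicros, TimeUnit.MICROSECONDS);
            if (f == null) {
                f = calls.take(); // block until any submitted call completes
            }
            return f.get();
        } catch (ExecutionException e) {
            throw new IOException(e.getCause());
        } catch (CancellationException | InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        }
    }
}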

From source file: com.limegroup.gnutella.archive.ArchiveContribution.java

/**
 * @param remoteFileName
 * @param input
 *        The input stream (not necessarily buffered).
 *        This stream will be closed by this method.
 */
private void uploadFile(String remoteFileName, InputStream input, FTPClient ftp)
        throws InterruptedIOException, IOException {
    fileStarted(remoteFileName);
    final InputStream fileStream = new BufferedInputStream(new UploadMonitorInputStream(input, this));

    try {
        if (isCancelled()) {
            throw new InterruptedIOException();
        }

        ftp.storeFile(remoteFileName, fileStream);
    } finally {
        fileStream.close();
    }

    if (isCancelled()) {
        throw new InterruptedIOException();
    }
    fileCompleted();
}

From source file: com.github.dryangkun.hbase.tidx.hive.HiveHFileOutputFormat.java

@Override
public RecordWriter getHiveRecordWriter(final JobConf jc, final Path finalOutPath,
        Class<? extends Writable> valueClass, boolean isCompressed, Properties tableProperties,
        final Progressable progressable) throws IOException {

    // Read configuration for the target path, first from jobconf, then from table properties
    String hfilePath = getFamilyPath(jc, tableProperties);
    if (hfilePath == null) {
        throw new RuntimeException("Please set " + HFILE_FAMILY_PATH + " to target location for HFiles");
    }

    // Target path's last component is also the column family name.
    final Path columnFamilyPath = new Path(hfilePath);
    final String columnFamilyName = columnFamilyPath.getName();
    final byte[] columnFamilyNameBytes = Bytes.toBytes(columnFamilyName);
    final Job job = new Job(jc);
    setCompressOutput(job, isCompressed);
    setOutputPath(job, finalOutPath);

    // Create the HFile writer
    final org.apache.hadoop.mapreduce.TaskAttemptContext tac = ShimLoader.getHadoopShims()
            .newTaskAttemptContext(job.getConfiguration(), progressable);

    final Path outputdir = FileOutputFormat.getOutputPath(tac);
    final org.apache.hadoop.mapreduce.RecordWriter<ImmutableBytesWritable, KeyValue> fileWriter = getFileWriter(
            tac);

    // Individual columns are going to be pivoted to HBase cells,
    // and for each row, they need to be written out in order
    // of column name, so sort the column names now, creating a
    // mapping to their column position.  However, the first
    // column is interpreted as the row key.
    String columnList = tableProperties.getProperty("columns");
    String[] columnArray = columnList.split(",");
    final SortedMap<byte[], Integer> columnMap = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
    int i = 0;
    for (String columnName : columnArray) {
        if (i != 0) {
            columnMap.put(Bytes.toBytes(columnName), i);
        }
        ++i;
    }

    return new RecordWriter() {

        @Override
        public void close(boolean abort) throws IOException {
            try {
                fileWriter.close(null);
                if (abort) {
                    return;
                }
                // Move the hfiles file(s) from the task output directory to the
                // location specified by the user.
                FileSystem fs = outputdir.getFileSystem(jc);
                fs.mkdirs(columnFamilyPath);
                Path srcDir = outputdir;
                for (;;) {
                    FileStatus[] files = fs.listStatus(srcDir, FileUtils.STAGING_DIR_PATH_FILTER);
                    if ((files == null) || (files.length == 0)) {
                        throw new IOException("No family directories found in " + srcDir);
                    }
                    if (files.length != 1) {
                        throw new IOException("Multiple family directories found in " + srcDir);
                    }
                    srcDir = files[0].getPath();
                    if (srcDir.getName().equals(columnFamilyName)) {
                        break;
                    }
                }
                for (FileStatus regionFile : fs.listStatus(srcDir, FileUtils.STAGING_DIR_PATH_FILTER)) {
                    fs.rename(regionFile.getPath(), new Path(columnFamilyPath, regionFile.getPath().getName()));
                }
                // Hive actually wants a file as task output (not a directory), so
                // replace the empty directory with an empty file to keep it happy.
                fs.delete(outputdir, true);
                fs.createNewFile(outputdir);
            } catch (InterruptedException ex) {
                throw new IOException(ex);
            }
        }

        private void writeText(Text text) throws IOException {
            // Decompose the incoming text row into fields.
            String s = text.toString();
            String[] fields = s.split("\u0001");
            assert (fields.length <= (columnMap.size() + 1));
            // First field is the row key.
            byte[] rowKeyBytes = Bytes.toBytes(fields[0]);
            // Remaining fields are cells addressed by column name within row.
            for (Map.Entry<byte[], Integer> entry : columnMap.entrySet()) {
                byte[] columnNameBytes = entry.getKey();
                int iColumn = entry.getValue();
                String val;
                if (iColumn >= fields.length) {
                    // trailing blank field
                    val = "";
                } else {
                    val = fields[iColumn];
                    if ("\\N".equals(val)) {
                        // omit nulls
                        continue;
                    }
                }
                byte[] valBytes = Bytes.toBytes(val);
                KeyValue kv = new KeyValue(rowKeyBytes, columnFamilyNameBytes, columnNameBytes, valBytes);
                try {
                    fileWriter.write(null, kv);
                } catch (IOException e) {
                    LOG.error("Failed while writing row: " + s);
                    throw e;
                } catch (InterruptedException ex) {
                    throw new IOException(ex);
                }
            }
        }

        private void writePut(PutWritable put) throws IOException {
            ImmutableBytesWritable row = new ImmutableBytesWritable(put.getPut().getRow());
            SortedMap<byte[], List<Cell>> cells = put.getPut().getFamilyCellMap();
            for (Map.Entry<byte[], List<Cell>> entry : cells.entrySet()) {
                Collections.sort(entry.getValue(), new CellComparator());
                for (Cell c : entry.getValue()) {
                    try {
                        fileWriter.write(row, KeyValueUtil.copyToNewKeyValue(c));
                    } catch (InterruptedException e) {
                        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
                    }
                }
            }
        }

        @Override
        public void write(Writable w) throws IOException {
            if (w instanceof Text) {
                writeText((Text) w);
            } else if (w instanceof PutWritable) {
                writePut((PutWritable) w);
            } else {
                throw new IOException("Unexpected writable " + w);
            }
        }
    };
}

From source file: org.apache.hadoop.hbase.replication.regionserver.ReplicationSink.java

/**
 * Do the changes and handle the pool
 * @param tableName table to insert into
 * @param allRows list of actions
 * @throws IOException
 */
protected void batch(TableName tableName, Collection<List<Row>> allRows) throws IOException {
    if (allRows.isEmpty()) {
        return;
    }
    HTableInterface table = null;
    try {
        table = this.sharedHtableCon.getTable(tableName);
        for (List<Row> rows : allRows) {
            table.batch(rows);
        }
    } catch (InterruptedException ix) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(ix);
    } finally {
        if (table != null) {
            table.close();
        }
    }
}

From source file: org.apache.hadoop.hive.hbase.HiveHFileOutputFormat.java

@Override
public RecordWriter getHiveRecordWriter(final JobConf jc, final Path finalOutPath,
        Class<? extends Writable> valueClass, boolean isCompressed, Properties tableProperties,
        final Progressable progressable) throws IOException {

    // Read configuration for the target path, first from jobconf, then from table properties
    String hfilePath = getFamilyPath(jc, tableProperties);
    if (hfilePath == null) {
        throw new RuntimeException("Please set " + HFILE_FAMILY_PATH + " to target location for HFiles");
    }

    // Target path's last component is also the column family name.
    final Path columnFamilyPath = new Path(hfilePath);
    final String columnFamilyName = columnFamilyPath.getName();
    final byte[] columnFamilyNameBytes = Bytes.toBytes(columnFamilyName);
    final Job job = new Job(jc);
    setCompressOutput(job, isCompressed);
    setOutputPath(job, finalOutPath);

    // Create the HFile writer
    final org.apache.hadoop.mapreduce.TaskAttemptContext tac = ShimLoader.getHadoopShims()
            .newTaskAttemptContext(job.getConfiguration(), progressable);

    final Path outputdir = FileOutputFormat.getOutputPath(tac);
    final Path taskAttemptOutputdir = new FileOutputCommitter(outputdir, tac).getWorkPath();
    final org.apache.hadoop.mapreduce.RecordWriter<ImmutableBytesWritable, KeyValue> fileWriter = getFileWriter(
            tac);

    // Individual columns are going to be pivoted to HBase cells,
    // and for each row, they need to be written out in order
    // of column name, so sort the column names now, creating a
    // mapping to their column position.  However, the first
    // column is interpreted as the row key.
    String columnList = tableProperties.getProperty("columns");
    String[] columnArray = columnList.split(",");
    final SortedMap<byte[], Integer> columnMap = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
    int i = 0;
    for (String columnName : columnArray) {
        if (i != 0) {
            columnMap.put(Bytes.toBytes(columnName), i);
        }
        ++i;
    }

    return new RecordWriter() {

        @Override
        public void close(boolean abort) throws IOException {
            try {
                fileWriter.close(null);
                if (abort) {
                    return;
                }
                // Move the hfiles file(s) from the task output directory to the
                // location specified by the user.
                FileSystem fs = outputdir.getFileSystem(jc);
                fs.mkdirs(columnFamilyPath);
                Path srcDir = taskAttemptOutputdir;
                for (;;) {
                    FileStatus[] files = fs.listStatus(srcDir, FileUtils.STAGING_DIR_PATH_FILTER);
                    if ((files == null) || (files.length == 0)) {
                        throw new IOException("No family directories found in " + srcDir);
                    }
                    if (files.length != 1) {
                        throw new IOException("Multiple family directories found in " + srcDir);
                    }
                    srcDir = files[0].getPath();
                    if (srcDir.getName().equals(columnFamilyName)) {
                        break;
                    }
                    if (files[0].isFile()) {
                        throw new IOException("No family directories found in " + taskAttemptOutputdir + ". "
                                + "The last component in hfile path should match column family name "
                                + columnFamilyName);
                    }
                }
                for (FileStatus regionFile : fs.listStatus(srcDir, FileUtils.STAGING_DIR_PATH_FILTER)) {
                    fs.rename(regionFile.getPath(), new Path(columnFamilyPath, regionFile.getPath().getName()));
                }
                // Hive actually wants a file as task output (not a directory), so
                // replace the empty directory with an empty file to keep it happy.
                fs.delete(taskAttemptOutputdir, true);
                fs.createNewFile(taskAttemptOutputdir);
            } catch (InterruptedException ex) {
                throw new IOException(ex);
            }
        }

        private void writeText(Text text) throws IOException {
            // Decompose the incoming text row into fields.
            String s = text.toString();
            String[] fields = s.split("\u0001");
            assert (fields.length <= (columnMap.size() + 1));
            // First field is the row key.
            byte[] rowKeyBytes = Bytes.toBytes(fields[0]);
            // Remaining fields are cells addressed by column name within row.
            for (Map.Entry<byte[], Integer> entry : columnMap.entrySet()) {
                byte[] columnNameBytes = entry.getKey();
                int iColumn = entry.getValue();
                String val;
                if (iColumn >= fields.length) {
                    // trailing blank field
                    val = "";
                } else {
                    val = fields[iColumn];
                    if ("\\N".equals(val)) {
                        // omit nulls
                        continue;
                    }
                }
                byte[] valBytes = Bytes.toBytes(val);
                KeyValue kv = new KeyValue(rowKeyBytes, columnFamilyNameBytes, columnNameBytes, valBytes);
                try {
                    fileWriter.write(null, kv);
                } catch (IOException e) {
                    LOG.error("Failed while writing row: " + s);
                    throw e;
                } catch (InterruptedException ex) {
                    throw new IOException(ex);
                }
            }
        }

        private void writePut(PutWritable put) throws IOException {
            ImmutableBytesWritable row = new ImmutableBytesWritable(put.getPut().getRow());
            SortedMap<byte[], List<Cell>> cells = put.getPut().getFamilyCellMap();
            for (Map.Entry<byte[], List<Cell>> entry : cells.entrySet()) {
                Collections.sort(entry.getValue(), new CellComparator());
                for (Cell c : entry.getValue()) {
                    try {
                        fileWriter.write(row, KeyValueUtil.copyToNewKeyValue(c));
                    } catch (InterruptedException e) {
                        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
                    }
                }
            }
        }

        @Override
        public void write(Writable w) throws IOException {
            if (w instanceof Text) {
                writeText((Text) w);
            } else if (w instanceof PutWritable) {
                writePut((PutWritable) w);
            } else {
                throw new IOException("Unexpected writable " + w);
            }
        }
    };
}

From source file: org.sonar.plugins.javascript.JavaScriptSquidSensorTest.java

@Test
public void cancelled_analysis_causing_recognition_exception() throws Exception {
    JavaScriptCheck check = new ExceptionRaisingCheck(
            new RecognitionException(42, "message", new InterruptedIOException()));
    analyseFileWithException(check, inputFile("cpd/Person.js"), "Analysis cancelled");
}

From source file: org.apache.hadoop.hbase.zookeeper.ZKTableStateManager.java

/**
 * Gets the set of all tables that are in the specified states in ZooKeeper.
 * @return Set of tables of specified states, empty Set if none
 * @throws KeeperException
 */
Set<TableName> getAllTables(final ZooKeeperProtos.Table.State... states)
        throws KeeperException, InterruptedIOException {

    Set<TableName> allTables = new HashSet<TableName>();
    List<String> children = ZKUtil.listChildrenNoWatch(watcher, watcher.tableZNode);
    if (children == null)
        return allTables;
    for (String child : children) {
        TableName tableName = TableName.valueOf(child);
        ZooKeeperProtos.Table.State state;
        try {
            state = getTableState(watcher, tableName);
        } catch (InterruptedException e) {
            throw new InterruptedIOException();
        }
        for (ZooKeeperProtos.Table.State expectedState : states) {
            if (state == expectedState) {
                allTables.add(tableName);
                break;
            }
        }
    }
    return allTables;
}