Example usage for java.io.InterruptedIOException: InterruptedIOException(String)

Introduction

On this page you can find example usages of the java.io.InterruptedIOException(String) constructor, collected from open-source projects.

Prototype

public InterruptedIOException(String s) 

Document

Constructs an InterruptedIOException with the specified detail message.
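
The typical pattern in the examples below is to catch an InterruptedException, restore the thread's interrupt status, and rethrow it as an InterruptedIOException so that interruption can propagate through APIs that only declare IOException. A minimal sketch of that idiom (the awaitCompletion helper and its names are hypothetical, for illustration only):

import java.io.IOException;
import java.io.InterruptedIOException;

public class InterruptedIOExample {

    // Hypothetical helper: waits for a worker thread and converts an
    // interruption into an InterruptedIOException for IOException-only callers.
    static void awaitCompletion(Thread worker) throws IOException {
        try {
            worker.join();
        } catch (InterruptedException e) {
            // Preserve the interrupt flag for callers further up the stack.
            Thread.currentThread().interrupt();
            InterruptedIOException iioe =
                    new InterruptedIOException("wait for worker was interrupted");
            iioe.initCause(e);
            throw iioe;
        }
    }
}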

Usage

From source file:org.apache.hadoop.hbase.client.ScannerCallableWithReplicas.java

@Override
public Result[] call(int timeout) throws IOException {
    // If the active replica callable was closed somewhere, invoke the RPC to
    // really close it. In the case of regular scanners, this applies. We make a couple
    // of RPCs to a RegionServer, and when that region is exhausted, we set
    // the closed flag. Then an RPC is required to actually close the scanner.
    if (currentScannerCallable != null && currentScannerCallable.closed) {
        // For closing we target that exact scanner (and not do replica fallback like in
        // the case of normal reads)
        if (LOG.isTraceEnabled()) {
            LOG.trace("Closing scanner id=" + currentScannerCallable.scannerId);
        }
        Result[] r = currentScannerCallable.call(timeout);
        currentScannerCallable = null;
        return r;
    }
    // We need to do the following:
    //1. When a scan goes out to a certain replica (default or not), we need to
    //   continue to hit that until there is a failure. So store the last successfully invoked
    //   replica
    //2. We should close the "losing" scanners (scanners other than the ones we hear back
    //   from first)
    //
    RegionLocations rl = RpcRetryingCallerWithReadReplicas.getRegionLocations(true,
            RegionReplicaUtil.DEFAULT_REPLICA_ID, cConnection, tableName, currentScannerCallable.getRow());

    // allocate a bounded completion pool of some multiple of the number of replicas.
    // We want to accommodate some RPCs for redundant replica scans that are still in progress
    ResultBoundedCompletionService<Pair<Result[], ScannerCallable>> cs = new ResultBoundedCompletionService<Pair<Result[], ScannerCallable>>(
            RpcRetryingCallerFactory.instantiate(ScannerCallableWithReplicas.this.conf), pool, rl.size() * 5);

    AtomicBoolean done = new AtomicBoolean(false);
    replicaSwitched.set(false);
    // submit call for the primary replica.
    addCallsForCurrentReplica(cs, rl);
    try {
        // wait for the timeout to see whether the primary responds back
        Future<Pair<Result[], ScannerCallable>> f = cs.poll(timeBeforeReplicas, TimeUnit.MICROSECONDS); // Yes, microseconds
        if (f != null) {
            Pair<Result[], ScannerCallable> r = f.get();
            if (r != null && r.getSecond() != null) {
                updateCurrentlyServingReplica(r.getSecond(), r.getFirst(), done, pool);
            }
            return r == null ? null : r.getFirst(); //great we got a response
        }
    } catch (ExecutionException e) {
        RpcRetryingCallerWithReadReplicas.throwEnrichedException(e, retries);
    } catch (CancellationException e) {
        throw new InterruptedIOException(e.getMessage());
    } catch (InterruptedException e) {
        throw new InterruptedIOException(e.getMessage());
    }
    // submit calls for all of the secondaries at once
    // TODO: this may be overkill for large region replication
    addCallsForOtherReplicas(cs, rl, 0, rl.size() - 1);
    try {
        Future<Pair<Result[], ScannerCallable>> f = cs.take();
        Pair<Result[], ScannerCallable> r = f.get();
        if (r != null && r.getSecond() != null) {
            updateCurrentlyServingReplica(r.getSecond(), r.getFirst(), done, pool);
        }
        return r == null ? null : r.getFirst(); // great we got an answer
    } catch (ExecutionException e) {
        RpcRetryingCallerWithReadReplicas.throwEnrichedException(e, retries);
    } catch (CancellationException e) {
        throw new InterruptedIOException(e.getMessage());
    } catch (InterruptedException e) {
        throw new InterruptedIOException(e.getMessage());
    } finally {
        // We get here because we were interrupted or because one or more of the
        // calls succeeded or failed. In all cases, we stop all our tasks.
        cs.cancelAll();
    }
    return null; // unreachable
}

From source file:org.apache.hadoop.hbase.regionserver.RegionScannerHolder.java

/**
 * Get the prefetched scan result, if any. Otherwise,
 * do a scan synchronously and return the result, which
 * may take some time. Region scan coprocessor, if specified,
 * is invoked properly, which may override the scan result.
 *
 * @param rows the number of rows to scan, which preferably should not
 * change between scanner.next() calls.
 *
 * @return scan result, which has the data retrieved from
 * the scanner, or some IOException if the scan failed.
 * @throws IOException if retrieving from the scanner failed.
 */
public ScanResult getScanResult(final int rows) throws IOException {
    Preconditions.checkArgument(rows > 0, "Number of rows requested must be positive");
    ScanResult scanResult = null;
    this.rows = rows;

    if (prefetchScanFuture == null) {
        // Need to scan inline if not prefetched
        scanResult = prefetcher.call();
    } else {
        // if we have a prefetched result, then use it
        try {
            scanResult = prefetchScanFuture.get();
            if (scanResult.moreResults) {
                int prefetchedRows = scanResult.results.size();
                if (prefetchedRows != 0 && this.rows > prefetchedRows) {
                    // Try to scan more since we haven't prefetched enough
                    this.rows -= prefetchedRows;
                    ScanResult tmp = prefetcher.call();
                    if (tmp.isException) {
                        return tmp; // Keep the prefetched results for later
                    }
                    if (tmp.results != null && !tmp.results.isEmpty()) {
                        // Merge new results to the old result list
                        scanResult.results.addAll(tmp.results);
                    }
                    // Reset rows for next prefetching
                    this.rows = rows;
                }
            }
            prefetchScanFuture = null;
            if (prefetchedResultSize > 0) {
                globalPrefetchedResultSize.addAndGet(-prefetchedResultSize);
                prefetchedResultSize = 0L;
            }
        } catch (ExecutionException ee) {
            throw new IOException("failed to run prefetching task", ee.getCause());
        } catch (InterruptedException ie) {
            Thread.currentThread().interrupt();
            IOException iie = new InterruptedIOException("scan was interrupted");
            iie.initCause(ie);
            throw iie;
        }
    }

    if (prefetching && scanResult.moreResults && !scanResult.results.isEmpty()) {
        long totalPrefetchedResultSize = globalPrefetchedResultSize.get();
        if (totalPrefetchedResultSize < maxGlobalPrefetchedResultSize) {
            // Schedule a background prefetch for the next result
            // if prefetch is enabled on scans and there are more results
            prefetchScanFuture = scanPrefetchThreadPool.submit(prefetcher);
        } else if (LOG.isTraceEnabled()) {
            LOG.trace("One prefetching is skipped for scanner " + scannerName
                    + " since total prefetched result size " + totalPrefetchedResultSize
                    + " is more than the maximum configured " + maxGlobalPrefetchedResultSize);
        }
    }
    return scanResult;
}

From source file:org.apache.hadoop.hbase.util.ModifyRegionUtils.java

/**
 * Execute the task on the specified set of regions.
 *
 * @param exec Thread Pool Executor
 * @param regions {@link HRegionInfo} that describes the regions to edit
 * @param task {@link RegionEditTask} custom code to edit the region
 * @throws IOException
 */
public static void editRegions(final ThreadPoolExecutor exec, final Collection<HRegionInfo> regions,
        final RegionEditTask task) throws IOException {
    final ExecutorCompletionService<Void> completionService = new ExecutorCompletionService<Void>(exec);
    for (final HRegionInfo hri : regions) {
        completionService.submit(new Callable<Void>() {
            @Override
            public Void call() throws IOException {
                task.editRegion(hri);
                return null;
            }
        });
    }

    try {
        for (HRegionInfo hri : regions) {
            completionService.take().get();
        }
    } catch (InterruptedException e) {
        throw new InterruptedIOException(e.getMessage());
    } catch (ExecutionException e) {
        IOException ex = new IOException();
        ex.initCause(e.getCause());
        throw ex;
    }
}

From source file:ca.sqlpower.architect.ProjectLoader.java

/**
 * Loads the project data from the given input stream.
 * <p>
 * Note: the input stream is always closed afterwards.
 *
 * @param in
 *            Used to load in the project data, must support mark.
 * @param dataSources
 *            Collection of the data sources used in the project
 */
public void load(InputStream in, DataSourceCollection<? extends SPDataSource> dataSources,
        ArchitectSession messageDelegate) throws IOException, SQLObjectException {
    UnclosableInputStream uin = new UnclosableInputStream(in);
    siblingSession = messageDelegate;
    try {
        dbcsLoadIdMap = new HashMap<String, JDBCDataSource>();
        sqlObjectLoadIdMap = new HashMap<String, SQLObject>();

        Digester digester = null;

        // use digester to read from file
        try {
            digester = setupDigester();
            digester.parse(uin);
        } catch (SAXException ex) {
            //The digester likes to wrap the cancelled exception in a SAXException.
            if (ex.getException() instanceof DigesterCancelledException) {
                //Digester was cancelled by the user. Do not load anything.
                throw new RuntimeException(new InterruptedIOException("progress"));
            }
            logger.error("SAX Exception in project file parse!", ex);
            String message;
            if (digester == null) {
                message = "Couldn't create an XML parser";
            } else {
                message = "There is an XML parsing error in project file at Line:"
                        + digester.getDocumentLocator().getLineNumber() + " Column:"
                        + digester.getDocumentLocator().getColumnNumber();
            }
            throw new SQLObjectException(message, ex);
        } catch (IOException ex) {
            logger.error("IO Exception in project file parse!", ex);
            throw new SQLObjectException("There was an I/O error while reading the file", ex);
        } catch (Exception ex) {
            logger.error("General Exception in project file parse!", ex);
            throw new SQLObjectException("Unexpected Exception", ex);
        }

        SQLObject dbConnectionContainer = ((SQLObject) getSession().getRootObject());

        // hook up data source parent types
        for (SQLDatabase db : dbConnectionContainer.getChildren(SQLDatabase.class)) {
            JDBCDataSource ds = db.getDataSource();
            String parentTypeId = ds.getPropertiesMap().get(JDBCDataSource.DBCS_CONNECTION_TYPE);
            if (parentTypeId != null) {
                for (JDBCDataSourceType dstype : dataSources.getDataSourceTypes()) {
                    if (dstype.getName().equals(parentTypeId)) {
                        ds.setParentType(dstype);
                        // TODO unit test that this works
                    }
                }
                if (ds.getParentType() == null) {
                    logger.error("Data Source \"" + ds.getName() + "\" has type \"" + parentTypeId
                            + "\", which is not configured in the user prefs.");
                    // TODO either reconstruct the parent type, or bring this problem to the attention of the user.
                    // TODO test this
                } else {
                    // TODO test that the referenced parent type is properly configured (has a driver, etc)
                    // TODO test for this behaviour
                }
            }

        }

        /*
         * For backward compatibility: in the old project file format, we have
         * primaryKeyName in the table attribute, but nothing in the SQLIndex
         * that indicates the primary key index, so we have to set the index
         * as the primary key index if the index name == table.primaryKeyName
         * after loading the project. table.primaryKeyName is saved in the map
         * now, not in the table object.
         */
        for (SQLTable table : (List<SQLTable>) getSession().getTargetDatabase().getTables()) {

            if (logger.isDebugEnabled()) {
                if (!table.isPopulated()) {
                    logger.debug("Table [" + table.getName() + "] not populated");
                } else {
                    logger.debug(
                            "Table [" + table.getName() + "] index folder contents: " + table.getIndices());
                }
            }

            if (table.getPrimaryKeyIndex() == null) {
                logger.debug("primary key index is null in table: " + table);
                logger.debug("number of children found in indices folder: " + table.getIndices().size());
                for (SQLIndex index : table.getIndices()) {
                    if (sqlObjectLoadIdMap.get(table.getName() + "." + index.getName()) != null) {
                        table.getPrimaryKeyIndex().updateToMatch(index);
                        break;
                    }
                }
            }
            logger.debug("Table [" + table.getName() + "]2 index folder contents: " + table.getIndices());
            logger.debug("Table [" + table.getName() + "]3 index folder contents: " + table.getIndices());

            if (logger.isDebugEnabled()) {
                if (!table.isPopulated()) {
                    logger.debug("Table [" + table.getName() + "] not populated");
                } else {
                    logger.debug("Table [" + table.getName() + "] index folder contents: "
                            + table.getIndices().size());
                }
            }

        }

        /*
         * In old versions of the architect, user defined types weren't
         * available, so all columns stored their type as a JDBC type code.
         * For all columns in the playpen, we need to hook up upstream user
         * defined types.
         */
        ListMultimap<String, SQLColumn> columns = ArrayListMultimap.create();
        for (SQLTable table : getSession().getTargetDatabase().getTables()) {
            for (SQLColumn column : table.getChildren(SQLColumn.class)) {
                SQLColumn sourceColumn = column.getSourceColumn();
                if (sourceColumn != null && sourceColumn.getPlatform() != null) {
                    columns.put(column.getSourceColumn().getPlatform(), column);
                } else {
                    columns.put(SQLTypePhysicalPropertiesProvider.GENERIC_PLATFORM, column);
                }
            }
        }
        for (String platform : columns.keySet()) {
            SQLColumn.assignTypes(columns.get(platform), dataSources, platform,
                    new DefaultUserPrompterFactory());
        }

        setModified(false);
    } finally {
        uin.forceClose();
    }
}

From source file:com.healthmarketscience.rmiio.RemoteStreamServer.java

/**
 * Throws an IOException if the stream has been aborted. Should be called
 * at the beginning of any method which accesses the underlying stream,
 * except for the <code>close</code> method.
 */
protected final void checkAborted() throws IOException {
    if (_state.get() == State.ABORTED) {
        throw new InterruptedIOException("stream server was aborted");
    }
}

From source file:org.apache.hadoop.hbase.regionserver.Compactor.java

void isInterrupted(final Store store, final StoreFile.Writer writer) throws IOException {
    if (store.getHRegion().areWritesEnabled())
        return;
    // Else cleanup.
    writer.close();
    store.getFileSystem().delete(writer.getPath(), false);
    throw new InterruptedIOException("Aborting compaction of store " + store + " in region "
            + store.getHRegion() + " because user requested stop.");
}

From source file:org.apache.hadoop.hbase.regionserver.handler.RegionReplicaFlushHandler.java

void triggerFlushInPrimaryRegion(final HRegion region) throws IOException, RuntimeException {
    long pause = connection.getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
            HConstants.DEFAULT_HBASE_CLIENT_PAUSE);

    int maxAttempts = getRetriesCount(connection.getConfiguration());
    RetryCounter counter = new RetryCounterFactory(maxAttempts, (int) pause).create();

    if (LOG.isDebugEnabled()) {
        LOG.debug("Attempting to do an RPC to the primary region replica "
                + ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo())
                        .getEncodedName()
                + " of region " + region.getRegionInfo().getEncodedName() + " to trigger a flush");
    }
    while (!region.isClosing() && !region.isClosed() && !server.isAborted() && !server.isStopped()) {
        FlushRegionCallable flushCallable = new FlushRegionCallable(connection, rpcControllerFactory,
                RegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()), true);

        // TODO: flushRegion() is a blocking call waiting for the flush to complete. Ideally we
        // do not have to wait for the whole flush here, just initiate it.
        FlushRegionResponse response = null;
        try {
            response = rpcRetryingCallerFactory.<FlushRegionResponse>newCaller().callWithRetries(flushCallable,
                    this.operationTimeout);
        } catch (IOException ex) {
            if (ex instanceof TableNotFoundException
                    || connection.isTableDisabled(region.getRegionInfo().getTable())) {
                return;
            }
            throw ex;
        }

        if (response.getFlushed()) {
            // then we have to wait until we see the flush entry. All reads will be rejected until we see
            // a complete flush cycle or replay a region open event
            if (LOG.isDebugEnabled()) {
                LOG.debug("Successfully triggered a flush of primary region replica "
                        + ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo())
                                .getEncodedName()
                        + " of region " + region.getRegionInfo().getEncodedName()
                        + " Now waiting and blocking reads until observing a full flush cycle");
            }
            break;
        } else {
            if (response.hasWroteFlushWalMarker()) {
                if (response.getWroteFlushWalMarker()) {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Successfully triggered an empty flush marker(memstore empty) of primary "
                                + "region replica "
                                + ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo())
                                        .getEncodedName()
                                + " of region " + region.getRegionInfo().getEncodedName() + " Now waiting and "
                                + "blocking reads until observing a flush marker");
                    }
                    break;
                } else {
                    // somehow we were not able to get the primary to write the flush request. It may be
                    // closing or already flushing. Retry flush again after some sleep.
                    if (!counter.shouldRetry()) {
                        throw new IOException("Cannot cause primary to flush or drop a wal marker after "
                                + "retries. Failing opening of this region replica "
                                + region.getRegionInfo().getEncodedName());
                    }
                }
            } else {
                // nothing to do. Are we dealing with an old server?
                LOG.warn("Was not able to trigger a flush from primary region due to old server version? "
                        + "Continuing to open the secondary region replica: "
                        + region.getRegionInfo().getEncodedName());
                region.setReadsEnabled(true);
                break;
            }
        }
        try {
            counter.sleepUntilNextRetry();
        } catch (InterruptedException e) {
            throw new InterruptedIOException(e.getMessage());
        }
    }
}

From source file:org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil.java

public static void concurrentVisitReferencedFiles(final Configuration conf, final FileSystem fs,
        final SnapshotManifest manifest, final StoreFileVisitor visitor) throws IOException {
    final SnapshotDescription snapshotDesc = manifest.getSnapshotDescription();
    final Path snapshotDir = manifest.getSnapshotDir();

    List<SnapshotRegionManifest> regionManifests = manifest.getRegionManifests();
    if (regionManifests == null || regionManifests.size() == 0) {
        LOG.debug("No manifest files present: " + snapshotDir);
        return;
    }

    ExecutorService exec = SnapshotManifest.createExecutor(conf, "VerifySnapshot");
    final ExecutorCompletionService<Void> completionService = new ExecutorCompletionService<Void>(exec);
    try {
        for (final SnapshotRegionManifest regionManifest : regionManifests) {
            completionService.submit(new Callable<Void>() {
                @Override
                public Void call() throws IOException {
                    visitRegionStoreFiles(regionManifest, visitor);
                    return null;
                }
            });
        }
        try {
            for (int i = 0; i < regionManifests.size(); ++i) {
                completionService.take().get();
            }
        } catch (InterruptedException e) {
            throw new InterruptedIOException(e.getMessage());
        } catch (ExecutionException e) {
            if (e.getCause() instanceof CorruptedSnapshotException) {
                throw new CorruptedSnapshotException(e.getCause().getMessage(), snapshotDesc);
            } else {
                IOException ex = new IOException();
                ex.initCause(e.getCause());
                throw ex;
            }
        }
    } finally {
        exec.shutdown();
    }
}

From source file:org.apache.http.HC4.impl.execchain.MinimalClientExec.java

@Override
public CloseableHttpResponse execute(final HttpRoute route, final HttpRequestWrapper request,
        final HttpClientContext context, final HttpExecutionAware execAware) throws IOException, HttpException {
    Args.notNull(route, "HTTP route");
    Args.notNull(request, "HTTP request");
    Args.notNull(context, "HTTP context");

    rewriteRequestURI(request, route);

    final ConnectionRequest connRequest = connManager.requestConnection(route, null);
    if (execAware != null) {
        if (execAware.isAborted()) {
            connRequest.cancel();
            throw new RequestAbortedException("Request aborted");
        } else {
            execAware.setCancellable(connRequest);
        }
    }

    final RequestConfig config = context.getRequestConfig();

    final HttpClientConnection managedConn;
    try {
        final int timeout = config.getConnectionRequestTimeout();
        managedConn = connRequest.get(timeout > 0 ? timeout : 0, TimeUnit.MILLISECONDS);
    } catch (final InterruptedException interrupted) {
        Thread.currentThread().interrupt();
        throw new RequestAbortedException("Request aborted", interrupted);
    } catch (final ExecutionException ex) {
        Throwable cause = ex.getCause();
        if (cause == null) {
            cause = ex;
        }
        throw new RequestAbortedException("Request execution failed", cause);
    }

    final ConnectionHolder releaseTrigger = new ConnectionHolder(log, connManager, managedConn);
    try {
        if (execAware != null) {
            if (execAware.isAborted()) {
                releaseTrigger.close();
                throw new RequestAbortedException("Request aborted");
            } else {
                execAware.setCancellable(releaseTrigger);
            }
        }

        if (!managedConn.isOpen()) {
            final int timeout = config.getConnectTimeout();
            this.connManager.connect(managedConn, route, timeout > 0 ? timeout : 0, context);
            this.connManager.routeComplete(managedConn, route, context);
        }
        final int timeout = config.getSocketTimeout();
        if (timeout >= 0) {
            managedConn.setSocketTimeout(timeout);
        }

        HttpHost target = null;
        final HttpRequest original = request.getOriginal();
        if (original instanceof HttpUriRequest) {
            final URI uri = ((HttpUriRequest) original).getURI();
            if (uri.isAbsolute()) {
                target = new HttpHost(uri.getHost(), uri.getPort(), uri.getScheme());
            }
        }
        if (target == null) {
            target = route.getTargetHost();
        }

        context.setAttribute(HttpCoreContext.HTTP_TARGET_HOST, target);
        context.setAttribute(HttpCoreContext.HTTP_REQUEST, request);
        context.setAttribute(HttpCoreContext.HTTP_CONNECTION, managedConn);
        context.setAttribute(HttpClientContext.HTTP_ROUTE, route);

        httpProcessor.process(request, context);
        final HttpResponse response = requestExecutor.execute(request, managedConn, context);
        httpProcessor.process(response, context);

        // The connection is in or can be brought to a re-usable state.
        if (reuseStrategy.keepAlive(response, context)) {
            // Set the idle duration of this connection
            final long duration = keepAliveStrategy.getKeepAliveDuration(response, context);
            releaseTrigger.setValidFor(duration, TimeUnit.MILLISECONDS);
            releaseTrigger.markReusable();
        } else {
            releaseTrigger.markNonReusable();
        }

        // check for entity, release connection if possible
        final HttpEntity entity = response.getEntity();
        if (entity == null || !entity.isStreaming()) {
            // connection not needed and (assumed to be) in re-usable state
            releaseTrigger.releaseConnection();
            return new HttpResponseProxy(response, null);
        } else {
            return new HttpResponseProxy(response, releaseTrigger);
        }
    } catch (final ConnectionShutdownException ex) {
        final InterruptedIOException ioex = new InterruptedIOException("Connection has been shut down");
        ioex.initCause(ex);
        throw ioex;
    } catch (final HttpException ex) {
        releaseTrigger.abortConnection();
        throw ex;
    } catch (final IOException ex) {
        releaseTrigger.abortConnection();
        throw ex;
    } catch (final RuntimeException ex) {
        releaseTrigger.abortConnection();
        throw ex;
    }
}

From source file:com.hp.hpl.jena.grddl.impl.GRDDL.java

private boolean transformWith(String string, boolean needRewind) {
    try {
        try {
            final Transformer t = transformerFor(string);
            String mimetype = mimetype(t);
            final Result result = resultFor(mimetype);
            if (result == null)
                return false;
            final Source in = input.startAfresh(needRewind);
            runInSandbox(new TERunnable() {
                public void run() throws TransformerException {
                    t.transform(in, result);
                }
            }, true);
            postProcess(mimetype, result);
            return true;
        } catch (TransformerException e) {
            error(e);
            return false;
        } catch (SAXParseException e) {
            error(e);
            return false;
        } catch (InterruptedException e) {
            throw new InterruptedIOException("In GRDDL transformWith");
        } finally {
            input.close();
            if (subThread != null)
                subThread.interrupt();
        }
    } catch (IOException ioe) {
        error(ioe);
        return false;
    }
}