Example usage for java.util.concurrent TimeoutException toString

Introduction

On this page you can find example usages of java.util.concurrent.TimeoutException.toString(), collected from open source projects.

Prototype

public String toString() 

Document

Returns a short description of this throwable.
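
As a quick illustration: like any Throwable, TimeoutException.toString() yields the fully qualified class name, followed by ": " and the detail message when one is set. A minimal sketch (the message text below is an arbitrary example):

import java.util.concurrent.TimeoutException;

public class TimeoutToStringDemo {
    public static void main(String[] args) {
        TimeoutException plain = new TimeoutException();
        TimeoutException withMessage = new TimeoutException("query timed out");
        // No detail message: just the class name
        System.out.println(plain.toString());       // java.util.concurrent.TimeoutException
        // With a detail message: class name, colon, message
        System.out.println(withMessage.toString()); // java.util.concurrent.TimeoutException: query timed out
    }
}

The examples below show the typical real-world uses of this string: logging it, embedding it in a larger error message, or passing it to a callback.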

Usage

From source file: io.github.retz.web.ClientHelper.java
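
Here a timeout while downloading a job's file is logged through LOG.error(e.toString()) and otherwise swallowed: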

public static void getWholeFile(Client c, int id, String filename, String resultDir) throws IOException {
    try {
        getWholeFileWithTerminator(c, id, filename, resultDir, null);
    } catch (TimeoutException e) {
        LOG.error(e.toString());
    }
}

From source file: io.github.retz.web.ClientHelper.java
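
This overload logs the timeout the same way but signals the failure to the caller by returning Optional.empty():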

public static Optional<Job> getWholeFile(Client c, int id, String filename, boolean poll, OutputStream out)
        throws JobNotFoundException, IOException {
    try {
        return getWholeFileWithTerminator(c, id, filename, poll, out, null);
    } catch (TimeoutException e) {
        LOG.error(e.toString());
        return Optional.empty();
    }
}

From source file: io.github.retz.web.ClientHelper.java
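
waitForStart polls a queued job with exponential backoff. A TimeoutException raised by the terminate callback propagates to the caller, while any other exception from the callback is logged with e.toString():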

public static Job waitForStart(Job job, Client c, Callable<Boolean> terminate)
        throws IOException, TimeoutException {
    Job current = job;
    int interval = INITAL_INTERVAL_MSEC;
    while (current.state() == Job.JobState.QUEUED) {
        maybeSleep(interval);
        interval = Math.min(interval * 2, MAX_INTERVAL_MSEC);

        try {
            if (terminate != null && terminate.call()) {
                throw new TimeoutException("Timeout at waitForStart");
            }
        } catch (TimeoutException e) {
            throw e;
        } catch (Exception e) {
            LOG.error(e.toString(), e);
            return null; // I don't know how to handle it
        }

        Response res = c.getJob(job.id());
        if (res instanceof GetJobResponse) {
            GetJobResponse getJobResponse = (GetJobResponse) res;
            if (getJobResponse.job().isPresent()) {
                current = getJobResponse.job().get();
                continue;
            }
        } else {
            LOG.error(res.status());
            throw new IOException(res.status());
        }
    }
    return current;
}

From source file: io.github.retz.web.ClientHelper.java
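
getWholeFileWithTerminator streams a job's output while polling its state, following the same pattern: a TimeoutException from the terminator callback is rethrown, and unexpected exceptions are logged with e.toString():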

public static Optional<Job> getWholeFileWithTerminator(Client c, int id, String filename, boolean poll,
        OutputStream out, long offset, Callable<Boolean> terminator)
        throws IOException, JobNotFoundException, TimeoutException {
    Optional<Job> current;

    {
        Response res = c.getJob(id);
        if (!(res instanceof GetJobResponse)) {
            LOG.error(res.status());
            throw new IOException(res.status());
        }
        GetJobResponse getJobResponse = (GetJobResponse) res;
        if (!getJobResponse.job().isPresent()) {
            throw new JobNotFoundException(id);
        }
    }

    int interval = INITAL_INTERVAL_MSEC;
    Job.JobState currentState = Job.JobState.QUEUED;

    long bytesRead = readFileUntilEmpty(c, id, filename, offset, out);
    offset = offset + bytesRead;

    do {
        Response res = c.getJob(id);
        if (!(res instanceof GetJobResponse)) {
            LOG.error(res.status());
            throw new IOException(res.status());
        }
        GetJobResponse getJobResponse = (GetJobResponse) res;
        current = getJobResponse.job();

        bytesRead = readFileUntilEmpty(c, id, filename, offset, out);
        offset = offset + bytesRead;

        if (current.isPresent()) {
            currentState = current.get().state();
            if ((currentState == Job.JobState.FINISHED || currentState == Job.JobState.KILLED)
                    && bytesRead == 0) {
                break;
            }
        }

        if (poll) {
            maybeSleep(interval);

            if (bytesRead == 0) {
                interval = Math.min(interval * 2, MAX_INTERVAL_MSEC);
            } else {
                interval = INITAL_INTERVAL_MSEC;
            }

            try {
                if (terminator != null && terminator.call()) {
                    throw new TimeoutException("Timeout at getWholeFile");
                }
            } catch (TimeoutException e) {
                throw e;
            } catch (Exception e) {
                LOG.error(e.toString(), e);
                return current; // I don't know how to handle it
            }
        } else {
            break;
        }
    } while (currentState != Job.JobState.FINISHED && currentState != Job.JobState.KILLED);

    if (!ClientHelper.fileExists(c, id, filename)) {
        // TODO: remove a file if it's already created
        throw new FileNotFoundException(filename);
    }

    return current;
}

From source file: com.mrfeinberg.translation.AbstractTranslationService.java
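
Here a translation request runs inside a FutureTask with a deadline; a TimeoutException from tc.get() maps to listener.timedOut(), while other failures are reported through listener.error(e.toString()):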

public Runnable translate(final String phrase, final LanguagePair lp, final TranslationListener listener) {
    final Language b = lp.b();

    final HttpClient httpClient = new HttpClient();
    if (proxyPrefs.getUseProxy()) {
        httpClient.getHostConfiguration().setProxy(proxyPrefs.getProxyHost(), proxyPrefs.getProxyPort());
    }

    final HttpMethod httpMethod = getHttpMethod(phrase, lp);
    final Callable<String> callable = new Callable<String>() {
        public String call() throws Exception {
            int result = httpClient.executeMethod(httpMethod);
            if (result != 200) {
                throw new Exception("Got " + result + " status for " + httpMethod.getURI());
            }
            final BufferedReader in = new BufferedReader(
                    new InputStreamReader(httpMethod.getResponseBodyAsStream(), "utf8"));
            try {
                final StringBuilder sb = new StringBuilder();
                String line;
                while ((line = in.readLine()) != null)
                    sb.append(line);
                return sb.toString();
            } finally {
                in.close();
                httpMethod.releaseConnection();
            }
        }
    };
    final FutureTask<String> tc = new FutureTask<String>(callable);
    return new Runnable() {
        public void run() {
            try {
                executor.execute(tc);
                final String result = tc.get(timeout, TimeUnit.MILLISECONDS);
                String found = findTranslatedText(result);
                if (found == null) {
                    listener.error("Cannot find translated text in result.");
                } else {
                    found = found.replaceAll("\\s+", " ");
                    listener.result(found, b);
                }
            } catch (final TimeoutException e) {
                listener.timedOut();
            } catch (final InterruptedException e) {
                listener.cancelled();
            } catch (final Exception e) {
                e.printStackTrace();
                listener.error(e.toString());
            }
        }
    };
}

From source file: info.pancancer.arch3.jobGenerator.JobGenerator.java
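
JobGenerator treats a TimeoutException during queue setup as unrecoverable and wraps it in a RuntimeException: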

public JobGenerator(String[] argv) throws IOException {
    super();

    this.iniDirSpec = super.parser.accepts("ini-dir", "schedule a batch of ini files from this directory")
            .withOptionalArg().ofType(String.class);
    this.workflowNameSpec = super.parser.accepts("workflow-name", "track the name of workflows")
            .withOptionalArg().ofType(String.class).required();
    this.workflowVersionSpec = super.parser.accepts("workflow-version", "track the version of workflows")
            .withOptionalArg().ofType(String.class).required();
    this.workflowPathSpec = super.parser
            .accepts("workflow-path", "Schedule workflows at this path on the container host").withOptionalArg()
            .ofType(String.class).required();
    this.totalJobSpec = super.parser.accepts("total-jobs", "Schedule a specific number of test workflows")
            .requiredUnless(iniDirSpec, this.endlessSpec).withRequiredArg().ofType(Integer.class)
            .defaultsTo(Integer.MAX_VALUE);
    this.forceSpec = super.parser.accepts("force", "Force job generation even if hashing is activated");

    parseOptions(argv);

    String iniDir = options.has(iniDirSpec) ? options.valueOf(iniDirSpec) : null;
    String workflowName = options.valueOf(workflowNameSpec);
    String workflowVersion = options.valueOf(workflowVersionSpec);
    String workflowPath = options.valueOf(workflowPathSpec);

    // UTILS OBJECT
    settings = Utilities.parseConfig(configFile);

    // CONFIG
    queueName = settings.getString(Constants.RABBIT_QUEUE_NAME);
    log.info("queue name: " + queueName);
    try {
        // SETUP QUEUE
        this.jchannel = Utilities.setupQueue(settings, queueName + "_orders");
    } catch (TimeoutException ex) {
        throw new RuntimeException(ex);
    }

    if (options.has(iniDirSpec)) {
        // read an array of files
        log.info("scanning: " + iniDir);
        DirectoryScanner scanner = new DirectoryScanner();
        scanner.setIncludes(new String[] { "**/*.ini" });
        scanner.setBasedir(iniDir);
        scanner.setCaseSensitive(false);
        scanner.scan();
        String[] files = scanner.getIncludedFiles();

        // LOOP, ADDING JOBS EVERY FEW MINUTES
        for (String file : files) {
            generateAndQueueJob(iniDir + "/" + file, workflowName, workflowVersion, workflowPath);
        }
    } else if (options.has(endlessSpec) || options.has(totalJobSpec)) {
        // limit
        log.info("running in test mode");
        boolean endless = options.has(endlessSpec);
        int limit = options.valueOf(totalJobSpec);
        for (int i = 0; endless || i < limit; i++) {
            generateAndQueueJob(null, workflowName, workflowVersion, workflowPath);
        }
    }

    try {

        jchannel.getConnection().close(FIVE_SECOND_IN_MILLISECONDS);

    } catch (IOException ex) {
        log.error(ex.toString());
    }

}

From source file: com.cloud.hypervisor.xenserver.resource.CitrixResourceBase.java
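
shutdownVM tolerates a TimeoutException from waitForTask only when the VM has already halted; the error messages throughout are assembled with e.toString():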

public void shutdownVM(final Connection conn, final VM vm, final String vmName) throws XmlRpcException {
    Task task = null;
    try {
        task = vm.cleanShutdownAsync(conn);
        try {
            // poll every second; time out after 10 minutes
            waitForTask(conn, task, 1000, 10 * 60 * 1000);
            checkForSuccess(conn, task);
        } catch (final TimeoutException e) {
            if (vm.getPowerState(conn) == VmPowerState.HALTED) {
                task = null;
                return;
            }
            throw new CloudRuntimeException("Shutdown VM catch HandleInvalid and VM is not in HALTED state");
        }
    } catch (final XenAPIException e) {
        s_logger.debug("Unable to cleanShutdown VM(" + vmName + ") on host(" + _host.getUuid() + ") due to "
                + e.toString());
        try {
            VmPowerState state = vm.getPowerState(conn);
            if (state == VmPowerState.RUNNING) {
                try {
                    vm.hardShutdown(conn);
                } catch (final Exception e1) {
                    s_logger.debug("Unable to hardShutdown VM(" + vmName + ") on host(" + _host.getUuid()
                            + ") due to " + e.toString());
                    state = vm.getPowerState(conn);
                    if (state == VmPowerState.RUNNING) {
                        forceShutdownVM(conn, vm);
                    }
                    return;
                }
            } else if (state == VmPowerState.HALTED) {
                return;
            } else {
                final String msg = "After cleanShutdown the VM status is " + state.toString()
                        + ", that is not expected";
                s_logger.warn(msg);
                throw new CloudRuntimeException(msg);
            }
        } catch (final Exception e1) {
            final String msg = "Unable to hardShutdown VM(" + vmName + ") on host(" + _host.getUuid()
                    + ") due to " + e.toString();
            s_logger.warn(msg, e1);
            throw new CloudRuntimeException(msg);
        }
    } finally {
        if (task != null) {
            try {
                task.destroy(conn);
            } catch (final Exception e1) {
                s_logger.debug("unable to destroy task(" + task.toString() + ") on host(" + _host.getUuid()
                        + ") due to " + e1.toString());
            }
        }
    }
}

From source file: org.apache.cassandra.service.StorageProxy.java
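
mutate logs a write timeout at debug level via ex.toString() before rethrowing it: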

/**
 * Use this method to have these Mutations applied
 * across all replicas. This method will take care
 * of the possibility of a replica being down and hint
 * the data across to some other replica.
 *
 * @param mutations the mutations to be applied across the replicas
 * @param consistency_level the consistency level for the operation
 */
public static void mutate(List<? extends IMutation> mutations, ConsistencyLevel consistency_level)
        throws UnavailableException, TimeoutException {
    final String localDataCenter = DatabaseDescriptor.getEndpointSnitch()
            .getDatacenter(FBUtilities.getLocalAddress());

    long startTime = System.nanoTime();
    List<IWriteResponseHandler> responseHandlers = new ArrayList<IWriteResponseHandler>();

    IMutation mostRecentMutation = null;
    try {
        for (IMutation mutation : mutations) {
            mostRecentMutation = mutation;
            if (mutation instanceof CounterMutation) {
                responseHandlers.add(mutateCounter((CounterMutation) mutation, localDataCenter));
            } else {
                responseHandlers.add(
                        performWrite(mutation, consistency_level, localDataCenter, standardWritePerformer));
            }
        }
        // wait for writes; throws TimeoutException if necessary
        for (IWriteResponseHandler responseHandler : responseHandlers) {
            responseHandler.get();
        }
    } catch (TimeoutException ex) {
        if (logger.isDebugEnabled()) {
            List<String> mstrings = new ArrayList<String>();
            for (IMutation mutation : mutations)
                mstrings.add(mutation.toString(true));
            logger.debug("Write timeout {} for one (or more) of: ", ex.toString(), mstrings);
        }
        throw ex;
    } catch (IOException e) {
        assert mostRecentMutation != null;
        throw new RuntimeException("error writing key " + ByteBufferUtil.bytesToHex(mostRecentMutation.key()),
                e);
    } finally {
        writeStats.addNano(System.nanoTime() - startTime);
    }
}

From source file: org.apache.cassandra.service.StorageProxy.java
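
currentReadFetchRow likewise logs read timeouts with ex.toString() and rethrows: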

public static List<Row> currentReadFetchRow(ReadCommand command, ConsistencyLevel consistency_level,
        List<InetAddress> endpoints) throws IOException, UnavailableException, TimeoutException {
    //        List<ReadCallback<Row>> readCallbacks = new ArrayList<ReadCallback<Row>>();
    List<Row> rows = new ArrayList<Row>();

    // send out read requests
    //        for (ReadCommand command: commands)
    //        {
    //            assert !command.isDigestQuery();
    logger.debug("Command/ConsistencyLevel is {}/{}", command, consistency_level);

    //            List<InetAddress> endpoints = StorageService.instance.getLiveNaturalEndpoints(command.table, command.key);
    //            DatabaseDescriptor.getEndpointSnitch().sortByProximity(FBUtilities.getLocalAddress(), endpoints);

    NullRowResolver resolver = new NullRowResolver(command.table, command.key);
    ReadCallback<Row> handler = getReadCallback(resolver, command, consistency_level, endpoints);
    handler.assureSufficientLiveNodes();
    assert !handler.endpoints.isEmpty();

    // no digestCommand

    for (InetAddress address : handler.endpoints) {
        if (address.equals(FBUtilities.getLocalAddress())) {
            if (logger.isDebugEnabled())
                logger.debug("reading data locally");
            StageManager.getStage(Stage.READ).execute(new LocalReadRunnable(command, handler));
        } else {
            if (logger.isDebugEnabled())
                logger.debug("reading data from " + address);
            MessagingService.instance().sendRR(command, address, handler);
        }
    }

    //            readCallbacks.add(handler);
    //        }

    //        for (int i = 0; i < commands.size(); i++)
    //        {
    //            ReadCallback<Row> handler = readCallbacks.get(i);
    Row row;
    //            ReadCommand command = commands.get(i);
    try {
        long startTime2 = System.currentTimeMillis();
        row = handler.get(); // CL.ONE is special cased here to ignore digests even if some have arrived
        if (row != null)
            rows.add(row);

        if (logger.isDebugEnabled())
            logger.debug("Read: " + (System.currentTimeMillis() - startTime2) + " ms.");
    } catch (TimeoutException ex) {
        if (logger.isDebugEnabled())
            logger.debug("Read timeout: {}", ex.toString());
        throw ex;
    } catch (DigestMismatchException ex) {
        //                if (logger.isDebugEnabled())
        //                    logger.debug("Digest mismatch: {}", ex.toString());
        //                RowRepairResolver resolver = new RowRepairResolver(command.table, command.key);
        //                RepairCallback<Row> repairHandler = new RepairCallback<Row>(resolver, handler.endpoints);
        //                for (InetAddress endpoint : handler.endpoints)
        //                    MessagingService.instance().sendRR(command, endpoint, repairHandler);
        //
        //                if (repairResponseHandlers == null)
        //                    repairResponseHandlers = new ArrayList<RepairCallback<Row>>();
        //                repairResponseHandlers.add(repairHandler);
    }
    //        }

    return rows;
}

From source file: org.apache.cassandra.service.StorageProxy.java
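
fetchRows reports both read timeouts and digest mismatches through ex.toString(), rethrowing the former and retrying reads for the latter: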

/**
 * This function executes local and remote reads, and blocks for the results:
 *
 * 1. Get the replica locations, sorted by response time according to the snitch
 * 2. Send a data request to the closest replica, and digest requests to either
 *    a) all the replicas, if read repair is enabled
 *    b) the closest R-1 replicas, where R is the number required to satisfy the ConsistencyLevel
 * 3. Wait for a response from R replicas
 * 4. If the digests (if any) match the data return the data
 * 5. else carry out read repair by getting data from all the nodes.
 */
private static List<Row> fetchRows(List<ReadCommand> commands, ConsistencyLevel consistency_level)
        throws IOException, UnavailableException, TimeoutException {
    List<ReadCallback<Row>> readCallbacks = new ArrayList<ReadCallback<Row>>();
    List<Row> rows = new ArrayList<Row>();

    // send out read requests
    for (ReadCommand command : commands) {
        assert !command.isDigestQuery();
        logger.debug("Command/ConsistencyLevel is {}/{}", command, consistency_level);

        List<InetAddress> endpoints = StorageService.instance.getLiveNaturalEndpoints(command.table,
                command.key);
        DatabaseDescriptor.getEndpointSnitch().sortByProximity(FBUtilities.getLocalAddress(), endpoints);

        RowDigestResolver resolver = new RowDigestResolver(command.table, command.key);
        ReadCallback<Row> handler = getReadCallback(resolver, command, consistency_level, endpoints);
        handler.assureSufficientLiveNodes();
        assert !handler.endpoints.isEmpty();

        // The data-request message is sent to dataPoint, the node that will actually get
        // the data for us. The other replicas are only sent a digest query.
        ReadCommand digestCommand = null;
        if (handler.endpoints.size() > 1) {
            digestCommand = command.copy();
            digestCommand.setDigestQuery(true);
        }

        InetAddress dataPoint = handler.endpoints.get(0);
        if (dataPoint.equals(FBUtilities.getLocalAddress())) {
            if (logger.isDebugEnabled())
                logger.debug("reading data locally");
            StageManager.getStage(Stage.READ).execute(new LocalReadRunnable(command, handler));
        } else {
            if (logger.isDebugEnabled())
                logger.debug("reading data from " + dataPoint);
            MessagingService.instance().sendRR(command, dataPoint, handler);
        }

        // We lazy-construct the digest Message object since it may not be necessary if we
        // are doing a local digest read, or no digest reads at all.
        MessageProducer producer = new CachingMessageProducer(digestCommand);
        for (InetAddress digestPoint : handler.endpoints.subList(1, handler.endpoints.size())) {
            if (digestPoint.equals(FBUtilities.getLocalAddress())) {
                if (logger.isDebugEnabled())
                    logger.debug("reading digest locally");
                StageManager.getStage(Stage.READ).execute(new LocalReadRunnable(digestCommand, handler));
            } else {
                if (logger.isDebugEnabled())
                    logger.debug("reading digest for from " + digestPoint);
                MessagingService.instance().sendRR(producer, digestPoint, handler);
            }
        }

        readCallbacks.add(handler);
    }

    // read results and make a second pass for any digest mismatches
    List<RepairCallback<Row>> repairResponseHandlers = null;
    for (int i = 0; i < commands.size(); i++) {
        ReadCallback<Row> handler = readCallbacks.get(i);
        Row row;
        ReadCommand command = commands.get(i);
        try {
            long startTime2 = System.currentTimeMillis();
            row = handler.get(); // CL.ONE is special cased here to ignore digests even if some have arrived
            if (row != null)
                rows.add(row);

            if (logger.isDebugEnabled())
                logger.debug("Read: " + (System.currentTimeMillis() - startTime2) + " ms.");
        } catch (TimeoutException ex) {
            if (logger.isDebugEnabled())
                logger.debug("Read timeout: {}", ex.toString());
            throw ex;
        } catch (DigestMismatchException ex) {
            if (logger.isDebugEnabled())
                logger.debug("Digest mismatch: {}", ex.toString());
            RowRepairResolver resolver = new RowRepairResolver(command.table, command.key);
            RepairCallback<Row> repairHandler = new RepairCallback<Row>(resolver, handler.endpoints);
            for (InetAddress endpoint : handler.endpoints)
                MessagingService.instance().sendRR(command, endpoint, repairHandler);

            if (repairResponseHandlers == null)
                repairResponseHandlers = new ArrayList<RepairCallback<Row>>();
            repairResponseHandlers.add(repairHandler);
        }
    }

    // read the results for the digest mismatch retries
    if (repairResponseHandlers != null) {
        for (RepairCallback<Row> handler : repairResponseHandlers) {
            try {
                Row row = handler.get();
                if (row != null)
                    rows.add(row);
            } catch (DigestMismatchException e) {
                throw new AssertionError(e); // full data requested from each node here, no digests should be sent
            }
        }
    }

    return rows;
}