Example usage for java.util.concurrent Future cancel

Introduction

This page lists usage examples for the cancel method of java.util.concurrent.Future.

Prototype

boolean cancel(boolean mayInterruptIfRunning);

Document

Attempts to cancel execution of this task.
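
As a starting point, here is a minimal, self-contained sketch of the common pattern the examples below share: wait for a result with a timeout and cancel the task if it does not finish in time. The class name, task body, and timings are illustrative assumptions, not taken from any of the projects below.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class FutureCancelSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        // Hypothetical slow task used only for illustration.
        Future<String> future = executor.submit(() -> {
            Thread.sleep(10_000); // simulate slow work
            return "done";
        });
        try {
            System.out.println(future.get(1, TimeUnit.SECONDS));
        } catch (TimeoutException e) {
            // mayInterruptIfRunning = true: interrupt the task if it already started
            boolean cancelled = future.cancel(true);
            System.out.println("cancelled: " + cancelled);
        } finally {
            executor.shutdown();
        }
    }
}

Note that cancel returns false if the task has already completed. Passing true requests an interrupt of the task's thread if it is running; passing false only prevents a task that has not yet started from running, which several examples below use to avoid interrupting work already in flight.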

Usage

From source file:com.opengamma.financial.portfolio.save.SavePortfolio.java
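
This example submits position lookups to an executor, drains completed futures while populating a cache, and calls cancel(false) on whatever remains after an interruption or execution failure, so tasks already running are not interrupted.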

private void populatePositionMapCache(final PortfolioNode node) {
    final List<Future<Pair<UniqueId, ObjectId>>> futures = new LinkedList<Future<Pair<UniqueId, ObjectId>>>();
    PortfolioNodeTraverser.depthFirst(new AbstractPortfolioNodeTraversalCallback() {
        @Override
        public void preOrderOperation(final PortfolioNode parentNode, final Position position) {
            final ExternalId positionId = position.getUniqueId().toExternalId();
            ObjectId id = s_cache.get(positionId);
            if (id == null) {
                futures.add(_executor.submit(new Callable<Pair<UniqueId, ObjectId>>() {
                    @Override
                    public Pair<UniqueId, ObjectId> call() throws Exception {
                        final PositionSearchRequest searchRequest = new PositionSearchRequest();
                        searchRequest.setPositionProviderId(positionId);
                        final PositionSearchResult searchResult = _positions.search(searchRequest);
                        ObjectId id = null;
                        if (searchResult.getFirstPosition() != null) {
                            id = searchResult.getFirstPosition().getUniqueId().getObjectId();
                            s_logger.debug("Found position {} in master at {}", position, id);
                        }
                        if (id == null) {
                            s_cache.putIfAbsent(positionId, MISSING);
                        } else {
                            s_cache.putIfAbsent(positionId, id);
                        }
                        return Pair.of(position.getUniqueId(), id);
                    }
                }));
            } else if (id == MISSING) {
                _positionMap.put(position.getUniqueId(), null);
            } else {
                _positionMap.put(position.getUniqueId(), id);
            }
        }
    }).traverse(node);
    if (futures.isEmpty()) {
        return;
    }
    s_logger.info("{} operations to populate cache", futures.size());
    Iterator<Future<Pair<UniqueId, ObjectId>>> futureItr = futures.iterator();
    while (futureItr.hasNext()) {
        final Future<Pair<UniqueId, ObjectId>> future = futureItr.next();
        try {
            final Pair<UniqueId, ObjectId> value = future.get();
            futureItr.remove();
            _positionMap.put(value.getFirst(), value.getSecond());
        } catch (final InterruptedException e) {
            s_logger.warn("Interrupted", e);
            break;
        } catch (final ExecutionException e) {
            s_logger.warn("Exception", e);
            break;
        }
    }
    futureItr = futures.iterator();
    while (futureItr.hasNext()) {
        final Future<?> future = futureItr.next();
        future.cancel(false);
    }
}

From source file:com.ctrip.infosec.rule.executor.RulesExecutorService.java
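
This example runs matched rules in parallel through invokeAll with a timeout, collects the results of the futures that finished, and calls cancel(true) on any future that did not complete in time.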

void executeParallel(RiskFact fact) {

    // matchRules        
    List<Rule> matchedRules = Configs.matchRules(fact, false);

    TraceLogger.traceLog("? " + matchedRules.size() + " ? ...");
    List<Callable<RuleExecuteResultWithEvent>> runs = Lists.newArrayList();
    for (Rule rule : matchedRules) {
        final RiskFact factCopy = BeanMapper.copy(fact, RiskFact.class);

        // set default result
        if (!Constants.eventPointsWithScene.contains(factCopy.eventPoint)) {
            Map<String, Object> defaultResult = Maps.newHashMap();
            defaultResult.put(Constants.riskLevel, 0);
            defaultResult.put(Constants.riskMessage, "PASS");
            factCopy.results.put(rule.getRuleNo(), defaultResult);
        }

        final StatelessRuleEngine statelessRuleEngine = SpringContextHolder.getBean(StatelessRuleEngine.class);
        final String packageName = rule.getRuleNo();
        final String _logPrefix = Contexts.getLogPrefix();
        final String _traceLoggerParentTransId = TraceLogger.getTransId();

        try {
            // add current execute ruleNo before execution
            factCopy.ext.put(Constants.key_ruleNo, rule.getRuleNo());
            factCopy.ext.put(Constants.key_isAsync, false);

            runs.add(new Callable<RuleExecuteResultWithEvent>() {

                @Override
                public RuleExecuteResultWithEvent call() throws Exception {
                    RuleMonitorHelper.newTrans(factCopy, RuleMonitorType.RULE, packageName);
                    TraceLogger.beginTrans(factCopy.eventId);
                    TraceLogger.setParentTransId(_traceLoggerParentTransId);
                    TraceLogger.setLogPrefix("[" + packageName + "]");
                    Contexts.setPolicyOrRuleNo(packageName);
                    try {
                        long start = System.currentTimeMillis();
                        // remove current execute ruleNo when finished execution.
                        statelessRuleEngine.execute(packageName, factCopy);

                        long handlingTime = System.currentTimeMillis() - start;

                        if (!Constants.eventPointsWithScene.contains(factCopy.eventPoint)) {

                            Map<String, Object> resultWithScene = factCopy.resultsGroupByScene.get(packageName);
                            if (resultWithScene != null) {
                                resultWithScene.put(Constants.async, false);
                                resultWithScene.put(Constants.timeUsage, handlingTime);

                                TraceLogger.traceLog(">>>> [" + packageName
                                        + "] : [???] riskLevel = "
                                        + resultWithScene.get(Constants.riskLevel) + ", riskMessage = "
                                        + resultWithScene.get(Constants.riskMessage) + ", riskScene = "
                                        + resultWithScene.get(Constants.riskScene) + ", usage = "
                                        + resultWithScene.get(Constants.timeUsage) + "ms");
                            }

                            Map<String, Object> result = factCopy.results.get(packageName);
                            if (result != null) {
                                result.put(Constants.async, false);
                                result.put(Constants.timeUsage, handlingTime);

                                TraceLogger.traceLog(">>>> [" + packageName + "] : riskLevel = "
                                        + result.get(Constants.riskLevel) + ", riskMessage = "
                                        + result.get(Constants.riskMessage) + ", usage = "
                                        + result.get(Constants.timeUsage) + "ms");
                            }

                        } else {

                            Map<String, Object> result = factCopy.results.get(packageName);
                            if (result != null) {
                                result.put(Constants.async, false);
                                result.put(Constants.timeUsage, handlingTime);
                                int riskLevel = MapUtils.getIntValue(result, Constants.riskLevel, 0);
                                if (riskLevel > 0) {
                                    TraceLogger.traceLog(">>>> [" + packageName
                                            + "] [?]: [??] riskLevel = "
                                            + result.get(Constants.riskLevel) + ", riskMessage = "
                                            + result.get(Constants.riskMessage) + ", usage = "
                                            + result.get(Constants.timeUsage) + "ms");
                                }
                            }

                            Map<String, Object> resultWithScene = factCopy.resultsGroupByScene.get(packageName);
                            if (resultWithScene != null) {
                                resultWithScene.put(Constants.async, false);
                                resultWithScene.put(Constants.timeUsage, handlingTime);

                                TraceLogger.traceLog(
                                        ">>>> [" + packageName + "] [?]: riskLevel = "
                                                + resultWithScene.get(Constants.riskLevel) + ", riskMessage = "
                                                + resultWithScene.get(Constants.riskMessage) + ", riskScene = "
                                                + resultWithScene.get(Constants.riskScene) + ", usage = "
                                                + resultWithScene.get(Constants.timeUsage) + "ms");
                            } else {
                                TraceLogger.traceLog(">>>> [" + packageName
                                        + "] [?]: ?");
                            }
                        }
                        return new RuleExecuteResultWithEvent(packageName, factCopy.results,
                                factCopy.resultsGroupByScene, factCopy.eventBody, factCopy.ext);
                    } catch (Exception e) {
                        logger.warn(_logPrefix + ". packageName: " + packageName, e);
                    } finally {
                        TraceLogger.commitTrans();
                        RuleMonitorHelper.commitTrans2Trunk(factCopy);
                        Contexts.clearLogPrefix();
                    }
                    return null;
                }

            });

        } catch (Throwable ex) {
            logger.warn(_logPrefix + ". packageName: " + packageName, ex);
        }

    }
    List<RuleExecuteResultWithEvent> rawResult = new ArrayList<RuleExecuteResultWithEvent>();
    try {
        List<Future<RuleExecuteResultWithEvent>> results = ParallelExecutorHolder.excutor.invokeAll(runs,
                timeout, TimeUnit.MILLISECONDS);
        for (Future<RuleExecuteResultWithEvent> f : results) {
            try {
                if (f.isDone()) {
                    RuleExecuteResultWithEvent r = f.get();
                    rawResult.add(r);
                } else {
                    f.cancel(true);
                }
            } catch (Exception e) {
                // ignored
            }
        }
    } catch (Exception e) {
        // ignored
    }
    if (rawResult.size() > 0) {
        for (RuleExecuteResultWithEvent item : rawResult) {
            // merge eventBody
            if (item.getEventBody() != null) {
                for (String key : item.getEventBody().keySet()) {
                    Object value = item.getEventBody().get(key);
                    if (!fact.eventBody.containsKey(key) && value != null) {
                        fact.eventBody.put(key, value);
                    }
                }
            }
            // merge ext
            if (item.getExt() != null) {
                for (String key : item.getExt().keySet()) {
                    Object value = item.getExt().get(key);
                    if (!fact.ext.containsKey(key) && value != null) {
                        fact.ext.put(key, value);
                    }
                }
            }
            // merge results
            if (item.getResults() != null) {
                fact.results.putAll(item.getResults());
            }
            // merge resultsGroupByScene
            if (item.getResultsGroupByScene() != null) {
                fact.resultsGroupByScene.putAll(item.getResultsGroupByScene());
            }
        }
    }
}

From source file:com.metamx.druid.client.cache.MemcachedCache.java
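
This example issues an asynchronous memcached get and, when the bounded wait times out, records the timeout and cancels the future with cancel(false).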

@Override
public byte[] get(NamedKey key) {
    Future<Object> future;
    try {
        future = client.asyncGet(computeKeyHash(memcachedPrefix, key));
    } catch (IllegalStateException e) {
        // operation did not get queued in time (queue is full)
        errorCount.incrementAndGet();
        log.warn(e, "Unable to queue cache operation");
        return null;
    }
    try {
        byte[] bytes = (byte[]) future.get(timeout, TimeUnit.MILLISECONDS);
        if (bytes != null) {
            hitCount.incrementAndGet();
        } else {
            missCount.incrementAndGet();
        }
        return bytes == null ? null : deserializeValue(key, bytes);
    } catch (TimeoutException e) {
        timeoutCount.incrementAndGet();
        future.cancel(false);
        return null;
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw Throwables.propagate(e);
    } catch (ExecutionException e) {
        errorCount.incrementAndGet();
        log.warn(e, "Exception pulling item from cache");
        return null;
    }
}

From source file:com.orange.clara.cloud.servicedbdumper.integrations.AbstractIntegrationTest.java
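
This example polls a service instance's last operation on a background task and cancels it with cancel(true) if the overall wait exceeds the test timeout.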

public boolean isFinishedAction(String serviceInstanceId) {

    ExecutorService executor = Executors.newCachedThreadPool();
    Callable<Boolean> task = () -> {
        while (true) {
            ServiceInstance serviceInstance = dbDumperServiceInstanceService
                    .getServiceInstance(serviceInstanceId);
            ServiceInstanceLastOperation lastOperation = serviceInstance.getServiceInstanceLastOperation();
            switch (lastOperation.getState()) {
            case "succeeded":
                return true;
            case "in progress":
                break;
            case "failed":
            case "internal error":
                return false;
            }
            // Sleep for 5 seconds to let the service do its work (the Cloud
            // Controller polls getServiceInstance roughly every 30 seconds).
            Thread.sleep(5000L);
        }
    };
    Future<Boolean> future = executor.submit(task);
    try {
        Boolean result = future.get(timeoutAction, TimeUnit.MINUTES);
        return result;
    } catch (Exception ex) {
        future.cancel(true);
        fail("Timeout reached.", ex);
    }
    return false;
}

From source file:org.apache.hadoop.hdfs.server.namenode.ha.StandbyCheckpointer.java
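
This example uploads a saved checkpoint on a single-thread executor and cancels the upload with cancel(true) when the waiting thread is interrupted, since the background thread may be blocked in the throttler (see HDFS-4816).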

private void doCheckpoint() throws InterruptedException, IOException {
    assert canceler != null;
    final long txid;
    final NameNodeFile imageType;

    // Acquire cpLock to make sure no one is modifying the name system.
    // It does not need the full namesystem write lock, since the only thing
    // that modifies namesystem on standby node is edit log replaying.
    namesystem.cpLockInterruptibly();
    try {
        assert namesystem.getEditLog()
                .isOpenForRead() : "Standby Checkpointer should only attempt a checkpoint when "
                        + "NN is in standby mode, but the edit logs are in an unexpected state";

        FSImage img = namesystem.getFSImage();

        long prevCheckpointTxId = img.getStorage().getMostRecentCheckpointTxId();
        long thisCheckpointTxId = img.getLastAppliedOrWrittenTxId();
        assert thisCheckpointTxId >= prevCheckpointTxId;
        if (thisCheckpointTxId == prevCheckpointTxId) {
            LOG.info("A checkpoint was triggered but the Standby Node has not "
                    + "received any transactions since the last checkpoint at txid " + thisCheckpointTxId
                    + ". Skipping...");
            return;
        }

        if (namesystem.isRollingUpgrade() && !namesystem.getFSImage().hasRollbackFSImage()) {
            // if we will do rolling upgrade but have not created the rollback image
            // yet, name this checkpoint as fsimage_rollback
            imageType = NameNodeFile.IMAGE_ROLLBACK;
        } else {
            imageType = NameNodeFile.IMAGE;
        }
        img.saveNamespace(namesystem, imageType, canceler);
        txid = img.getStorage().getMostRecentCheckpointTxId();
        assert txid == thisCheckpointTxId : "expected to save checkpoint at txid=" + thisCheckpointTxId
                + " but instead saved at txid=" + txid;

        // Save the legacy OIV image, if the output dir is defined.
        String outputDir = checkpointConf.getLegacyOivImageDir();
        if (outputDir != null && !outputDir.isEmpty()) {
            img.saveLegacyOIVImage(namesystem, outputDir, canceler);
        }
    } finally {
        namesystem.cpUnlock();
    }

    // Upload the saved checkpoint back to the active
    // Do this in a separate thread to avoid blocking transition to active
    // See HDFS-4816
    ExecutorService executor = Executors.newSingleThreadExecutor(uploadThreadFactory);
    Future<Void> upload = executor.submit(new Callable<Void>() {
        @Override
        public Void call() throws IOException {
            TransferFsImage.uploadImageFromStorage(activeNNAddress, conf, namesystem.getFSImage().getStorage(),
                    imageType, txid, canceler);
            return null;
        }
    });
    executor.shutdown();
    try {
        upload.get();
    } catch (InterruptedException e) {
        // The background thread may be blocked waiting in the throttler, so
        // interrupt it.
        upload.cancel(true);
        throw e;
    } catch (ExecutionException e) {
        throw new IOException("Exception during image upload: " + e.getMessage(), e.getCause());
    }
}

From source file:com.palantir.paxos.PaxosConsensusFastTest.java
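
This test starts a background task that cycles nodes up and down, confirms leadership can still be gained, and then stops the task with cancel(true) before shutting down the executor.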

@Test
public void loseQuorumDiffToken() throws InterruptedException {
    for (int i = QUORUM_SIZE; i < NUM_POTENTIAL_LEADERS; i++) {
        state.goDown(i);
    }
    LeadershipToken t = state.gainLeadership(0);
    state.goDown(QUORUM_SIZE - 1);
    ExecutorService exec = PTExecutors.newSingleThreadExecutor();
    Future<Void> f = exec.submit(new Callable<Void>() {
        @Override
        public Void call() {
            int i = QUORUM_SIZE - 1;
            while (!Thread.currentThread().isInterrupted()) {
                int next = i + 1;
                if (next == NUM_POTENTIAL_LEADERS) {
                    next = QUORUM_SIZE - 1;
                }
                state.goDown(next);
                state.comeUp(i);
                i = next;
            }
            return null;
        }
    });
    // Don't check leadership immediately after gaining it, since quorum might get lost.
    LeadershipToken token2 = state.gainLeadershipWithoutCheckingAfter(0);
    assertTrue("leader can confirm leadership with quorum", t.sameAs(token2));
    f.cancel(true);
    exec.shutdown();
    exec.awaitTermination(10, TimeUnit.SECONDS);
    for (int i = 0; i < NUM_POTENTIAL_LEADERS; i++) {
        state.comeUp(i);
    }
}

From source file:org.hippoecm.repository.quartz.JCRJobStore.java
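
This example stops a lock keep-alive task by removing its future from a map and returning the result of cancel(true).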

private boolean stopLockKeepAlive(final String identifier) {
    final Future<?> future = keepAlives.remove(identifier);
    if (future != null) {
        return future.cancel(true);
    }
    return false;
}

From source file:com.amazonaws.mobileconnectors.s3.transferutility.UploadTask.java
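
This example uploads the parts of a multipart S3 transfer in parallel and, if the wait for the part futures is interrupted (for example by a user pause), cancels every outstanding part with cancel(true).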

private Boolean uploadMultipartAndWaitForCompletion() throws ExecutionException {
    /*
     * For a new multipart upload, upload.mMultipartId should be null. If
     * it's a resumed upload, upload.mMultipartId would not be null.
     */
    long bytesAlreadyTransferred = 0;
    if (upload.multipartId == null || upload.multipartId.isEmpty()) {
        final PutObjectRequest putObjectRequest = createPutObjectRequest(upload);
        TransferUtility.appendMultipartTransferServiceUserAgentString(putObjectRequest);
        try {
            upload.multipartId = initiateMultipartUpload(putObjectRequest);
        } catch (final AmazonClientException ace) {
            LOGGER.error("Error initiating multipart upload: " + upload.id + " due to " + ace.getMessage(),
                    ace);
            updater.throwError(upload.id, ace);
            updater.updateState(upload.id, TransferState.FAILED);
            return false;
        }
        dbUtil.updateMultipartId(upload.id, upload.multipartId);
    } else {
        /*
         * For a resumed upload, we should calculate the bytes already
         * transferred.
         */
        bytesAlreadyTransferred = dbUtil.queryBytesTransferredByMainUploadId(upload.id);
        if (bytesAlreadyTransferred > 0) {
            LOGGER.debug(String.format("Resume transfer %d from %d bytes", upload.id, bytesAlreadyTransferred));
        }
    }
    updater.updateProgress(upload.id, bytesAlreadyTransferred, upload.bytesTotal);

    final List<UploadPartRequest> requestList = dbUtil.getNonCompletedPartRequestsFromDB(upload.id,
            upload.multipartId);
    LOGGER.debug("multipart upload " + upload.id + " in " + requestList.size() + " parts.");
    final ArrayList<Future<Boolean>> futures = new ArrayList<Future<Boolean>>();
    for (final UploadPartRequest request : requestList) {
        TransferUtility.appendMultipartTransferServiceUserAgentString(request);
        request.setGeneralProgressListener(updater.newProgressListener(upload.id));
        futures.add(TransferThreadPool.submitTask(new UploadPartTask(request, s3, dbUtil, networkInfo)));
    }
    try {
        boolean isSuccess = true;
        /*
         * Future.get() will block the current thread until the method
         * returns.
         */
        for (final Future<Boolean> f : futures) {
            // UploadPartTask returns false when it's interrupted by user
            // and the state is set by caller
            final boolean b = f.get();
            isSuccess &= b;
        }
        if (!isSuccess) {
            return false;
        }
    } catch (final InterruptedException e) {
        /*
         * Future.get() will catch InterruptedException, but it's not a
         * failure, it may be caused by a pause operation from applications.
         */
        for (final Future<?> f : futures) {
            f.cancel(true);
        }
        // abort by user
        LOGGER.debug("Transfer " + upload.id + " is interrupted by user");
        return false;
    } catch (final ExecutionException ee) {
        // handle pause, cancel, etc
        boolean isNetworkInterrupted = false;
        if (ee.getCause() != null && ee.getCause() instanceof Exception) {
            // check for network interruption and pause the transfer instead of failing them
            isNetworkInterrupted = dbUtil.checkWaitingForNetworkPartRequestsFromDB(upload.id);
            if (isNetworkInterrupted) {
                LOGGER.debug("Network Connection Interrupted: Transfer " + upload.id + " waits for network");
                updater.updateState(upload.id, TransferState.WAITING_FOR_NETWORK);
                return false;
            }
            final Exception e = (Exception) ee.getCause();
            if (RetryUtils.isInterrupted(e)) {
                /*
                 * thread is interrupted by user. don't update the state as
                 * it's set by caller who interrupted
                 */
                LOGGER.debug("Transfer " + upload.id + " is interrupted by user");
                return false;
            } else if (e.getCause() != null && e.getCause() instanceof IOException
                    && !networkInfo.isNetworkConnected()) {
                LOGGER.debug("Transfer " + upload.id + " waits for network");
                updater.updateState(upload.id, TransferState.WAITING_FOR_NETWORK);
            }
            updater.throwError(upload.id, e);
        }
        updater.updateState(upload.id, TransferState.FAILED);
        return false;
    }

    try {
        completeMultiPartUpload(upload.id, upload.bucketName, upload.key, upload.multipartId);
        updater.updateProgress(upload.id, upload.bytesTotal, upload.bytesTotal);
        updater.updateState(upload.id, TransferState.COMPLETED);
        return true;
    } catch (final AmazonClientException ace) {
        LOGGER.error("Failed to complete multipart: " + upload.id + " due to " + ace.getMessage(), ace);
        updater.throwError(upload.id, ace);
        updater.updateState(upload.id, TransferState.FAILED);
        return false;
    }
}

From source file:org.apache.hadoop.hbase.procedure.ProcedureCoordinator.java
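
This example submits a procedure to a pool and, if the pool rejects the submission, propagates the error and defensively cancels the future with cancel(true) in case it was partially submitted.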

/**
 * Submit an procedure to kick off its dependent subprocedures.
 * @param proc Procedure to execute
 * @return <tt>true</tt> if the procedure was started correctly, <tt>false</tt> if the
 *         procedure or any subprocedures could not be started.  Failure could be due to
 *         submitting a procedure multiple times (or one with the same name), or some sort
 *         of IO problem.  On errors, the procedure's monitor holds a reference to the exception
 *         that caused the failure.
 */
boolean submitProcedure(Procedure proc) {
    // if the submitted procedure was null, then we don't want to run it
    if (proc == null) {
        return false;
    }
    String procName = proc.getName();

    // make sure we aren't already running a procedure of that name
    synchronized (procedures) {
        Procedure oldProc = procedures.get(procName);
        if (oldProc != null) {
            // procedures are always eventually completed on both successful and failed execution
            try {
                if (!oldProc.isCompleted()) {
                    LOG.warn("Procedure " + procName + " currently running.  Rejecting new request");
                    return false;
                } else {
                    LOG.debug("Procedure " + procName
                            + " was in running list but was completed.  Accepting new attempt.");
                    procedures.remove(procName);
                }
            } catch (ForeignException e) {
                LOG.debug("Procedure " + procName
                        + " was in running list but has exception.  Accepting new attempt.");
                procedures.remove(procName);
            }
        }
    }

    // kick off the procedure's execution in a separate thread
    Future<Void> f = null;
    try {
        synchronized (procedures) {
            this.procedures.put(procName, proc);
            f = this.pool.submit(proc);
        }
        return true;
    } catch (RejectedExecutionException e) {
        LOG.warn("Procedure " + procName + " rejected by execution pool.  Propagating error and "
                + "cancelling operation.", e);
        // Remove the procedure from the list since is not started
        this.procedures.remove(procName);
        // the thread pool is full and we can't run the procedure
        proc.receive(new ForeignException(procName, e));

        // cancel procedure proactively
        if (f != null) {
            f.cancel(true);
        }
    }
    return false;
}

From source file:org.mule.modules.sqs.SQSConnector.java
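
This example receives SQS messages asynchronously in a loop and cancels the in-flight receive future with cancel(true) when the flow callback fails or the polling thread is interrupted.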

/**
 * Attempts to receive messages from a queue. Every attribute of the incoming
 * messages will be added as inbound properties. The following properties
 * will also be added:
 * <p/>
 * sqs.message.id = containing the message identification
sqs.message.receipt.handle = containing the message receipt handle
 * <p/>
 * {@sample.xml ../../../doc/mule-module-sqs.xml.sample sqs:receive-messages}
 *
 * @param callback          Callback to call when new messages are available.
 * @param visibilityTimeout the duration (in seconds) the retrieved messages are hidden from
 *                          subsequent calls to retrieve.
 * @param preserveMessages  Flag that indicates if you want to preserve the messages
 *                          in the queue. False by default, so the messages are
 *                          going to be deleted.
 * @param pollPeriod        Deprecated. Time in milliseconds to wait between polls (when no messages were retrieved).
 *                          Default period is 1000 ms.
 * @param numberOfMessages  the number of messages to be retrieved on each call (10 messages max).
 *                          By default, 1 message will be retrieved.
 * @param queueUrl          the queue URL where messages are to be fetched from.
 * @throws AmazonClientException  If any internal errors are encountered inside the client while
 *                                attempting to make the request or handle the response.  For example
 *                                if a network connection is not available.
 * @throws AmazonServiceException If an error response is returned by AmazonSQS indicating
 *                                either a problem with the data in the request, or a server side issue.
 */
@Source
public void receiveMessages(SourceCallback callback, @Default("30") Integer visibilityTimeout,
        @Default("false") Boolean preserveMessages, @Optional Long pollPeriod,
        @Default("1") Integer numberOfMessages, @Optional String queueUrl) throws AmazonServiceException {
    if (pollPeriod != null) {
        logger.warn(
                "The pollPeriod parameter has been deprecated and will be removed in future versions of this "
                        + "connector. Messages are received asynchronously, not by polling SQS.");
    }

    ReceiveMessageRequest receiveMessageRequest = new ReceiveMessageRequest().withAttributeNames("All")
            .withMessageAttributeNames("All");
    receiveMessageRequest.setQueueUrl(getQueueUrl(queueUrl));

    if (visibilityTimeout != null) {
        receiveMessageRequest.setVisibilityTimeout(visibilityTimeout);
    }
    receiveMessageRequest.setMaxNumberOfMessages(numberOfMessages);

    while (!Thread.currentThread().isInterrupted()) {
        Future<ReceiveMessageResult> futureMessages = msgQueueAsync.receiveMessageAsync(receiveMessageRequest);
        try {
            List<Message> receivedMessages = futureMessages.get().getMessages();
            for (Message m : receivedMessages) {
                try {
                    callback.process(m.getBody(), createProperties(m));
                } catch (Exception e) {
                    // If an exception is thrown here, we cannot communicate
                    // with the Mule flow, so there is nothing we can do to
                    // handle it.
                    futureMessages.cancel(true);
                    return;
                }
                if (!preserveMessages) {
                    msgQueueAsync.deleteMessageAsync(
                            new DeleteMessageRequest(getQueueUrl(queueUrl), m.getReceiptHandle()));
                }
            }
        } catch (InterruptedException e) {
            futureMessages.cancel(true);
            return;
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
        }
    }
}