Usage examples for the com.amazonaws.services.sqs.model.DeleteMessageBatchRequest no-argument constructor.
public DeleteMessageBatchRequest()
From source file:com.allogy.amazonaws.elasticbeanstalk.worker.simulator.application.QueueManager.java
License:Apache License
/**
 * Deletes the given messages from the configured SQS queue in a single batch call.
 * No-op when the stream yields no messages (SQS rejects empty batch requests).
 *
 * @param messages wrapped SQS messages to acknowledge/delete
 */
public void deleteMessages(Stream<MessageWrapper> messages) {
    // One batch entry per message, keyed by the SQS message id.
    final List<DeleteMessageBatchRequestEntry> entries = messages
            .map(MessageWrapper::getMessage)
            .map(message -> new DeleteMessageBatchRequestEntry()
                    .withId(message.getMessageId())
                    .withReceiptHandle(message.getReceiptHandle()))
            .collect(Collectors.toList());
    if (entries.isEmpty()) {
        return;
    }
    final DeleteMessageBatchRequest request = new DeleteMessageBatchRequest()
            .withQueueUrl(queueUrl)
            .withEntries(entries);
    logger.debug("About to delete {} messages from queue. queueUrl={}", entries.size(), queueUrl);
    amazonSQS.deleteMessageBatch(request);
}
From source file:com.eucalyptus.portal.SimpleQueueClientManager.java
License:Open Source License
public List<Message> receiveAllMessages(final String queueName, final boolean shouldDelete) throws Exception { try {/*w w w .j a v a 2s. c om*/ final int visibilityTimeout = 600; final int visibilityBuffer = 300; final long startTime = System.currentTimeMillis(); final List<Message> messages = Lists.newArrayList(); while ((System.currentTimeMillis() - startTime) < ((visibilityTimeout - visibilityBuffer) * 1000L)) { final ReceiveMessageRequest req = new ReceiveMessageRequest(); req.setQueueUrl(getQueueUrl(queueName)); req.setMaxNumberOfMessages(10); req.setWaitTimeSeconds(0); req.setVisibilityTimeout(visibilityTimeout); final ReceiveMessageResult result = getSimpleQueueClient().receiveMessage(req); final List<Message> received = result.getMessages(); if (received == null || received.size() <= 0) break; messages.addAll(received); } // TODO: Use PurgeQueue if (shouldDelete) { for (final List<Message> partition : Iterables.partition(messages, 10)) { final DeleteMessageBatchRequest delReq = new DeleteMessageBatchRequest(); delReq.setQueueUrl(getQueueUrl(queueName)); delReq.setEntries(partition.stream().map(m -> new DeleteMessageBatchRequestEntry() .withId(m.getMessageId()).withReceiptHandle(m.getReceiptHandle())) .collect(Collectors.toList())); getSimpleQueueClient().deleteMessageBatch(delReq); } } return messages; } catch (final AmazonServiceException ex) { throw new Exception("Failed to receive messages due to service error", ex); } catch (final AmazonClientException ex) { throw new Exception("Failed to receive messages due to client error", ex); } }
From source file:com.netflix.bdp.s3mper.alert.impl.AlertJanitor.java
License:Apache License
/**
 * Batch-deletes the given messages from the specified queue URL.
 *
 * @param queue    SQS queue URL to delete from
 * @param messages messages to acknowledge; each contributes one batch entry
 */
private void delete(String queue, List<Message> messages) {
    // Translate each received message into a batch-delete entry keyed by id.
    List<DeleteMessageBatchRequestEntry> entries = new ArrayList<DeleteMessageBatchRequestEntry>();
    for (Message message : messages) {
        DeleteMessageBatchRequestEntry entry = new DeleteMessageBatchRequestEntry();
        entry.setId(message.getMessageId());
        entry.setReceiptHandle(message.getReceiptHandle());
        entries.add(entry);
    }
    log.info(format("Deleting %s messages", entries.size()));
    DeleteMessageBatchRequest request = new DeleteMessageBatchRequest()
            .withQueueUrl(queue)
            .withEntries(entries);
    sqs.deleteMessageBatch(request);
}
From source file:com.netflix.conductor.contribs.queue.sqs.SQSObservableQueue.java
License:Apache License
/**
 * Batch-deletes the given messages from the queue and reports which ids SQS
 * failed to delete.
 *
 * @param messages messages to delete; may be null or empty
 * @return ids of entries SQS reported as failed, or null when there was
 *         nothing to delete (historical contract — callers may null-check)
 */
private List<String> delete(List<Message> messages) {
    if (messages == null || messages.isEmpty()) {
        return null;
    }
    // Build the entry list explicitly instead of mutating the internal list
    // returned by getEntries(), which is an SDK implementation detail.
    List<DeleteMessageBatchRequestEntry> entries = messages.stream()
            .map(m -> new DeleteMessageBatchRequestEntry().withId(m.getId()).withReceiptHandle(m.getReceipt()))
            .collect(Collectors.toList());
    DeleteMessageBatchRequest batch = new DeleteMessageBatchRequest()
            .withQueueUrl(queueURL)
            .withEntries(entries);
    DeleteMessageBatchResult result = client.deleteMessageBatch(batch);
    // Surface partial failures so the caller can retry or alert.
    List<String> failures = result.getFailed().stream().map(fm -> fm.getId()).collect(Collectors.toList());
    logger.debug("failed to delete: {}", failures);
    return failures;
}
From source file:com.pinterest.teletraan.worker.LaunchEventCollector.java
License:Apache License
public void collectEvents() throws Exception { while (true) { ReceiveMessageRequest request = new ReceiveMessageRequest(); request.setMaxNumberOfMessages(10); ReceiveMessageResult result = sqsClient.receiveMessage(request); List<Message> messageList = result.getMessages(); if (messageList.isEmpty()) { LOG.info("No more Launch activity available at the moment."); return; }// w w w.java2 s . c o m LOG.info(String.format("Collect %d events from AWS SQS.", messageList.size())); ArrayList<DeleteMessageBatchRequestEntry> entries = new ArrayList<>(); for (Message message : messageList) { try { boolean hasProcessed = processMessage(message); if (hasProcessed) { DeleteMessageBatchRequestEntry entry = new DeleteMessageBatchRequestEntry(); entry.setId(message.getMessageId()); entry.setReceiptHandle(message.getReceiptHandle()); entries.add(entry); } } catch (Exception ex) { LOG.error("Failed to process SQS message:", message, ex); } } if (!entries.isEmpty()) { DeleteMessageBatchRequest deleteMessageBatchRequest = new DeleteMessageBatchRequest(); deleteMessageBatchRequest.setEntries(entries); LOG.debug(String.format("Successful process %d messages, deleting them from SQS.", entries.size())); sqsClient.deleteMessageBatch(deleteMessageBatchRequest); } } }
From source file:com.plumbee.flume.source.sqs.BatchConsumer.java
License:Apache License
/**
 * Consumer worker loop: repeatedly calls process() to receive/flush SQS
 * messages, applying exponential backoff on BACKOFF status and a maximum
 * penalty sleep after any exception. Exits when the thread is interrupted.
 */
@Override
public void run() {
    // Initialize variables.
    receiveMessageRequest = new ReceiveMessageRequest();
    receiveMessageRequest.setQueueUrl(queueURL);
    receiveMessageRequest.setWaitTimeSeconds(queueRecvPollingTimeout);
    receiveMessageRequest.setVisibilityTimeout(queueRecvVisabilityTimeout);
    receiveMessageRequest.withAttributeNames(SQS_ATTR_SENTTIMESTAMP);
    receiveMessageRequest.withAttributeNames(SQS_ATTR_APPROXRECEIVECOUNT);
    // Delete request targets the same queue the receive request reads from.
    deleteMessageBatchRequest = new DeleteMessageBatchRequest();
    deleteMessageBatchRequest.setQueueUrl(receiveMessageRequest.getQueueUrl());
    batchEventList = Lists.newArrayListWithCapacity(batchSize);
    batchDeleteRequestEntries = Lists.newArrayListWithCapacity(batchSize);
    // Process loop. Adapted from PollableSourceRunner, required to
    // bypass hardcoded values for maxBackOffSleep and
    // backOffSleepIncrement.
    while (!Thread.currentThread().isInterrupted()) {
        sourceCounter.incrementRunnerPollCount();
        try {
            if (process().equals(Status.BACKOFF)) {
                // Linear-growth backoff, capped at maxBackOffSleep.
                sourceCounter.incrementRunnerBackoffCount();
                consecutiveBackOffs++;
                Thread.sleep(Math.min(consecutiveBackOffs * backOffSleepIncrement, maxBackOffSleep));
            } else {
                consecutiveBackOffs = 0;
            }
            // Successful iteration: skip the penalty sleep below.
            continue;
        } catch (AbortedException e) {
            // SDK call aborted by interruption; restore interrupt status and exit.
            sourceCounter.incrementRunnerInterruptCount();
            Thread.currentThread().interrupt();
            break;
        } catch (InterruptedException e) {
            sourceCounter.incrementRunnerInterruptCount();
            Thread.currentThread().interrupt();
            break;
        } catch (EventDeliveryException e) {
            sourceCounter.incrementRunnerDeliveryExceptionCount();
            LOGGER.error("Unable to deliver event, sleeping for " + MAX_BACKOFF_SLEEP + "ms", e);
        } catch (ChannelException e) {
            sourceCounter.incrementRunnerChannelExceptionCount();
            LOGGER.warn("Channel exception, sleeping for " + MAX_BACKOFF_SLEEP + "ms", e);
        } catch (Exception e) {
            sourceCounter.incrementRunnerUnhandledExceptionCount();
            LOGGER.error("Unhandled exception, sleeping for " + MAX_BACKOFF_SLEEP + "ms", e);
        }
        // An exception occurred, commence throttling (max penalty).
        try {
            Thread.sleep(MAX_BACKOFF_SLEEP);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }
    LOGGER.info("AmazonSQS consumer interrupted, {} messages in flight", batchDeleteRequestEntries.size());
}
From source file:com.streamsets.pipeline.stage.origin.sqs.SqsConsumerWorkerCallable.java
License:Apache License
private void batchFlushHelper(boolean startNew) throws StageException { if (batchContext != null) { context.processBatch(batchContext); if (!context.isPreview() && commitQueueUrlsToMessages.size() > 0) { for (String queueUrl : commitQueueUrlsToMessages.keySet()) { DeleteMessageBatchRequest deleteRequest = new DeleteMessageBatchRequest() .withQueueUrl(queueUrl); List<DeleteMessageBatchRequestEntry> deleteRequestEntries = new LinkedList<>(); commitQueueUrlsToMessages.get(queueUrl).forEach(message -> { deleteRequestEntries.add(new DeleteMessageBatchRequestEntry() .withReceiptHandle(message.getReceiptHandle()).withId(message.getMessageId())); // TODO: need to add receiptHandle, and figure out what that is });/*from w ww . ja va 2 s. c o m*/ deleteRequest.setEntries(deleteRequestEntries); Future<DeleteMessageBatchResult> deleteResultFuture = sqsAsync .deleteMessageBatchAsync(deleteRequest); try { DeleteMessageBatchResult deleteResult = deleteResultFuture.get(); if (deleteResult.getFailed() != null) { deleteResult.getFailed().forEach(failed -> LOG.error( "Failed to delete message ID {} from queue %s with code {}, sender fault {}", failed.getId(), queueUrl, failed.getCode(), failed.getSenderFault())); } if (LOG.isDebugEnabled()) { if (deleteResult.getSuccessful() != null) { deleteResult.getSuccessful() .forEach(success -> LOG.debug( "Successfully deleted message ID {} from queue {}", success.getId(), queueUrl)); } } } catch (InterruptedException e) { LOG.error("InterruptedException trying to delete SQS messages with IDs {} in queue {}: {}", getPendingDeleteMessageIds(queueUrl), queueUrl, e.getMessage(), e); Thread.currentThread().interrupt(); break; } catch (ExecutionException e) { String messageIds = getPendingDeleteMessageIds(queueUrl); LOG.error(Errors.SQS_08.getMessage(), messageIds, queueUrl, e.getMessage(), e); throw new StageException(Errors.SQS_08, messageIds, queueUrl, e.getMessage(), e); } } } commitQueueUrlsToMessages.clear(); } batchRecordCount = 0; if 
(startNew) { batchContext = context.startBatch(); lastBatchStartTimestamp = Clock.systemUTC().millis(); } }
From source file:org.apache.nifi.processors.aws.sqs.DeleteSQS.java
License:Apache License
/**
 * Deletes up to one batch of FlowFiles' corresponding SQS messages, using the
 * receipt handle carried by each FlowFile. All FlowFiles go to REL_SUCCESS on
 * success, or are penalized and routed to REL_FAILURE on any delete error.
 */
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    List<FlowFile> flowFiles = session.get(1);
    if (flowFiles.isEmpty()) {
        return;
    }
    // Queue URL expression is evaluated against the first FlowFile only; all
    // FlowFiles in this batch are assumed to target the same queue.
    final FlowFile firstFlowFile = flowFiles.get(0);
    final String queueUrl = context.getProperty(QUEUE_URL).evaluateAttributeExpressions(firstFlowFile)
            .getValue();
    final AmazonSQSClient client = getClient();
    final DeleteMessageBatchRequest request = new DeleteMessageBatchRequest();
    request.setQueueUrl(queueUrl);
    final List<DeleteMessageBatchRequestEntry> entries = new ArrayList<>(flowFiles.size());
    for (final FlowFile flowFile : flowFiles) {
        final DeleteMessageBatchRequestEntry entry = new DeleteMessageBatchRequestEntry();
        // BUG FIX: SQS requires each batch entry to carry a unique Id; without
        // one the whole DeleteMessageBatch request is rejected by the service.
        entry.setId(String.valueOf(entries.size()));
        entry.setReceiptHandle(
                context.getProperty(RECEIPT_HANDLE).evaluateAttributeExpressions(flowFile).getValue());
        entries.add(entry);
    }
    request.setEntries(entries);
    try {
        client.deleteMessageBatch(request);
        getLogger().info("Successfully deleted {} objects from SQS", new Object[] { flowFiles.size() });
        session.transfer(flowFiles, REL_SUCCESS);
    } catch (final Exception e) {
        getLogger().error("Failed to delete {} objects from SQS due to {}",
                new Object[] { flowFiles.size(), e });
        // Penalize so the framework delays retrying these FlowFiles.
        final List<FlowFile> penalizedFlowFiles = new ArrayList<>();
        for (final FlowFile flowFile : flowFiles) {
            penalizedFlowFiles.add(session.penalize(flowFile));
        }
        session.transfer(penalizedFlowFiles, REL_FAILURE);
    }
}
From source file:org.apache.nifi.processors.aws.sqs.GetSQS.java
License:Apache License
/**
 * Receives a batch of messages from SQS, emits one FlowFile per message (body
 * as content, SQS attributes/message-attributes as FlowFile attributes), and
 * optionally commits the session then batch-deletes the messages.
 */
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    final String queueUrl = context.getProperty(DYNAMIC_QUEUE_URL).evaluateAttributeExpressions().getValue();
    final AmazonSQSClient client = getClient();
    final ReceiveMessageRequest request = new ReceiveMessageRequest();
    // Request all system attributes and all user message attributes.
    request.setAttributeNames(Collections.singleton("All"));
    request.setMessageAttributeNames(Collections.singleton("All"));
    request.setMaxNumberOfMessages(context.getProperty(BATCH_SIZE).asInteger());
    request.setVisibilityTimeout(
            context.getProperty(VISIBILITY_TIMEOUT).asTimePeriod(TimeUnit.SECONDS).intValue());
    request.setQueueUrl(queueUrl);
    request.setWaitTimeSeconds(
            context.getProperty(RECEIVE_MSG_WAIT_TIME).asTimePeriod(TimeUnit.SECONDS).intValue());
    // Charset used to encode the message body into the FlowFile content.
    final Charset charset = Charset.forName(context.getProperty(CHARSET).getValue());
    final ReceiveMessageResult result;
    try {
        result = client.receiveMessage(request);
    } catch (final Exception e) {
        getLogger().error("Failed to receive messages from Amazon SQS due to {}", new Object[] { e });
        context.yield();
        return;
    }
    final List<Message> messages = result.getMessages();
    if (messages.isEmpty()) {
        // Nothing available; yield so the framework backs off this processor.
        context.yield();
        return;
    }
    final boolean autoDelete = context.getProperty(AUTO_DELETE).asBoolean();
    for (final Message message : messages) {
        FlowFile flowFile = session.create();
        final Map<String, String> attributes = new HashMap<>();
        for (final Map.Entry<String, String> entry : message.getAttributes().entrySet()) {
            attributes.put("sqs." + entry.getKey(), entry.getValue());
        }
        for (final Map.Entry<String, MessageAttributeValue> entry : message.getMessageAttributes().entrySet()) {
            attributes.put("sqs." + entry.getKey(), entry.getValue().getStringValue());
        }
        // MD5OfBody is the digest SQS computed over the message body.
        attributes.put("hash.value", message.getMD5OfBody());
        attributes.put("hash.algorithm", "md5");
        attributes.put("sqs.message.id", message.getMessageId());
        attributes.put("sqs.receipt.handle", message.getReceiptHandle());
        flowFile = session.putAllAttributes(flowFile, attributes);
        flowFile = session.write(flowFile, new OutputStreamCallback() {
            @Override
            public void process(final OutputStream out) throws IOException {
                out.write(message.getBody().getBytes(charset));
            }
        });
        session.transfer(flowFile, REL_SUCCESS);
        session.getProvenanceReporter().receive(flowFile, queueUrl);
        getLogger().info("Successfully received {} from Amazon SQS", new Object[] { flowFile });
    }
    if (autoDelete) {
        // If we want to auto-delete messages, we must first commit the session to ensure that the data
        // is persisted in NiFi's repositories.
        session.commit();
        final DeleteMessageBatchRequest deleteRequest = new DeleteMessageBatchRequest();
        deleteRequest.setQueueUrl(queueUrl);
        final List<DeleteMessageBatchRequestEntry> deleteRequestEntries = new ArrayList<>();
        for (final Message message : messages) {
            final DeleteMessageBatchRequestEntry entry = new DeleteMessageBatchRequestEntry();
            entry.setId(message.getMessageId());
            entry.setReceiptHandle(message.getReceiptHandle());
            deleteRequestEntries.add(entry);
        }
        deleteRequest.setEntries(deleteRequestEntries);
        try {
            client.deleteMessageBatch(deleteRequest);
        } catch (final Exception e) {
            // Deletion failure after commit means the messages may be delivered
            // again once their visibility timeout expires.
            getLogger().error(
                    "Received {} messages from Amazon SQS but failed to delete the messages; these messages"
                            + " may be duplicated. Reason for deletion failure: {}",
                    new Object[] { messages.size(), e });
        }
    }
}
From source file:org.duracloud.common.queue.aws.SQSTaskQueue.java
License:Apache License
/**
 * Deletes the given tasks' backing messages from the SQS queue in one batch.
 * Partial failures are logged, not thrown.
 *
 * @param tasks at most 10 tasks, each carrying MSG_ID and RECEIPT_HANDLE properties
 * @throws TaskException         wrapping any AmazonServiceException
 * @throws IllegalArgumentException if more than 10 tasks are supplied
 */
@Override
public void deleteTasks(Set<Task> tasks) throws TaskException {
    // SQS batch operations accept at most 10 entries per request.
    if (tasks.size() > 10) {
        throw new IllegalArgumentException("task set must contain 10 or fewer tasks");
    }
    try {
        List<DeleteMessageBatchRequestEntry> batchEntries = new ArrayList<>(tasks.size());
        for (Task task : tasks) {
            DeleteMessageBatchRequestEntry batchEntry = new DeleteMessageBatchRequestEntry();
            batchEntry.setId(task.getProperty(MsgProp.MSG_ID.name()));
            batchEntry.setReceiptHandle(task.getProperty(MsgProp.RECEIPT_HANDLE.name()));
            batchEntries.add(batchEntry);
        }
        DeleteMessageBatchRequest batchRequest = new DeleteMessageBatchRequest();
        batchRequest.setQueueUrl(queueUrl);
        batchRequest.setEntries(batchEntries);
        DeleteMessageBatchResult batchResult = sqsClient.deleteMessageBatch(batchRequest);
        List<BatchResultErrorEntry> failures = batchResult.getFailed();
        if (failures != null && failures.size() > 0) {
            for (BatchResultErrorEntry failure : failures) {
                log.info("failed to delete message: " + failure);
            }
        }
        for (DeleteMessageBatchResultEntry deleted : batchResult.getSuccessful()) {
            log.info("successfully deleted {}", deleted);
        }
    } catch (AmazonServiceException se) {
        log.error("failed to batch delete tasks " + tasks + ": " + se.getMessage(), se);
        throw new TaskException(se);
    }
}