List of usage examples for com.amazonaws.services.sqs.model.DeleteMessageBatchRequestEntry — constructor: public DeleteMessageBatchRequestEntry()
public DeleteMessageBatchRequestEntry()
From source file:com.allogy.amazonaws.elasticbeanstalk.worker.simulator.application.QueueManager.java
License:Apache License
public void deleteMessages(Stream<MessageWrapper> messages) { List<DeleteMessageBatchRequestEntry> deleteEntries = messages .map(MessageWrapper::getMessage).map(m -> new DeleteMessageBatchRequestEntry() .withId(m.getMessageId()).withReceiptHandle(m.getReceiptHandle())) .collect(Collectors.toList()); if (deleteEntries.isEmpty()) return;//www.j av a2s .c o m DeleteMessageBatchRequest deleteRequest = new DeleteMessageBatchRequest().withQueueUrl(queueUrl) .withEntries(deleteEntries); logger.debug("About to delete {} messages from queue. queueUrl={}", deleteEntries.size(), queueUrl); amazonSQS.deleteMessageBatch(deleteRequest); }
From source file:com.eucalyptus.portal.SimpleQueueClientManager.java
License:Open Source License
public List<Message> receiveAllMessages(final String queueName, final boolean shouldDelete) throws Exception { try {/*from w ww. ja va 2 s .c o m*/ final int visibilityTimeout = 600; final int visibilityBuffer = 300; final long startTime = System.currentTimeMillis(); final List<Message> messages = Lists.newArrayList(); while ((System.currentTimeMillis() - startTime) < ((visibilityTimeout - visibilityBuffer) * 1000L)) { final ReceiveMessageRequest req = new ReceiveMessageRequest(); req.setQueueUrl(getQueueUrl(queueName)); req.setMaxNumberOfMessages(10); req.setWaitTimeSeconds(0); req.setVisibilityTimeout(visibilityTimeout); final ReceiveMessageResult result = getSimpleQueueClient().receiveMessage(req); final List<Message> received = result.getMessages(); if (received == null || received.size() <= 0) break; messages.addAll(received); } // TODO: Use PurgeQueue if (shouldDelete) { for (final List<Message> partition : Iterables.partition(messages, 10)) { final DeleteMessageBatchRequest delReq = new DeleteMessageBatchRequest(); delReq.setQueueUrl(getQueueUrl(queueName)); delReq.setEntries(partition.stream().map(m -> new DeleteMessageBatchRequestEntry() .withId(m.getMessageId()).withReceiptHandle(m.getReceiptHandle())) .collect(Collectors.toList())); getSimpleQueueClient().deleteMessageBatch(delReq); } } return messages; } catch (final AmazonServiceException ex) { throw new Exception("Failed to receive messages due to service error", ex); } catch (final AmazonClientException ex) { throw new Exception("Failed to receive messages due to client error", ex); } }
From source file:com.netflix.bdp.s3mper.alert.impl.AlertJanitor.java
License:Apache License
/**
 * Removes the supplied messages from the given queue in one batch request.
 *
 * @param queue    URL of the SQS queue to delete from
 * @param messages messages whose ids and receipt handles identify the deletions
 */
private void delete(String queue, List<Message> messages) {
    List<DeleteMessageBatchRequestEntry> entries = new ArrayList<DeleteMessageBatchRequestEntry>();
    for (Message message : messages) {
        entries.add(new DeleteMessageBatchRequestEntry()
                .withId(message.getMessageId())
                .withReceiptHandle(message.getReceiptHandle()));
    }
    log.info(format("Deleting %s messages", entries.size()));
    DeleteMessageBatchRequest request = new DeleteMessageBatchRequest();
    request.setQueueUrl(queue);
    request.setEntries(entries);
    sqs.deleteMessageBatch(request);
}
From source file:com.netflix.conductor.contribs.queue.sqs.SQSObservableQueue.java
License:Apache License
private List<String> delete(List<Message> messages) { if (messages == null || messages.isEmpty()) { return null; }//from w ww .j a v a 2 s .co m DeleteMessageBatchRequest batch = new DeleteMessageBatchRequest().withQueueUrl(queueURL); List<DeleteMessageBatchRequestEntry> entries = batch.getEntries(); messages.stream().forEach(m -> entries .add(new DeleteMessageBatchRequestEntry().withId(m.getId()).withReceiptHandle(m.getReceipt()))); DeleteMessageBatchResult result = client.deleteMessageBatch(batch); List<String> failures = result.getFailed().stream().map(fm -> fm.getId()).collect(Collectors.toList()); logger.debug("failed to delete: {}", failures); return failures; }
From source file:com.pinterest.teletraan.worker.LaunchEventCollector.java
License:Apache License
/**
 * Polls SQS for launch events and processes them until the queue is drained.
 * Only messages that were processed successfully are deleted; failed ones
 * become visible again for a later retry.
 *
 * @throws Exception propagated from SQS calls
 */
public void collectEvents() throws Exception {
    while (true) {
        // NOTE(review): no queue URL is set on this request; presumably the
        // sqsClient is pre-configured with a default queue — verify.
        ReceiveMessageRequest request = new ReceiveMessageRequest();
        request.setMaxNumberOfMessages(10);
        ReceiveMessageResult result = sqsClient.receiveMessage(request);
        List<Message> messageList = result.getMessages();
        if (messageList.isEmpty()) {
            LOG.info("No more Launch activity available at the moment.");
            return;
        }
        LOG.info(String.format("Collect %d events from AWS SQS.", messageList.size()));
        ArrayList<DeleteMessageBatchRequestEntry> entries = new ArrayList<>();
        for (Message message : messageList) {
            try {
                boolean hasProcessed = processMessage(message);
                if (hasProcessed) {
                    DeleteMessageBatchRequestEntry entry = new DeleteMessageBatchRequestEntry();
                    entry.setId(message.getMessageId());
                    entry.setReceiptHandle(message.getReceiptHandle());
                    entries.add(entry);
                }
            } catch (Exception ex) {
                // BUG FIX: the original format string had no "{}" placeholder,
                // so the message argument was silently dropped by the logger.
                LOG.error("Failed to process SQS message: {}", message, ex);
            }
        }
        if (!entries.isEmpty()) {
            // NOTE(review): the delete request also carries no queue URL —
            // confirm the client supplies a default, as with the receive above.
            DeleteMessageBatchRequest deleteMessageBatchRequest = new DeleteMessageBatchRequest();
            deleteMessageBatchRequest.setEntries(entries);
            LOG.debug(String.format("Successful process %d messages, deleting them from SQS.", entries.size()));
            sqsClient.deleteMessageBatch(deleteMessageBatchRequest);
        }
    }
}
From source file:com.plumbee.flume.source.sqs.BatchConsumer.java
License:Apache License
/**
 * Flume source poll cycle: flushes buffered events when the batch is full or
 * the flush interval has elapsed, then pulls another batch of messages from
 * SQS into the internal buffers.
 *
 * The two internal buffers move in lock-step: batchEventList holds the Flume
 * events pending channel insertion, and batchDeleteRequestEntries holds the
 * matching SQS delete entries.
 *
 * @return Status.BACKOFF to throttle polling when the queue looks drained,
 *         Status.READY otherwise
 * @throws EventDeliveryException propagated from flush()
 */
public Status process() throws EventDeliveryException {
    // Check if we've met the criteria to flush events.
    if (batchDeleteRequestEntries.size() >= batchSize) {
        flush();
    } else if ((flushInterval > 0) && ((System.currentTimeMillis() - lastFlush) > flushInterval)) {
        flush();
    }
    // The number of messages pending insertion to the channels should
    // always by the same as the number of messages to delete from SQS!
    assert (batchEventList.size() == batchDeleteRequestEntries.size());
    // Determine the maximum number of messages to request from SQS. We
    // never exceed the capacity of the internal buffers.
    if ((batchDeleteRequestEntries.size() + queueRecvBatchSize) > batchSize) {
        receiveMessageRequest.setMaxNumberOfMessages(batchSize - batchDeleteRequestEntries.size());
    } else {
        receiveMessageRequest.setMaxNumberOfMessages(queueRecvBatchSize);
    }
    // Retrieve messages.
    List<Message> messages = client.receiveMessage(receiveMessageRequest).getMessages();
    sourceCounter.incrementBatchReceiveRequestAttemptCount();
    for (Message message : messages) {
        // Extract SQS message attributes.
        long sentTimestamp = Long.parseLong(message.getAttributes().get(SQS_ATTR_SENTTIMESTAMP));
        long approximateReceiveCount = Long.parseLong(message.getAttributes().get(SQS_ATTR_APPROXRECEIVECOUNT));
        // Update statistics. A receive count above one means SQS re-delivered
        // the message, i.e. it is being reprocessed.
        if (approximateReceiveCount > 1) {
            sourceCounter.incrementEventReprocessedCount();
        }
        // By default the timestamp of the message is set to the
        // timestamp in UTC that the message was added to the SQS queue as
        // opposed to the time it was extracted.
        Map<String, String> headers = new HashMap<String, String>();
        headers.put("timestamp", String.valueOf(sentTimestamp));
        batchEventList.add(EventBuilder.withBody(message.getBody(), Charsets.UTF_8, headers));
        // Delete-entry Id only needs to be unique within the batch; the
        // running buffer size serves as that unique id.
        batchDeleteRequestEntries.add(new DeleteMessageBatchRequestEntry()
                .withId(Long.toString(batchEventList.size())).withReceiptHandle(message.getReceiptHandle()));
    }
    sourceCounter.incrementBatchReceiveRequestSuccessCount();
    sourceCounter.addToEventReceivedCount((long) messages.size());
    // If the payload was less than 90% of the maximum batch size,
    // instruct the runner to throttle polling.
    if (messages.size() < (queueRecvBatchSize * 0.9)) {
        return Status.BACKOFF;
    }
    return Status.READY;
}
From source file:com.streamsets.pipeline.stage.origin.sqs.SqsConsumerWorkerCallable.java
License:Apache License
private void batchFlushHelper(boolean startNew) throws StageException { if (batchContext != null) { context.processBatch(batchContext); if (!context.isPreview() && commitQueueUrlsToMessages.size() > 0) { for (String queueUrl : commitQueueUrlsToMessages.keySet()) { DeleteMessageBatchRequest deleteRequest = new DeleteMessageBatchRequest() .withQueueUrl(queueUrl); List<DeleteMessageBatchRequestEntry> deleteRequestEntries = new LinkedList<>(); commitQueueUrlsToMessages.get(queueUrl).forEach(message -> { deleteRequestEntries.add(new DeleteMessageBatchRequestEntry() .withReceiptHandle(message.getReceiptHandle()).withId(message.getMessageId())); // TODO: need to add receiptHandle, and figure out what that is });/*from w w w.j a v a 2 s .co m*/ deleteRequest.setEntries(deleteRequestEntries); Future<DeleteMessageBatchResult> deleteResultFuture = sqsAsync .deleteMessageBatchAsync(deleteRequest); try { DeleteMessageBatchResult deleteResult = deleteResultFuture.get(); if (deleteResult.getFailed() != null) { deleteResult.getFailed().forEach(failed -> LOG.error( "Failed to delete message ID {} from queue %s with code {}, sender fault {}", failed.getId(), queueUrl, failed.getCode(), failed.getSenderFault())); } if (LOG.isDebugEnabled()) { if (deleteResult.getSuccessful() != null) { deleteResult.getSuccessful() .forEach(success -> LOG.debug( "Successfully deleted message ID {} from queue {}", success.getId(), queueUrl)); } } } catch (InterruptedException e) { LOG.error("InterruptedException trying to delete SQS messages with IDs {} in queue {}: {}", getPendingDeleteMessageIds(queueUrl), queueUrl, e.getMessage(), e); Thread.currentThread().interrupt(); break; } catch (ExecutionException e) { String messageIds = getPendingDeleteMessageIds(queueUrl); LOG.error(Errors.SQS_08.getMessage(), messageIds, queueUrl, e.getMessage(), e); throw new StageException(Errors.SQS_08, messageIds, queueUrl, e.getMessage(), e); } } } commitQueueUrlsToMessages.clear(); } batchRecordCount = 0; if 
(startNew) { batchContext = context.startBatch(); lastBatchStartTimestamp = Clock.systemUTC().millis(); } }
From source file:org.apache.nifi.processors.aws.sqs.DeleteSQS.java
License:Apache License
@Override public void onTrigger(final ProcessContext context, final ProcessSession session) { List<FlowFile> flowFiles = session.get(1); if (flowFiles.isEmpty()) { return;//from ww w. j av a 2s . c o m } final FlowFile firstFlowFile = flowFiles.get(0); final String queueUrl = context.getProperty(QUEUE_URL).evaluateAttributeExpressions(firstFlowFile) .getValue(); final AmazonSQSClient client = getClient(); final DeleteMessageBatchRequest request = new DeleteMessageBatchRequest(); request.setQueueUrl(queueUrl); final List<DeleteMessageBatchRequestEntry> entries = new ArrayList<>(flowFiles.size()); for (final FlowFile flowFile : flowFiles) { final DeleteMessageBatchRequestEntry entry = new DeleteMessageBatchRequestEntry(); entry.setReceiptHandle( context.getProperty(RECEIPT_HANDLE).evaluateAttributeExpressions(flowFile).getValue()); entries.add(entry); } request.setEntries(entries); try { client.deleteMessageBatch(request); getLogger().info("Successfully deleted {} objects from SQS", new Object[] { flowFiles.size() }); session.transfer(flowFiles, REL_SUCCESS); } catch (final Exception e) { getLogger().error("Failed to delete {} objects from SQS due to {}", new Object[] { flowFiles.size(), e }); final List<FlowFile> penalizedFlowFiles = new ArrayList<>(); for (final FlowFile flowFile : flowFiles) { penalizedFlowFiles.add(session.penalize(flowFile)); } session.transfer(penalizedFlowFiles, REL_FAILURE); } }
From source file:org.apache.nifi.processors.aws.sqs.GetSQS.java
License:Apache License
/**
 * Receives a batch of messages from SQS, emits each as a flow file on
 * REL_SUCCESS, and (when auto-delete is enabled) deletes the batch from the
 * queue after committing the session.
 *
 * @param context supplies queue URL, batch size, timeouts, charset and the
 *                auto-delete flag
 * @param session flow file session used to create and transfer output
 */
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    final String queueUrl = context.getProperty(DYNAMIC_QUEUE_URL).evaluateAttributeExpressions().getValue();
    final AmazonSQSClient client = getClient();
    // Request all system and message attributes so they can be copied onto
    // the flow file as "sqs."-prefixed attributes below.
    final ReceiveMessageRequest request = new ReceiveMessageRequest();
    request.setAttributeNames(Collections.singleton("All"));
    request.setMessageAttributeNames(Collections.singleton("All"));
    request.setMaxNumberOfMessages(context.getProperty(BATCH_SIZE).asInteger());
    request.setVisibilityTimeout(
            context.getProperty(VISIBILITY_TIMEOUT).asTimePeriod(TimeUnit.SECONDS).intValue());
    request.setQueueUrl(queueUrl);
    request.setWaitTimeSeconds(
            context.getProperty(RECEIVE_MSG_WAIT_TIME).asTimePeriod(TimeUnit.SECONDS).intValue());
    final Charset charset = Charset.forName(context.getProperty(CHARSET).getValue());
    final ReceiveMessageResult result;
    try {
        result = client.receiveMessage(request);
    } catch (final Exception e) {
        getLogger().error("Failed to receive messages from Amazon SQS due to {}", new Object[] { e });
        context.yield();
        return;
    }
    final List<Message> messages = result.getMessages();
    if (messages.isEmpty()) {
        // Nothing available; yield so the framework backs off polling.
        context.yield();
        return;
    }
    final boolean autoDelete = context.getProperty(AUTO_DELETE).asBoolean();
    for (final Message message : messages) {
        FlowFile flowFile = session.create();
        final Map<String, String> attributes = new HashMap<>();
        for (final Map.Entry<String, String> entry : message.getAttributes().entrySet()) {
            attributes.put("sqs." + entry.getKey(), entry.getValue());
        }
        for (final Map.Entry<String, MessageAttributeValue> entry : message.getMessageAttributes().entrySet()) {
            attributes.put("sqs." + entry.getKey(), entry.getValue().getStringValue());
        }
        attributes.put("hash.value", message.getMD5OfBody());
        attributes.put("hash.algorithm", "md5");
        attributes.put("sqs.message.id", message.getMessageId());
        attributes.put("sqs.receipt.handle", message.getReceiptHandle());
        flowFile = session.putAllAttributes(flowFile, attributes);
        // Message body becomes the flow file content, encoded per the
        // configured charset.
        flowFile = session.write(flowFile, new OutputStreamCallback() {
            @Override
            public void process(final OutputStream out) throws IOException {
                out.write(message.getBody().getBytes(charset));
            }
        });
        session.transfer(flowFile, REL_SUCCESS);
        session.getProvenanceReporter().receive(flowFile, queueUrl);
        getLogger().info("Successfully received {} from Amazon SQS", new Object[] { flowFile });
    }
    if (autoDelete) {
        // If we want to auto-delete messages, we must fist commit the session to ensure that the data
        // is persisted in NiFi's repositories.
        session.commit();
        final DeleteMessageBatchRequest deleteRequest = new DeleteMessageBatchRequest();
        deleteRequest.setQueueUrl(queueUrl);
        final List<DeleteMessageBatchRequestEntry> deleteRequestEntries = new ArrayList<>();
        for (final Message message : messages) {
            final DeleteMessageBatchRequestEntry entry = new DeleteMessageBatchRequestEntry();
            entry.setId(message.getMessageId());
            entry.setReceiptHandle(message.getReceiptHandle());
            deleteRequestEntries.add(entry);
        }
        deleteRequest.setEntries(deleteRequestEntries);
        try {
            client.deleteMessageBatch(deleteRequest);
        } catch (final Exception e) {
            // Deletion failure after commit means the messages may be
            // re-delivered (duplicated), but the received data is safe.
            getLogger().error(
                    "Received {} messages from Amazon SQS but failed to delete the messages; these messages"
                            + " may be duplicated. Reason for deletion failure: {}",
                    new Object[] { messages.size(), e });
        }
    }
}
From source file:org.duracloud.common.queue.aws.SQSTaskQueue.java
License:Apache License
/**
 * Deletes up to ten tasks from the SQS queue in a single batch call, using
 * each task's stored message id and receipt handle.
 *
 * @param tasks tasks to delete; must contain at most ten entries
 * @throws IllegalArgumentException if more than ten tasks are supplied
 * @throws TaskException if the batch delete fails with a service error
 */
@Override
public void deleteTasks(Set<Task> tasks) throws TaskException {
    if (tasks.size() > 10) {
        // SQS limits DeleteMessageBatch to ten entries per request.
        throw new IllegalArgumentException("task set must contain 10 or fewer tasks");
    }
    try {
        List<DeleteMessageBatchRequestEntry> entries = new ArrayList<>(tasks.size());
        for (Task task : tasks) {
            entries.add(new DeleteMessageBatchRequestEntry()
                    .withId(task.getProperty(MsgProp.MSG_ID.name()))
                    .withReceiptHandle(task.getProperty(MsgProp.RECEIPT_HANDLE.name())));
        }
        DeleteMessageBatchRequest request = new DeleteMessageBatchRequest()
                .withQueueUrl(queueUrl)
                .withEntries(entries);
        DeleteMessageBatchResult result = sqsClient.deleteMessageBatch(request);
        List<BatchResultErrorEntry> failed = result.getFailed();
        if (failed != null && !failed.isEmpty()) {
            for (BatchResultErrorEntry error : failed) {
                log.info("failed to delete message: " + error);
            }
        }
        for (DeleteMessageBatchResultEntry entry : result.getSuccessful()) {
            log.info("successfully deleted {}", entry);
        }
    } catch (AmazonServiceException se) {
        log.error("failed to batch delete tasks " + tasks + ": " + se.getMessage(), se);
        throw new TaskException(se);
    }
}