List of usage examples for java.util.concurrent.atomic AtomicBoolean get
public final boolean get()
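get() returns the current value with the memory effects of a volatile read, so a value published via set(...) on one thread is visible to get() on another. A minimal, self-contained sketch (class and variable names are illustrative):

import java.util.concurrent.atomic.AtomicBoolean;

public class GetExample {
    public static void main(String[] args) throws InterruptedException {
        final AtomicBoolean done = new AtomicBoolean(false);
        Thread worker = new Thread(() -> {
            // ... do some work ...
            done.set(true); // publish completion to other threads
        });
        worker.start();
        worker.join();
        // get() performs a volatile read, so the write above is visible here
        System.out.println("done = " + done.get()); // prints "done = true"
    }
}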
From source file:org.apache.hadoop.hbase.regionserver.TestHStore.java
@Test
public void testFlushBeforeCompletingScanWithFilterHint() throws IOException, InterruptedException {
    final AtomicBoolean timeToGetHint = new AtomicBoolean(false);
    final int expectedSize = 2;
    testFlushBeforeCompletingScan(new MyListHook() {
        @Override
        public void hook(int currentSize) {
            if (currentSize == expectedSize - 1) {
                try {
                    flushStore(store, id++);
                    timeToGetHint.set(true);
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            }
        }
    }, new FilterBase() {
        @Override
        public Filter.ReturnCode filterKeyValue(Cell v) throws IOException {
            if (timeToGetHint.get()) {
                timeToGetHint.set(false);
                return Filter.ReturnCode.SEEK_NEXT_USING_HINT;
            } else {
                return Filter.ReturnCode.INCLUDE;
            }
        }

        @Override
        public Cell getNextCellHint(Cell currentCell) throws IOException {
            return currentCell;
        }
    }, expectedSize);
}
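The test above uses the flag as a one-shot signal: the flush hook arms it and the filter disarms it after acting once. The get()-then-set(false) pair in filterKeyValue is two separate operations, which is fine with a single consumer; if several threads could consume the signal, compareAndSet(true, false) claims it atomically. A minimal sketch of that variant (names are illustrative, not part of the HBase code):

import java.util.concurrent.atomic.AtomicBoolean;

public class OneShotFlag {
    private final AtomicBoolean hint = new AtomicBoolean(false);

    void arm() {
        hint.set(true);
    }

    boolean tryConsume() {
        // get() followed by set(false) would be two steps; compareAndSet
        // claims the signal atomically, so only one caller ever observes true
        return hint.compareAndSet(true, false);
    }
}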
From source file:org.apache.camel.processor.MulticastProcessor.java
protected void doProcessParallel(final Exchange original, final AtomicExchange result,
        final Iterable<ProcessorExchangePair> pairs, final boolean streaming,
        final AsyncCallback callback) throws Exception {
    ObjectHelper.notNull(executorService, "ExecutorService", this);
    ObjectHelper.notNull(aggregateExecutorService, "AggregateExecutorService", this);

    final CompletionService<Exchange> completion;
    if (streaming) {
        // execute tasks in parallel+streaming and aggregate in the order they are finished (out of order sequence)
        completion = new ExecutorCompletionService<Exchange>(executorService);
    } else {
        // execute tasks in parallel and aggregate in the order the tasks are submitted (in order sequence)
        completion = new SubmitOrderedCompletionService<Exchange>(executorService);
    }

    // when parallel then aggregate on the fly
    final AtomicBoolean running = new AtomicBoolean(true);
    final AtomicInteger total = new AtomicInteger(0);
    final AtomicBoolean allTasksSubmitted = new AtomicBoolean();
    final CountDownLatch aggregationOnTheFlyDone = new CountDownLatch(1);
    final AtomicException executionException = new AtomicException();

    final Iterator<ProcessorExchangePair> it = pairs.iterator();
    if (it.hasNext()) {
        // issue task to execute in separate thread so it can aggregate on-the-fly
        // while we submit new tasks, and those tasks complete concurrently
        // this allows us to optimize work and reduce memory consumption
        AggregateOnTheFlyTask task = new AggregateOnTheFlyTask(result, original, total, completion, running,
                aggregationOnTheFlyDone, allTasksSubmitted, executionException);
        // and start the aggregation task so we can aggregate on-the-fly
        aggregateExecutorService.submit(task);
    }

    LOG.trace("Starting to submit parallel tasks");

    while (it.hasNext()) {
        final ProcessorExchangePair pair = it.next();
        final Exchange subExchange = pair.getExchange();
        updateNewExchange(subExchange, total.intValue(), pairs, it);

        completion.submit(new Callable<Exchange>() {
            public Exchange call() throws Exception {
                if (!running.get()) {
                    // do not start processing the task if we are not running
                    return subExchange;
                }

                try {
                    doProcessParallel(pair);
                } catch (Throwable e) {
                    subExchange.setException(e);
                }

                // decide whether to continue with the multicast or not; similar logic to the Pipeline
                Integer number = getExchangeIndex(subExchange);
                boolean continueProcessing = PipelineHelper.continueProcessing(subExchange,
                        "Parallel processing failed for number " + number, LOG);
                if (stopOnException && !continueProcessing) {
                    // signal to stop running
                    running.set(false);
                    // throw caused exception
                    if (subExchange.getException() != null) {
                        // wrap in exception to explain where it failed
                        throw new CamelExchangeException("Parallel processing failed for number " + number,
                                subExchange, subExchange.getException());
                    }
                }

                if (LOG.isTraceEnabled()) {
                    LOG.trace("Parallel processing complete for exchange: " + subExchange);
                }
                return subExchange;
            }
        });

        total.incrementAndGet();
    }

    // signal that all tasks have been submitted
    if (LOG.isTraceEnabled()) {
        LOG.trace("Signaling that all " + total.get() + " tasks have been submitted.");
    }
    allTasksSubmitted.set(true);

    // it is too hard to do parallel async routing, so we let the caller thread block synchronously
    // and have it pick up the replies and do the aggregation (we use a latch to wait)
    // wait for aggregation to be done
    if (LOG.isDebugEnabled()) {
        LOG.debug("Waiting for on-the-fly aggregation to complete aggregating " + total.get() + " responses.");
    }
    aggregationOnTheFlyDone.await();

    // did we fail for whatever reason? if so, throw the caused exception
    if (executionException.get() != null) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Parallel processing failed due to " + executionException.get().getMessage());
        }
        throw executionException.get();
    }

    // now everything is okay, so we are done
    if (LOG.isDebugEnabled()) {
        LOG.debug("Done parallel processing " + total + " exchanges");
    }
}
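The running flag above implements cooperative cancellation: every submitted task checks running.get() before doing its work, and a failing task flips the flag with set(false) so tasks that have not yet started return immediately. A stripped-down sketch of the same pattern, independent of Camel (pool size and task bodies are illustrative):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

public class StopOnFailure {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        AtomicBoolean running = new AtomicBoolean(true);
        for (int i = 0; i < 100; i++) {
            final int n = i;
            pool.submit(() -> {
                if (!running.get()) {
                    return; // cancelled cooperatively; skip the work
                }
                if (n == 5) {
                    running.set(false); // signal the remaining tasks to stop
                }
            });
        }
        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
    }
}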
From source file:org.apache.nifi.processors.standard.SplitText.java
/**
 * Will split the incoming stream, releasing all splits as FlowFiles at once.
 */
@Override
public void onTrigger(ProcessContext context, ProcessSession processSession) throws ProcessException {
    FlowFile sourceFlowFile = processSession.get();
    if (sourceFlowFile == null) {
        return;
    }
    AtomicBoolean error = new AtomicBoolean();
    List<SplitInfo> computedSplitsInfo = new ArrayList<>();
    AtomicReference<SplitInfo> headerSplitInfoRef = new AtomicReference<>();
    processSession.read(sourceFlowFile, new InputStreamCallback() {
        @Override
        public void process(InputStream in) throws IOException {
            TextLineDemarcator demarcator = new TextLineDemarcator(in);
            SplitInfo splitInfo = null;
            long startOffset = 0;

            // Compute fragment representing the header (if available)
            long start = System.nanoTime();
            try {
                if (SplitText.this.headerLineCount > 0) {
                    splitInfo = SplitText.this.computeHeader(demarcator, startOffset,
                            SplitText.this.headerLineCount, null, null);
                    if ((splitInfo != null) && (splitInfo.lineCount < SplitText.this.headerLineCount)) {
                        error.set(true);
                        getLogger().error("Unable to split " + sourceFlowFile
                                + " due to insufficient amount of header lines. Required "
                                + SplitText.this.headerLineCount + " but was " + splitInfo.lineCount
                                + ". Routing to failure.");
                    }
                } else if (SplitText.this.headerMarker != null) {
                    splitInfo = SplitText.this.computeHeader(demarcator, startOffset, Long.MAX_VALUE,
                            SplitText.this.headerMarker.getBytes(StandardCharsets.UTF_8), null);
                }
                headerSplitInfoRef.set(splitInfo);
            } catch (IllegalStateException e) {
                error.set(true);
                getLogger().error(e.getMessage() + " Routing to failure.", e);
            }

            // Compute and collect fragments representing the individual splits
            if (!error.get()) {
                if (headerSplitInfoRef.get() != null) {
                    startOffset = headerSplitInfoRef.get().length;
                }
                long preAccumulatedLength = startOffset;
                while ((splitInfo = SplitText.this.nextSplit(demarcator, startOffset,
                        SplitText.this.lineCount, splitInfo, preAccumulatedLength)) != null) {
                    computedSplitsInfo.add(splitInfo);
                    startOffset += splitInfo.length;
                }
                long stop = System.nanoTime();
                if (getLogger().isDebugEnabled()) {
                    getLogger().debug("Computed splits in " + (stop - start) + " nanoseconds.");
                }
            }
        }
    });

    if (error.get()) {
        processSession.transfer(sourceFlowFile, REL_FAILURE);
    } else {
        final String fragmentId = UUID.randomUUID().toString();
        List<FlowFile> splitFlowFiles = this.generateSplitFlowFiles(fragmentId, sourceFlowFile,
                headerSplitInfoRef.get(), computedSplitsInfo, processSession);
        final FlowFile originalFlowFile = FragmentAttributes.copyAttributesToOriginal(processSession,
                sourceFlowFile, fragmentId, splitFlowFiles.size());
        processSession.transfer(originalFlowFile, REL_ORIGINAL);
        if (!splitFlowFiles.isEmpty()) {
            processSession.transfer(splitFlowFiles, REL_SPLITS);
        }
    }
}
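The error flag here is also the standard way to get a boolean out of an anonymous callback: locals captured by an inner class must be effectively final, so a mutable holder such as AtomicBoolean stands in for a plain boolean. A minimal sketch (the callback interface is illustrative, not the NiFi API):

import java.util.concurrent.atomic.AtomicBoolean;

public class CallbackFlag {
    interface Callback {
        void run();
    }

    static void invoke(Callback cb) {
        cb.run();
    }

    public static void main(String[] args) {
        // a plain boolean local could not be reassigned from inside the callback
        AtomicBoolean failed = new AtomicBoolean(false);
        invoke(() -> failed.set(true));
        if (failed.get()) {
            System.out.println("callback reported a failure");
        }
    }
}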
From source file:com.arpnetworking.metrics.impl.ApacheHttpSinkTest.java
@Test
public void testPostSuccess() throws InterruptedException {
    final String start = Instant.now().minusMillis(812).atZone(ZoneId.of("UTC"))
            .format(DateTimeFormatter.ISO_INSTANT);
    final String end = Instant.now().atZone(ZoneId.of("UTC")).format(DateTimeFormatter.ISO_INSTANT);
    final String id = UUID.randomUUID().toString();

    _wireMockRule.stubFor(WireMock.requestMatching(new RequestValueMatcher(r -> {
        // Annotations
        Assert.assertEquals(7, r.getAnnotationsCount());
        assertAnnotation(r.getAnnotationsList(), "foo", "bar");
        assertAnnotation(r.getAnnotationsList(), "_service", "myservice");
        assertAnnotation(r.getAnnotationsList(), "_cluster", "mycluster");
        assertAnnotation(r.getAnnotationsList(), "_start", start);
        assertAnnotation(r.getAnnotationsList(), "_end", end);
        assertAnnotation(r.getAnnotationsList(), "_id", id);

        // Dimensions
        Assert.assertEquals(3, r.getDimensionsCount());
        assertDimension(r.getDimensionsList(), "host", "some.host.com");
        assertDimension(r.getDimensionsList(), "service", "myservice");
        assertDimension(r.getDimensionsList(), "cluster", "mycluster");

        // Samples
        assertSample(r.getTimersList(), "timerLong", 123L, ClientV2.Unit.Type.Value.SECOND,
                ClientV2.Unit.Scale.Value.NANO);
        assertSample(r.getTimersList(), "timerInt", 123, ClientV2.Unit.Type.Value.SECOND,
                ClientV2.Unit.Scale.Value.NANO);
        assertSample(r.getTimersList(), "timerShort", (short) 123, ClientV2.Unit.Type.Value.SECOND,
                ClientV2.Unit.Scale.Value.NANO);
        assertSample(r.getTimersList(), "timerByte", (byte) 123, ClientV2.Unit.Type.Value.SECOND,
                ClientV2.Unit.Scale.Value.NANO);
        assertSample(r.getCountersList(), "counter", 8d);
        assertSample(r.getGaugesList(), "gauge", 10d, ClientV2.Unit.Type.Value.BYTE,
                ClientV2.Unit.Scale.Value.UNIT);
    })).willReturn(WireMock.aResponse().withStatus(200)));

    final AtomicBoolean assertionResult = new AtomicBoolean(false);
    final Semaphore semaphore = new Semaphore(0);
    final Sink sink = new ApacheHttpSink.Builder()
            .setUri(URI.create("http://localhost:" + _wireMockRule.port() + PATH))
            .setEventHandler(new AttemptCompletedAssertionHandler(assertionResult, 1, 451, true,
                    new CompletionHandler(semaphore)))
            .build();

    final Map<String, String> annotations = new LinkedHashMap<>();
    annotations.put("foo", "bar");
    annotations.put("_start", start);
    annotations.put("_end", end);
    annotations.put("_host", "some.host.com");
    annotations.put("_service", "myservice");
    annotations.put("_cluster", "mycluster");
    annotations.put("_id", id);

    final TsdEvent event = new TsdEvent(annotations,
            createQuantityMap("timerLong", TsdQuantity.newInstance(123L, Units.NANOSECOND),
                    "timerInt", TsdQuantity.newInstance(123, Units.NANOSECOND),
                    "timerShort", TsdQuantity.newInstance((short) 123, Units.NANOSECOND),
                    "timerByte", TsdQuantity.newInstance((byte) 123, Units.NANOSECOND)),
            createQuantityMap("counter", TsdQuantity.newInstance(8d, null)),
            createQuantityMap("gauge", TsdQuantity.newInstance(10d, Units.BYTE)));

    sink.record(event);
    semaphore.acquire();

    // Ensure expected handler was invoked
    Assert.assertTrue(assertionResult.get());

    // Request matcher
    final RequestPatternBuilder requestPattern = WireMock.postRequestedFor(WireMock.urlEqualTo(PATH))
            .withHeader("Content-Type", WireMock.equalTo("application/octet-stream"));

    // Assert that data was sent
    _wireMockRule.verify(1, requestPattern);
    Assert.assertTrue(_wireMockRule.findUnmatchedRequests().getRequests().isEmpty());
}
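This test pairs the flag with a Semaphore: the handler records the assertion outcome in assertionResult and releases the permit, and the test thread blocks in acquire() before reading the flag, so get() is guaranteed to observe the handler's write. A minimal sketch of that handshake (the handler body is illustrative):

import java.util.concurrent.Semaphore;
import java.util.concurrent.atomic.AtomicBoolean;

public class AsyncAssertion {
    public static void main(String[] args) throws InterruptedException {
        AtomicBoolean passed = new AtomicBoolean(false);
        Semaphore done = new Semaphore(0);

        new Thread(() -> {
            passed.set(true); // record the outcome of the async check
            done.release();   // then wake the waiting test thread
        }).start();

        done.acquire();       // blocks until the handler has finished
        if (!passed.get()) {
            throw new AssertionError("async check failed");
        }
    }
}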
From source file:org.apache.tez.dag.app.rm.TestTaskScheduler.java
@SuppressWarnings({ "unchecked" }) @Test(timeout = 10000)/*from www . j av a 2s . c om*/ public void testTaskSchedulerWithReuse() throws Exception { RackResolver.init(new YarnConfiguration()); TezAMRMClientAsync<CookieContainerRequest> mockRMClient = mock(TezAMRMClientAsync.class); String appHost = "host"; int appPort = 0; String appUrl = "url"; Configuration conf = new Configuration(); // to match all in the same pass conf.setLong(TezConfiguration.TEZ_AM_CONTAINER_REUSE_LOCALITY_DELAY_ALLOCATION_MILLIS, 0); // to release immediately after deallocate conf.setLong(TezConfiguration.TEZ_AM_CONTAINER_IDLE_RELEASE_TIMEOUT_MIN_MILLIS, 0); conf.setLong(TezConfiguration.TEZ_AM_CONTAINER_IDLE_RELEASE_TIMEOUT_MAX_MILLIS, 0); TaskSchedulerContext mockApp = setupMockTaskSchedulerContext(appHost, appPort, appUrl, conf); final TaskSchedulerContextDrainable drainableAppCallback = createDrainableContext(mockApp); TaskSchedulerWithDrainableContext scheduler = new TaskSchedulerWithDrainableContext(drainableAppCallback, mockRMClient); scheduler.initialize(); drainableAppCallback.drain(); RegisterApplicationMasterResponse mockRegResponse = mock(RegisterApplicationMasterResponse.class); Resource mockMaxResource = mock(Resource.class); Map<ApplicationAccessType, String> mockAcls = mock(Map.class); when(mockRegResponse.getMaximumResourceCapability()).thenReturn(mockMaxResource); when(mockRegResponse.getApplicationACLs()).thenReturn(mockAcls); when(mockRMClient.registerApplicationMaster(anyString(), anyInt(), anyString())) .thenReturn(mockRegResponse); Resource mockClusterResource = mock(Resource.class); when(mockRMClient.getAvailableResources()).thenReturn(mockClusterResource); scheduler.start(); drainableAppCallback.drain(); Object mockTask1 = mock(Object.class); when(mockTask1.toString()).thenReturn("task1"); Object mockCookie1 = mock(Object.class); Resource mockCapability = mock(Resource.class); String[] hosts = { "host1", "host5" }; String[] racks = { "/default-rack", "/default-rack" }; final Priority mockPriority1 = Priority.newInstance(1); final Priority mockPriority2 = Priority.newInstance(2); final Priority mockPriority3 = Priority.newInstance(3); final Priority mockPriority4 = Priority.newInstance(4); final Priority mockPriority5 = Priority.newInstance(5); Object mockTask2 = mock(Object.class); when(mockTask2.toString()).thenReturn("task2"); Object mockCookie2 = mock(Object.class); Object mockTask3 = mock(Object.class); when(mockTask3.toString()).thenReturn("task3"); Object mockCookie3 = mock(Object.class); ArgumentCaptor<CookieContainerRequest> requestCaptor = ArgumentCaptor .forClass(CookieContainerRequest.class); scheduler.allocateTask(mockTask1, mockCapability, hosts, racks, mockPriority1, null, mockCookie1); drainableAppCallback.drain(); verify(mockRMClient, times(1)).addContainerRequest(requestCaptor.capture()); CookieContainerRequest request1 = requestCaptor.getValue(); scheduler.allocateTask(mockTask2, mockCapability, hosts, racks, mockPriority2, null, mockCookie2); drainableAppCallback.drain(); verify(mockRMClient, times(2)).addContainerRequest(requestCaptor.capture()); CookieContainerRequest request2 = requestCaptor.getValue(); scheduler.allocateTask(mockTask3, mockCapability, hosts, racks, mockPriority3, null, mockCookie3); drainableAppCallback.drain(); verify(mockRMClient, times(3)).addContainerRequest(requestCaptor.capture()); CookieContainerRequest request3 = requestCaptor.getValue(); List<Container> containers = new ArrayList<Container>(); // sending lower priority container first to make 
sure its not matched Container mockContainer4 = mock(Container.class, RETURNS_DEEP_STUBS); when(mockContainer4.getNodeId().getHost()).thenReturn("host4"); when(mockContainer4.toString()).thenReturn("container4"); when(mockContainer4.getPriority()).thenReturn(mockPriority4); ContainerId mockCId4 = mock(ContainerId.class); when(mockContainer4.getId()).thenReturn(mockCId4); when(mockCId4.toString()).thenReturn("container4"); containers.add(mockContainer4); Container mockContainer1 = mock(Container.class, RETURNS_DEEP_STUBS); when(mockContainer1.getNodeId().getHost()).thenReturn("host1"); when(mockContainer1.getPriority()).thenReturn(mockPriority1); when(mockContainer1.toString()).thenReturn("container1"); ContainerId mockCId1 = mock(ContainerId.class); when(mockContainer1.getId()).thenReturn(mockCId1); when(mockCId1.toString()).thenReturn("container1"); containers.add(mockContainer1); Container mockContainer2 = mock(Container.class, RETURNS_DEEP_STUBS); when(mockContainer2.getNodeId().getHost()).thenReturn("host2"); when(mockContainer2.getPriority()).thenReturn(mockPriority2); when(mockContainer2.toString()).thenReturn("container2"); ContainerId mockCId2 = mock(ContainerId.class); when(mockContainer2.getId()).thenReturn(mockCId2); when(mockCId2.toString()).thenReturn("container2"); containers.add(mockContainer2); Container mockContainer3 = mock(Container.class, RETURNS_DEEP_STUBS); when(mockContainer3.getNodeId().getHost()).thenReturn("host3"); when(mockContainer3.getPriority()).thenReturn(mockPriority3); when(mockContainer3.toString()).thenReturn("container3"); ContainerId mockCId3 = mock(ContainerId.class); when(mockContainer3.getId()).thenReturn(mockCId3); when(mockCId3.toString()).thenReturn("container3"); containers.add(mockContainer3); ArrayList<CookieContainerRequest> hostContainers = new ArrayList<CookieContainerRequest>(); hostContainers.add(request1); ArrayList<CookieContainerRequest> rackContainers = new ArrayList<CookieContainerRequest>(); rackContainers.add(request2); ArrayList<CookieContainerRequest> anyContainers = new ArrayList<CookieContainerRequest>(); anyContainers.add(request3); final List<ArrayList<CookieContainerRequest>> hostList = new LinkedList<ArrayList<CookieContainerRequest>>(); hostList.add(hostContainers); final List<ArrayList<CookieContainerRequest>> rackList = new LinkedList<ArrayList<CookieContainerRequest>>(); rackList.add(rackContainers); final List<ArrayList<CookieContainerRequest>> anyList = new LinkedList<ArrayList<CookieContainerRequest>>(); anyList.add(anyContainers); final List<ArrayList<CookieContainerRequest>> emptyList = new LinkedList<ArrayList<CookieContainerRequest>>(); // return pri1 requests for host1 when(mockRMClient.getMatchingRequestsForTopPriority(eq("host1"), (Resource) any())) .thenAnswer(new Answer<List<? extends Collection<CookieContainerRequest>>>() { @Override public List<? extends Collection<CookieContainerRequest>> answer(InvocationOnMock invocation) throws Throwable { return hostList; } }); // second request matched to rack. RackResolver by default puts hosts in // /default-rack. We need to workaround by returning rack matches only once when(mockRMClient.getMatchingRequestsForTopPriority(eq("/default-rack"), (Resource) any())) .thenAnswer(new Answer<List<? extends Collection<CookieContainerRequest>>>() { @Override public List<? extends Collection<CookieContainerRequest>> answer(InvocationOnMock invocation) throws Throwable { return rackList; } }).thenAnswer(new Answer<List<? 
extends Collection<CookieContainerRequest>>>() { @Override public List<? extends Collection<CookieContainerRequest>> answer(InvocationOnMock invocation) throws Throwable { return emptyList; } }); // third request matched to ANY when(mockRMClient.getMatchingRequestsForTopPriority(eq(ResourceRequest.ANY), (Resource) any())) .thenAnswer(new Answer<List<? extends Collection<CookieContainerRequest>>>() { @Override public List<? extends Collection<CookieContainerRequest>> answer(InvocationOnMock invocation) throws Throwable { return anyList; } }).thenAnswer(new Answer<List<? extends Collection<CookieContainerRequest>>>() { @Override public List<? extends Collection<CookieContainerRequest>> answer(InvocationOnMock invocation) throws Throwable { return emptyList; } }); when(mockRMClient.getTopPriority()).then(new Answer<Priority>() { @Override public Priority answer(InvocationOnMock invocation) throws Throwable { int allocations = drainableAppCallback.count.get(); if (allocations == 0) { return mockPriority1; } if (allocations == 1) { return mockPriority2; } if (allocations == 2) { return mockPriority3; } if (allocations == 3) { return mockPriority4; } return null; } }); AtomicBoolean drainNotifier = new AtomicBoolean(false); scheduler.delayedContainerManager.drainedDelayedContainersForTest = drainNotifier; scheduler.onContainersAllocated(containers); TestTaskSchedulerHelpers.waitForDelayedDrainNotify(drainNotifier); drainableAppCallback.drain(); // exact number allocations returned verify(mockApp, times(3)).taskAllocated(any(), any(), (Container) any()); // first container allocated verify(mockApp).taskAllocated(mockTask1, mockCookie1, mockContainer1); verify(mockApp).taskAllocated(mockTask2, mockCookie2, mockContainer2); verify(mockApp).taskAllocated(mockTask3, mockCookie3, mockContainer3); verify(mockRMClient).removeContainerRequest(request1); verify(mockRMClient).removeContainerRequest(request2); verify(mockRMClient).removeContainerRequest(request3); // verify unwanted container released verify(mockRMClient).releaseAssignedContainer(mockCId4); // deallocate allocated task assertTrue(scheduler.deallocateTask(mockTask1, true, null, null)); drainableAppCallback.drain(); verify(mockApp).containerBeingReleased(mockCId1); verify(mockRMClient).releaseAssignedContainer(mockCId1); // deallocate allocated container Assert.assertEquals(mockTask2, scheduler.deallocateContainer(mockCId2)); drainableAppCallback.drain(); verify(mockRMClient).releaseAssignedContainer(mockCId2); verify(mockRMClient, times(3)).releaseAssignedContainer((ContainerId) any()); List<ContainerStatus> statuses = new ArrayList<ContainerStatus>(); ContainerStatus mockStatus1 = mock(ContainerStatus.class); when(mockStatus1.getContainerId()).thenReturn(mockCId1); statuses.add(mockStatus1); ContainerStatus mockStatus2 = mock(ContainerStatus.class); when(mockStatus2.getContainerId()).thenReturn(mockCId2); statuses.add(mockStatus2); ContainerStatus mockStatus3 = mock(ContainerStatus.class); when(mockStatus3.getContainerId()).thenReturn(mockCId3); statuses.add(mockStatus3); ContainerStatus mockStatus4 = mock(ContainerStatus.class); when(mockStatus4.getContainerId()).thenReturn(mockCId4); statuses.add(mockStatus4); scheduler.onContainersCompleted(statuses); drainableAppCallback.drain(); // released container status returned verify(mockApp).containerCompleted(mockTask1, mockStatus1); verify(mockApp).containerCompleted(mockTask2, mockStatus2); // currently allocated container status returned and not released 
verify(mockApp).containerCompleted(mockTask3, mockStatus3); // no other statuses returned verify(mockApp, times(3)).containerCompleted(any(), (ContainerStatus) any()); verify(mockRMClient, times(3)).releaseAssignedContainer((ContainerId) any()); // verify blacklisting verify(mockRMClient, times(0)).addNodeToBlacklist((NodeId) any()); String badHost = "host6"; NodeId badNodeId = mock(NodeId.class); when(badNodeId.getHost()).thenReturn(badHost); scheduler.blacklistNode(badNodeId); verify(mockRMClient, times(1)).addNodeToBlacklist(badNodeId); Object mockTask4 = mock(Object.class); when(mockTask4.toString()).thenReturn("task4"); Object mockCookie4 = mock(Object.class); scheduler.allocateTask(mockTask4, mockCapability, null, null, mockPriority4, null, mockCookie4); drainableAppCallback.drain(); verify(mockRMClient, times(4)).addContainerRequest(requestCaptor.capture()); CookieContainerRequest request4 = requestCaptor.getValue(); anyContainers.clear(); anyContainers.add(request4); Container mockContainer5 = mock(Container.class, RETURNS_DEEP_STUBS); when(mockContainer5.getNodeId().getHost()).thenReturn(badHost); when(mockContainer5.getNodeId()).thenReturn(badNodeId); ContainerId mockCId5 = mock(ContainerId.class); when(mockContainer5.toString()).thenReturn("container5"); when(mockCId5.toString()).thenReturn("container5"); when(mockContainer5.getId()).thenReturn(mockCId5); when(mockContainer5.getPriority()).thenReturn(mockPriority4); containers.clear(); containers.add(mockContainer5); when(mockRMClient.getMatchingRequestsForTopPriority(eq(ResourceRequest.ANY), (Resource) any())) .thenAnswer(new Answer<List<? extends Collection<CookieContainerRequest>>>() { @Override public List<? extends Collection<CookieContainerRequest>> answer(InvocationOnMock invocation) throws Throwable { return anyList; } }).thenAnswer(new Answer<List<? extends Collection<CookieContainerRequest>>>() { @Override public List<? extends Collection<CookieContainerRequest>> answer(InvocationOnMock invocation) throws Throwable { return emptyList; } }); drainNotifier.set(false); scheduler.onContainersAllocated(containers); TestTaskSchedulerHelpers.waitForDelayedDrainNotify(drainNotifier); drainableAppCallback.drain(); // no new allocation verify(mockApp, times(3)).taskAllocated(any(), any(), (Container) any()); // verify blacklisted container released verify(mockRMClient).releaseAssignedContainer(mockCId5); verify(mockRMClient, times(4)).releaseAssignedContainer((ContainerId) any()); // verify request added back verify(mockRMClient, times(5)).addContainerRequest(requestCaptor.capture()); CookieContainerRequest request5 = requestCaptor.getValue(); anyContainers.clear(); anyContainers.add(request5); Container mockContainer6 = mock(Container.class, RETURNS_DEEP_STUBS); when(mockContainer6.getNodeId().getHost()).thenReturn("host7"); ContainerId mockCId6 = mock(ContainerId.class); when(mockContainer6.getId()).thenReturn(mockCId6); when(mockContainer6.toString()).thenReturn("container6"); when(mockCId6.toString()).thenReturn("container6"); containers.clear(); containers.add(mockContainer6); when(mockRMClient.getMatchingRequestsForTopPriority(eq(ResourceRequest.ANY), (Resource) any())) .thenAnswer(new Answer<List<? extends Collection<CookieContainerRequest>>>() { @Override public List<? extends Collection<CookieContainerRequest>> answer(InvocationOnMock invocation) throws Throwable { return anyList; } }).thenAnswer(new Answer<List<? extends Collection<CookieContainerRequest>>>() { @Override public List<? 
extends Collection<CookieContainerRequest>> answer(InvocationOnMock invocation) throws Throwable { return emptyList; } }); drainNotifier.set(false); scheduler.onContainersAllocated(containers); TestTaskSchedulerHelpers.waitForDelayedDrainNotify(drainNotifier); drainableAppCallback.drain(); // new allocation verify(mockApp, times(4)).taskAllocated(any(), any(), (Container) any()); verify(mockApp).taskAllocated(mockTask4, mockCookie4, mockContainer6); // deallocate allocated task assertTrue(scheduler.deallocateTask(mockTask4, true, null, null)); drainableAppCallback.drain(); verify(mockApp).containerBeingReleased(mockCId6); verify(mockRMClient).releaseAssignedContainer(mockCId6); verify(mockRMClient, times(5)).releaseAssignedContainer((ContainerId) any()); // test unblacklist scheduler.unblacklistNode(badNodeId); verify(mockRMClient, times(1)).removeNodeFromBlacklist(badNodeId); assertEquals(0, scheduler.blacklistedNodes.size()); // verify container level matching // fudge the top level priority to prevent containers from being released // if top level priority is higher than newly allocated containers then // they will not be released final AtomicBoolean fudgePriority = new AtomicBoolean(true); when(mockRMClient.getTopPriority()).then(new Answer<Priority>() { @Override public Priority answer(InvocationOnMock invocation) throws Throwable { if (fudgePriority.get()) { return mockPriority4; } return mockPriority5; } }); // add a dummy task to prevent release of allocated containers Object mockTask5 = mock(Object.class); when(mockTask5.toString()).thenReturn("task5"); Object mockCookie5 = mock(Object.class); scheduler.allocateTask(mockTask5, mockCapability, hosts, racks, mockPriority5, null, mockCookie5); verify(mockRMClient, times(6)).addContainerRequest(requestCaptor.capture()); CookieContainerRequest request6 = requestCaptor.getValue(); drainableAppCallback.drain(); // add containers so that we can reference one of them for container specific // allocation containers.clear(); Container mockContainer7 = mock(Container.class, RETURNS_DEEP_STUBS); when(mockContainer7.getNodeId().getHost()).thenReturn("host5"); ContainerId mockCId7 = mock(ContainerId.class); when(mockContainer7.toString()).thenReturn("container7"); when(mockCId7.toString()).thenReturn("container7"); when(mockContainer7.getId()).thenReturn(mockCId7); when(mockContainer7.getPriority()).thenReturn(mockPriority5); containers.add(mockContainer7); Container mockContainer8 = mock(Container.class, RETURNS_DEEP_STUBS); when(mockContainer8.getNodeId().getHost()).thenReturn("host5"); ContainerId mockCId8 = mock(ContainerId.class); when(mockContainer8.toString()).thenReturn("container8"); when(mockCId8.toString()).thenReturn("container8"); when(mockContainer8.getId()).thenReturn(mockCId8); when(mockContainer8.getPriority()).thenReturn(mockPriority5); containers.add(mockContainer8); drainNotifier.set(false); scheduler.onContainersAllocated(containers); drainableAppCallback.drain(); verify(mockRMClient, times(5)).releaseAssignedContainer((ContainerId) any()); Object mockTask6 = mock(Object.class); when(mockTask6.toString()).thenReturn("task6"); Object mockCookie6 = mock(Object.class); // allocate request with container affinity scheduler.allocateTask(mockTask6, mockCapability, mockCId7, mockPriority5, null, mockCookie6); drainableAppCallback.drain(); verify(mockRMClient, times(7)).addContainerRequest(requestCaptor.capture()); CookieContainerRequest request7 = requestCaptor.getValue(); hostContainers.clear(); hostContainers.add(request6); 
hostContainers.add(request7); when(mockRMClient.getMatchingRequestsForTopPriority(eq("host5"), (Resource) any())) .thenAnswer(new Answer<List<? extends Collection<CookieContainerRequest>>>() { @Override public List<? extends Collection<CookieContainerRequest>> answer(InvocationOnMock invocation) throws Throwable { return hostList; } }); // stop fudging top priority fudgePriority.set(false); TestTaskSchedulerHelpers.waitForDelayedDrainNotify(drainNotifier); drainableAppCallback.drain(); verify(mockApp, times(6)).taskAllocated(any(), any(), (Container) any()); // container7 allocated to the task with affinity for it verify(mockApp).taskAllocated(mockTask6, mockCookie6, mockContainer7); // deallocate allocated task assertTrue(scheduler.deallocateTask(mockTask5, true, null, null)); assertTrue(scheduler.deallocateTask(mockTask6, true, null, null)); drainableAppCallback.drain(); verify(mockApp).containerBeingReleased(mockCId7); verify(mockApp).containerBeingReleased(mockCId8); verify(mockRMClient).releaseAssignedContainer(mockCId7); verify(mockRMClient).releaseAssignedContainer(mockCId8); verify(mockRMClient, times(7)).releaseAssignedContainer((ContainerId) any()); float progress = 0.5f; when(mockApp.getProgress()).thenReturn(progress); Assert.assertEquals(progress, scheduler.getProgress(), 0); List<NodeReport> mockUpdatedNodes = mock(List.class); scheduler.onNodesUpdated(mockUpdatedNodes); drainableAppCallback.drain(); verify(mockApp).nodesUpdated(mockUpdatedNodes); ArgumentCaptor<String> argumentCaptor = ArgumentCaptor.forClass(String.class); Exception mockException = new IOException("mockexception"); scheduler.onError(mockException); drainableAppCallback.drain(); verify(mockApp).reportError(eq(YarnTaskSchedulerServiceError.RESOURCEMANAGER_ERROR), argumentCaptor.capture(), any(DagInfo.class)); assertTrue(argumentCaptor.getValue().contains("mockexception")); scheduler.onShutdownRequest(); drainableAppCallback.drain(); verify(mockApp).appShutdownRequested(); String appMsg = "success"; AppFinalStatus finalStatus = new AppFinalStatus(FinalApplicationStatus.SUCCEEDED, appMsg, appUrl); when(mockApp.getFinalAppStatus()).thenReturn(finalStatus); scheduler.shutdown(); drainableAppCallback.drain(); verify(mockRMClient).unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, appMsg, appUrl); verify(mockRMClient).stop(); }
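drainNotifier acts as a completion signal between the scheduler's delayed-container thread and the test: the test resets it with set(false) before triggering work, and waitForDelayedDrainNotify blocks until the scheduler flips it to true. The helper's implementation is not shown in this excerpt; a minimal polling version might look like the following (class name, timeout, and poll interval are all illustrative assumptions):

import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;

public class FlagWaiter {
    /** Polls the flag until it becomes true or the timeout elapses. */
    static void awaitTrue(AtomicBoolean flag, long timeoutMs)
            throws InterruptedException, TimeoutException {
        long deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMs);
        while (!flag.get()) {
            if (System.nanoTime() >= deadline) {
                throw new TimeoutException("flag never became true");
            }
            Thread.sleep(10); // back off between volatile reads of the flag
        }
    }
}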
From source file:org.apache.jackrabbit.oak.segment.CompactionAndCleanupIT.java
/**
 * Regression test for OAK-2192 testing for mixed segments. This test does not
 * cover OAK-3348, i.e. it does not assert the segment graph is free of cross
 * gc generation references.
 */
@Test
public void testMixedSegments() throws Exception {
    FileStore store = fileStoreBuilder(getFileStoreFolder()).withMaxFileSize(2).withMemoryMapping(true)
            .withGCOptions(defaultGCOptions().setForceAfterFail(true)).build();
    final SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(store).build();
    final AtomicBoolean compactionSuccess = new AtomicBoolean(true);

    NodeBuilder root = nodeStore.getRoot().builder();
    createNodes(root.setChildNode("test"), 10, 3);
    nodeStore.merge(root, EmptyHook.INSTANCE, CommitInfo.EMPTY);

    final Set<UUID> beforeSegments = new HashSet<UUID>();
    collectSegments(store.getReader(), store.getRevisions(), beforeSegments);

    final AtomicReference<Boolean> run = new AtomicReference<Boolean>(true);
    final List<String> failedCommits = newArrayList();
    Thread[] threads = new Thread[10];
    for (int k = 0; k < threads.length; k++) {
        final int threadId = k;
        threads[k] = new Thread(new Runnable() {
            @Override
            public void run() {
                for (int j = 0; run.get(); j++) {
                    String nodeName = "b-" + threadId + "," + j;
                    try {
                        NodeBuilder root = nodeStore.getRoot().builder();
                        root.setChildNode(nodeName);
                        nodeStore.merge(root, EmptyHook.INSTANCE, CommitInfo.EMPTY);
                        Thread.sleep(5);
                    } catch (CommitFailedException e) {
                        failedCommits.add(nodeName);
                    } catch (InterruptedException e) {
                        Thread.interrupted();
                        break;
                    }
                }
            }
        });
        threads[k].start();
    }

    store.compact();
    run.set(false);
    for (Thread t : threads) {
        t.join();
    }
    store.flush();

    assumeTrue("Failed to acquire compaction lock", compactionSuccess.get());
    assertTrue("Failed commits: " + failedCommits, failedCommits.isEmpty());

    Set<UUID> afterSegments = new HashSet<UUID>();
    collectSegments(store.getReader(), store.getRevisions(), afterSegments);
    try {
        for (UUID u : beforeSegments) {
            assertFalse("Mixed segments found: " + u, afterSegments.contains(u));
        }
    } finally {
        store.close();
    }
}
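Here run is an AtomicReference<Boolean> used purely as a stop flag for the writer threads; an AtomicBoolean would serve the same purpose without boxing. The worker-loop shape is worth isolating (the thread body is illustrative):

import java.util.concurrent.atomic.AtomicBoolean;

public class StoppableWorker {
    public static void main(String[] args) throws InterruptedException {
        AtomicBoolean run = new AtomicBoolean(true);
        Thread worker = new Thread(() -> {
            // re-check the flag on every iteration so set(false) ends the loop
            while (run.get()) {
                try {
                    Thread.sleep(5); // simulated unit of work
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    break;
                }
            }
        });
        worker.start();
        Thread.sleep(50);
        run.set(false); // ask the worker to finish its current iteration and exit
        worker.join();
    }
}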
From source file:org.apache.nifi.processors.csv.ExtractCSVHeader.java
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    final FlowFile original = session.get();
    if (original == null) {
        return;
    }

    final AtomicBoolean lineFound = new AtomicBoolean(false);
    final Map<String, String> attrs = new HashMap<>();
    final AtomicInteger headerLength = new AtomicInteger(0);

    session.read(original, new InputStreamCallback() {
        @Override
        public void process(InputStream inputStream) throws IOException {
            // TODO expose the charset property?
            LineIterator iterator = IOUtils.lineIterator(inputStream, UTF_8);
            if (iterator.hasNext()) {
                lineFound.set(true);
                final String header = iterator.nextLine();

                final String format = context.getProperty(PROP_FORMAT).getValue();
                final String delimiter = context.getProperty(PROP_DELIMITER)
                        .evaluateAttributeExpressions(original).getValue();
                final String prefix = context.getProperty(PROP_SCHEMA_ATTR_PREFIX)
                        .evaluateAttributeExpressions(original).getValue();

                attrs.put(prefix + ATTR_HEADER_ORIGINAL, header);
                // TODO validate delimiter in the callback first
                final CSVFormat csvFormat = buildFormat(format, delimiter,
                        true, // we assume the first line is the header
                        null); // no custom header
                final CSVParser parser = csvFormat.parse(new StringReader(header));
                final Map<String, Integer> headers = parser.getHeaderMap();
                final int columnCount = headers.size();
                attrs.put(prefix + ATTR_HEADER_COLUMN_COUNT, String.valueOf(columnCount));
                for (Map.Entry<String, Integer> h : headers.entrySet()) {
                    // CSV columns are 1-based in Excel
                    attrs.put(prefix + (h.getValue() + 1), h.getKey());
                }

                // strip the header and send to the 'content' relationship
                if (StringUtils.isNotBlank(header)) {
                    int hLength = header.length();
                    // move past the new line if there are more lines
                    if (original.getSize() > hLength + 1) {
                        hLength++;
                    }
                    headerLength.set(hLength);
                }
            }
        }
    });

    if (lineFound.get()) {
        FlowFile ff = session.putAllAttributes(original, attrs);
        int offset = headerLength.get();
        if (offset > 0) {
            FlowFile contentOnly = session.clone(ff, offset, original.getSize() - offset);
            session.transfer(contentOnly, REL_CONTENT);
        }
        session.transfer(ff, REL_ORIGINAL);
    } else {
        session.transfer(original, REL_FAILURE);
    }
}
From source file:org.apache.solr.servlet.SolrDispatchFilter.java
private boolean authenticateRequest(ServletRequest request, ServletResponse response,
        final AtomicReference<ServletRequest> wrappedRequest) throws IOException {
    boolean requestContinues = false;
    final AtomicBoolean isAuthenticated = new AtomicBoolean(false);
    AuthenticationPlugin authenticationPlugin = cores.getAuthenticationPlugin();
    if (authenticationPlugin == null) {
        return true;
    } else {
        // /admin/info/key must always be open. See SOLR-9188.
        // Tests work only with getPathInfo(); otherwise getServletPath() would be enough.
        if (PKIAuthenticationPlugin.PATH.equals(((HttpServletRequest) request).getServletPath())
                || PKIAuthenticationPlugin.PATH.equals(((HttpServletRequest) request).getPathInfo()))
            return true;
        String header = ((HttpServletRequest) request).getHeader(PKIAuthenticationPlugin.HEADER);
        if (header != null && cores.getPkiAuthenticationPlugin() != null)
            authenticationPlugin = cores.getPkiAuthenticationPlugin();
        try {
            log.debug("Request to authenticate: {}, domain: {}, port: {}", request, request.getLocalName(),
                    request.getLocalPort());
            // upon successful authentication, this should call the chain's next filter.
            requestContinues = authenticationPlugin.doAuthenticate(request, response, (req, rsp) -> {
                isAuthenticated.set(true);
                wrappedRequest.set(req);
            });
        } catch (Exception e) {
            log.info("Error authenticating", e);
            throw new SolrException(ErrorCode.SERVER_ERROR, "Error during request authentication, ", e);
        }
    }
    // requestContinues is an optional short circuit, thus we still need to check isAuthenticated.
    // This is because the AuthenticationPlugin doesn't always have enough information to determine if
    // it should short circuit, e.g. the Kerberos Authentication Filter will send an error and not
    // call later filters in the chain, but doesn't throw an exception. We could force each plugin
    // to implement isAuthenticated to simplify the check here, but that just moves the complexity to
    // multiple code paths.
    if (!requestContinues || !isAuthenticated.get()) {
        response.flushBuffer();
        return false;
    }
    return true;
}
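isAuthenticated captures the outcome of the plugin callback, and the method then combines it with the plugin's return value, since, as the closing comment explains, either signal alone is insufficient. A condensed sketch of that shape (the doAuthenticate stand-in below is illustrative, not the Solr API):

import java.util.concurrent.atomic.AtomicBoolean;

public class AuthCheck {
    // illustrative stand-in for the plugin: invokes the callback on success
    static boolean doAuthenticate(boolean ok, Runnable onSuccess) {
        if (ok) {
            onSuccess.run();
        }
        return ok;
    }

    static boolean authenticate(boolean ok) {
        AtomicBoolean authenticated = new AtomicBoolean(false);
        boolean requestContinues = doAuthenticate(ok, () -> authenticated.set(true));
        // both signals must agree: a plugin may return true without ever
        // having invoked the success callback, and vice versa
        return requestContinues && authenticated.get();
    }

    public static void main(String[] args) {
        System.out.println(authenticate(true));  // true
        System.out.println(authenticate(false)); // false
    }
}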
From source file:org.apache.bookkeeper.mledger.impl.ManagedCursorImpl.java
@Override
public void delete(Iterable<Position> positions) throws InterruptedException, ManagedLedgerException {
    checkNotNull(positions);

    class Result {
        ManagedLedgerException exception = null;
    }

    final Result result = new Result();
    final CountDownLatch counter = new CountDownLatch(1);
    final AtomicBoolean timeout = new AtomicBoolean(false);

    asyncDelete(positions, new AsyncCallbacks.DeleteCallback() {
        @Override
        public void deleteComplete(Object ctx) {
            if (timeout.get()) {
                log.warn("[{}] [{}] Delete operation timeout. Callback deleteComplete at position {}",
                        ledger.getName(), name, positions);
            }
            counter.countDown();
        }

        @Override
        public void deleteFailed(ManagedLedgerException exception, Object ctx) {
            result.exception = exception;
            if (timeout.get()) {
                log.warn("[{}] [{}] Delete operation timeout. Callback deleteFailed at position {}",
                        ledger.getName(), name, positions);
            }
            counter.countDown();
        }
    }, null);

    if (!counter.await(ManagedLedgerImpl.AsyncOperationTimeoutSeconds, TimeUnit.SECONDS)) {
        timeout.set(true);
        log.warn("[{}] [{}] Delete operation timeout. No callback was triggered at position {}",
                ledger.getName(), name, positions);
        throw new ManagedLedgerException("Timeout during delete operation");
    }

    if (result.exception != null) {
        throw result.exception;
    }
}
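The timeout flag closes a race between the caller and a late callback: if await() times out, the flag is set before the method throws, so a callback that fires afterwards can detect via get() that the caller has already given up and merely log instead of acting. A minimal sketch of the latch-plus-flag pattern (delays are illustrative):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

public class TimeoutAwareCallback {
    public static void main(String[] args) throws InterruptedException {
        CountDownLatch done = new CountDownLatch(1);
        AtomicBoolean timedOut = new AtomicBoolean(false);

        new Thread(() -> {
            try {
                Thread.sleep(200); // simulated slow async operation
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
            if (timedOut.get()) {
                System.out.println("late callback: caller already gave up");
            }
            done.countDown();
        }).start();

        if (!done.await(50, TimeUnit.MILLISECONDS)) {
            timedOut.set(true); // mark the operation abandoned before bailing out
            System.out.println("operation timed out");
        }
    }
}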