Example usage for java.util.concurrent.atomic AtomicBoolean set

List of usage examples for java.util.concurrent.atomic AtomicBoolean set

Introduction

On this page you can find usage examples for java.util.concurrent.atomic.AtomicBoolean.set.

Prototype

public final void set(boolean newValue) 

Document

Sets the value to newValue, with memory effects as specified by VarHandle#setVolatile.
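
Before the project examples below, here is a minimal, self-contained sketch of set in action (illustrative only, not taken from the projects below). One thread publishes the flag with set; because set has volatile memory effects, the polling thread observes the change promptly.

import java.util.concurrent.atomic.AtomicBoolean;

public class AtomicBooleanSetDemo {
    public static void main(String[] args) throws InterruptedException {
        final AtomicBoolean ready = new AtomicBoolean(false);

        Thread waiter = new Thread(() -> {
            // Spin until another thread publishes true; set(true) has
            // volatile memory effects, so this read observes it promptly.
            while (!ready.get()) {
                Thread.yield();
            }
            System.out.println("flag observed as true");
        });
        waiter.start();

        Thread.sleep(100L);
        ready.set(true); // publish the new value to all threads
        waiter.join();
    }
}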

Usage

From source file:org.apache.tez.dag.app.rm.TestContainerReuse.java

@Test(timeout = 30000L)
public void testReuseLocalResourcesChanged() throws IOException, InterruptedException, ExecutionException {
    Configuration tezConf = new Configuration(new YarnConfiguration());
    tezConf.setBoolean(TezConfiguration.TEZ_AM_CONTAINER_REUSE_ENABLED, true);
    tezConf.setBoolean(TezConfiguration.TEZ_AM_CONTAINER_REUSE_RACK_FALLBACK_ENABLED, true);
    tezConf.setBoolean(TezConfiguration.TEZ_AM_CONTAINER_REUSE_NON_LOCAL_FALLBACK_ENABLED, true);
    tezConf.setLong(TezConfiguration.TEZ_AM_CONTAINER_REUSE_LOCALITY_DELAY_ALLOCATION_MILLIS, 0);
    tezConf.setLong(TezConfiguration.TEZ_AM_CONTAINER_IDLE_RELEASE_TIMEOUT_MIN_MILLIS, -1);
    RackResolver.init(tezConf);
    TaskSchedulerAppCallback mockApp = mock(TaskSchedulerAppCallback.class);

    CapturingEventHandler eventHandler = new CapturingEventHandler();
    TezDAGID dagID1 = TezDAGID.getInstance("0", 1, 0);

    AMRMClient<CookieContainerRequest> rmClientCore = new AMRMClientForTest();
    TezAMRMClientAsync<CookieContainerRequest> rmClient = spy(new AMRMClientAsyncForTest(rmClientCore, 100));
    String appUrl = "url";
    String appMsg = "success";
    AppFinalStatus finalStatus = new AppFinalStatus(FinalApplicationStatus.SUCCEEDED, appMsg, appUrl);

    doReturn(finalStatus).when(mockApp).getFinalAppStatus();

    AppContext appContext = mock(AppContext.class);
    ChangingDAGIDAnswer dagIDAnswer = new ChangingDAGIDAnswer(dagID1);
    AMContainerMap amContainerMap = new AMContainerMap(mock(ContainerHeartbeatHandler.class),
            mock(TaskAttemptListener.class), new ContainerContextMatcher(), appContext);
    AMNodeTracker amNodeTracker = new AMNodeTracker(eventHandler, appContext);
    doReturn(amContainerMap).when(appContext).getAllContainers();
    doReturn(amNodeTracker).when(appContext).getNodeTracker();
    doReturn(DAGAppMasterState.RUNNING).when(appContext).getAMState();
    doReturn(true).when(appContext).isSession();
    doAnswer(dagIDAnswer).when(appContext).getCurrentDAGID();
    doReturn(mock(ClusterInfo.class)).when(appContext).getClusterInfo();

    TaskSchedulerEventHandler taskSchedulerEventHandlerReal = new TaskSchedulerEventHandlerForTest(appContext,
            eventHandler, rmClient, new AlwaysMatchesContainerMatcher());
    TaskSchedulerEventHandler taskSchedulerEventHandler = spy(taskSchedulerEventHandlerReal);
    taskSchedulerEventHandler.init(tezConf);
    taskSchedulerEventHandler.start();

    TaskSchedulerWithDrainableAppCallback taskScheduler = (TaskSchedulerWithDrainableAppCallback) ((TaskSchedulerEventHandlerForTest) taskSchedulerEventHandler)
            .getSpyTaskScheduler();
    TaskSchedulerAppCallbackDrainable drainableAppCallback = taskScheduler.getDrainableAppCallback();
    AtomicBoolean drainNotifier = new AtomicBoolean(false);
    taskScheduler.delayedContainerManager.drainedDelayedContainersForTest = drainNotifier;

    Resource resource1 = Resource.newInstance(1024, 1);
    String[] host1 = { "host1" };

    String[] racks = { "/default-rack" };
    Priority priority1 = Priority.newInstance(1);

    String rsrc1 = "rsrc1";
    String rsrc2 = "rsrc2";
    String rsrc3 = "rsrc3";
    LocalResource lr1 = mock(LocalResource.class);
    LocalResource lr2 = mock(LocalResource.class);
    LocalResource lr3 = mock(LocalResource.class);

    AMContainerEventAssignTA assignEvent = null;

    Map<String, LocalResource> dag1LRs = Maps.newHashMap();
    dag1LRs.put(rsrc1, lr1);

    TezVertexID vertexID11 = TezVertexID.getInstance(dagID1, 1);

    //Vertex 1, Task 1, Attempt 1, host1, lr1
    TezTaskAttemptID taID111 = TezTaskAttemptID.getInstance(TezTaskID.getInstance(vertexID11, 1), 1);
    TaskAttempt ta111 = mock(TaskAttempt.class);
    AMSchedulerEventTALaunchRequest lrEvent11 = createLaunchRequestEvent(taID111, ta111, resource1, host1,
            racks, priority1, dag1LRs);

    //Vertex 1, Task 2, Attempt 1, host1, lr1
    TezTaskAttemptID taID112 = TezTaskAttemptID.getInstance(TezTaskID.getInstance(vertexID11, 2), 1);
    TaskAttempt ta112 = mock(TaskAttempt.class);
    AMSchedulerEventTALaunchRequest lrEvent12 = createLaunchRequestEvent(taID112, ta112, resource1, host1,
            racks, priority1, dag1LRs);

    taskSchedulerEventHandler.handleEvent(lrEvent11);
    taskSchedulerEventHandler.handleEvent(lrEvent12);

    Container container1 = createContainer(1, "host1", resource1, priority1);

    // One container allocated.
    drainNotifier.set(false);
    taskScheduler.onContainersAllocated(Collections.singletonList(container1));
    TestTaskSchedulerHelpers.waitForDelayedDrainNotify(drainNotifier);
    drainableAppCallback.drain();
    verify(taskSchedulerEventHandler).taskAllocated(eq(ta111), any(Object.class), eq(container1));
    assignEvent = (AMContainerEventAssignTA) eventHandler.verifyInvocation(AMContainerEventAssignTA.class);
    assertEquals(1, assignEvent.getRemoteTaskLocalResources().size());

    // Task assigned to container completed successfully. Container should be re-used.
    taskSchedulerEventHandler
            .handleEvent(new AMSchedulerEventTAEnded(ta111, container1.getId(), TaskAttemptState.SUCCEEDED));
    drainableAppCallback.drain();
    verify(taskScheduler).deallocateTask(eq(ta111), eq(true));
    verify(taskSchedulerEventHandler).taskAllocated(eq(ta112), any(Object.class), eq(container1));
    verify(rmClient, times(0)).releaseAssignedContainer(eq(container1.getId()));
    eventHandler.verifyNoInvocations(AMContainerEventStopRequest.class);
    assignEvent = (AMContainerEventAssignTA) eventHandler.verifyInvocation(AMContainerEventAssignTA.class);
    assertEquals(1, assignEvent.getRemoteTaskLocalResources().size());
    eventHandler.reset();

    // Task assigned to container completed successfully.
    // Verify reuse across hosts.
    taskSchedulerEventHandler
            .handleEvent(new AMSchedulerEventTAEnded(ta112, container1.getId(), TaskAttemptState.SUCCEEDED));
    drainableAppCallback.drain();
    verify(taskScheduler).deallocateTask(eq(ta112), eq(true));
    verify(rmClient, times(0)).releaseAssignedContainer(eq(container1.getId()));
    eventHandler.verifyNoInvocations(AMContainerEventStopRequest.class);
    eventHandler.reset();

    // Setup DAG2 with additional resources. Make sure the container, even without all resources, is reused.
    TezDAGID dagID2 = TezDAGID.getInstance("0", 2, 0);
    dagIDAnswer.setDAGID(dagID2);

    Map<String, LocalResource> dag2LRs = Maps.newHashMap();
    dag2LRs.put(rsrc2, lr2);
    dag2LRs.put(rsrc3, lr3);

    TezVertexID vertexID21 = TezVertexID.getInstance(dagID2, 1);

    //Vertex 2, Task 1, Attempt 1, host1, lr2
    TezTaskAttemptID taID211 = TezTaskAttemptID.getInstance(TezTaskID.getInstance(vertexID21, 1), 1);
    TaskAttempt ta211 = mock(TaskAttempt.class);
    AMSchedulerEventTALaunchRequest lrEvent21 = createLaunchRequestEvent(taID211, ta211, resource1, host1,
            racks, priority1, dag2LRs);

    //Vertex 2, Task 2, Attempt 1, host1, lr2
    TezTaskAttemptID taID212 = TezTaskAttemptID.getInstance(TezTaskID.getInstance(vertexID21, 2), 1);
    TaskAttempt ta212 = mock(TaskAttempt.class);
    AMSchedulerEventTALaunchRequest lrEvent22 = createLaunchRequestEvent(taID212, ta212, resource1, host1,
            racks, priority1, dag2LRs);

    taskSchedulerEventHandler.handleEvent(lrEvent21);
    taskSchedulerEventHandler.handleEvent(lrEvent22);
    drainableAppCallback.drain();

    // TODO This is terrible, need a better way to ensure the scheduling loop has run
    LOG.info("Sleeping to ensure that the scheduling loop runs");
    Thread.sleep(6000L);
    verify(taskSchedulerEventHandler).taskAllocated(eq(ta211), any(Object.class), eq(container1));
    verify(rmClient, times(0)).releaseAssignedContainer(eq(container1.getId()));
    eventHandler.verifyNoInvocations(AMContainerEventStopRequest.class);
    assignEvent = (AMContainerEventAssignTA) eventHandler.verifyInvocation(AMContainerEventAssignTA.class);
    assertEquals(2, assignEvent.getRemoteTaskLocalResources().size());
    eventHandler.reset();

    taskSchedulerEventHandler
            .handleEvent(new AMSchedulerEventTAEnded(ta211, container1.getId(), TaskAttemptState.SUCCEEDED));
    drainableAppCallback.drain();
    verify(taskScheduler).deallocateTask(eq(ta211), eq(true));
    verify(taskSchedulerEventHandler).taskAllocated(eq(ta212), any(Object.class), eq(container1));
    verify(rmClient, times(0)).releaseAssignedContainer(eq(container1.getId()));
    eventHandler.verifyNoInvocations(AMContainerEventStopRequest.class);
    assignEvent = (AMContainerEventAssignTA) eventHandler.verifyInvocation(AMContainerEventAssignTA.class);
    assertEquals(2, assignEvent.getRemoteTaskLocalResources().size());
    eventHandler.reset();

    taskScheduler.close();
    taskSchedulerEventHandler.close();
}
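
The drainNotifier handshake in this test is a recurring idiom in this file: the test arms the flag with set(false), triggers asynchronous work, and a helper blocks until the scheduler thread flips the flag back with set(true). Below is a hypothetical sketch of such a signal/await pair; the real TestTaskSchedulerHelpers.waitForDelayedDrainNotify may be implemented differently.

import java.util.concurrent.atomic.AtomicBoolean;

// Hypothetical helpers illustrating the arm/signal/await cycle.
final class DrainNotify {
    static void signalDrained(AtomicBoolean notifier) {
        synchronized (notifier) {
            notifier.set(true);   // publish completion
            notifier.notifyAll(); // wake any waiting test thread
        }
    }

    static void awaitDrained(AtomicBoolean notifier) throws InterruptedException {
        synchronized (notifier) {
            while (!notifier.get()) { // loop guards against spurious wakeups
                notifier.wait(1000L);
            }
        }
    }
}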

From source file:org.apache.tez.dag.app.rm.TestContainerReuse.java

@Test(timeout = 10000L)
public void testReuseWithTaskSpecificLaunchCmdOption()
        throws IOException, InterruptedException, ExecutionException {
    Configuration tezConf = new Configuration(new YarnConfiguration());
    tezConf.setBoolean(TezConfiguration.TEZ_AM_CONTAINER_REUSE_ENABLED, true);
    tezConf.setBoolean(TezConfiguration.TEZ_AM_CONTAINER_REUSE_RACK_FALLBACK_ENABLED, true);
    tezConf.setLong(TezConfiguration.TEZ_AM_CONTAINER_REUSE_LOCALITY_DELAY_ALLOCATION_MILLIS, 0);
    tezConf.setLong(TezConfiguration.TEZ_AM_CONTAINER_IDLE_RELEASE_TIMEOUT_MIN_MILLIS, 0);
    //Profile 3 tasks
    tezConf.set(TezConfiguration.TEZ_TASK_SPECIFIC_LAUNCH_CMD_OPTS_LIST, "v1[1,3,4]");
    tezConf.set(TezConfiguration.TEZ_TASK_SPECIFIC_LAUNCH_CMD_OPTS, "dir=/tmp/__VERTEX_NAME__/__TASK_INDEX__");
    TaskSpecificLaunchCmdOption taskSpecificLaunchCmdOption = new TaskSpecificLaunchCmdOption(tezConf);

    RackResolver.init(tezConf);
    TaskSchedulerAppCallback mockApp = mock(TaskSchedulerAppCallback.class);

    CapturingEventHandler eventHandler = new CapturingEventHandler();
    TezDAGID dagID = TezDAGID.getInstance("0", 0, 0);

    AMRMClient<CookieContainerRequest> rmClientCore = new AMRMClientForTest();
    TezAMRMClientAsync<CookieContainerRequest> rmClient = spy(new AMRMClientAsyncForTest(rmClientCore, 100));
    String appUrl = "url";
    String appMsg = "success";
    AppFinalStatus finalStatus = new AppFinalStatus(FinalApplicationStatus.SUCCEEDED, appMsg, appUrl);

    doReturn(finalStatus).when(mockApp).getFinalAppStatus();

    AppContext appContext = mock(AppContext.class);
    AMContainerMap amContainerMap = new AMContainerMap(mock(ContainerHeartbeatHandler.class),
            mock(TaskAttemptListener.class), new ContainerContextMatcher(), appContext);
    AMNodeTracker amNodeTracker = new AMNodeTracker(eventHandler, appContext);
    doReturn(amContainerMap).when(appContext).getAllContainers();
    doReturn(amNodeTracker).when(appContext).getNodeTracker();
    doReturn(DAGAppMasterState.RUNNING).when(appContext).getAMState();
    doReturn(dagID).when(appContext).getCurrentDAGID();
    doReturn(mock(ClusterInfo.class)).when(appContext).getClusterInfo();

    //Use ContainerContextMatcher here.  Otherwise it would not match the JVM options
    TaskSchedulerEventHandler taskSchedulerEventHandlerReal = new TaskSchedulerEventHandlerForTest(appContext,
            eventHandler, rmClient, new ContainerContextMatcher());
    TaskSchedulerEventHandler taskSchedulerEventHandler = spy(taskSchedulerEventHandlerReal);
    taskSchedulerEventHandler.init(tezConf);
    taskSchedulerEventHandler.start();

    TaskSchedulerWithDrainableAppCallback taskScheduler = (TaskSchedulerWithDrainableAppCallback) ((TaskSchedulerEventHandlerForTest) taskSchedulerEventHandler)
            .getSpyTaskScheduler();
    TaskSchedulerAppCallbackDrainable drainableAppCallback = taskScheduler.getDrainableAppCallback();
    AtomicBoolean drainNotifier = new AtomicBoolean(false);
    taskScheduler.delayedContainerManager.drainedDelayedContainersForTest = drainNotifier;

    Resource resource1 = Resource.newInstance(1024, 1);
    String[] host1 = { "host1" };
    String[] host2 = { "host2" };
    String[] host3 = { "host3" };

    String[] racks = { "/default-rack" };
    Priority priority1 = Priority.newInstance(1);

    TezVertexID vertexID1 = TezVertexID.getInstance(dagID, 1);
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    String tsLaunchCmdOpts = taskSpecificLaunchCmdOption.getTaskSpecificOption("", "v1", 1);

    /**
     * Schedule 2 tasks (1 with additional launch-cmd option and another in normal mode).
     * Container should not be reused in this case.
     */
    //Vertex 1, Task 1, Attempt 1, host1
    TezTaskAttemptID taID11 = TezTaskAttemptID.getInstance(TezTaskID.getInstance(vertexID1, 1), 1);
    TaskAttempt ta11 = mock(TaskAttempt.class);
    AMSchedulerEventTALaunchRequest lrEvent1 = createLaunchRequestEvent(taID11, ta11, resource1, host1, racks,
            priority1, localResources, tsLaunchCmdOpts);

    //Vertex 1, Task 2, Attempt 1, host1
    TezTaskAttemptID taID12 = TezTaskAttemptID.getInstance(TezTaskID.getInstance(vertexID1, 2), 1);
    TaskAttempt ta12 = mock(TaskAttempt.class);
    AMSchedulerEventTALaunchRequest lrEvent2 = createLaunchRequestEvent(taID12, ta12, resource1, host1, racks,
            priority1);

    taskSchedulerEventHandler.handleEvent(lrEvent1);
    taskSchedulerEventHandler.handleEvent(lrEvent2);

    Container container1 = createContainer(1, "host1", resource1, priority1);

    // One container allocated.
    drainNotifier.set(false);
    taskScheduler.onContainersAllocated(Collections.singletonList(container1));
    TestTaskSchedulerHelpers.waitForDelayedDrainNotify(drainNotifier);
    drainableAppCallback.drain();
    verify(taskSchedulerEventHandler).taskAllocated(eq(ta11), any(Object.class), eq(container1));

    // First task had profiling on. This container cannot be reused further.
    taskSchedulerEventHandler
            .handleEvent(new AMSchedulerEventTAEnded(ta11, container1.getId(), TaskAttemptState.SUCCEEDED));
    drainableAppCallback.drain();
    verify(taskScheduler).deallocateTask(eq(ta11), eq(true));
    verify(taskSchedulerEventHandler, times(0)).taskAllocated(eq(ta12), any(Object.class), eq(container1));
    verify(rmClient, times(1)).releaseAssignedContainer(eq(container1.getId()));
    eventHandler.verifyInvocation(AMContainerEventStopRequest.class);
    eventHandler.reset();

    /**
     * Schedule 2 tasks (both having different task specific JVM option).
     * Container should not be reused.
     */
    //Vertex 1, Task 3, Attempt 1, host2
    tsLaunchCmdOpts = taskSpecificLaunchCmdOption.getTaskSpecificOption("", "v1", 3);
    TezTaskAttemptID taID13 = TezTaskAttemptID.getInstance(TezTaskID.getInstance(vertexID1, 3), 1);
    TaskAttempt ta13 = mock(TaskAttempt.class);
    AMSchedulerEventTALaunchRequest lrEvent3 = createLaunchRequestEvent(taID13, ta13, resource1, host2, racks,
            priority1, localResources, tsLaunchCmdOpts);

    //Vertex 1, Task 4, Attempt 1, host2
    tsLaunchCmdOpts = taskSpecificLaunchCmdOption.getTaskSpecificOption("", "v1", 4);
    TezTaskAttemptID taID14 = TezTaskAttemptID.getInstance(TezTaskID.getInstance(vertexID1, 4), 1);
    TaskAttempt ta14 = mock(TaskAttempt.class);
    AMSchedulerEventTALaunchRequest lrEvent4 = createLaunchRequestEvent(taID14, ta14, resource1, host2, racks,
            priority1, localResources, tsLaunchCmdOpts);

    Container container2 = createContainer(2, "host2", resource1, priority1);
    taskSchedulerEventHandler.handleEvent(lrEvent3);
    taskSchedulerEventHandler.handleEvent(lrEvent4);

    // Container started
    drainNotifier.set(false);
    taskScheduler.onContainersAllocated(Collections.singletonList(container2));
    TestTaskSchedulerHelpers.waitForDelayedDrainNotify(drainNotifier);
    drainableAppCallback.drain();
    verify(taskSchedulerEventHandler).taskAllocated(eq(ta13), any(Object.class), eq(container2));

    // Verify that the container cannot be reused when the profiling option is turned on
    // and the tasks carry different task-specific options; only tasks with identical
    // options could share a container (verified in the next section).
    taskSchedulerEventHandler
            .handleEvent(new AMSchedulerEventTAEnded(ta13, container2.getId(), TaskAttemptState.SUCCEEDED));
    drainableAppCallback.drain();
    verify(taskScheduler).deallocateTask(eq(ta13), eq(true));
    verify(taskSchedulerEventHandler, times(0)).taskAllocated(eq(ta14), any(Object.class), eq(container2));
    verify(rmClient, times(1)).releaseAssignedContainer(eq(container2.getId()));
    eventHandler.verifyInvocation(AMContainerEventStopRequest.class);
    eventHandler.reset();

    /**
     * Schedule 2 tasks with same jvm profiling option.
     * Container should be reused.
     */
    tezConf.set(TezConfiguration.TEZ_TASK_SPECIFIC_LAUNCH_CMD_OPTS_LIST, "v1[1,2,3,5,6]");
    tezConf.set(TezConfiguration.TEZ_TASK_SPECIFIC_LAUNCH_CMD_OPTS, "dummyOpts");
    taskSpecificLaunchCmdOption = new TaskSpecificLaunchCmdOption(tezConf);

    //Vertex 1, Task 5, Attempt 1, host3
    TezTaskAttemptID taID15 = TezTaskAttemptID.getInstance(TezTaskID.getInstance(vertexID1, 5), 1);
    TaskAttempt ta15 = mock(TaskAttempt.class);
    AMSchedulerEventTALaunchRequest lrEvent5 = createLaunchRequestEvent(taID15, ta15, resource1, host3, racks,
            priority1, localResources, taskSpecificLaunchCmdOption.getTaskSpecificOption("", "v1", 5));

    //Vertex 1, Task 6, Attempt 1, host3
    TezTaskAttemptID taID16 = TezTaskAttemptID.getInstance(TezTaskID.getInstance(vertexID1, 6), 1);
    TaskAttempt ta16 = mock(TaskAttempt.class);
    AMSchedulerEventTALaunchRequest lrEvent6 = createLaunchRequestEvent(taID16, ta16, resource1, host3, racks,
            priority1, localResources, taskSpecificLaunchCmdOption.getTaskSpecificOption("", "v1", 6));

    // Container started
    Container container3 = createContainer(3, "host3", resource1, priority1);
    taskSchedulerEventHandler.handleEvent(lrEvent5);
    taskSchedulerEventHandler.handleEvent(lrEvent6);

    drainNotifier.set(false);
    taskScheduler.onContainersAllocated(Collections.singletonList(container3));
    TestTaskSchedulerHelpers.waitForDelayedDrainNotify(drainNotifier);
    drainableAppCallback.drain();
    verify(taskSchedulerEventHandler).taskAllocated(eq(ta15), any(Object.class), eq(container3));

    //Ensure task 6 (of vertex 1) is allocated to same container
    taskSchedulerEventHandler
            .handleEvent(new AMSchedulerEventTAEnded(ta15, container3.getId(), TaskAttemptState.SUCCEEDED));
    drainableAppCallback.drain();
    verify(taskScheduler).deallocateTask(eq(ta15), eq(true));
    verify(taskSchedulerEventHandler).taskAllocated(eq(ta16), any(Object.class), eq(container3));
    eventHandler.reset();

    taskScheduler.close();
    taskSchedulerEventHandler.close();
}

From source file:com.atlassian.jira.bc.group.TestDefaultGroupService.java

@Test
public void testValidateAddUsersToGroupWillExceedLicenseLimit() {
    final AtomicBoolean validateGroupNamesExistCalled = new AtomicBoolean(false);
    final AtomicBoolean isUserNullCalled = new AtomicBoolean(false);
    final AtomicBoolean isExternalUserManagementEnabledCalled = new AtomicBoolean(false);
    final AtomicBoolean getNonMemberGroupsCalled = new AtomicBoolean(false);
    final AtomicBoolean validateUserIsNotInSelectedGroupsCalled = new AtomicBoolean(false);
    final Mock mockUserUtil = new Mock(UserUtil.class);
    mockUserUtil.expectAndReturn("canActivateNumberOfUsers", new Constraint[] { P.eq(1) }, Boolean.FALSE);
    final DefaultGroupService defaultGroupService = new DefaultGroupService(null, null, null, null, null, null,
            null, null, (UserUtil) mockUserUtil.proxy(), null, null, null, null, null) {
        @Override
        boolean validateGroupNamesExist(final Collection groupNames, final ErrorCollection errorCollection,
                final I18nHelper i18n) {
            validateGroupNamesExistCalled.set(true);
            return true;
        }

        @Override
        boolean isUserNull(final User user) {
            isUserNullCalled.set(true);
            return false;
        }

        @Override
        boolean isExternalUserManagementEnabled() {
            isExternalUserManagementEnabledCalled.set(true);
            return false;
        }

        @Override
        List getGroupNamesUserCanSee(final com.atlassian.crowd.embedded.api.User currentUser) {
            getNonMemberGroupsCalled.set(true);
            return EasyList.build("SomeOtherGroup", "SomeGroup");
        }

        @Override
        boolean validateUserIsNotInSelectedGroups(final JiraServiceContext jiraServiceContext,
                final Collection selectedGroupsNames, final User user) {
            validateUserIsNotInSelectedGroupsCalled.set(true);
            return true;
        }

        @Override
        User getUser(final String userName) {
            return null;
        }

        @Override
        boolean userHasAdminPermission(final User user) {
            return true;
        }

        @Override
        boolean groupsHaveGlobalUsePermissions(final Collection /* <String> */ groupNames) {
            return true;
        }
    };
    final SimpleErrorCollection errors = new SimpleErrorCollection();
    final JiraServiceContext jiraServiceContext = getContext(errors);
    assertFalse(defaultGroupService
            .validateAddUsersToGroup(jiraServiceContext, EasyList.build("SomeGroup"), EasyList.build("dude"))
            .isSuccess());
    assertTrue(validateGroupNamesExistCalled.get());
    assertTrue(isUserNullCalled.get());
    assertTrue(isExternalUserManagementEnabledCalled.get());
    assertTrue(getNonMemberGroupsCalled.get());
    assertTrue(validateUserIsNotInSelectedGroupsCalled.get());
    assertEquals(1, errors.getErrorMessages().size());
    assertEquals(
            "Adding the user to the groups you have selected will grant the 'JIRA Users' permission to the user"
                    + " in JIRA. This will exceed the number of users allowed to use JIRA under your license. Please"
                    + " reduce the number of users with the 'JIRA Users', 'JIRA Administrators' or 'JIRA System"
                    + " Administrators' global permissions or consider upgrading your license.",
            errors.getErrorMessages().iterator().next());
}
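
The five AtomicBoolean fields in this test exist to record that each overridden method actually ran. A plain boolean local would not compile here: locals captured by an anonymous class must be final (or effectively final) and cannot be reassigned, so a mutable holder such as AtomicBoolean is used instead. A stripped-down sketch of the same pattern:

import java.util.concurrent.atomic.AtomicBoolean;

public class CallTrackingDemo {
    static class Validator {
        boolean validate() {
            return false;
        }
    }

    public static void main(String[] args) {
        final AtomicBoolean validateCalled = new AtomicBoolean(false);
        Validator validator = new Validator() {
            @Override
            boolean validate() {
                validateCalled.set(true); // record that the override ran
                return true;
            }
        };
        validator.validate();
        System.out.println("override invoked: " + validateCalled.get());
    }
}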

From source file:io.cloudslang.worker.management.services.OutboundBufferTest.java

@Test
public void longevityTest() throws InterruptedException {
    int THREADS_NUM = 5;
    long CHECK_DURATION = 5 * 1000L;
    long INFO_FREQUENCY = 2 * 1000L;

    final AtomicBoolean run = new AtomicBoolean(true);
    final CountDownLatch latch = new CountDownLatch(THREADS_NUM + 1);

    for (int i = 1; i <= THREADS_NUM; i++) {
        final int index = i;
        new Thread(new Runnable() {
            private final Class<? extends Message> messageClass = (index % 2) != 0 ? DummyMsg1.class
                    : DummyMsg2.class;

            @Override
            public void run() {
                int counter = 0;
                try {
                    logger.debug("started, will generate messages of " + messageClass.getSimpleName());

                    while (run.get()) {
                        buffer.put(messageClass.newInstance());
                        counter++;
                        Thread.sleep(5L);
                    }
                    logger.debug("thread finished. processed " + counter + " messages");
                } catch (Exception ex) {
                    logger.error("thread finished", ex);
                } finally {
                    latch.countDown();
                }
            }
        }, "T-" + i).start();
    }

    final DrainStatistics statistics = new DrainStatistics();
    //noinspection unchecked
    doAnswer(new Answer<Object>() {
        @Override
        public Object answer(InvocationOnMock invocation) throws Throwable {
            @SuppressWarnings("unchecked")
            List<Message> messages = (List<Message>) invocation.getArguments()[0];
            int weight = 0;
            for (Message message : messages)
                weight += message.getWeight();
            statistics.add(messages.size(), weight);
            return null;
        }
    }).when(dispatcherService).dispatch(anyList(), anyString(), anyString(), anyString());

    new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                logger.debug("started");

                while (run.get()) {
                    buffer.drain();
                    Thread.sleep(30L);
                }

                while (buffer.getSize() > 0)
                    buffer.drain();
            } catch (Exception ex) {
                logger.error("thread finished", ex);
            } finally {
                latch.countDown();
            }
        }
    }, "T-D").start();

    long t = System.currentTimeMillis();
    while (System.currentTimeMillis() - t < CHECK_DURATION) {
        Thread.sleep(INFO_FREQUENCY);
        logger.debug(buffer.getStatus());
    }
    run.set(false);
    latch.await();

    System.out.println("Drain statistics: " + statistics.report());
}
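
longevityTest uses the classic cooperative-shutdown idiom: every worker loop polls run.get(), the test stops all of them with a single run.set(false), and a latch confirms they have exited. A condensed sketch of just that shape:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;

public class StopFlagDemo {
    public static void main(String[] args) throws InterruptedException {
        final AtomicBoolean run = new AtomicBoolean(true);
        final CountDownLatch done = new CountDownLatch(1);

        new Thread(() -> {
            long iterations = 0;
            while (run.get()) { // volatile read: sees set(false) promptly
                iterations++;   // stand-in for real work
            }
            System.out.println("worker stopped after " + iterations + " iterations");
            done.countDown();
        }).start();

        Thread.sleep(50L);
        run.set(false); // one write stops every polling worker
        done.await();
    }
}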

From source file:org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator.java

@Test
public void testHeartbeatHandler() throws Exception {
    LOG.info("Running testHeartbeatHandler");

    Configuration conf = new Configuration();
    conf.setInt(MRJobConfig.MR_AM_TO_RM_HEARTBEAT_INTERVAL_MS, 1);
    ControlledClock clock = new ControlledClock(new SystemClock());
    AppContext appContext = mock(AppContext.class);
    when(appContext.getClock()).thenReturn(clock);
    when(appContext.getApplicationID()).thenReturn(ApplicationId.newInstance(1, 1));

    RMContainerAllocator allocator = new RMContainerAllocator(mock(ClientService.class), appContext) {
        @Override
        protected void register() {
        }

        @Override
        protected ApplicationMasterProtocol createSchedulerProxy() {
            return mock(ApplicationMasterProtocol.class);
        }

        @Override
        protected synchronized void heartbeat() throws Exception {
        }
    };
    allocator.init(conf);
    allocator.start();

    clock.setTime(5);
    int timeToWaitMs = 5000;
    while (allocator.getLastHeartbeatTime() != 5 && timeToWaitMs > 0) {
        Thread.sleep(10);
        timeToWaitMs -= 10;
    }
    Assert.assertEquals(5, allocator.getLastHeartbeatTime());
    clock.setTime(7);
    timeToWaitMs = 5000;
    while (allocator.getLastHeartbeatTime() != 7 && timeToWaitMs > 0) {
        Thread.sleep(10);
        timeToWaitMs -= 10;
    }
    Assert.assertEquals(7, allocator.getLastHeartbeatTime());

    final AtomicBoolean callbackCalled = new AtomicBoolean(false);
    allocator.runOnNextHeartbeat(new Runnable() {
        @Override
        public void run() {
            callbackCalled.set(true);
        }
    });
    clock.setTime(8);
    timeToWaitMs = 5000;
    while (allocator.getLastHeartbeatTime() != 8 && timeToWaitMs > 0) {
        Thread.sleep(10);
        timeToWaitMs -= 10;
    }
    Assert.assertEquals(8, allocator.getLastHeartbeatTime());
    Assert.assertTrue(callbackCalled.get());
}
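
Here callbackCalled is flipped inside a heartbeat callback and only read after the poll loop confirms the heartbeat ran. When no such side channel exists, tests often wait on the flag itself with a bounded poll; a small hypothetical helper for that (not part of the Hadoop test code):

import java.util.concurrent.atomic.AtomicBoolean;

final class TestAwait {
    // Polls the flag until it becomes true or the timeout elapses.
    static boolean awaitTrue(AtomicBoolean flag, long timeoutMs) throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!flag.get()) {
            if (System.currentTimeMillis() >= deadline) {
                return false; // timed out; the flag was never set
            }
            Thread.sleep(10L);
        }
        return true;
    }
}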

From source file:org.apache.hadoop.hdfs.TestBlockReaderFactory.java

/**
 * When an InterruptedException is sent to a thread calling
 * FileChannel#read, the FileChannel is immediately closed and the
 * thread gets an exception.  This effectively means that we might have
 * someone asynchronously calling close() on the file descriptors we use
 * in BlockReaderLocal.  So when unreferencing a ShortCircuitReplica in
 * ShortCircuitCache#unref, we should check if the FileChannel objects
 * are still open.  If not, we should purge the replica to avoid giving
 * it out to any future readers.
 *
 * This is a regression test for HDFS-6227: Short circuit read failed
 * due to ClosedChannelException.
 *
 * Note that you may still get ClosedChannelException errors if two threads
 * are reading from the same replica and an InterruptedException is delivered
 * to one of them.
 */
@Test(timeout = 120000)
public void testPurgingClosedReplicas() throws Exception {
    BlockReaderTestUtil.enableBlockReaderFactoryTracing();
    final AtomicInteger replicasCreated = new AtomicInteger(0);
    final AtomicBoolean testFailed = new AtomicBoolean(false);
    DFSInputStream.tcpReadsDisabledForTesting = true;
    BlockReaderFactory.createShortCircuitReplicaInfoCallback = new ShortCircuitCache.ShortCircuitReplicaCreator() {
        @Override
        public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
            replicasCreated.incrementAndGet();
            return null;
        }
    };
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration conf = createShortCircuitConf("testPurgingClosedReplicas", sockDir);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4095;
    final int SEED = 0xFADE0;
    final DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(cluster.getURI(0), conf);
    DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    final Semaphore sem = new Semaphore(0);
    final List<LocatedBlock> locatedBlocks = cluster.getNameNode().getRpcServer()
            .getBlockLocations(TEST_FILE, 0, TEST_FILE_LEN).getLocatedBlocks();
    final LocatedBlock lblock = locatedBlocks.get(0); // first block
    final byte[] buf = new byte[TEST_FILE_LEN];
    Runnable readerRunnable = new Runnable() {
        @Override
        public void run() {
            try {
                while (true) {
                    BlockReader blockReader = null;
                    try {
                        blockReader = BlockReaderTestUtil.getBlockReader(cluster, lblock, 0, TEST_FILE_LEN);
                        sem.release();
                        try {
                            blockReader.readAll(buf, 0, TEST_FILE_LEN);
                        } finally {
                            sem.acquireUninterruptibly();
                        }
                    } catch (ClosedByInterruptException e) {
                        LOG.info("got the expected ClosedByInterruptException", e);
                        sem.release();
                        break;
                    } finally {
                        if (blockReader != null)
                            blockReader.close();
                    }
                    LOG.info("read another " + TEST_FILE_LEN + " bytes.");
                }
            } catch (Throwable t) {
                LOG.error("getBlockReader failure", t);
                testFailed.set(true);
                sem.release();
            }
        }
    };
    Thread thread = new Thread(readerRunnable);
    thread.start();
    // While the thread is reading, send it interrupts.
    // These should trigger a ClosedChannelException.
    while (thread.isAlive()) {
        sem.acquireUninterruptibly();
        thread.interrupt();
        sem.release();
    }
    Assert.assertFalse(testFailed.get());
    // We should be able to read from the file without
    // getting a ClosedChannelException.
    BlockReader blockReader = null;
    try {
        blockReader = BlockReaderTestUtil.getBlockReader(cluster, lblock, 0, TEST_FILE_LEN);
        blockReader.readFully(buf, 0, TEST_FILE_LEN);
    } finally {
        if (blockReader != null)
            blockReader.close();
    }
    byte[] expected = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
    Assert.assertTrue(Arrays.equals(buf, expected));
    // Another ShortCircuitReplica object should have been created.
    Assert.assertEquals(2, replicasCreated.get());
    dfs.close();
    cluster.shutdown();
    sockDir.close();
}
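
The javadoc above hinges on a java.nio behavior worth seeing in isolation: interrupting a thread that uses an interruptible channel closes the channel, and the I/O call fails with ClosedByInterruptException. A minimal standalone sketch of that mechanism (independent of HDFS), with testFailed-style flag handling omitted for brevity:

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedByInterruptException;
import java.nio.channels.FileChannel;

public class InterruptClosesChannelDemo {
    public static void main(String[] args) throws Exception {
        File tmp = File.createTempFile("demo", ".bin");
        tmp.deleteOnExit();

        Thread reader = new Thread(() -> {
            try (FileChannel ch = new RandomAccessFile(tmp, "r").getChannel()) {
                ByteBuffer buf = ByteBuffer.allocate(1);
                while (true) {
                    buf.clear();
                    ch.read(buf, 0); // fails once this thread is interrupted
                }
            } catch (ClosedByInterruptException e) {
                System.out.println("channel closed by interrupt, as expected");
            } catch (IOException e) {
                e.printStackTrace();
            }
        });
        reader.start();
        Thread.sleep(50L);
        reader.interrupt(); // closes the FileChannel out from under the read
        reader.join();
    }
}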

From source file:org.apache.hadoop.hbase.client.crosssite.CrossSiteHTable.java

@Override
public <R> void batchCallback(final List<? extends Row> actions, final Object[] results,
        final Callback<R> callback) throws IOException, InterruptedException {
    if (results.length != actions.size()) {
        throw new IllegalArgumentException("argument results must be the same size as argument actions");
    }
    if (actions.isEmpty()) {
        return;
    }
    ClusterLocator clusterLocator = cachedZKInfo.clusterLocator;
    Map<String, Map<Integer, Row>> clusterMap = new TreeMap<String, Map<Integer, Row>>();
    Map<Integer, Object> rmap = new TreeMap<Integer, Object>();
    int index = 0;
    for (Row action : actions) {
        String clusterName = clusterLocator.getClusterName(action.getRow());
        Map<Integer, Row> rows = clusterMap.get(clusterName);
        if (rows == null) {
            rows = new TreeMap<Integer, Row>();
            clusterMap.put(clusterName, rows);
        }
        rows.put(Integer.valueOf(index++), action);
    }

    final AtomicBoolean hasError = new AtomicBoolean(false);
    Map<String, Future<Map<Integer, Object>>> futures = new HashMap<String, Future<Map<Integer, Object>>>();
    for (final Entry<String, Map<Integer, Row>> entry : clusterMap.entrySet()) {
        futures.put(entry.getKey(), pool.submit(new Callable<Map<Integer, Object>>() {

            @Override
            public Map<Integer, Object> call() throws Exception {
                Map<Integer, Object> map = new TreeMap<Integer, Object>();
                Map<Integer, Row> rowMap = entry.getValue();
                Object[] rs = new Object[rowMap.size()];
                List<Integer> indexes = new ArrayList<Integer>(rowMap.size());
                List<Row> rows = new ArrayList<Row>(rowMap.size());
                try {
                    HTableInterface table = getClusterHTable(entry.getKey());
                    for (Entry<Integer, Row> rowEntry : rowMap.entrySet()) {
                        indexes.add(rowEntry.getKey());
                        rows.add(rowEntry.getValue());
                    }
                    table.batchCallback(rows, rs, callback);
                } catch (IOException e) {
                    // need to clear the cached HTable if the connection is refused
                    clearCachedTable(entry.getKey());
                    hasError.set(true);
                    LOG.error(e);
                } finally {
                    int index = 0;
                    for (Object r : rs) {
                        map.put(indexes.get(index++), r);
                    }
                }
                return map;
            }
        }));
    }

    try {
        for (Entry<String, Future<Map<Integer, Object>>> result : futures.entrySet()) {
            rmap.putAll(result.getValue().get());
        }
    } catch (Exception e) {
        // do nothing
    }

    for (int i = 0; i < actions.size(); i++) {
        results[i] = rmap.get(Integer.valueOf(i));
    }
    if (hasError.get()) {
        throw new IOException("one or more batch actions failed; see logs for details");
    }
}
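
batchCallback fans the actions out to a thread pool and lets any worker record failure by calling hasError.set(true); the caller checks the flag once, after all futures have completed, and throws. A minimal sketch of that error-propagation shape, assuming a generic ExecutorService rather than the class's own pool:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicBoolean;

public class ErrorFlagDemo {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        final AtomicBoolean hasError = new AtomicBoolean(false);
        List<Future<?>> futures = new ArrayList<>();

        for (int i = 0; i < 4; i++) {
            final int task = i;
            futures.add(pool.submit(() -> {
                try {
                    if (task == 2) {
                        throw new IOException("simulated failure");
                    }
                } catch (IOException e) {
                    hasError.set(true); // record the failure, let other tasks finish
                }
            }));
        }
        for (Future<?> f : futures) {
            f.get(); // wait for all workers
        }
        pool.shutdown();
        if (hasError.get()) {
            throw new IOException("at least one batch worker failed");
        }
    }
}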

From source file:de.dal33t.powerfolder.Controller.java

/**
 * Waits for the repo to finish syncing, then requests system shutdown and
 * exits PF.
 *
 * @param secWait
 *            number of seconds to wait.
 */
public void exitAfterSync(int secWait) {
    logInfo("Sync and exit initiated. Begin check in " + secWait + 's');
    final AtomicBoolean oneShot = new AtomicBoolean(true);
    scheduleAndRepeat(new Runnable() {
        @Override
        public void run() {
            // ALPS Problem: Change to check for all in sync.
            if (oneShot.get() && folderRepository.isInSync()) {
                // Make sure we only try to shutdown once,
                // in case the user aborts the shutdown.
                oneShot.set(false);
                log.info("I'm in sync - exit now. Sync and exit was triggered.");
                exit(0);
            }
        }
    }, 1000L * secWait, 10000);
}
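
The oneShot flag turns a repeating scheduled task into a one-time trigger. The get()-then-set(false) pair here is fine because the scheduler runs the task serially; if concurrent callers were possible, compareAndSet(true, false) would perform the check and the flip as a single atomic step. A brief sketch of that race-free variant:

import java.util.concurrent.atomic.AtomicBoolean;

public class OneShotDemo {
    private final AtomicBoolean oneShot = new AtomicBoolean(true);

    void maybeTrigger(boolean condition) {
        // compareAndSet succeeds for exactly one caller, even under contention.
        if (condition && oneShot.compareAndSet(true, false)) {
            System.out.println("action triggered exactly once");
        }
    }
}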

From source file:com.splout.db.integration.TestMultiThreadedQueryAndDeploy.java

@Test
@Ignore // Causes some non-deterministic problems, to be analyzed
public void test() throws Throwable {
    FileUtils.deleteDirectory(new File(TMP_FOLDER));
    new File(TMP_FOLDER).mkdirs();

    createSploutEnsemble(N_QNODES, N_DNODES);
    String[] qNodeAddresses = new String[N_QNODES];
    for (int i = 0; i < N_QNODES; i++) {
        qNodeAddresses[i] = getqNodes().get(i).getAddress();
    }

    final SploutClient client = new SploutClient(qNodeAddresses);
    final Tablespace testTablespace = createTestTablespace(N_DNODES);
    final Random random = new Random(SEED);
    final AtomicBoolean failed = new AtomicBoolean(false);
    final AtomicInteger iteration = new AtomicInteger(0);
    final Set<Integer> iterationsSeen = new HashSet<Integer>();

    deployIteration(0, random, client, testTablespace);

    for (QNode qnode : getqNodes()) {
        // Make sure all QNodes are aware of the first deploy
        // There might be some delay as they have to receive notifications via Hazelcast etc
        long waitedSoFar = 0;
        QueryStatus status = null;
        SploutClient perQNodeClient = new SploutClient(qnode.getAddress());
        do {
            status = perQNodeClient.query(TABLESPACE, "0", "SELECT * FROM " + TABLE + ";", null);
            Thread.sleep(100);
            waitedSoFar += 100;
            if (waitedSoFar > 5000) {
                throw new AssertionError("Waiting too much on a test condition");
            }
        } while (status == null || status.getError() != null);
        log.info("QNode [" + qnode.getAddress() + "] is ready to serve deploy 0.");
    }

    try {
        // Business logic here
        ExecutorService service = Executors.newFixedThreadPool(N_THREADS);

        // These threads will continuously perform queries and check that the results are consistent.
        // They will also count how many deploys have happened since the beginning.
        for (int i = 0; i < N_THREADS; i++) {
            service.submit(new Runnable() {
                @Override
                public void run() {
                    try {
                        while (true) {
                            int randomDNode = Math.abs(random.nextInt()) % N_DNODES;
                            QueryStatus status = client.query(TABLESPACE, (randomDNode * 10) + "",
                                    "SELECT * FROM " + TABLE + ";", null);
                            log.info("Query status -> " + status);
                            assertEquals(1, status.getResult().size());
                            Map<String, Object> jsonResult = (Map<String, Object>) status.getResult().get(0);
                            Integer seenIteration = (Integer) jsonResult.get("iteration");
                            synchronized (iterationsSeen) {
                                iterationsSeen.add(seenIteration);
                            }
                            assertTrue(seenIteration <= iteration.get());
                            assertEquals(randomDNode, jsonResult.get("dnode"));
                            Thread.sleep(100);
                        }
                    } catch (InterruptedException ie) {
                        // Bye bye
                        log.info("Bye bye!");
                    } catch (Throwable e) {
                        e.printStackTrace();
                        failed.set(true);
                    }
                }
            });
        }

        final SploutConfiguration config = SploutConfiguration.getTestConfig();
        final int iterationsToPerform = config.getInt(QNodeProperties.VERSIONS_PER_TABLESPACE) + 5;
        for (int i = 0; i < iterationsToPerform; i++) {
            iteration.incrementAndGet();
            log.info("Deploy iteration: " + iteration.get());
            deployIteration(iteration.get(), random, client, testTablespace);

            new TestUtils.NotWaitingForeverCondition() {
                @Override
                public boolean endCondition() {
                    synchronized (iterationsSeen) {
                        return iterationsSeen.size() == (iteration.get() + 1);
                    }
                }
            }.waitAtMost(5000);
        }

        assertEquals(false, failed.get());

        service.shutdownNow(); // will interrupt all threads
        while (!service.isTerminated()) {
            Thread.sleep(100);
        }

        CoordinationStructures coord = TestUtils.getCoordinationStructures(config);
        assertNotNull(coord.getCopyVersionsBeingServed().get(TABLESPACE));

        // Assert that there is only MAX_VERSIONS versions of the tablespace (due to old version cleanup)
        new TestUtils.NotWaitingForeverCondition() {

            @Override
            public boolean endCondition() {
                QNodeHandler handler = (QNodeHandler) qNodes.get(0).getHandler();
                int seenVersions = 0;
                for (Map.Entry<TablespaceVersion, Tablespace> tablespaceVersion : handler.getContext()
                        .getTablespaceVersionsMap().entrySet()) {
                    if (tablespaceVersion.getKey().getTablespace().equals(TABLESPACE)) {
                        seenVersions++;
                    }
                }
                return seenVersions <= config.getInt(QNodeProperties.VERSIONS_PER_TABLESPACE);
            }
        }.waitAtMost(5000);
    } finally {
        closeSploutEnsemble();
        FileUtils.deleteDirectory(new File(TMP_FOLDER));
    }
}

From source file:de.dal33t.powerfolder.Controller.java

/**
 * Waits for the repo to finish syncing, then requests system shutdown and
 * exits PF.
 *
 * @param password
 *            required only for Linux shutdowns.
 */
public void shutdownAfterSync(final String password) {
    final AtomicBoolean oneShot = new AtomicBoolean(true);
    scheduleAndRepeat(new Runnable() {
        @Override
        public void run() {
            // ALPS Problem: Change to check for all in sync.

            if (oneShot.get() && folderRepository.isInSync()) {
                // Make sure we only try to shutdown once,
                // in case the user aborts the shutdown.
                oneShot.set(false);
                log.info("Sync and shutdown in sync.");
                if (SystemUtil.shutdown(password)) {
                    log.info("Shutdown command issued.");
                    exit(0);
                } else {
                    log.warning("Shutdown command failed.");
                }
            }
        }
    }, 10000, 10000);
}