Example usage for org.apache.cassandra.repair RepairParallelism PARALLEL

Introduction

This page lists example usage of org.apache.cassandra.repair RepairParallelism PARALLEL.

Prototype

RepairParallelism PARALLEL
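
Based on the Apache Cassandra source, PARALLEL is one of the three constants of the RepairParallelism enum. A minimal sketch of the declaration (constant and wire names as in the Cassandra codebase; the inline comments paraphrase its documentation):

public enum RepairParallelism {
    SEQUENTIAL("sequential"),        // one node at a time
    PARALLEL("parallel"),            // all nodes at the same time
    DATACENTER_AWARE("dc_parallel"); // one node per datacenter at a time
}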

Document

All nodes at the same time
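
Before the examples below, here is a minimal sketch of typical enum handling: resolving PARALLEL from its wire name. fromName and getName are methods on the Cassandra enum; the demo class itself is illustrative only.

import org.apache.cassandra.repair.RepairParallelism;

public class ParallelismDemo {
    public static void main(String[] args) {
        // "parallel" is PARALLEL's wire name; in the Cassandra versions these
        // examples target, unrecognized names fall back to SEQUENTIAL.
        RepairParallelism mode = RepairParallelism.fromName("parallel");
        System.out.println(mode == RepairParallelism.PARALLEL); // true
        System.out.println(mode.getName());                     // "parallel"
    }
}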

Usage

From source file:io.cassandrareaper.resources.view.RepairRunStatusTest.java

License:Apache License

@Test
public void testPausedRepairDuration() {
    RepairRunStatus repairStatus = new RepairRunStatus(UUID.randomUUID(), // runId
            "test", // clusterName
            "test", // keyspaceName
            Collections.EMPTY_LIST, // tables
            10, // segmentsRepaired
            100, // totalSegments
            RepairRun.RunState.PAUSED, // state
            DateTime.now().minusMinutes(1).minusSeconds(50), // startTime
            DateTime.now(), // endTime
            "test", // cause
            "alex", // owner
            "", // lastEvent
            new DateTime(2018, 4, 11, 15, 0, 0, DateTimeZone.UTC), // creationTime
            DateTime.now().minusMinutes(1), // pauseTime
            0.9, // intensity
            false, // incremental
            RepairParallelism.PARALLEL, // repairParallelism
            Collections.EMPTY_LIST, // nodes
            Collections.EMPTY_LIST, // datacenters
            Collections.EMPTY_LIST, // blacklist
            1); // repair thread count

    assertEquals("1 minute 50 seconds", repairStatus.getDuration());
}

From source file:io.cassandrareaper.resources.view.RepairRunStatusTest.java

License:Apache License

@Test
public void testAbortedRepairDuration() {
    RepairRunStatus repairStatus = new RepairRunStatus(UUID.randomUUID(), // runId
            "test", // clusterName
            "test", // keyspaceName
            Collections.EMPTY_LIST, // tables
            10, // segmentsRepaired
            100, // totalSegments
            RepairRun.RunState.ABORTED, // state
            DateTime.now().minusMinutes(1).minusSeconds(30), // startTime
            null, // endTime
            "test", // cause
            "alex", // owner
            "", // lastEvent
            new DateTime(2018, 4, 11, 15, 0, 0, DateTimeZone.UTC), // creationTime
            DateTime.now().minusMinutes(1), // pauseTime
            0.9, // intensity
            false, // incremental
            RepairParallelism.PARALLEL, // repairParallelism
            Collections.EMPTY_LIST, // nodes
            Collections.EMPTY_LIST, // datacenters
            Collections.EMPTY_LIST, // blacklist
            1); // repair thread count

    assertEquals("30 seconds", repairStatus.getDuration());
}

From source file:io.cassandrareaper.resources.view.RepairScheduleStatusTest.java

License:Apache License

@Test
public void testJacksonJSONParsing() throws Exception {
    RepairScheduleStatus data = new RepairScheduleStatus();
    data.setClusterName("testCluster");
    data.setColumnFamilies(Lists.<String>newArrayList());
    data.setCreationTime(DateTime.now().withMillis(0));
    data.setDaysBetween(2);
    data.setId(UUIDs.timeBased());
    data.setIntensity(0.75);
    data.setIncrementalRepair(false);
    data.setKeyspaceName("testKeyspace");
    data.setOwner("testuser");
    data.setRepairParallelism(RepairParallelism.PARALLEL);
    data.setState(RepairSchedule.State.ACTIVE);

    ObjectMapper mapper = new ObjectMapper();
    String dataAsJson = mapper.writeValueAsString(data);
    LOG.info("DATA: " + dataAsJson);

    RepairScheduleStatus dataAfter = SimpleReaperClient.parseRepairScheduleStatusJSON(dataAsJson);

    assertEquals(data.getClusterName(), dataAfter.getClusterName());
    assertEquals(data.getColumnFamilies(), dataAfter.getColumnFamilies());
    assertEquals(data.getCreationTime(), dataAfter.getCreationTime());
    assertEquals(data.getDaysBetween(), dataAfter.getDaysBetween());
    assertEquals(data.getId(), dataAfter.getId());
    assertEquals(data.getIntensity(), dataAfter.getIntensity(), 0.0);
    assertEquals(data.getIncrementalRepair(), dataAfter.getIncrementalRepair());
    assertEquals(data.getKeyspaceName(), dataAfter.getKeyspaceName());
    assertEquals(data.getRepairParallelism(), dataAfter.getRepairParallelism());
    assertEquals(data.getState(), dataAfter.getState());
}

From source file:io.cassandrareaper.service.RepairManagerTest.java

License:Apache License

/**
 * Verifies that when a RUNNING segment exists that has no leader it will get aborted. This will
 * happen even if a repair runner exists for the run, when using an IDistributedStorage backend.
 *
 * @throws ReaperException if something goes wrong :)
 */
@Test
public void abortRunningSegmentWithNoLeader() throws ReaperException {
    final String clusterName = "reaper";
    final String ksName = "reaper";
    final Set<String> cfNames = Sets.newHashSet("reaper");
    final boolean incrementalRepair = false;
    final Set<String> nodes = Sets.newHashSet("127.0.0.1");
    final Set<String> datacenters = Collections.emptySet();
    final double intensity = 0.5f;
    final int repairThreadCount = 1;

    // use CassandraStorage so we get both IStorage and IDistributedStorage
    final IStorage storage = mock(CassandraStorage.class);

    storage.addCluster(new Cluster(clusterName, null, Collections.<String>singleton("127.0.0.1")));

    AppContext context = new AppContext();
    context.storage = storage;
    context.config = new ReaperApplicationConfiguration();
    RepairManager repairManager = RepairManager.create(context);
    repairManager = Mockito.spy(repairManager);
    context.repairManager = repairManager;
    context.repairManager.initializeThreadPool(1, 500, TimeUnit.MILLISECONDS, 1, TimeUnit.MILLISECONDS);

    final RepairUnit cf = new RepairUnit.Builder(clusterName, ksName, cfNames, incrementalRepair, nodes,
            datacenters, Collections.emptySet(), repairThreadCount).build(UUIDs.timeBased());

    final RepairRun run = new RepairRun.Builder(clusterName, cf.getId(), DateTime.now(), intensity, 1,
            RepairParallelism.PARALLEL).build(UUIDs.timeBased());

    final RepairSegment segment = RepairSegment
            .builder(Segment.builder().withTokenRange(new RingRange("-1", "1")).build(), cf.getId())
            .withRunId(run.getId()).withId(UUIDs.timeBased()).build();

    context.repairManager.repairRunners.put(run.getId(), mock(RepairRunner.class));

    Mockito.doNothing().when(context.repairManager).abortSegments(any(), any(), Mockito.anyBoolean(),
            Mockito.anyBoolean());
    Mockito.doReturn(run).when(context.repairManager).startRepairRun(run);
    when(context.storage.getRepairRunsWithState(RepairRun.RunState.RUNNING)).thenReturn(Arrays.asList(run));
    when(context.storage.getRepairRunsWithState(RepairRun.RunState.PAUSED)).thenReturn(Collections.emptyList());
    when(context.storage.getSegmentsWithState(any(), any())).thenReturn(Arrays.asList(segment));
    when(((IDistributedStorage) context.storage).getLeaders()).thenReturn(Collections.emptyList());

    context.repairManager.resumeRunningRepairRuns();

    // Check that abortSegments was invoked with at least one segment, meaning an abort occurs
    Mockito.verify(context.repairManager, Mockito.times(1)).abortSegments(Mockito.argThat(new NotEmptyList()),
            any(), Mockito.anyBoolean(), Mockito.anyBoolean());
}

From source file:io.cassandrareaper.service.RepairManagerTest.java

License:Apache License

/**
 * Verifies that when a RUNNING segment exists that has a leader it will not get aborted when
 * using an IDistributedStorage backend.
 *
 * @throws ReaperException if something goes wrong :)
 */
@Test
public void doNotAbortRunningSegmentWithLeader() throws ReaperException {
    final String clusterName = "reaper";
    final String ksName = "reaper";
    final Set<String> cfNames = Sets.newHashSet("reaper");
    final boolean incrementalRepair = false;
    final Set<String> nodes = Sets.newHashSet("127.0.0.1");
    final Set<String> datacenters = Collections.emptySet();
    final double intensity = 0.5f;
    final int repairThreadCount = 1;

    // use CassandraStorage so we get both IStorage and IDistributedStorage
    final IStorage storage = mock(CassandraStorage.class);

    storage.addCluster(new Cluster(clusterName, null, Collections.<String>singleton("127.0.0.1")));

    AppContext context = new AppContext();
    context.storage = storage;
    context.config = new ReaperApplicationConfiguration();
    RepairManager repairManager = RepairManager.create(context);
    repairManager = Mockito.spy(repairManager);
    context.repairManager = repairManager;
    context.repairManager.initializeThreadPool(1, 500, TimeUnit.MILLISECONDS, 1, TimeUnit.MILLISECONDS);

    final RepairUnit cf = new RepairUnit.Builder(clusterName, ksName, cfNames, incrementalRepair, nodes,
            datacenters, Collections.emptySet(), repairThreadCount).build(UUIDs.timeBased());

    final RepairRun run = new RepairRun.Builder(clusterName, cf.getId(), DateTime.now(), intensity, 1,
            RepairParallelism.PARALLEL).build(UUIDs.timeBased());

    final RepairSegment segment = RepairSegment
            .builder(Segment.builder().withTokenRange(new RingRange("-1", "1")).build(), cf.getId())
            .withRunId(run.getId()).withId(UUIDs.timeBased()).build();

    context.repairManager.repairRunners.put(run.getId(), mock(RepairRunner.class));

    Mockito.doNothing().when(context.repairManager).abortSegments(any(), any());
    Mockito.doNothing().when(context.repairManager).abortSegments(any(), any(), Mockito.anyBoolean(),
            Mockito.anyBoolean());
    Mockito.doReturn(run).when(context.repairManager).startRepairRun(run);
    when(context.storage.getRepairRunsWithState(RepairRun.RunState.RUNNING)).thenReturn(Arrays.asList(run));
    when(context.storage.getRepairRunsWithState(RepairRun.RunState.PAUSED)).thenReturn(Collections.emptyList());
    when(context.storage.getSegmentsWithState(any(), any())).thenReturn(Arrays.asList(segment));
    when(((IDistributedStorage) context.storage).getLeaders()).thenReturn(Arrays.asList(segment.getId()));

    context.repairManager.resumeRunningRepairRuns();

    // Check that abortSegments was invoked with an empty list, meaning no segments are aborted
    Mockito.verify(context.repairManager, Mockito.times(1)).abortSegments(Mockito.argThat(new EmptyList()),
            any(), Mockito.anyBoolean(), Mockito.anyBoolean());
}

From source file:io.cassandrareaper.service.RepairManagerTest.java

License:Apache License

/**
 * Verifies that when a RUNNING segment exists it will not get aborted when using a
 * non-IDistributedStorage backend if a repair runner exists.
 *
 * @throws ReaperException if something goes wrong :)
 */
@Test
public void doNotAbortRunningSegmentWithRepairRunnerAndNoDistributedStorage() throws ReaperException {
    final String clusterName = "reaper";
    final String ksName = "reaper";
    final Set<String> cfNames = Sets.newHashSet("reaper");
    final boolean incrementalRepair = false;
    final Set<String> nodes = Sets.newHashSet("127.0.0.1");
    final Set<String> datacenters = Collections.emptySet();
    final double intensity = 0.5f;
    final int repairThreadCount = 1;

    final IStorage storage = mock(IStorage.class);

    storage.addCluster(new Cluster(clusterName, null, Collections.<String>singleton("127.0.0.1")));

    AppContext context = new AppContext();
    context.config = new ReaperApplicationConfiguration();
    context.storage = storage;
    RepairManager repairManager = RepairManager.create(context);
    repairManager = Mockito.spy(repairManager);
    context.repairManager = repairManager;
    context.repairManager.initializeThreadPool(1, 500, TimeUnit.MILLISECONDS, 1, TimeUnit.MILLISECONDS);

    final RepairUnit cf = new RepairUnit.Builder(clusterName, ksName, cfNames, incrementalRepair, nodes,
            datacenters, Collections.emptySet(), repairThreadCount).build(UUIDs.timeBased());

    final RepairRun run = new RepairRun.Builder(clusterName, cf.getId(), DateTime.now(), intensity, 1,
            RepairParallelism.PARALLEL).build(UUIDs.timeBased());

    final RepairSegment segment = RepairSegment
            .builder(Segment.builder().withTokenRange(new RingRange("-1", "1")).build(), cf.getId())
            .withRunId(run.getId()).withId(UUIDs.timeBased()).build();

    context.repairManager.repairRunners.put(run.getId(), mock(RepairRunner.class));

    Mockito.doNothing().when(context.repairManager).abortSegments(any(), any(), Mockito.anyBoolean(),
            Mockito.anyBoolean());
    Mockito.doReturn(run).when(context.repairManager).startRepairRun(run);
    when(context.storage.getRepairRunsWithState(RepairRun.RunState.RUNNING)).thenReturn(Arrays.asList(run));
    when(context.storage.getRepairRunsWithState(RepairRun.RunState.PAUSED)).thenReturn(Collections.emptyList());
    when(context.storage.getSegmentsWithState(any(), any())).thenReturn(Arrays.asList(segment));

    context.repairManager.resumeRunningRepairRuns();

    // Check that abortSegments was not invoked at all, meaning no segments are aborted
    Mockito.verify(context.repairManager, Mockito.times(0)).abortSegments(any(), any(), Mockito.anyBoolean(),
            Mockito.anyBoolean());
}

From source file:io.cassandrareaper.service.RepairManagerTest.java

License:Apache License

/**
 * Verifies that when a RUNNING segment exists it will get aborted when using a
 * non-IDistributedStorage backend if no repair runner exists (first boot of Reaper).
 *
 * @throws ReaperException if something goes wrong :)
 */
@Test
public void abortRunningSegmentWithNoRepairRunnerAndNoDistributedStorage() throws ReaperException {
    final String clusterName = "reaper";
    final String ksName = "reaper";
    final Set<String> cfNames = Sets.newHashSet("reaper");
    final boolean incrementalRepair = false;
    final Set<String> nodes = Sets.newHashSet("127.0.0.1");
    final Set<String> datacenters = Collections.emptySet();
    final double intensity = 0.5f;
    final int repairThreadCount = 1;

    final IStorage storage = mock(IStorage.class);

    storage.addCluster(new Cluster(clusterName, null, Collections.<String>singleton("127.0.0.1")));

    AppContext context = new AppContext();
    context.storage = storage;
    context.config = new ReaperApplicationConfiguration();
    RepairManager repairManager = RepairManager.create(context);
    repairManager = Mockito.spy(repairManager);
    context.repairManager = repairManager;
    context.repairManager.initializeThreadPool(1, 500, TimeUnit.MILLISECONDS, 1, TimeUnit.MILLISECONDS);

    final RepairUnit cf = new RepairUnit.Builder(clusterName, ksName, cfNames, incrementalRepair, nodes,
            datacenters, Collections.emptySet(), repairThreadCount).build(UUIDs.timeBased());

    final RepairRun run = new RepairRun.Builder(clusterName, cf.getId(), DateTime.now(), intensity, 1,
            RepairParallelism.PARALLEL).build(UUIDs.timeBased());

    final RepairSegment segment = RepairSegment
            .builder(Segment.builder().withTokenRange(new RingRange("-1", "1")).build(), cf.getId())
            .withRunId(run.getId()).withId(UUIDs.timeBased()).build();

    Mockito.doNothing().when(context.repairManager).abortSegments(any(), any(), Mockito.anyBoolean(),
            Mockito.anyBoolean());
    Mockito.doReturn(run).when(context.repairManager).startRepairRun(run);
    when(context.storage.getRepairRunsWithState(RepairRun.RunState.RUNNING)).thenReturn(Arrays.asList(run));
    when(context.storage.getRepairRunsWithState(RepairRun.RunState.PAUSED)).thenReturn(Collections.emptyList());
    when(context.storage.getSegmentsWithState(any(), any())).thenReturn(Arrays.asList(segment));

    context.repairManager.resumeRunningRepairRuns();

    // Check that abortSegments was invoked with a non-empty list, meaning segments are aborted
    Mockito.verify(context.repairManager, Mockito.times(1)).abortSegments(Mockito.argThat(new NotEmptyList()),
            any(), Mockito.anyBoolean(), Mockito.anyBoolean());
}

From source file:io.cassandrareaper.service.RepairManagerTest.java

License:Apache License

@Test
public void updateRepairRunIntensityTest() throws ReaperException {
    final String clusterName = "reaper";

    AppContext context = new AppContext();
    context.config = new ReaperApplicationConfiguration();
    context.storage = mock(IStorage.class);
    context.storage.addCluster(new Cluster(clusterName, null, Collections.<String>singleton("127.0.0.1")));
    context.repairManager = RepairManager.create(context);
    context.repairManager.initializeThreadPool(1, 500, TimeUnit.MILLISECONDS, 1, TimeUnit.MILLISECONDS);

    final String ksName = "reaper";
    final Set<String> cfNames = Sets.newHashSet("reaper");
    final boolean incrementalRepair = false;
    final Set<String> nodes = Sets.newHashSet("127.0.0.1");
    final Set<String> datacenters = Collections.emptySet();
    final int repairThreadCount = 1;

    final RepairUnit cf = new RepairUnit.Builder(clusterName, ksName, cfNames, incrementalRepair, nodes,
            datacenters, Collections.emptySet(), repairThreadCount).build(UUIDs.timeBased());

    double intensity = 0.5f;

    final RepairRun run = new RepairRun.Builder(clusterName, cf.getId(), DateTime.now(), intensity, 1,
            RepairParallelism.PARALLEL).build(UUIDs.timeBased());

    when(context.storage.updateRepairRun(any())).thenReturn(true);

    intensity = 0.1;
    RepairRun updated = context.repairManager.updateRepairRunIntensity(run, intensity);

    Assertions.assertThat(updated.getId()).isEqualTo(run.getId());
    Assertions.assertThat(updated.getIntensity()).isEqualTo(intensity);
    Mockito.verify(context.storage, Mockito.times(1)).updateRepairRun(any());
}

From source file:io.cassandrareaper.service.RepairRunnerTest.java

License:Apache License

@Test
public void testHangingRepair() throws InterruptedException, ReaperException {
    final String CLUSTER_NAME = "reaper";
    final String KS_NAME = "reaper";
    final Set<String> CF_NAMES = Sets.newHashSet("reaper");
    final boolean INCREMENTAL_REPAIR = false;
    final Set<String> NODES = Sets.newHashSet("127.0.0.1");
    final Set<String> DATACENTERS = Collections.emptySet();
    final Set<String> BLACKLISTED_TABLES = Collections.emptySet();
    final long TIME_RUN = 41L;
    final double INTENSITY = 0.5f;
    final int REPAIR_THREAD_COUNT = 1;

    final IStorage storage = new MemoryStorage();
    storage.addCluster(new Cluster(CLUSTER_NAME, null, Collections.<String>singleton("127.0.0.1")));
    RepairUnit cf = storage.addRepairUnit(new RepairUnit.Builder(CLUSTER_NAME, KS_NAME, CF_NAMES,
            INCREMENTAL_REPAIR, NODES, DATACENTERS, BLACKLISTED_TABLES, REPAIR_THREAD_COUNT));
    DateTimeUtils.setCurrentMillisFixed(TIME_RUN);
    RepairRun run = storage.addRepairRun(
            new RepairRun.Builder(CLUSTER_NAME, cf.getId(), DateTime.now(), INTENSITY, 1,
                    RepairParallelism.PARALLEL),
            Collections.singleton(RepairSegment.builder(
                    Segment.builder().withTokenRange(new RingRange(BigInteger.ZERO, BigInteger.ONE)).build(),
                    cf.getId())));
    final UUID RUN_ID = run.getId();
    final UUID SEGMENT_ID = storage.getNextFreeSegmentInRange(run.getId(), Optional.absent()).get().getId();

    assertEquals(storage.getRepairSegment(RUN_ID, SEGMENT_ID).get().getState(),
            RepairSegment.State.NOT_STARTED);
    AppContext context = new AppContext();
    context.storage = storage;
    context.config = new ReaperApplicationConfiguration();
    context.repairManager = RepairManager.create(context);
    context.repairManager.initializeThreadPool(1, 500, TimeUnit.MILLISECONDS, 1, TimeUnit.MILLISECONDS);

    final Semaphore mutex = new Semaphore(0);

    context.jmxConnectionFactory = new JmxConnectionFactory() {
        final AtomicInteger repairAttempts = new AtomicInteger(1);

        @Override
        protected JmxProxy connect(final Optional<RepairStatusHandler> handler, Node host,
                int connectionTimeout) throws ReaperException {

            final JmxProxy jmx = mock(JmxProxy.class);
            when(jmx.getClusterName()).thenReturn(CLUSTER_NAME);
            when(jmx.isConnectionAlive()).thenReturn(true);
            when(jmx.tokenRangeToEndpoint(anyString(), any(Segment.class))).thenReturn(Lists.newArrayList(""));
            when(jmx.getRangeToEndpointMap(anyString())).thenReturn(RepairRunnerTest.sixNodeCluster());
            when(jmx.getDataCenter()).thenReturn("dc1");
            when(jmx.getDataCenter(anyString())).thenReturn("dc1");

            when(jmx.triggerRepair(any(BigInteger.class), any(BigInteger.class), any(),
                    any(RepairParallelism.class), any(), anyBoolean(), any(), any(), any(), any(Integer.class)))
                            .then((invocation) -> {
                                assertEquals(RepairSegment.State.NOT_STARTED,
                                        storage.getRepairSegment(RUN_ID, SEGMENT_ID).get().getState());
                                final int repairNumber = repairAttempts.getAndIncrement();
                                switch (repairNumber) {
                                case 1:
                                    new Thread() {
                                        @Override
                                        public void run() {
                                            handler.get().handle(repairNumber,
                                                    Optional.of(ActiveRepairService.Status.STARTED),
                                                    Optional.absent(), null, jmx);

                                            assertEquals(RepairSegment.State.RUNNING, storage
                                                    .getRepairSegment(RUN_ID, SEGMENT_ID).get().getState());
                                        }
                                    }.start();
                                    break;
                                case 2:
                                    new Thread() {
                                        @Override
                                        public void run() {

                                            handler.get().handle(repairNumber,
                                                    Optional.of(ActiveRepairService.Status.STARTED),
                                                    Optional.absent(), null, jmx);

                                            assertEquals(RepairSegment.State.RUNNING, storage
                                                    .getRepairSegment(RUN_ID, SEGMENT_ID).get().getState());

                                            handler.get().handle(repairNumber,
                                                    Optional.of(ActiveRepairService.Status.SESSION_SUCCESS),
                                                    Optional.absent(), null, jmx);

                                            assertEquals(RepairSegment.State.DONE, storage
                                                    .getRepairSegment(RUN_ID, SEGMENT_ID).get().getState());

                                            handler.get().handle(repairNumber,
                                                    Optional.of(ActiveRepairService.Status.FINISHED),
                                                    Optional.absent(), null, jmx);

                                            mutex.release();
                                            LOG.info("MUTEX RELEASED");
                                        }
                                    }.start();
                                    break;
                                default:
                                    fail("triggerRepair should only have been called twice");
                                }
                                LOG.info("repair number : " + repairNumber);
                                return repairNumber;
                            });
            return jmx;
        }
    };
    context.repairManager.startRepairRun(run);

    await().with().atMost(20, TimeUnit.SECONDS).until(() -> {
        try {
            mutex.acquire();
            LOG.info("MUTEX ACQUIRED");
            // TODO: refactor so that we can properly wait for the repair runner to finish rather than
            // TODO: using this sleep().
            Thread.sleep(1000);
            return true;
        } catch (InterruptedException ex) {
            throw new IllegalStateException(ex);
        }
    });
    assertEquals(RepairRun.RunState.DONE, storage.getRepairRun(RUN_ID).get().getRunState());
}

From source file:io.cassandrareaper.service.RepairRunnerTest.java

License:Apache License

@Test
public void testHangingRepairNewAPI() throws InterruptedException, ReaperException {
    final String CLUSTER_NAME = "reaper";
    final String KS_NAME = "reaper";
    final Set<String> CF_NAMES = Sets.newHashSet("reaper");
    final boolean INCREMENTAL_REPAIR = false;
    final Set<String> NODES = Sets.newHashSet("127.0.0.1");
    final Set<String> DATACENTERS = Collections.emptySet();
    final Set<String> BLACKLISTED_TABLES = Collections.emptySet();
    final long TIME_RUN = 41L;
    final double INTENSITY = 0.5f;
    final int REPAIR_THREAD_COUNT = 1;

    final IStorage storage = new MemoryStorage();

    storage.addCluster(new Cluster(CLUSTER_NAME, null, Collections.<String>singleton("127.0.0.1")));
    RepairUnit cf = storage.addRepairUnit(new RepairUnit.Builder(CLUSTER_NAME, KS_NAME, CF_NAMES,
            INCREMENTAL_REPAIR, NODES, DATACENTERS, BLACKLISTED_TABLES, REPAIR_THREAD_COUNT));
    DateTimeUtils.setCurrentMillisFixed(TIME_RUN);
    RepairRun run = storage.addRepairRun(
            new RepairRun.Builder(CLUSTER_NAME, cf.getId(), DateTime.now(), INTENSITY, 1,
                    RepairParallelism.PARALLEL),
            Collections.singleton(RepairSegment.builder(
                    Segment.builder().withTokenRange(new RingRange(BigInteger.ZERO, BigInteger.ONE)).build(),
                    cf.getId())));
    final UUID RUN_ID = run.getId();
    final UUID SEGMENT_ID = storage.getNextFreeSegmentInRange(run.getId(), Optional.absent()).get().getId();

    assertEquals(storage.getRepairSegment(RUN_ID, SEGMENT_ID).get().getState(),
            RepairSegment.State.NOT_STARTED);
    AppContext context = new AppContext();
    context.storage = storage;
    context.config = new ReaperApplicationConfiguration();
    context.repairManager = RepairManager.create(context);
    context.repairManager.initializeThreadPool(1, 500, TimeUnit.MILLISECONDS, 1, TimeUnit.MILLISECONDS);

    final Semaphore mutex = new Semaphore(0);

    context.jmxConnectionFactory = new JmxConnectionFactory() {
        final AtomicInteger repairAttempts = new AtomicInteger(1);

        @Override
        protected JmxProxy connect(final Optional<RepairStatusHandler> handler, Node host,
                int connectionTimeout) throws ReaperException {

            final JmxProxy jmx = mock(JmxProxy.class);
            when(jmx.getClusterName()).thenReturn(CLUSTER_NAME);
            when(jmx.isConnectionAlive()).thenReturn(true);
            when(jmx.tokenRangeToEndpoint(anyString(), any(Segment.class))).thenReturn(Lists.newArrayList(""));
            when(jmx.getRangeToEndpointMap(anyString())).thenReturn(RepairRunnerTest.sixNodeCluster());
            when(jmx.getDataCenter()).thenReturn("dc1");
            when(jmx.getDataCenter(anyString())).thenReturn("dc1");
            //doNothing().when(jmx).cancelAllRepairs();

            when(jmx.triggerRepair(any(BigInteger.class), any(BigInteger.class), any(),
                    any(RepairParallelism.class), any(), anyBoolean(), any(), any(), any(), any(Integer.class)))
                            .then((invocation) -> {
                                assertEquals(RepairSegment.State.NOT_STARTED,
                                        storage.getRepairSegment(RUN_ID, SEGMENT_ID).get().getState());

                                final int repairNumber = repairAttempts.getAndIncrement();
                                switch (repairNumber) {
                                case 1:
                                    new Thread() {
                                        @Override
                                        public void run() {
                                            handler.get().handle(repairNumber, Optional.absent(),
                                                    Optional.of(ProgressEventType.START), null, jmx);
                                            assertEquals(RepairSegment.State.RUNNING, storage
                                                    .getRepairSegment(RUN_ID, SEGMENT_ID).get().getState());
                                        }
                                    }.start();
                                    break;
                                case 2:
                                    new Thread() {
                                        @Override
                                        public void run() {
                                            handler.get().handle(repairNumber, Optional.absent(),
                                                    Optional.of(ProgressEventType.START), null, jmx);
                                            assertEquals(RepairSegment.State.RUNNING, storage
                                                    .getRepairSegment(RUN_ID, SEGMENT_ID).get().getState());
                                            handler.get().handle(repairNumber, Optional.absent(),
                                                    Optional.of(ProgressEventType.SUCCESS), null, jmx);
                                            assertEquals(RepairSegment.State.DONE, storage
                                                    .getRepairSegment(RUN_ID, SEGMENT_ID).get().getState());
                                            handler.get().handle(repairNumber, Optional.absent(),
                                                    Optional.of(ProgressEventType.COMPLETE), null, jmx);
                                            mutex.release();
                                            LOG.info("MUTEX RELEASED");
                                        }
                                    }.start();
                                    break;
                                default:
                                    fail("triggerRepair should only have been called twice");
                                }
                                return repairNumber;
                            });
            return jmx;
        }
    };
    context.repairManager.startRepairRun(run);

    await().with().atMost(20, TimeUnit.SECONDS).until(() -> {
        try {
            mutex.acquire();
            LOG.info("MUTEX ACQUIRED");
            // TODO: refactor so that we can properly wait for the repair runner to finish rather than
            // TODO: using this sleep().
            Thread.sleep(1000);
            return true;
        } catch (InterruptedException ex) {
            throw new IllegalStateException(ex);
        }
    });
    assertEquals(RepairRun.RunState.DONE, storage.getRepairRun(RUN_ID).get().getRunState());
}