Example usage for org.apache.cassandra.repair RepairParallelism PARALLEL

List of usage examples for org.apache.cassandra.repair RepairParallelism PARALLEL

Introduction

In this page you can find the example usage for org.apache.cassandra.repair RepairParallelism PARALLEL.

Prototype

RepairParallelism PARALLEL

Click the Source Link below to view the source code for org.apache.cassandra.repair RepairParallelism PARALLEL.

Click Source Link

Document

All nodes at the same time

Usage

From source file:com.spotify.reaper.unit.service.RepairRunnerTest.java

License:Apache License

/**
 * Verifies that a hanging repair attempt (the first triggerRepair call never reports a
 * terminal status) is retried by the repair manager, and that the second attempt completes
 * the segment and drives the whole run to DONE.
 */
@Test
public void testHangingRepair() throws InterruptedException {
    final String CLUSTER_NAME = "reaper";
    final String KS_NAME = "reaper";
    final Set<String> CF_NAMES = Sets.newHashSet("reaper");
    final long TIME_RUN = 41L; // uppercase 'L' suffix: a lowercase 'l' is easily misread as '1'
    final double INTENSITY = 0.5; // plain double literal instead of widening a float

    final IStorage storage = new MemoryStorage();

    storage.addCluster(new Cluster(CLUSTER_NAME, null, Collections.<String>singleton(null)));
    RepairUnit cf = storage.addRepairUnit(new RepairUnit.Builder(CLUSTER_NAME, KS_NAME, CF_NAMES));
    // Freeze Joda time so run/segment timestamps are deterministic.
    // NOTE(review): consider restoring the clock with DateTimeUtils.setCurrentMillisSystem()
    // in an @After method so the fixed time does not leak into other tests.
    DateTimeUtils.setCurrentMillisFixed(TIME_RUN);
    RepairRun run = storage.addRepairRun(new RepairRun.Builder(CLUSTER_NAME, cf.getId(), DateTime.now(),
            INTENSITY, 1, RepairParallelism.PARALLEL));
    storage.addRepairSegments(Collections.singleton(
            new RepairSegment.Builder(run.getId(), new RingRange(BigInteger.ZERO, BigInteger.ONE), cf.getId())),
            run.getId());
    final long RUN_ID = run.getId();
    final long SEGMENT_ID = storage.getNextFreeSegment(run.getId()).get().getId();

    assertEquals(storage.getRepairSegment(SEGMENT_ID).get().getState(), RepairSegment.State.NOT_STARTED);
    AppContext context = new AppContext();
    context.storage = storage;
    context.repairManager = new RepairManager();
    // Short retry/hang timeouts so the "hanging" first attempt is re-triggered quickly.
    context.repairManager.initializeThreadPool(1, 500, TimeUnit.MILLISECONDS, 1, TimeUnit.MILLISECONDS);

    final Semaphore mutex = new Semaphore(0);

    context.jmxConnectionFactory = new JmxConnectionFactory() {
        final AtomicInteger repairAttempts = new AtomicInteger(1);

        @Override
        public JmxProxy connect(final Optional<RepairStatusHandler> handler, String host)
                throws ReaperException {
            final JmxProxy jmx = mock(JmxProxy.class);
            when(jmx.getClusterName()).thenReturn(CLUSTER_NAME);
            when(jmx.isConnectionAlive()).thenReturn(true);
            when(jmx.tokenRangeToEndpoint(anyString(), any(RingRange.class)))
                    .thenReturn(Lists.newArrayList(""));
            when(jmx.getRangeToEndpointMap(anyString())).thenReturn(RepairRunnerTest.sixNodeCluster());
            // Use a matcher for the column-family set: mixing matchers (anyString()) with a
            // concrete argument (Sets.newHashSet(...)) is invalid Mockito matcher usage.
            when(jmx.triggerRepair(any(BigInteger.class), any(BigInteger.class), anyString(),
                    Matchers.<RepairParallelism>any(), Matchers.<Set<String>>any()))
                            .then(new Answer<Integer>() {
                                @Override
                                public Integer answer(InvocationOnMock invocation) throws Throwable {
                                    assertEquals(RepairSegment.State.NOT_STARTED,
                                            storage.getRepairSegment(SEGMENT_ID).get().getState());

                                    final int repairNumber = repairAttempts.getAndIncrement();
                                    switch (repairNumber) {
                                    case 1:
                                        // First attempt: report STARTED only, then hang;
                                        // the segment runner is expected to time out.
                                        new Thread() {
                                            @Override
                                            public void run() {
                                                handler.get().handle(repairNumber,
                                                        ActiveRepairService.Status.STARTED, null);
                                                assertEquals(RepairSegment.State.RUNNING,
                                                        storage.getRepairSegment(SEGMENT_ID).get().getState());
                                            }
                                        }.start();
                                        break;
                                    case 2:
                                        // Second attempt: drive the repair through its full
                                        // life cycle, then release the test thread.
                                        new Thread() {
                                            @Override
                                            public void run() {
                                                handler.get().handle(repairNumber,
                                                        ActiveRepairService.Status.STARTED, null);
                                                assertEquals(RepairSegment.State.RUNNING,
                                                        storage.getRepairSegment(SEGMENT_ID).get().getState());
                                                handler.get().handle(repairNumber,
                                                        ActiveRepairService.Status.SESSION_SUCCESS, null);
                                                assertEquals(RepairSegment.State.DONE,
                                                        storage.getRepairSegment(SEGMENT_ID).get().getState());
                                                handler.get().handle(repairNumber,
                                                        ActiveRepairService.Status.FINISHED, null);
                                                mutex.release();
                                                System.out.println("MUTEX RELEASED");
                                            }
                                        }.start();
                                        break;
                                    default:
                                        fail("triggerRepair should only have been called twice");
                                    }
                                    return repairNumber;
                                }
                            });
            return jmx;
        }
    };
    context.repairManager.startRepairRun(context, run);

    // TODO: refactor so that we can properly wait for the repair runner to finish rather than
    // TODO: using this sleep().
    mutex.acquire();
    System.out.println("MUTEX ACQUIRED");
    Thread.sleep(100);
    assertEquals(RepairRun.RunState.DONE, storage.getRepairRun(RUN_ID).get().getRunState());
}

From source file:com.spotify.reaper.unit.service.RepairRunnerTest.java

License:Apache License

/**
 * Verifies that resumeRunningRepairRuns ignores runs in NOT_STARTED state but, once the run
 * is flagged RUNNING, picks up where a crashed reaper left off: the pre-existing RUNNING
 * segment plus the remaining fresh segment are processed and the run ends up DONE.
 */
@Test
public void testResumeRepair() throws InterruptedException {
    final String CLUSTER_NAME = "reaper";
    final String KS_NAME = "reaper";
    final Set<String> CF_NAMES = Sets.newHashSet("reaper");
    final long TIME_RUN = 41L; // uppercase 'L' suffix: a lowercase 'l' is easily misread as '1'
    final double INTENSITY = 0.5; // plain double literal instead of widening a float

    final IStorage storage = new MemoryStorage();
    AppContext context = new AppContext();
    context.storage = storage;
    context.repairManager = new RepairManager();

    storage.addCluster(new Cluster(CLUSTER_NAME, null, Collections.<String>singleton(null)));
    long cf = storage.addRepairUnit(new RepairUnit.Builder(CLUSTER_NAME, KS_NAME, CF_NAMES)).getId();
    // Freeze Joda time so run/segment timestamps are deterministic.
    DateTimeUtils.setCurrentMillisFixed(TIME_RUN);
    RepairRun run = storage.addRepairRun(
            new RepairRun.Builder(CLUSTER_NAME, cf, DateTime.now(), INTENSITY, 1, RepairParallelism.PARALLEL));
    // One segment already RUNNING (simulates a previously crashed reaper) and one untouched.
    storage.addRepairSegments(
            Lists.newArrayList(
                    new RepairSegment.Builder(run.getId(), new RingRange(BigInteger.ZERO, BigInteger.ONE), cf)
                            .state(RepairSegment.State.RUNNING).startTime(DateTime.now())
                            .coordinatorHost("reaper").repairCommandId(1337),
                    new RepairSegment.Builder(run.getId(), new RingRange(BigInteger.ONE, BigInteger.ZERO), cf)),
            run.getId());
    final long RUN_ID = run.getId();
    final long SEGMENT_ID = storage.getNextFreeSegment(run.getId()).get().getId();

    context.repairManager.initializeThreadPool(1, 500, TimeUnit.MILLISECONDS, 1, TimeUnit.MILLISECONDS);

    assertEquals(storage.getRepairSegment(SEGMENT_ID).get().getState(), RepairSegment.State.NOT_STARTED);
    context.jmxConnectionFactory = new JmxConnectionFactory() {
        @Override
        public JmxProxy connect(final Optional<RepairStatusHandler> handler, String host)
                throws ReaperException {
            final JmxProxy jmx = mock(JmxProxy.class);
            when(jmx.getClusterName()).thenReturn(CLUSTER_NAME);
            when(jmx.isConnectionAlive()).thenReturn(true);
            when(jmx.tokenRangeToEndpoint(anyString(), any(RingRange.class)))
                    .thenReturn(Lists.newArrayList(""));
            when(jmx.getRangeToEndpointMap(anyString())).thenReturn(RepairRunnerTest.sixNodeCluster());
            // Use a matcher for the column-family set: mixing matchers (anyString()) with a
            // concrete argument (Sets.newHashSet(...)) is invalid Mockito matcher usage.
            when(jmx.triggerRepair(any(BigInteger.class), any(BigInteger.class), anyString(),
                    Matchers.<RepairParallelism>any(), Matchers.<Set<String>>any()))
                            .then(new Answer<Integer>() {
                                @Override
                                public Integer answer(InvocationOnMock invocation) throws Throwable {
                                    assertEquals(RepairSegment.State.NOT_STARTED,
                                            storage.getRepairSegment(SEGMENT_ID).get().getState());
                                    // Drive the repair through its full life cycle asynchronously.
                                    new Thread() {
                                        @Override
                                        public void run() {
                                            handler.get().handle(1, ActiveRepairService.Status.STARTED, null);
                                            handler.get().handle(1, ActiveRepairService.Status.SESSION_SUCCESS,
                                                    null);
                                            handler.get().handle(1, ActiveRepairService.Status.FINISHED, null);
                                        }
                                    }.start();
                                    return 1;
                                }
                            });
            return jmx;
        }
    };

    // A NOT_STARTED run must not be picked up by resumeRunningRepairRuns.
    assertEquals(RepairRun.RunState.NOT_STARTED, storage.getRepairRun(RUN_ID).get().getRunState());
    context.repairManager.resumeRunningRepairRuns(context);
    assertEquals(RepairRun.RunState.NOT_STARTED, storage.getRepairRun(RUN_ID).get().getRunState());
    // Once flagged RUNNING, resuming should finish the remaining segments.
    storage.updateRepairRun(run.with().runState(RepairRun.RunState.RUNNING).build(RUN_ID));
    context.repairManager.resumeRunningRepairRuns(context);
    Thread.sleep(200); // TODO(review): replace the sleep with a proper completion signal
    assertEquals(RepairRun.RunState.DONE, storage.getRepairRun(RUN_ID).get().getRunState());
}

From source file:com.spotify.reaper.unit.service.SegmentRunnerTest.java

License:Apache License

/**
 * Verifies that a segment whose repair never reports a terminal status within the timeout
 * (100 ms) is rolled back to NOT_STARTED and has its fail count incremented.
 */
@Test
public void timeoutTest() throws InterruptedException, ReaperException, ExecutionException {
    final AppContext context = new AppContext();
    context.storage = new MemoryStorage();
    RepairUnit cf = context.storage
            .addRepairUnit(new RepairUnit.Builder("reaper", "reaper", Sets.newHashSet("reaper")));
    RepairRun run = context.storage.addRepairRun(
            new RepairRun.Builder("reaper", cf.getId(), DateTime.now(), 0.5, 1, RepairParallelism.PARALLEL));
    context.storage.addRepairSegments(Collections.singleton(
            new RepairSegment.Builder(run.getId(), new RingRange(BigInteger.ONE, BigInteger.ZERO), cf.getId())),
            run.getId());
    final long segmentId = context.storage.getNextFreeSegment(run.getId()).get().getId();

    final ExecutorService executor = Executors.newSingleThreadExecutor();
    final MutableObject<Future<?>> future = new MutableObject<>();

    context.jmxConnectionFactory = new JmxConnectionFactory() {
        @Override
        public JmxProxy connect(final Optional<RepairStatusHandler> handler, String host) {
            JmxProxy jmx = mock(JmxProxy.class);
            when(jmx.getClusterName()).thenReturn("reaper");
            when(jmx.isConnectionAlive()).thenReturn(true);
            when(jmx.tokenRangeToEndpoint(anyString(), any(RingRange.class)))
                    .thenReturn(Lists.newArrayList(""));
            // Use a matcher for the column-family set: mixing matchers (anyString()) with a
            // concrete argument (Sets.newHashSet(...)) is invalid Mockito matcher usage.
            when(jmx.triggerRepair(any(BigInteger.class), any(BigInteger.class), anyString(),
                    Matchers.<RepairParallelism>any(), Matchers.<Set<String>>any()))
                            .then(new Answer<Integer>() {
                                @Override
                                public Integer answer(InvocationOnMock invocation) {
                                    assertEquals(RepairSegment.State.NOT_STARTED,
                                            context.storage.getRepairSegment(segmentId).get().getState());
                                    // Report STARTED only, never a terminal status, so the
                                    // segment runner must hit its timeout. Submit a plain
                                    // Runnable (not a Thread): the executor supplies the thread.
                                    future.setValue(executor.submit(new Runnable() {
                                        @Override
                                        public void run() {
                                            handler.get().handle(1, ActiveRepairService.Status.STARTED,
                                                    "Repair command 1 has started");
                                            assertEquals(RepairSegment.State.RUNNING, context.storage
                                                    .getRepairSegment(segmentId).get().getState());
                                        }
                                    }));
                                    return 1;
                                }
                            });

            return jmx;
        }
    };
    RepairRunner rr = mock(RepairRunner.class);
    RepairUnit ru = mock(RepairUnit.class);
    // 100 ms timeout: short enough to expire before any terminal status arrives.
    SegmentRunner sr = new SegmentRunner(context, segmentId, Collections.singleton(""), 100, 0.5,
            RepairParallelism.PARALLEL, "reaper", ru, rr);
    sr.run();

    future.getValue().get();
    executor.shutdown();

    // The timed-out segment is reset and its failure is recorded.
    assertEquals(RepairSegment.State.NOT_STARTED, context.storage.getRepairSegment(segmentId).get().getState());
    assertEquals(1, context.storage.getRepairSegment(segmentId).get().getFailCount());
}

From source file:com.spotify.reaper.unit.service.SegmentRunnerTest.java

License:Apache License

/**
 * Verifies the happy path of a segment repair: STARTED then SESSION_SUCCESS then FINISHED
 * moves the segment to DONE with a zero fail count, and that a status notification for an
 * unrelated repair command (id 2) is ignored.
 */
@Test
public void successTest() throws InterruptedException, ReaperException, ExecutionException {
    final IStorage storage = new MemoryStorage();
    RepairUnit cf = storage
            .addRepairUnit(new RepairUnit.Builder("reaper", "reaper", Sets.newHashSet("reaper")));
    RepairRun run = storage.addRepairRun(
            new RepairRun.Builder("reaper", cf.getId(), DateTime.now(), 0.5, 1, RepairParallelism.PARALLEL));
    storage.addRepairSegments(Collections.singleton(
            new RepairSegment.Builder(run.getId(), new RingRange(BigInteger.ONE, BigInteger.ZERO), cf.getId())),
            run.getId());
    final long segmentId = storage.getNextFreeSegment(run.getId()).get().getId();

    final ExecutorService executor = Executors.newSingleThreadExecutor();
    final MutableObject<Future<?>> future = new MutableObject<>();

    AppContext context = new AppContext();
    context.storage = storage;
    context.jmxConnectionFactory = new JmxConnectionFactory() {
        @Override
        public JmxProxy connect(final Optional<RepairStatusHandler> handler, String host) {
            JmxProxy jmx = mock(JmxProxy.class);
            when(jmx.getClusterName()).thenReturn("reaper");
            when(jmx.isConnectionAlive()).thenReturn(true);
            when(jmx.tokenRangeToEndpoint(anyString(), any(RingRange.class)))
                    .thenReturn(Lists.newArrayList(""));
            // Use a matcher for the column-family set: mixing matchers (anyString()) with a
            // concrete argument (Sets.newHashSet(...)) is invalid Mockito matcher usage.
            when(jmx.triggerRepair(any(BigInteger.class), any(BigInteger.class), anyString(),
                    Matchers.<RepairParallelism>any(), Matchers.<Set<String>>any()))
                            .then(new Answer<Integer>() {
                                @Override
                                public Integer answer(InvocationOnMock invocation) {
                                    assertEquals(RepairSegment.State.NOT_STARTED,
                                            storage.getRepairSegment(segmentId).get().getState());
                                    // Drive the repair through its full life cycle, checking
                                    // the segment state after each status notification.
                                    future.setValue(executor.submit(new Runnable() {
                                        @Override
                                        public void run() {
                                            handler.get().handle(1, ActiveRepairService.Status.STARTED,
                                                    "Repair command 1 has started");
                                            assertEquals(RepairSegment.State.RUNNING,
                                                    storage.getRepairSegment(segmentId).get().getState());
                                            // report about an unrelated repair. Shouldn't affect anything.
                                            handler.get().handle(2, ActiveRepairService.Status.SESSION_FAILED,
                                                    "Repair command 2 has failed");
                                            handler.get().handle(1, ActiveRepairService.Status.SESSION_SUCCESS,
                                                    "Repair session succeeded in command 1");
                                            assertEquals(RepairSegment.State.DONE,
                                                    storage.getRepairSegment(segmentId).get().getState());
                                            handler.get().handle(1, ActiveRepairService.Status.FINISHED,
                                                    "Repair command 1 has finished");
                                            assertEquals(RepairSegment.State.DONE,
                                                    storage.getRepairSegment(segmentId).get().getState());
                                        }
                                    }));
                                    return 1;
                                }
                            });

            return jmx;
        }
    };
    RepairRunner rr = mock(RepairRunner.class);
    RepairUnit ru = mock(RepairUnit.class);
    SegmentRunner sr = new SegmentRunner(context, segmentId, Collections.singleton(""), 1000, 0.5,
            RepairParallelism.PARALLEL, "reaper", ru, rr);
    sr.run();

    future.getValue().get();
    executor.shutdown();

    assertEquals(RepairSegment.State.DONE, storage.getRepairSegment(segmentId).get().getState());
    assertEquals(0, storage.getRepairSegment(segmentId).get().getFailCount());
}

From source file:com.spotify.reaper.unit.service.SegmentRunnerTest.java

License:Apache License

/**
 * Verifies the failure path of a segment repair: a SESSION_FAILED notification rolls the
 * segment back to NOT_STARTED, a subsequent FINISHED does not resurrect it, and the fail
 * count ends up at 1.
 */
@Test
public void failureTest() throws InterruptedException, ReaperException, ExecutionException {
    final IStorage storage = new MemoryStorage();
    RepairUnit cf = storage
            .addRepairUnit(new RepairUnit.Builder("reaper", "reaper", Sets.newHashSet("reaper")));
    RepairRun run = storage.addRepairRun(
            new RepairRun.Builder("reaper", cf.getId(), DateTime.now(), 0.5, 1, RepairParallelism.PARALLEL));
    storage.addRepairSegments(Collections.singleton(
            new RepairSegment.Builder(run.getId(), new RingRange(BigInteger.ONE, BigInteger.ZERO), cf.getId())),
            run.getId());
    final long segmentId = storage.getNextFreeSegment(run.getId()).get().getId();

    final ExecutorService executor = Executors.newSingleThreadExecutor();
    final MutableObject<Future<?>> future = new MutableObject<>();

    AppContext context = new AppContext();
    context.storage = storage;
    context.jmxConnectionFactory = new JmxConnectionFactory() {
        @Override
        public JmxProxy connect(final Optional<RepairStatusHandler> handler, String host) {
            JmxProxy jmx = mock(JmxProxy.class);
            when(jmx.getClusterName()).thenReturn("reaper");
            when(jmx.isConnectionAlive()).thenReturn(true);
            when(jmx.tokenRangeToEndpoint(anyString(), any(RingRange.class)))
                    .thenReturn(Lists.newArrayList(""));
            // Use a matcher for the column-family set: mixing matchers (anyString()) with a
            // concrete argument (Sets.newHashSet(...)) is invalid Mockito matcher usage.
            when(jmx.triggerRepair(any(BigInteger.class), any(BigInteger.class), anyString(),
                    Matchers.<RepairParallelism>any(), Matchers.<Set<String>>any()))
                            .then(new Answer<Integer>() {
                                @Override
                                public Integer answer(InvocationOnMock invocation) {
                                    assertEquals(RepairSegment.State.NOT_STARTED,
                                            storage.getRepairSegment(segmentId).get().getState());
                                    // Fail the session after it starts, then check that the
                                    // segment is rolled back and stays rolled back.
                                    future.setValue(executor.submit(new Runnable() {
                                        @Override
                                        public void run() {
                                            handler.get().handle(1, ActiveRepairService.Status.STARTED,
                                                    "Repair command 1 has started");
                                            assertEquals(RepairSegment.State.RUNNING,
                                                    storage.getRepairSegment(segmentId).get().getState());
                                            handler.get().handle(1, ActiveRepairService.Status.SESSION_FAILED,
                                                    "Repair command 1 has failed");
                                            assertEquals(RepairSegment.State.NOT_STARTED,
                                                    storage.getRepairSegment(segmentId).get().getState());
                                            handler.get().handle(1, ActiveRepairService.Status.FINISHED,
                                                    "Repair command 1 has finished");
                                            assertEquals(RepairSegment.State.NOT_STARTED,
                                                    storage.getRepairSegment(segmentId).get().getState());
                                        }
                                    }));

                                    return 1;
                                }
                            });

            return jmx;
        }
    };
    RepairRunner rr = mock(RepairRunner.class);
    RepairUnit ru = mock(RepairUnit.class);
    SegmentRunner sr = new SegmentRunner(context, segmentId, Collections.singleton(""), 1000, 0.5,
            RepairParallelism.PARALLEL, "reaper", ru, rr);
    sr.run();

    future.getValue().get();
    executor.shutdown();

    assertEquals(RepairSegment.State.NOT_STARTED, storage.getRepairSegment(segmentId).get().getState());
    assertEquals(1, storage.getRepairSegment(segmentId).get().getFailCount());
}

From source file:io.cassandrareaper.jmx.JmxProxyImpl.java

License:Apache License

/**
 * Triggers a repair against a Cassandra 2.1-era node.
 *
 * <p>For a full repair the given token range is repaired via
 * {@code StorageServiceMBean.forceRepairRangeAsync}; for an incremental repair the whole
 * keyspace is repaired via {@code forceRepairAsync}. In both cases the returned command id
 * is registered with {@code repairStatusHandlers} so JMX status notifications reach the
 * supplied handler.
 *
 * @return the Cassandra repair command id
 */
private int triggerRepair2dot1(boolean fullRepair, RepairParallelism repairParallelism, String keyspace,
        Collection<String> columnFamilies, BigInteger beginToken, BigInteger endToken, String cassandraVersion,
        Collection<String> datacenters, RepairStatusHandler repairStatusHandler) {

    if (fullRepair) {
        // Map the requested parallelism to its ordinal once, defaulting anything that is
        // neither DATACENTER_AWARE nor SEQUENTIAL to PARALLEL. This preserves the original
        // mapping while collapsing the previously duplicated forceRepairRangeAsync calls.
        final int parallelismOrdinal;
        if (repairParallelism.equals(RepairParallelism.DATACENTER_AWARE)) {
            parallelismOrdinal = RepairParallelism.DATACENTER_AWARE.ordinal();
        } else if (repairParallelism.equals(RepairParallelism.SEQUENTIAL)) {
            parallelismOrdinal = RepairParallelism.SEQUENTIAL.ordinal();
        } else {
            parallelismOrdinal = RepairParallelism.PARALLEL.ordinal();
        }

        // 2.2 expects an empty host set rather than null — presumably meaning "all hosts";
        // NOTE(review): confirm against the StorageServiceMBean contract.
        int commandId = ((StorageServiceMBean) ssProxy).forceRepairRangeAsync(beginToken.toString(),
                endToken.toString(), keyspace, parallelismOrdinal, datacenters,
                cassandraVersion.startsWith("2.2") ? new HashSet<String>() : null, fullRepair,
                columnFamilies.toArray(new String[columnFamilies.size()]));

        repairStatusHandlers.putIfAbsent(commandId, repairStatusHandler);
        return commandId;
    }

    // Incremental repair: whole keyspace, with the three boolean flags passed as FALSE
    // exactly as before (see StorageServiceMBean.forceRepairAsync for their meanings).
    int commandId = ((StorageServiceMBean) ssProxy).forceRepairAsync(keyspace, Boolean.FALSE, Boolean.FALSE,
            Boolean.FALSE, fullRepair, columnFamilies.toArray(new String[columnFamilies.size()]));

    repairStatusHandlers.putIfAbsent(commandId, repairStatusHandler);
    return commandId;
}

From source file:io.cassandrareaper.resources.RepairRunResource.java

License:Apache License

/**
 * Endpoint used to create a repair run. Does not allow triggering the run. triggerRepairRun()
 * must be called to initiate the repair. Creating a repair run includes generating the repair
 * segments.
 *
 * <p>Notice that query parameter "tables" can be a single String, or a comma-separated list of
 * table names. If the "tables" parameter is omitted, and only the keyspace is defined, then
 * created repair run will target all the tables in the keyspace.
 *
 * @return repair run ID in case of everything going well, and a status code 500 in case of
 *     errors.
 */
@POST
public Response addRepairRun(@Context UriInfo uriInfo, @QueryParam("clusterName") Optional<String> clusterName,
        @QueryParam("keyspace") Optional<String> keyspace,
        @QueryParam("tables") Optional<String> tableNamesParam, @QueryParam("owner") Optional<String> owner,
        @QueryParam("cause") Optional<String> cause,
        @QueryParam("segmentCount") Optional<Integer> segmentCountPerNode,
        @QueryParam("repairParallelism") Optional<String> repairParallelism,
        @QueryParam("intensity") Optional<String> intensityStr,
        @QueryParam("incrementalRepair") Optional<String> incrementalRepairStr,
        @QueryParam("nodes") Optional<String> nodesToRepairParam,
        @QueryParam("datacenters") Optional<String> datacentersToRepairParam,
        @QueryParam("blacklistedTables") Optional<String> blacklistedTableNamesParam,
        @QueryParam("repairThreadCount") Optional<Integer> repairThreadCountParam) {

    try {
        // Basic request validation; a non-null Response here is an error to return as-is.
        final Response possibleFailedResponse = RepairRunResource.checkRequestForAddRepair(context, clusterName,
                keyspace, owner, segmentCountPerNode, repairParallelism, intensityStr, incrementalRepairStr,
                nodesToRepairParam, datacentersToRepairParam, repairThreadCountParam);
        if (null != possibleFailedResponse) {
            return possibleFailedResponse;
        }

        // Intensity: explicit query parameter wins over the configured default.
        Double intensity;
        if (intensityStr.isPresent()) {
            intensity = Double.parseDouble(intensityStr.get());
        } else {
            intensity = context.config.getRepairIntensity();
            LOG.debug("no intensity given, so using default value: {}", intensity);
        }

        // Incremental-repair flag: explicit query parameter wins over the configured default.
        boolean incrementalRepair;
        if (incrementalRepairStr.isPresent()) {
            incrementalRepair = Boolean.parseBoolean(incrementalRepairStr.get());
        } else {
            incrementalRepair = context.config.getIncrementalRepair();
            LOG.debug("no incremental repair given, so using default value: {}", incrementalRepair);
        }

        int segments = context.config.getSegmentCountPerNode();
        if (!incrementalRepair) {
            if (segmentCountPerNode.isPresent()) {
                LOG.debug("using given segment count {} instead of configured value {}",
                        segmentCountPerNode.get(), context.config.getSegmentCount());
                segments = segmentCountPerNode.get();
            }
        } else {
            // hijack the segment count in case of incremental repair
            segments = -1;
        }

        // NOTE(review): clusterName/keyspace/owner are unwrapped with .get() — presumably
        // guaranteed present by checkRequestForAddRepair above; verify that contract.
        final Cluster cluster = context.storage.getCluster(Cluster.toSymbolicName(clusterName.get())).get();
        Set<String> tableNames;
        try {
            tableNames = repairRunService.getTableNamesBasedOnParam(cluster, keyspace.get(), tableNamesParam);
        } catch (IllegalArgumentException ex) {
            LOG.error(ex.getMessage(), ex);
            return Response.status(Response.Status.NOT_FOUND).entity(ex.getMessage()).build();
        }

        Set<String> blacklistedTableNames;
        try {
            blacklistedTableNames = repairRunService.getTableNamesBasedOnParam(cluster, keyspace.get(),
                    blacklistedTableNamesParam);
        } catch (IllegalArgumentException ex) {
            LOG.error(ex.getMessage(), ex);
            return Response.status(Response.Status.NOT_FOUND).entity(ex.getMessage()).build();
        }

        final Set<String> nodesToRepair;
        try {
            nodesToRepair = repairRunService.getNodesToRepairBasedOnParam(cluster, nodesToRepairParam);
        } catch (IllegalArgumentException ex) {
            LOG.error(ex.getMessage(), ex);
            return Response.status(Response.Status.NOT_FOUND).entity(ex.getMessage()).build();
        }

        final Set<String> datacentersToRepair;
        try {
            datacentersToRepair = RepairRunService.getDatacentersToRepairBasedOnParam(cluster,
                    datacentersToRepairParam);

        } catch (IllegalArgumentException ex) {
            LOG.error(ex.getMessage(), ex);
            return Response.status(Response.Status.NOT_FOUND).entity(ex.getMessage()).build();
        }

        // Reuse an existing repair unit for the same cluster/keyspace/tables if one exists.
        RepairUnit.Builder builder = new RepairUnit.Builder(cluster.getName(), keyspace.get(), tableNames,
                incrementalRepair, nodesToRepair, datacentersToRepair, blacklistedTableNames,
                repairThreadCountParam.or(context.config.getRepairThreadCount()));

        RepairUnit theRepairUnit = repairUnitService.getNewOrExistingRepairUnit(cluster, builder);

        // An existing unit with a conflicting incremental-repair setting cannot be reused.
        if (theRepairUnit.getIncrementalRepair() != incrementalRepair) {
            String msg = String.format("A repair unit %s already exist for the same cluster/keyspace/tables"
                    + " but with a different incremental repair value. Requested value %s | Existing value: %s",
                    theRepairUnit.getId(), incrementalRepair, theRepairUnit.getIncrementalRepair());

            return Response.status(Response.Status.BAD_REQUEST).entity(msg).build();
        }

        RepairParallelism parallelism = context.config.getRepairParallelism();
        if (repairParallelism.isPresent()) {
            LOG.debug("using given repair parallelism {} instead of configured value {}",
                    repairParallelism.get(), context.config.getRepairParallelism());

            parallelism = RepairParallelism.valueOf(repairParallelism.get().toUpperCase());
        }

        // Incremental repair only supports PARALLEL; override whatever was requested.
        if (incrementalRepair) {
            parallelism = RepairParallelism.PARALLEL;
        }

        final RepairRun newRepairRun = repairRunService.registerRepairRun(cluster, theRepairUnit, cause,
                owner.get(), 0, segments, parallelism, intensity);

        // 201 Created with the new run's URI and its serialized status.
        return Response.created(buildRepairRunUri(uriInfo, newRepairRun))
                .entity(new RepairRunStatus(newRepairRun, theRepairUnit, 0)).build();

    } catch (ReaperException e) {
        LOG.error(e.getMessage(), e);
        return Response.serverError().entity(e.getMessage()).build();
    }
}

From source file:io.cassandrareaper.resources.RepairScheduleResource.java

License:Apache License

/**
 * Endpoint used to create a repair schedule. Does not allow triggering the run. Repair schedule
 * will create new repair runs based on the schedule.
 *
 * <p>Notice that query parameter "tables" can be a single String, or a comma-separated list of
 * table names. If the "tables" parameter is omitted, and only the keyspace is defined, then
 * created repair runs will target all the tables in the keyspace.
 *
 * @return created repair schedule data as JSON.
 */
@POST
public Response addRepairSchedule(@Context UriInfo uriInfo,
        @QueryParam("clusterName") Optional<String> clusterName,
        @QueryParam("keyspace") Optional<String> keyspace,
        @QueryParam("tables") Optional<String> tableNamesParam, @QueryParam("owner") Optional<String> owner,
        @QueryParam("segmentCountPerNode") Optional<Integer> segmentCountPerNode,
        @QueryParam("repairParallelism") Optional<String> repairParallelism,
        @QueryParam("intensity") Optional<String> intensityStr,
        @QueryParam("incrementalRepair") Optional<String> incrementalRepairStr,
        @QueryParam("scheduleDaysBetween") Optional<Integer> scheduleDaysBetween,
        @QueryParam("scheduleTriggerTime") Optional<String> scheduleTriggerTime,
        @QueryParam("nodes") Optional<String> nodesToRepairParam,
        @QueryParam("datacenters") Optional<String> datacentersToRepairParam,
        @QueryParam("blacklistedTables") Optional<String> blacklistedTableNamesParam,
        @QueryParam("repairThreadCount") Optional<Integer> repairThreadCountParam) {

    try {
        Response possibleFailResponse = RepairRunResource.checkRequestForAddRepair(context, clusterName,
                keyspace, owner, segmentCountPerNode, repairParallelism, intensityStr, incrementalRepairStr,
                nodesToRepairParam, datacentersToRepairParam, repairThreadCountParam);

        if (null != possibleFailResponse) {
            return possibleFailResponse;
        }

        DateTime nextActivation;
        try {
            nextActivation = getNextActivationTime(scheduleTriggerTime);
            if (nextActivation.isBeforeNow()) {
                return Response.status(Response.Status.BAD_REQUEST)
                        .entity("given schedule_trigger_time is in the past: "
                                + RepairRunStatus.dateTimeToIso8601(nextActivation))
                        .build();
            }
        } catch (IllegalArgumentException ex) {
            LOG.info("cannot parse data string: {}", scheduleTriggerTime.get(), ex);
            return Response.status(Response.Status.BAD_REQUEST).entity("invalid schedule_trigger_time").build();
        }

        if (!scheduleDaysBetween.isPresent()) {
            return Response.status(Response.Status.BAD_REQUEST)
                    .entity("missing required parameter: scheduleDaysBetween").build();
        }

        int segments = getSegmentCount(segmentCountPerNode);
        int daysBetween = getDaysBetween(scheduleDaysBetween);
        Cluster cluster = context.storage.getCluster(Cluster.toSymbolicName(clusterName.get())).get();
        Set<String> tableNames;
        try {
            tableNames = repairRunService.getTableNamesBasedOnParam(cluster, keyspace.get(), tableNamesParam);
        } catch (IllegalArgumentException ex) {
            LOG.error(ex.getMessage(), ex);
            return Response.status(Response.Status.NOT_FOUND).entity(ex.getMessage()).build();
        }

        Set<String> blacklistedTableNames;
        try {
            blacklistedTableNames = repairRunService.getTableNamesBasedOnParam(cluster, keyspace.get(),
                    blacklistedTableNamesParam);
        } catch (IllegalArgumentException ex) {
            LOG.error(ex.getMessage(), ex);
            return Response.status(Response.Status.NOT_FOUND).entity(ex.getMessage()).build();
        }

        final Set<String> nodesToRepair;
        try {
            nodesToRepair = repairRunService.getNodesToRepairBasedOnParam(cluster, nodesToRepairParam);
        } catch (final IllegalArgumentException ex) {
            LOG.error(ex.getMessage(), ex);
            return Response.status(Response.Status.NOT_FOUND).entity(ex.getMessage()).build();
        }

        final Set<String> datacentersToRepair;
        try {
            datacentersToRepair = RepairRunService.getDatacentersToRepairBasedOnParam(cluster,
                    datacentersToRepairParam);
        } catch (final IllegalArgumentException ex) {
            LOG.error(ex.getMessage(), ex);
            return Response.status(Response.Status.NOT_FOUND).entity(ex.getMessage()).build();
        }

        boolean incrementalRepair = isIncrementalRepair(incrementalRepairStr);

        RepairUnit.Builder builder = new RepairUnit.Builder(cluster.getName(), keyspace.get(), tableNames,
                incrementalRepair, nodesToRepair, datacentersToRepair, blacklistedTableNames,
                repairThreadCountParam.or(context.config.getRepairThreadCount()));

        RepairUnit unit = repairUnitService.getNewOrExistingRepairUnit(cluster, builder);
        Preconditions.checkState(unit.getIncrementalRepair() == incrementalRepair);
        RepairParallelism parallelism = context.config.getRepairParallelism();
        if (repairParallelism.isPresent()) {
            LOG.debug("using given repair parallelism {} over configured value {}", repairParallelism.get(),
                    parallelism);
            parallelism = RepairParallelism.valueOf(repairParallelism.get().toUpperCase());
        }

        if (!parallelism.equals(RepairParallelism.PARALLEL) && incrementalRepair) {
            return Response.status(Response.Status.BAD_REQUEST)
                    .entity("Can't mix sequential repair and incremental repairs").build();
        }

        Double intensity = getIntensity(intensityStr);
        Optional<RepairSchedule> conflictingRepairSchedule = repairScheduleService
                .conflictingRepairSchedule(cluster, unit);

        if (conflictingRepairSchedule.isPresent()) {
            RepairSchedule existingSchedule = conflictingRepairSchedule.get();

            if (existingSchedule.getRepairUnitId().equals(unit.getId())
                    && existingSchedule.getDaysBetween() == daysBetween
                    && existingSchedule.getOwner().equals(owner.get())
                    && existingSchedule.getRepairParallelism() == parallelism) {

                return Response.noContent().location(buildRepairScheduleUri(uriInfo, existingSchedule)).build();
            }

            String msg = String.format(
                    "A repair schedule already exists for cluster \"%s\", keyspace \"%s\", and column families: %s",
                    cluster.getName(), unit.getKeyspaceName(), unit.getColumnFamilies());

            return Response.status(Response.Status.CONFLICT)
                    .location(buildRepairScheduleUri(uriInfo, existingSchedule)).entity(msg).build();
        } else {

            RepairSchedule newRepairSchedule = repairScheduleService.storeNewRepairSchedule(cluster, unit,
                    daysBetween, nextActivation, owner.get(), segments, parallelism, intensity);

            return Response.created(buildRepairScheduleUri(uriInfo, newRepairSchedule)).build();
        }
    } catch (ReaperException e) {
        LOG.error(e.getMessage(), e);
        return Response.serverError().entity(e.getMessage()).build();
    }
}

From source file:io.cassandrareaper.resources.view.RepairRunStatusTest.java

License:Apache License

@Test
public void testRunningRepairDuration() {
    // A still-running repair (no endTime): duration is measured from startTime to "now".
    // Fix: call the static factory DateTime.now() directly instead of via a throwaway
    // instance (new DateTime().now()), and use plain decimal 0 instead of octal-style 00.
    RepairRunStatus repairStatus = new RepairRunStatus(UUID.randomUUID(), // runId
            "test", // clusterName
            "test", // keyspaceName
            Collections.EMPTY_LIST, // tables
            10, // segmentsRepaired
            100, // totalSegments
            RepairRun.RunState.RUNNING, // state
            DateTime.now().minusMinutes(1), // startTime
            null, // endTime
            "test", // cause
            "alex", // owner
            "", // lastEvent
            new DateTime(2018, 4, 11, 15, 0, 0, DateTimeZone.UTC), // creationTime
            null, // pauseTime
            0.9, // intensity
            false, // incremental
            RepairParallelism.PARALLEL, // repairParellelism
            Collections.EMPTY_LIST, // nodes
            Collections.EMPTY_LIST, // datacenters
            Collections.EMPTY_LIST, // blacklist
            1); // repair thread count

    // NOTE(review): relies on wall-clock elapsing < 1s between construction and the
    // duration computation; could flake on a heavily loaded machine.
    assertEquals("1 minute 0 seconds", repairStatus.getDuration());
}

From source file:io.cassandrareaper.resources.view.RepairRunStatusTest.java

License:Apache License

@Test
public void testFinishedRepairDuration() {
    // A finished repair: duration is the fixed span between startTime and endTime.
    // Fix: call the static factory DateTime.now() directly instead of via a throwaway
    // instance (new DateTime().now()), and use plain decimal 0 instead of octal-style 00.
    RepairRunStatus repairStatus = new RepairRunStatus(UUID.randomUUID(), // runId
            "test", // clusterName
            "test", // keyspaceName
            Collections.EMPTY_LIST, // tables
            10, // segmentsRepaired
            100, // totalSegments
            RepairRun.RunState.DONE, // state
            DateTime.now().minusMinutes(1).minusSeconds(30), // startTime
            DateTime.now(), // endTime
            "test", // cause
            "alex", // owner
            "", // lastEvent
            new DateTime(2018, 4, 11, 15, 0, 0, DateTimeZone.UTC), // creationTime
            null, // pauseTime
            0.9, // intensity
            false, // incremental
            RepairParallelism.PARALLEL, // repairParellelism
            Collections.EMPTY_LIST, // nodes
            Collections.EMPTY_LIST, // datacenters
            Collections.EMPTY_LIST, // blacklist
            1); // repair thread count

    // NOTE(review): start and end come from two separate now() calls; assumes they land
    // within the same second so the span rounds to exactly 1m30s.
    assertEquals("1 minute 30 seconds", repairStatus.getDuration());
}