Example usage for org.apache.cassandra.repair RepairParallelism DATACENTER_AWARE

List of usage examples for org.apache.cassandra.repair RepairParallelism DATACENTER_AWARE

Introduction

On this page you can find example usage for org.apache.cassandra.repair RepairParallelism.DATACENTER_AWARE.

Prototype

RepairParallelism DATACENTER_AWARE

Document

One node per data center at a time
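
Before the usage examples, here is a minimal, hypothetical sketch (not taken from the sources below) of how this constant is typically selected and compared. The chooseParallelism helper and the multiDc flag are illustrative assumptions only.

import org.apache.cassandra.repair.RepairParallelism;

public class RepairParallelismSketch {

    // Hypothetical helper: prefer DATACENTER_AWARE for multi-datacenter clusters,
    // otherwise fall back to SEQUENTIAL (assumption made for illustration only).
    static RepairParallelism chooseParallelism(boolean multiDc) {
        return multiDc ? RepairParallelism.DATACENTER_AWARE : RepairParallelism.SEQUENTIAL;
    }

    public static void main(String[] args) {
        RepairParallelism parallelism = chooseParallelism(true);
        // DATACENTER_AWARE repairs one node per data center at a time.
        if (parallelism == RepairParallelism.DATACENTER_AWARE) {
            System.out.println("Using datacenter-aware repair, ordinal=" + parallelism.ordinal());
        }
    }
}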

Usage

From source file: com.spotify.reaper.cassandra.JmxProxy.java

License: Apache License

/**
 * Triggers a repair of range (beginToken, endToken] for the given keyspace and column families.
 * The repair is triggered via {@link org.apache.cassandra.service.StorageServiceMBean#forceRepairRangeAsync}.
 * For the time being, we don't allow local or snapshot repairs.
 *
 * @return Repair command number, or 0 if nothing to repair
 */
public int triggerRepair(BigInteger beginToken, BigInteger endToken, String keyspace,
        RepairParallelism repairParallelism, Collection<String> columnFamilies) {
    checkNotNull(ssProxy, "Looks like the proxy is not connected");
    String cassandraVersion = ssProxy.getReleaseVersion();
    boolean canUseDatacenterAware = false;
    try {
        canUseDatacenterAware = versionCompare(cassandraVersion, "2.0.12") >= 0;
    } catch (ReaperException e) {
        LOG.warn("failed on version comparison, not using dc aware repairs by default");
    }
    String msg = String.format(
            "Triggering repair of range (%s,%s] for keyspace \"%s\" on "
                    + "host %s, with repair parallelism %s, in cluster with Cassandra "
                    + "version '%s' (can use DATACENTER_AWARE '%s'), " + "for column families: %s",
            beginToken.toString(), endToken.toString(), keyspace, this.host, repairParallelism,
            cassandraVersion, canUseDatacenterAware, columnFamilies);
    LOG.info(msg);
    if (repairParallelism.equals(RepairParallelism.DATACENTER_AWARE)) {
        if (canUseDatacenterAware) {
            return ssProxy.forceRepairRangeAsync(beginToken.toString(), endToken.toString(), keyspace,
                    repairParallelism.ordinal(), null, null,
                    columnFamilies.toArray(new String[columnFamilies.size()]));
        } else {
            LOG.info("Cannot use DATACENTER_AWARE repair policy for Cassandra cluster with version {},"
                    + " falling back to SEQUENTIAL repair.", cassandraVersion);
            repairParallelism = RepairParallelism.SEQUENTIAL;
        }
    }
    boolean snapshotRepair = repairParallelism.equals(RepairParallelism.SEQUENTIAL);
    return ssProxy.forceRepairRangeAsync(beginToken.toString(), endToken.toString(), keyspace, snapshotRepair,
            false, columnFamilies.toArray(new String[columnFamilies.size()]));
}
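
A hypothetical invocation of the method above; the jmxProxy instance, token values, keyspace and column family names are illustrative assumptions and are not part of the original source.

// Assumes jmxProxy is a connected instance of the JmxProxy class shown above.
BigInteger beginToken = new BigInteger("0");
BigInteger endToken = new BigInteger("85070591730234615865843651857942052864");
int commandId = jmxProxy.triggerRepair(beginToken, endToken, "my_keyspace",
        RepairParallelism.DATACENTER_AWARE, Arrays.asList("my_table"));
if (commandId == 0) {
    System.out.println("Nothing to repair for keyspace my_keyspace");
}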

From source file: io.cassandrareaper.jmx.JmxProxyImpl.java

License: Apache License

@Override
public int triggerRepair(BigInteger beginToken, BigInteger endToken, String keyspace,
        RepairParallelism repairParallelism, Collection<String> columnFamilies, boolean fullRepair,
        Collection<String> datacenters, RepairStatusHandler repairStatusHandler,
        List<RingRange> associatedTokens, int repairThreadCount) throws ReaperException {

    checkNotNull(ssProxy, "Looks like the proxy is not connected");
    String cassandraVersion = getCassandraVersion();
    boolean canUseDatacenterAware = false;
    canUseDatacenterAware = versionCompare(cassandraVersion, "2.0.12") >= 0;

    String msg = String.format(
            "Triggering repair of range (%s,%s] for keyspace \"%s\" on "
                    + "host %s, with repair parallelism %s, in cluster with Cassandra "
                    + "version '%s' (can use DATACENTER_AWARE '%s'), " + "for column families: %s",
            beginToken.toString(), endToken.toString(), keyspace, this.host, repairParallelism,
            cassandraVersion, canUseDatacenterAware, columnFamilies);
    LOG.info(msg);
    if (repairParallelism.equals(RepairParallelism.DATACENTER_AWARE) && !canUseDatacenterAware) {
        LOG.info("Cannot use DATACENTER_AWARE repair policy for Cassandra cluster with version {},"
                + " falling back to SEQUENTIAL repair.", cassandraVersion);
        repairParallelism = RepairParallelism.SEQUENTIAL;
    }
    try {
        if (cassandraVersion.startsWith("2.0") || cassandraVersion.startsWith("1.")) {
            return triggerRepairPre2dot1(repairParallelism, keyspace, columnFamilies, beginToken, endToken,
                    datacenters.size() > 0 ? datacenters : null, repairStatusHandler);
        } else if (cassandraVersion.startsWith("2.1")) {
            return triggerRepair2dot1(fullRepair, repairParallelism, keyspace, columnFamilies, beginToken,
                    endToken, cassandraVersion, datacenters.size() > 0 ? datacenters : null,
                    repairStatusHandler);
        } else {
            return triggerRepairPost2dot2(fullRepair, repairParallelism, keyspace, columnFamilies, beginToken,
                    endToken, cassandraVersion, datacenters, repairStatusHandler, associatedTokens,
                    repairThreadCount);
        }
    } catch (RuntimeException e) {
        LOG.error("Segment repair failed", e);
        throw new ReaperException(e);
    }
}

From source file: io.cassandrareaper.jmx.JmxProxyImpl.java

License: Apache License

private int triggerRepair2dot1(boolean fullRepair, RepairParallelism repairParallelism, String keyspace,
        Collection<String> columnFamilies, BigInteger beginToken, BigInteger endToken, String cassandraVersion,
        Collection<String> datacenters, RepairStatusHandler repairStatusHandler) {

    if (fullRepair) {
        // full repair
        if (repairParallelism.equals(RepairParallelism.DATACENTER_AWARE)) {
            int commandId = ((StorageServiceMBean) ssProxy).forceRepairRangeAsync(beginToken.toString(),
                    endToken.toString(), keyspace, repairParallelism.ordinal(), datacenters,
                    cassandraVersion.startsWith("2.2") ? new HashSet<String>() : null, fullRepair,
                    columnFamilies.toArray(new String[columnFamilies.size()]));

            repairStatusHandlers.putIfAbsent(commandId, repairStatusHandler);
            return commandId;
        }

        boolean snapshotRepair = repairParallelism.equals(RepairParallelism.SEQUENTIAL);

        int commandId = ((StorageServiceMBean) ssProxy).forceRepairRangeAsync(beginToken.toString(),
                endToken.toString(), keyspace,
                snapshotRepair ? RepairParallelism.SEQUENTIAL.ordinal() : RepairParallelism.PARALLEL.ordinal(),
                datacenters, cassandraVersion.startsWith("2.2") ? new HashSet<String>() : null, fullRepair,
                columnFamilies.toArray(new String[columnFamilies.size()]));

        repairStatusHandlers.putIfAbsent(commandId, repairStatusHandler);
        return commandId;
    }

    // incremental repair
    int commandId = ((StorageServiceMBean) ssProxy).forceRepairAsync(keyspace, Boolean.FALSE, Boolean.FALSE,
            Boolean.FALSE, fullRepair, columnFamilies.toArray(new String[columnFamilies.size()]));

    repairStatusHandlers.putIfAbsent(commandId, repairStatusHandler);
    return commandId;
}

From source file: io.cassandrareaper.jmx.JmxProxyImpl.java

License: Apache License

private int triggerRepairPre2dot1(RepairParallelism repairParallelism, String keyspace,
        Collection<String> columnFamilies, BigInteger beginToken, BigInteger endToken,
        Collection<String> datacenters, RepairStatusHandler repairStatusHandler) {

    // Cassandra 1.2 and 2.0 compatibility
    if (repairParallelism.equals(RepairParallelism.DATACENTER_AWARE)) {
        int commandId = ((StorageServiceMBean20) ssProxy).forceRepairRangeAsync(beginToken.toString(),
                endToken.toString(), keyspace, repairParallelism.ordinal(), datacenters, null,
                columnFamilies.toArray(new String[columnFamilies.size()]));

        repairStatusHandlers.putIfAbsent(commandId, repairStatusHandler);
        return commandId;
    }
    boolean snapshotRepair = repairParallelism.equals(RepairParallelism.SEQUENTIAL);
    int commandId = ((StorageServiceMBean20) ssProxy).forceRepairRangeAsync(beginToken.toString(),
            endToken.toString(), keyspace, snapshotRepair, false,
            columnFamilies.toArray(new String[columnFamilies.size()]));

    repairStatusHandlers.putIfAbsent(commandId, repairStatusHandler);
    return commandId;
}

From source file: io.cassandrareaper.ReaperApplicationConfigurationTest.java

License: Apache License

@Before
public void setUp() {
    //create a valid config
    DataSourceFactory dataSourceFactory = new DataSourceFactory();
    dataSourceFactory.setDriverClass("org.postgresql.Driver");
    dataSourceFactory.setUrl("jdbc:postgresql://db.example.com/db-prod");
    dataSourceFactory.setUser("user");
    CassandraFactory cassandraFactory = new CassandraFactory();
    cassandraFactory.setContactPoints(new String[] { "127.0.0.1" });
    config.setCassandraFactory(cassandraFactory);
    config.setPostgresDataSourceFactory(dataSourceFactory);
    config.setHangingRepairTimeoutMins(1);
    config.setRepairParallelism(RepairParallelism.DATACENTER_AWARE);
    config.setRepairRunThreadCount(1);
    config.setSegmentCount(1);
    config.setScheduleDaysBetween(7);
    config.setStorageType("foo");
    config.setIncrementalRepair(false);
}

From source file: io.cassandrareaper.service.ClusterRepairSchedulerTest.java

License: Apache License

private RepairSchedule.Builder aRepairSchedule(Cluster cluster, String keyspace, DateTime creationTime) {
    RepairUnit repairUnit = context.storage.addRepairUnit(aRepair(cluster, keyspace));
    return new RepairSchedule.Builder(repairUnit.getId(), RepairSchedule.State.ACTIVE, 1, DateTime.now(),
            ImmutableList.of(), 10, RepairParallelism.DATACENTER_AWARE, 0.9, creationTime, 0);
}

From source file: io.cassandrareaper.service.PurgeManagerTest.java

License: Apache License

@Test
public void testPurgeByDate() throws InterruptedException, ReaperException {
    AppContext context = new AppContext();
    context.config = new ReaperApplicationConfiguration();
    context.config.setPurgeRecordsAfterInDays(1);

    // Create storage mock
    context.storage = mock(IStorage.class);

    List<Cluster> clusters = Arrays.asList(new Cluster(CLUSTER_NAME, "", Collections.EMPTY_SET));
    when(context.storage.getClusters()).thenReturn(clusters);

    // Add repair runs to the mock
    List<RepairRun> repairRuns = Lists.newArrayList();
    DateTime currentDate = DateTime.now();
    for (int i = 0; i < 10; i++) {
        UUID repairUnitId = UUIDs.timeBased();
        DateTime startTime = currentDate.minusDays(i).minusHours(1);

        repairRuns.add(RepairRun.builder(CLUSTER_NAME, repairUnitId).startTime(startTime).intensity(0.9)
                .segmentCount(10).repairParallelism(RepairParallelism.DATACENTER_AWARE)
                .endTime(startTime.plusSeconds(1)).runState(RunState.DONE).build(UUIDs.timeBased()));
    }

    when(context.storage.getRepairRunsForCluster(anyString(), any())).thenReturn(repairRuns);

    // Invoke the purge manager
    int purged = PurgeManager.create(context).purgeDatabase();

    // Check that runs were removed
    assertEquals(9, purged);
}

From source file: io.cassandrareaper.service.PurgeManagerTest.java

License: Apache License

@Test
public void testPurgeByHistoryDepth() throws InterruptedException, ReaperException {
    AppContext context = new AppContext();
    context.config = new ReaperApplicationConfiguration();
    context.config.setNumberOfRunsToKeepPerUnit(5);

    // Create storage mock
    context.storage = mock(IStorage.class);

    List<Cluster> clusters = Arrays.asList(new Cluster(CLUSTER_NAME, "", Collections.EMPTY_SET));
    when(context.storage.getClusters()).thenReturn(clusters);

    // Add repair runs to the mock
    List<RepairRun> repairRuns = Lists.newArrayList();
    DateTime currentDate = DateTime.now();
    UUID repairUnitId = UUIDs.timeBased();
    for (int i = 0; i < 20; i++) {
        DateTime startTime = currentDate.minusDays(i).minusHours(1);

        repairRuns.add(RepairRun.builder(CLUSTER_NAME, repairUnitId).startTime(startTime).intensity(0.9)
                .segmentCount(10).repairParallelism(RepairParallelism.DATACENTER_AWARE)
                .endTime(startTime.plusSeconds(1)).runState(RunState.DONE).build(UUIDs.timeBased()));
    }

    when(context.storage.getRepairRunsForCluster(anyString(), any())).thenReturn(repairRuns);

    // Invoke the purge manager
    int purged = PurgeManager.create(context).purgeDatabase();

    // Check that runs were removed
    assertEquals(15, purged);
}

From source file: io.cassandrareaper.service.PurgeManagerTest.java

License: Apache License

@Test
public void testSkipPurgeOngoingRuns() throws InterruptedException, ReaperException {
    AppContext context = new AppContext();
    context.config = new ReaperApplicationConfiguration();
    context.config.setPurgeRecordsAfterInDays(1);

    // Create storage mock
    context.storage = mock(IStorage.class);

    List<Cluster> clusters = Arrays.asList(new Cluster(CLUSTER_NAME, "", Collections.EMPTY_SET));
    when(context.storage.getClusters()).thenReturn(clusters);

    // Add repair runs to the mock
    List<RepairRun> repairRuns = Lists.newArrayList();
    DateTime currentDate = DateTime.now();
    for (int i = 0; i < 10; i++) {
        UUID repairUnitId = UUIDs.timeBased();
        DateTime startTime = currentDate.minusDays(i).minusHours(1);

        repairRuns.add(RepairRun.builder(CLUSTER_NAME, repairUnitId).startTime(startTime).intensity(0.9)
                .segmentCount(10).repairParallelism(RepairParallelism.DATACENTER_AWARE)
                .endTime(startTime.plusSeconds(1)).runState(RunState.PAUSED).build(UUIDs.timeBased()));
    }

    when(context.storage.getRepairRunsForCluster(anyString(), any())).thenReturn(repairRuns);

    // Invoke the purge manager
    int purged = PurgeManager.create(context).purgeDatabase();

    // Check that no runs were removed, since paused (ongoing) runs are skipped
    assertEquals(0, purged);
}

From source file: io.cassandrareaper.service.SegmentRunnerTest.java

License: Apache License

@Test
public void getNodeMetricsInLocalDCAvailabilityForRemoteDCNodeTest() throws Exception {
    final AppContext context = new AppContext();
    context.storage = Mockito.mock(CassandraStorage.class);
    when(((IDistributedStorage) context.storage).getNodeMetrics(any(), any())).thenReturn(Optional.absent());
    JmxConnectionFactory jmxConnectionFactory = mock(JmxConnectionFactory.class);
    when(jmxConnectionFactory.connect(any(), anyInt())).thenReturn(mock(JmxProxy.class));
    context.jmxConnectionFactory = jmxConnectionFactory;
    context.config = new ReaperApplicationConfiguration();
    context.config.setDatacenterAvailability(DatacenterAvailability.LOCAL);
    SegmentRunner segmentRunner = new SegmentRunner(context, UUID.randomUUID(), Collections.emptyList(), 1000,
            1.1, RepairParallelism.DATACENTER_AWARE, "test", mock(RepairUnit.class), mock(RepairRunner.class));

    Pair<String, Optional<NodeMetrics>> result = segmentRunner.getNodeMetrics("node-some", "dc1", "dc2").call();
    assertFalse(result.getRight().isPresent());
    verify(jmxConnectionFactory, times(0)).connect(any(), anyInt());
}