Example usage for org.apache.lucene.index CorruptIndexException CorruptIndexException

List of usage examples for org.apache.lucene.index CorruptIndexException CorruptIndexException

Introduction

On this page you can find example usage for the org.apache.lucene.index CorruptIndexException constructor.

Prototype

public CorruptIndexException(String message, String resourceDescription) 

Source Link

Document

Create exception with a message and a description of the resource in which the corruption was detected

Usage

From source file:com.rocana.lucene.codec.v1.RocanaBlockTreeTermsReader.java

License:Apache License

/**
 * Sole constructor. Opens the terms dictionary and terms index files for the
 * segment described by {@code state}, validates both headers, and reads the
 * per-field metadata into {@code fields}.
 *
 * NOTE(review): unlike stock Lucene, this fork deliberately does NOT call
 * CodecUtil.checksumEntireFile on the terms index — see the IMPORTANT comment
 * below; that is the stated reason this codec was forked.
 *
 * @throws IOException if either file cannot be opened or read
 * @throws CorruptIndexException if the on-disk metadata is invalid
 */
public RocanaBlockTreeTermsReader(PostingsReaderBase postingsReader, SegmentReadState state)
        throws IOException {
    boolean success = false;
    IndexInput indexIn = null;

    this.postingsReader = postingsReader;
    this.segment = state.segmentInfo.name;

    String termsName = IndexFileNames.segmentFileName(segment, state.segmentSuffix, TERMS_EXTENSION);
    try {
        termsIn = state.directory.openInput(termsName, state.context);
        version = CodecUtil.checkIndexHeader(termsIn, TERMS_CODEC_NAME, VERSION_START, VERSION_CURRENT,
                state.segmentInfo.getId(), state.segmentSuffix);

        if (version < VERSION_AUTO_PREFIX_TERMS) {
            // Old (pre-5.2.0) index, no auto-prefix terms:
            this.anyAutoPrefixTerms = false;
        } else if (version == VERSION_AUTO_PREFIX_TERMS) {
            // 5.2.x index, might have auto-prefix terms:
            this.anyAutoPrefixTerms = true;
        } else {
            // 5.3.x index, we record up front if we may have written any auto-prefix terms:
            assert version >= VERSION_AUTO_PREFIX_TERMS_COND;
            byte b = termsIn.readByte();
            if (b == 0) {
                this.anyAutoPrefixTerms = false;
            } else if (b == 1) {
                this.anyAutoPrefixTerms = true;
            } else {
                // Any other marker value means the terms file is corrupt.
                throw new CorruptIndexException("invalid anyAutoPrefixTerms: expected 0 or 1 but got " + b,
                        termsIn);
            }
        }

        String indexName = IndexFileNames.segmentFileName(segment, state.segmentSuffix, TERMS_INDEX_EXTENSION);
        indexIn = state.directory.openInput(indexName, state.context);
        // The terms index must carry exactly the same version as the terms file.
        CodecUtil.checkIndexHeader(indexIn, TERMS_INDEX_CODEC_NAME, version, version, state.segmentInfo.getId(),
                state.segmentSuffix);

        // IMPORTANT: comment out this one line to prevent checksumming the entire file.
        //            This is the reason we have a custom Lucene codec and forked Lucene classes.
        //CodecUtil.checksumEntireFile(indexIn);

        // Have PostingsReader init itself
        postingsReader.init(termsIn, state);

        // NOTE: data file is too costly to verify checksum against all the bytes on open,
        // but for now we at least verify proper structure of the checksum footer: which looks
        // for FOOTER_MAGIC + algorithmID. This is cheap and can detect some forms of corruption
        // such as file truncation.
        CodecUtil.retrieveChecksum(termsIn);

        // Read per-field details
        seekDir(termsIn, dirOffset);
        seekDir(indexIn, indexDirOffset);

        final int numFields = termsIn.readVInt();
        if (numFields < 0) {
            throw new CorruptIndexException("invalid numFields: " + numFields, termsIn);
        }

        // Each iteration reads one field's terms-dictionary metadata and
        // registers a RocanaFieldReader for it; every range check guards
        // against a corrupt or truncated file.
        for (int i = 0; i < numFields; ++i) {
            final int field = termsIn.readVInt();
            final long numTerms = termsIn.readVLong();
            if (numTerms <= 0) {
                throw new CorruptIndexException("Illegal numTerms for field number: " + field, termsIn);
            }
            final int numBytes = termsIn.readVInt();
            if (numBytes < 0) {
                throw new CorruptIndexException(
                        "invalid rootCode for field number: " + field + ", numBytes=" + numBytes, termsIn);
            }
            final BytesRef rootCode = new BytesRef(new byte[numBytes]);
            termsIn.readBytes(rootCode.bytes, 0, numBytes);
            rootCode.length = numBytes;
            final FieldInfo fieldInfo = state.fieldInfos.fieldInfo(field);
            if (fieldInfo == null) {
                throw new CorruptIndexException("invalid field number: " + field, termsIn);
            }
            // DOCS-only fields carry no total-term-frequency; -1 is the sentinel.
            final long sumTotalTermFreq = fieldInfo.getIndexOptions() == IndexOptions.DOCS ? -1
                    : termsIn.readVLong();
            final long sumDocFreq = termsIn.readVLong();
            final int docCount = termsIn.readVInt();
            final int longsSize = termsIn.readVInt();
            if (longsSize < 0) {
                throw new CorruptIndexException(
                        "invalid longsSize for field: " + fieldInfo.name + ", longsSize=" + longsSize, termsIn);
            }
            BytesRef minTerm = readBytesRef(termsIn);
            BytesRef maxTerm = readBytesRef(termsIn);
            if (docCount < 0 || docCount > state.segmentInfo.maxDoc()) { // #docs with field must be <= #docs
                throw new CorruptIndexException(
                        "invalid docCount: " + docCount + " maxDoc: " + state.segmentInfo.maxDoc(), termsIn);
            }
            if (sumDocFreq < docCount) { // #postings must be >= #docs with field
                throw new CorruptIndexException("invalid sumDocFreq: " + sumDocFreq + " docCount: " + docCount,
                        termsIn);
            }
            if (sumTotalTermFreq != -1 && sumTotalTermFreq < sumDocFreq) { // #positions must be >= #postings
                throw new CorruptIndexException(
                        "invalid sumTotalTermFreq: " + sumTotalTermFreq + " sumDocFreq: " + sumDocFreq,
                        termsIn);
            }
            final long indexStartFP = indexIn.readVLong();
            RocanaFieldReader previous = fields.put(fieldInfo.name,
                    new RocanaFieldReader(this, fieldInfo, numTerms, rootCode, sumTotalTermFreq, sumDocFreq,
                            docCount, indexStartFP, longsSize, indexIn, minTerm, maxTerm));
            if (previous != null) {
                throw new CorruptIndexException("duplicate field: " + fieldInfo.name, termsIn);
            }
        }

        indexIn.close();
        success = true;
    } finally {
        if (!success) {
            // this.close() will close in:
            IOUtils.closeWhileHandlingException(indexIn, this);
        }
    }
}

From source file:com.vmware.xenon.services.common.Lucene60FieldInfosFormatWithCache.java

License:Open Source License

/**
 * Reads the field infos file for {@code segmentInfo}, deduplicating the
 * resulting FieldInfo/FieldInfos instances through {@code this.cache}
 * where possible. Fields that carry docValues (dvGen >= 0) bypass the
 * cache entirely.
 *
 * @throws IOException on read failure
 * @throws CorruptIndexException if the file's contents are invalid
 */
@Override
public FieldInfos read(Directory directory, SegmentInfo segmentInfo, String segmentSuffix, IOContext context)
        throws IOException {
    //////////////////////
    // Whether the finished FieldInfos may be served from the cache; cleared
    // below as soon as any field with docValues is seen.
    boolean checkInfosCache = true;
    //////////////////////
    final String fileName = IndexFileNames.segmentFileName(segmentInfo.name, segmentSuffix, EXTENSION);
    try (ChecksumIndexInput input = directory.openChecksumInput(fileName, context)) {
        // Any exception raised while reading is captured and handed to
        // CodecUtil.checkFooter in the finally block below.
        Throwable priorE = null;
        FieldInfo[] infos = null;
        try {
            CodecUtil.checkIndexHeader(input, Lucene60FieldInfosFormatWithCache.CODEC_NAME,
                    Lucene60FieldInfosFormatWithCache.FORMAT_START,
                    Lucene60FieldInfosFormatWithCache.FORMAT_CURRENT, segmentInfo.getId(), segmentSuffix);

            final int size = input.readVInt(); //read in the size
            infos = new FieldInfo[size];

            // previous field's attribute map, we share when possible:
            Map<String, String> lastAttributes = Collections.emptyMap();

            for (int i = 0; i < size; i++) {
                String name = input.readString();
                final int fieldNumber = input.readVInt();
                if (fieldNumber < 0) {
                    throw new CorruptIndexException(
                            "invalid field number for field: " + name + ", fieldNumber=" + fieldNumber, input);
                }
                // Boolean per-field flags are packed into a single byte.
                byte bits = input.readByte();
                boolean storeTermVector = (bits & STORE_TERMVECTOR) != 0;
                boolean omitNorms = (bits & OMIT_NORMS) != 0;
                boolean storePayloads = (bits & STORE_PAYLOADS) != 0;

                final IndexOptions indexOptions = getIndexOptions(input, input.readByte());

                // DV Types are packed in one byte
                final DocValuesType docValuesType = getDocValuesType(input, input.readByte());
                final long dvGen = input.readLong();
                Map<String, String> attributes = input.readMapOfStrings();
                // just use the last field's map if its the same
                if (attributes.equals(lastAttributes)) {
                    attributes = lastAttributes;
                }
                lastAttributes = attributes;
                int pointDimensionCount = input.readVInt();
                int pointNumBytes;
                if (pointDimensionCount != 0) {
                    pointNumBytes = input.readVInt();
                } else {
                    pointNumBytes = 0;
                }

                try {
                    //////////////////////
                    if (dvGen >= 0) {
                        // skip fields with docValues, they don't cache well
                        checkInfosCache = false;
                        infos[i] = new FieldInfo(name, fieldNumber, storeTermVector, omitNorms, storePayloads,
                                indexOptions, docValuesType, dvGen, attributes, pointDimensionCount,
                                pointNumBytes);
                    } else {
                        infos[i] = this.cache.dedupFieldInfo(name, fieldNumber, storeTermVector, omitNorms,
                                storePayloads, indexOptions, docValuesType, dvGen, attributes,
                                pointDimensionCount, pointNumBytes);
                    }
                    //////////////////////
                } catch (IllegalStateException e) {
                    throw new CorruptIndexException(
                            "invalid fieldinfo for field: " + name + ", fieldNumber=" + fieldNumber, input, e);
                }
            }
        } catch (Throwable exception) {
            priorE = exception;
        } finally {
            // Always validate the footer; any exception captured above is
            // passed along so corruption context is preserved.
            CodecUtil.checkFooter(input, priorE);
        }

        //////////////////////
        if (checkInfosCache) {
            return this.cache.dedupFieldInfos(infos);
        } else {
            FieldInfos result = new FieldInfos(infos);
            this.cache.trimFieldInfos(result);
            return result;
        }
        //////////////////////
    }
}

From source file:com.vmware.xenon.services.common.Lucene60FieldInfosFormatWithCache.java

License:Open Source License

/**
 * Decodes the on-disk doc-values type marker byte into a DocValuesType.
 *
 * @param input the source input, used only for exception context
 * @param b the marker byte read from the field infos file
 * @throws CorruptIndexException if {@code b} is not in the range 0..5
 */
private static DocValuesType getDocValuesType(IndexInput input, byte b) throws IOException {
    if (b == 0) {
        return DocValuesType.NONE;
    }
    if (b == 1) {
        return DocValuesType.NUMERIC;
    }
    if (b == 2) {
        return DocValuesType.BINARY;
    }
    if (b == 3) {
        return DocValuesType.SORTED;
    }
    if (b == 4) {
        return DocValuesType.SORTED_SET;
    }
    if (b == 5) {
        return DocValuesType.SORTED_NUMERIC;
    }
    throw new CorruptIndexException("invalid docvalues byte: " + b, input);
}

From source file:com.vmware.xenon.services.common.Lucene60FieldInfosFormatWithCache.java

License:Open Source License

/**
 * Decodes the on-disk index-options marker byte into an IndexOptions value.
 *
 * @param input the source input, used only for exception context
 * @param b the marker byte read from the field infos file
 * @throws CorruptIndexException if {@code b} is not in the range 0..4
 */
private static IndexOptions getIndexOptions(IndexInput input, byte b) throws IOException {
    if (b == 0) {
        return IndexOptions.NONE;
    }
    if (b == 1) {
        return IndexOptions.DOCS;
    }
    if (b == 2) {
        return IndexOptions.DOCS_AND_FREQS;
    }
    if (b == 3) {
        return IndexOptions.DOCS_AND_FREQS_AND_POSITIONS;
    }
    if (b == 4) {
        return IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS;
    }
    // Anything else cannot have been written by this format.
    throw new CorruptIndexException("invalid IndexOptions byte: " + b, input);
}

From source file:org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplanationTests.java

License:Apache License

/**
 * Exercises TransportClusterAllocationExplainAction.calculateNodeExplanation
 * across combinations of store status (IO error / corrupt / stale / valid),
 * allocation-decider decisions, and allocation ids, asserting the expected
 * explanation text, final decision and store-copy classification.
 *
 * NOTE(review): {@code i}, {@code node}, {@code indexMetaData},
 * {@code yesDecision}, {@code noDecision} and {@code primaryShard} are not
 * defined in this method; presumably fields of the enclosing test class —
 * confirm against the full source file.
 */
public void testDecisionAndExplanation() {
    Exception e = new IOException("stuff's broke, yo");
    Exception corruptE = new CorruptIndexException("stuff's corrupt, yo", "");
    Float nodeWeight = randomFloat();
    Set<String> activeAllocationIds = new HashSet<>();
    activeAllocationIds.add("eggplant");
    ShardRouting primaryStartedShard = ShardRouting.newUnassigned(new ShardId(i, 0), true,
            StoreRecoverySource.EXISTING_STORE_INSTANCE,
            new UnassignedInfo(UnassignedInfo.Reason.INDEX_REOPENED, "foo"));
    ShardRouting replicaStartedShard = ShardRouting.newUnassigned(new ShardId(i, 0), false,
            PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_REOPENED, "foo"));

    // Store with an IOException: the copy cannot be read.
    IndicesShardStoresResponse.StoreStatus storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42,
            "eggplant", IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, e);
    NodeExplanation ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryStartedShard,
            indexMetaData, node, yesDecision, nodeWeight, storeStatus, "", activeAllocationIds, false);
    assertExplanations(ne, "the copy of the shard cannot be read",
            ClusterAllocationExplanation.FinalDecision.NO, ClusterAllocationExplanation.StoreCopy.IO_ERROR);

    ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryShard, indexMetaData, node,
            yesDecision, nodeWeight, null, "", activeAllocationIds, false);
    assertExplanations(ne, "the shard can be assigned", ClusterAllocationExplanation.FinalDecision.YES,
            ClusterAllocationExplanation.StoreCopy.NONE);

    ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryStartedShard, indexMetaData,
            node, yesDecision, nodeWeight, null, "", activeAllocationIds, false);
    assertExplanations(ne, "there is no copy of the shard available",
            ClusterAllocationExplanation.FinalDecision.NO, ClusterAllocationExplanation.StoreCopy.NONE);

    ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryShard, indexMetaData, node,
            noDecision, nodeWeight, null, "", activeAllocationIds, false);
    assertExplanations(ne,
            "the shard cannot be assigned because one or more allocation decider returns a 'NO' decision",
            ClusterAllocationExplanation.FinalDecision.NO, ClusterAllocationExplanation.StoreCopy.NONE);

    storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "eggplant",
            IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null);
    ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryShard, indexMetaData, node,
            noDecision, nodeWeight, storeStatus, "", activeAllocationIds, false);
    assertExplanations(ne,
            "the shard cannot be assigned because one or more allocation decider returns a 'NO' decision",
            ClusterAllocationExplanation.FinalDecision.NO, ClusterAllocationExplanation.StoreCopy.AVAILABLE);

    // Store with a CorruptIndexException: the copy is classified as corrupt.
    storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "eggplant",
            IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, corruptE);
    ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryStartedShard, indexMetaData,
            node, yesDecision, nodeWeight, storeStatus, "", activeAllocationIds, false);
    assertExplanations(ne, "the copy of the shard is corrupt", ClusterAllocationExplanation.FinalDecision.NO,
            ClusterAllocationExplanation.StoreCopy.CORRUPT);

    // Allocation id "banana" is not in activeAllocationIds: stale copy.
    storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "banana",
            IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null);
    ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryShard, indexMetaData, node,
            yesDecision, nodeWeight, storeStatus, "", activeAllocationIds, false);
    assertExplanations(ne, "the shard can be assigned", ClusterAllocationExplanation.FinalDecision.YES,
            ClusterAllocationExplanation.StoreCopy.STALE);

    storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "banana",
            IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null);
    ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryStartedShard, indexMetaData,
            node, yesDecision, nodeWeight, storeStatus, "", activeAllocationIds, false);
    assertExplanations(ne, "the copy of the shard is stale, allocation ids do not match",
            ClusterAllocationExplanation.FinalDecision.NO, ClusterAllocationExplanation.StoreCopy.STALE);

    storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "eggplant",
            IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null);
    ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryShard, indexMetaData, node,
            yesDecision, nodeWeight, storeStatus, "node-0", activeAllocationIds, false);
    assertExplanations(ne, "the shard is already assigned to this node",
            ClusterAllocationExplanation.FinalDecision.ALREADY_ASSIGNED,
            ClusterAllocationExplanation.StoreCopy.AVAILABLE);

    storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "eggplant",
            IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null);
    ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryShard, indexMetaData, node,
            yesDecision, nodeWeight, storeStatus, "", activeAllocationIds, false);
    assertExplanations(ne, "the shard can be assigned and the node contains a valid copy of the shard data",
            ClusterAllocationExplanation.FinalDecision.YES, ClusterAllocationExplanation.StoreCopy.AVAILABLE);

    // Last flag = true: shard state fetch still pending.
    storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "eggplant",
            IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null);
    ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryStartedShard, indexMetaData,
            node, yesDecision, nodeWeight, storeStatus, "", activeAllocationIds, true);
    assertExplanations(ne, "the shard's state is still being fetched so it cannot be allocated",
            ClusterAllocationExplanation.FinalDecision.NO, ClusterAllocationExplanation.StoreCopy.AVAILABLE);

    storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "eggplant",
            IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, null);
    ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(replicaStartedShard, indexMetaData,
            node, noDecision, nodeWeight, storeStatus, "", activeAllocationIds, true);
    assertExplanations(ne, "the shard cannot be assigned because allocation deciders return a NO decision",
            ClusterAllocationExplanation.FinalDecision.NO, ClusterAllocationExplanation.StoreCopy.AVAILABLE);
}

From source file:org.elasticsearch.action.admin.indices.segments.IndicesShardStoreRequestTests.java

License:Apache License

/**
 * Corrupts a random subset of shard copies and verifies that the shard-stores
 * API reports a store exception exactly for the corrupted copies.
 *
 * Fixes: replaces the manual get/null-check/put map population with
 * Map.computeIfAbsent, and the confusable lowercase {@code 0l} long
 * literals with {@code 0L}.
 */
@Test
public void testCorruptedShards() throws Exception {
    String index = "test";
    internalCluster().ensureAtLeastNumDataNodes(2);
    assertAcked(
            prepareCreate(index).setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "5")
                    .put(MockFSDirectoryService.CHECK_INDEX_ON_CLOSE, false)));
    indexRandomData(index);
    ensureGreen(index);

    logger.info("--> disable allocation");
    disableAllocation(index);

    logger.info("--> corrupt random shard copies");
    // Per shard id, the names of the nodes whose copy we deliberately failed.
    Map<Integer, Set<String>> corruptedShardIDMap = new HashMap<>();
    for (String node : internalCluster().nodesInclude(index)) {
        IndicesService indexServices = internalCluster().getInstance(IndicesService.class, node);
        IndexService indexShards = indexServices.indexServiceSafe(index);
        for (Integer shardId : indexShards.shardIds()) {
            IndexShard shard = indexShards.shardSafe(shardId);
            if (randomBoolean()) {
                shard.failShard("test", new CorruptIndexException("test corrupted", ""));
                corruptedShardIDMap.computeIfAbsent(shardId, id -> new HashSet<>()).add(node);
            }
        }
    }

    IndicesShardStoresResponse rsp = client().admin().indices().prepareShardStores(index)
            .setShardStatuses("all").get();
    ImmutableOpenIntMap<List<IndicesShardStoresResponse.StoreStatus>> shardStatuses = rsp.getStoreStatuses()
            .get(index);
    assertNotNull(shardStatuses);
    assertThat(shardStatuses.size(), greaterThan(0));
    for (IntObjectCursor<List<IndicesShardStoresResponse.StoreStatus>> shardStatus : shardStatuses) {
        for (IndicesShardStoresResponse.StoreStatus status : shardStatus.value) {
            if (corruptedShardIDMap.containsKey(shardStatus.key)
                    && corruptedShardIDMap.get(shardStatus.key).contains(status.getNode().name())) {
                // Corrupted copies must surface a store exception.
                assertThat(status.getVersion(), greaterThanOrEqualTo(0L));
                assertThat(status.getStoreException(), notNullValue());
            } else {
                assertThat(status.getVersion(), greaterThanOrEqualTo(0L));
                assertNull(status.getStoreException());
            }
        }
    }
    logger.info("--> enable allocation");
    enableAllocation(index);
}

From source file:org.elasticsearch.action.admin.indices.shards.IndicesShardStoreRequestIT.java

License:Apache License

/**
 * Corrupts a random subset of shard copies and verifies that the shard-stores
 * API reports a store exception exactly for the corrupted copies.
 *
 * Fixes: replaces the manual get/null-check/put map population with
 * Map.computeIfAbsent.
 */
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/12416")
public void testCorruptedShards() throws Exception {
    String index = "test";
    internalCluster().ensureAtLeastNumDataNodes(2);
    assertAcked(
            prepareCreate(index).setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "5")
                    .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false)));

    indexRandomData(index);
    ensureGreen(index);

    logger.info("--> disable allocation");
    disableAllocation(index);

    logger.info("--> corrupt random shard copies");
    // Per shard id, the names of the nodes whose copy we deliberately failed.
    Map<Integer, Set<String>> corruptedShardIDMap = new HashMap<>();
    Index idx = resolveIndex(index);
    for (String node : internalCluster().nodesInclude(index)) {
        IndicesService indexServices = internalCluster().getInstance(IndicesService.class, node);
        IndexService indexShards = indexServices.indexServiceSafe(idx);
        for (Integer shardId : indexShards.shardIds()) {
            IndexShard shard = indexShards.getShard(shardId);
            if (randomBoolean()) {
                shard.failShard("test", new CorruptIndexException("test corrupted", ""));
                corruptedShardIDMap.computeIfAbsent(shardId, id -> new HashSet<>()).add(node);
            }
        }
    }

    IndicesShardStoresResponse rsp = client().admin().indices().prepareShardStores(index)
            .setShardStatuses("all").get();
    ImmutableOpenIntMap<List<IndicesShardStoresResponse.StoreStatus>> shardStatuses = rsp.getStoreStatuses()
            .get(index);
    assertNotNull(shardStatuses);
    assertThat(shardStatuses.size(), greaterThan(0));
    for (IntObjectCursor<List<IndicesShardStoresResponse.StoreStatus>> shardStatus : shardStatuses) {
        for (IndicesShardStoresResponse.StoreStatus status : shardStatus.value) {
            if (corruptedShardIDMap.containsKey(shardStatus.key)
                    && corruptedShardIDMap.get(shardStatus.key).contains(status.getNode().getName())) {
                // Corrupted copies must surface a store exception.
                assertThat(status.getLegacyVersion(), greaterThanOrEqualTo(0L));
                assertThat(status.getStoreException(), notNullValue());
            } else {
                assertThat(status.getLegacyVersion(), greaterThanOrEqualTo(0L));
                assertNull(status.getStoreException());
            }
        }
    }
    logger.info("--> enable allocation");
    enableAllocation(index);
}

From source file:org.elasticsearch.action.admin.indices.shards.IndicesShardStoreRequestTests.java

License:Apache License

/**
 * Corrupts a random subset of shard copies and verifies that the shard-stores
 * API reports a store exception exactly for the corrupted copies.
 *
 * Fixes: replaces the manual get/null-check/put map population with
 * Map.computeIfAbsent, and the confusable lowercase {@code 0l} long
 * literals with {@code 0L}.
 */
@Test
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/12416")
public void testCorruptedShards() throws Exception {
    String index = "test";
    internalCluster().ensureAtLeastNumDataNodes(2);
    assertAcked(
            prepareCreate(index).setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "5")
                    .put(MockFSDirectoryService.CHECK_INDEX_ON_CLOSE, false)));
    indexRandomData(index);
    ensureGreen(index);

    logger.info("--> disable allocation");
    disableAllocation(index);

    logger.info("--> corrupt random shard copies");
    // Per shard id, the names of the nodes whose copy we deliberately failed.
    Map<Integer, Set<String>> corruptedShardIDMap = new HashMap<>();
    for (String node : internalCluster().nodesInclude(index)) {
        IndicesService indexServices = internalCluster().getInstance(IndicesService.class, node);
        IndexService indexShards = indexServices.indexServiceSafe(index);
        for (Integer shardId : indexShards.shardIds()) {
            IndexShard shard = indexShards.shardSafe(shardId);
            if (randomBoolean()) {
                shard.failShard("test", new CorruptIndexException("test corrupted", ""));
                corruptedShardIDMap.computeIfAbsent(shardId, id -> new HashSet<>()).add(node);
            }
        }
    }

    IndicesShardStoresResponse rsp = client().admin().indices().prepareShardStores(index)
            .setShardStatuses("all").get();
    ImmutableOpenIntMap<List<IndicesShardStoresResponse.StoreStatus>> shardStatuses = rsp.getStoreStatuses()
            .get(index);
    assertNotNull(shardStatuses);
    assertThat(shardStatuses.size(), greaterThan(0));
    for (IntObjectCursor<List<IndicesShardStoresResponse.StoreStatus>> shardStatus : shardStatuses) {
        for (IndicesShardStoresResponse.StoreStatus status : shardStatus.value) {
            if (corruptedShardIDMap.containsKey(shardStatus.key)
                    && corruptedShardIDMap.get(shardStatus.key).contains(status.getNode().name())) {
                // Corrupted copies must surface a store exception.
                assertThat(status.getVersion(), greaterThanOrEqualTo(0L));
                assertThat(status.getStoreException(), notNullValue());
            } else {
                assertThat(status.getVersion(), greaterThanOrEqualTo(0L));
                assertNull(status.getStoreException());
            }
        }
    }
    logger.info("--> enable allocation");
    enableAllocation(index);
}

From source file:org.elasticsearch.action.support.replication.ReplicationOperationTests.java

License:Apache License

/**
 * Runs a replication operation against a randomized cluster state where some
 * replicas fail (critically with CorruptIndexException, or transiently with
 * IndexShardNotStartedException) and some in-sync allocation ids have no
 * routing entry, then asserts the resulting ShardInfo counts, failed-shard
 * set and stale-copy markings.
 *
 * NOTE(review): helpers such as stateWithActivePrimary, getExpectedReplicas,
 * TestPrimary, TestReplicaProxy and TestReplicationOperation are defined
 * elsewhere in this test class — not visible here.
 */
public void testReplication() throws Exception {
    final String index = "test";
    final ShardId shardId = new ShardId(index, "_na_", 0);

    ClusterState state = stateWithActivePrimary(index, true, randomInt(5));
    IndexMetaData indexMetaData = state.getMetaData().index(index);
    final long primaryTerm = indexMetaData.primaryTerm(0);
    final IndexShardRoutingTable indexShardRoutingTable = state.getRoutingTable().shardRoutingTable(shardId);
    ShardRouting primaryShard = indexShardRoutingTable.primaryShard();
    if (primaryShard.relocating() && randomBoolean()) {
        // simulate execution of the replication phase on the relocation target node after relocation source was marked as relocated
        state = ClusterState.builder(state)
                .nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(primaryShard.relocatingNodeId()))
                .build();
        primaryShard = primaryShard.getTargetRelocatingShard();
    }
    // add a few in-sync allocation ids that don't have corresponding routing entries
    Set<String> staleAllocationIds = Sets.newHashSet(generateRandomStringArray(4, 10, false));
    state = ClusterState.builder(state)
            .metaData(
                    MetaData.builder(state.metaData())
                            .put(IndexMetaData.builder(indexMetaData).putInSyncAllocationIds(0,
                                    Sets.union(indexMetaData.inSyncAllocationIds(0), staleAllocationIds))))
            .build();

    final Set<ShardRouting> expectedReplicas = getExpectedReplicas(shardId, state);

    // Randomly pick replicas to fail; only "critical" failures (corruption)
    // are expected to end up in the failed-shards set.
    final Map<ShardRouting, Exception> expectedFailures = new HashMap<>();
    final Set<ShardRouting> expectedFailedShards = new HashSet<>();
    for (ShardRouting replica : expectedReplicas) {
        if (randomBoolean()) {
            Exception t;
            boolean criticalFailure = randomBoolean();
            if (criticalFailure) {
                t = new CorruptIndexException("simulated", (String) null);
            } else {
                t = new IndexShardNotStartedException(shardId, IndexShardState.RECOVERING);
            }
            logger.debug("--> simulating failure on {} with [{}]", replica, t.getClass().getSimpleName());
            expectedFailures.put(replica, t);
            if (criticalFailure) {
                expectedFailedShards.add(replica);
            }
        }
    }

    Request request = new Request(shardId);
    PlainActionFuture<TestPrimary.Result> listener = new PlainActionFuture<>();
    final ClusterState finalState = state;
    final TestReplicaProxy replicasProxy = new TestReplicaProxy(expectedFailures);
    final TestReplicationOperation op = new TestReplicationOperation(request,
            new TestPrimary(primaryShard, primaryTerm), listener, replicasProxy, () -> finalState);
    op.execute();

    assertThat(request.primaryTerm(), equalTo(primaryTerm));
    assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true));
    assertThat(request.processedOnReplicas, equalTo(expectedReplicas));
    assertThat(replicasProxy.failedReplicas, equalTo(expectedFailedShards));
    assertThat(replicasProxy.markedAsStaleCopies, equalTo(staleAllocationIds));
    assertTrue("listener is not marked as done", listener.isDone());
    ShardInfo shardInfo = listener.actionGet().getShardInfo();
    assertThat(shardInfo.getFailed(), equalTo(expectedFailedShards.size()));
    assertThat(shardInfo.getFailures(), arrayWithSize(expectedFailedShards.size()));
    assertThat(shardInfo.getSuccessful(), equalTo(1 + expectedReplicas.size() - expectedFailures.size()));
    final List<ShardRouting> unassignedShards = indexShardRoutingTable
            .shardsWithState(ShardRoutingState.UNASSIGNED);
    final int totalShards = 1 + expectedReplicas.size() + unassignedShards.size();
    assertThat(shardInfo.getTotal(), equalTo(totalShards));
}

From source file:org.elasticsearch.action.support.replication.ReplicationOperationTests.java

License:Apache License

/**
 * Verifies that when a replica failure (or stale-copy marking) reports the
 * primary as demoted, the replication operation fails the primary shard and
 * surfaces a RetryOnPrimaryException to the listener.
 *
 * NOTE(review): helpers such as stateWithActivePrimary, getExpectedReplicas,
 * TestPrimary, TestReplicaProxy, TestReplicationOperation and
 * assertListenerThrows are defined elsewhere in this test class — not
 * visible here.
 */
public void testDemotedPrimary() throws Exception {
    final String index = "test";
    final ShardId shardId = new ShardId(index, "_na_", 0);

    ClusterState state = stateWithActivePrimary(index, true, 1 + randomInt(2), randomInt(2));
    IndexMetaData indexMetaData = state.getMetaData().index(index);
    final long primaryTerm = indexMetaData.primaryTerm(0);
    ShardRouting primaryShard = state.getRoutingTable().shardRoutingTable(shardId).primaryShard();
    if (primaryShard.relocating() && randomBoolean()) {
        // simulate execution of the replication phase on the relocation target node after relocation source was marked as relocated
        state = ClusterState.builder(state)
                .nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(primaryShard.relocatingNodeId()))
                .build();
        primaryShard = primaryShard.getTargetRelocatingShard();
    }
    // add in-sync allocation id that doesn't have a corresponding routing entry
    state = ClusterState.builder(state)
            .metaData(MetaData.builder(state.metaData())
                    .put(IndexMetaData.builder(indexMetaData).putInSyncAllocationIds(0, Sets.union(
                            indexMetaData.inSyncAllocationIds(0), Sets.newHashSet(randomAsciiOfLength(10))))))
            .build();

    final Set<ShardRouting> expectedReplicas = getExpectedReplicas(shardId, state);

    // One randomly chosen replica fails with a corruption error.
    final Map<ShardRouting, Exception> expectedFailures = new HashMap<>();
    final ShardRouting failedReplica = randomFrom(new ArrayList<>(expectedReplicas));
    expectedFailures.put(failedReplica, new CorruptIndexException("simulated", (String) null));

    Request request = new Request(shardId);
    PlainActionFuture<TestPrimary.Result> listener = new PlainActionFuture<>();
    final ClusterState finalState = state;
    // Randomly choose which path reports the primary demotion: the replica
    // failure callback or the stale-copy marking callback.
    final boolean testPrimaryDemotedOnStaleShardCopies = randomBoolean();
    final TestReplicaProxy replicasProxy = new TestReplicaProxy(expectedFailures) {
        @Override
        public void failShard(ShardRouting replica, long primaryTerm, String message, Exception exception,
                Runnable onSuccess, Consumer<Exception> onPrimaryDemoted,
                Consumer<Exception> onIgnoredFailure) {
            if (testPrimaryDemotedOnStaleShardCopies) {
                super.failShard(replica, primaryTerm, message, exception, onSuccess, onPrimaryDemoted,
                        onIgnoredFailure);
            } else {
                assertThat(replica, equalTo(failedReplica));
                onPrimaryDemoted.accept(new ElasticsearchException("the king is dead"));
            }
        }

        @Override
        public void markShardCopyAsStale(ShardId shardId, String allocationId, long primaryTerm,
                Runnable onSuccess, Consumer<Exception> onPrimaryDemoted,
                Consumer<Exception> onIgnoredFailure) {
            if (testPrimaryDemotedOnStaleShardCopies) {
                onPrimaryDemoted.accept(new ElasticsearchException("the king is dead"));
            } else {
                super.markShardCopyAsStale(shardId, allocationId, primaryTerm, onSuccess, onPrimaryDemoted,
                        onIgnoredFailure);
            }
        }
    };
    // Records whether the operation failed the primary exactly once.
    AtomicBoolean primaryFailed = new AtomicBoolean();
    final TestPrimary primary = new TestPrimary(primaryShard, primaryTerm) {
        @Override
        public void failShard(String message, Exception exception) {
            assertTrue(primaryFailed.compareAndSet(false, true));
        }
    };
    final TestReplicationOperation op = new TestReplicationOperation(request, primary, listener, replicasProxy,
            () -> finalState);
    op.execute();

    assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true));
    assertTrue("listener is not marked as done", listener.isDone());
    assertTrue(primaryFailed.get());
    assertListenerThrows("should throw exception to trigger retry", listener,
            ReplicationOperation.RetryOnPrimaryException.class);
}