Example usage for org.apache.lucene.index SegmentInfos getVersion

List of usage examples for org.apache.lucene.index SegmentInfos getVersion

Introduction

In this page you can find the example usage for org.apache.lucene.index SegmentInfos getVersion.

Prototype

public long getVersion() 

Source Link

Document

Returns the version number that was assigned when this SegmentInfos was generated; it increases with each commit, so it can be used to detect index changes.

Usage

From source file:org.elasticsearch.index.gateway.IndexShardGateway.java

License:Apache License

/**
 * Recovers the state of the shard from the local gateway: reads the last
 * committed Lucene segments (if any), records recovery statistics, then
 * replays the translog and finalizes recovery.
 *
 * @param indexShouldExists whether index data is expected to already exist on
 *                          disk; if {@code true} and the segments cannot be
 *                          read, recovery fails
 * @param recoveryState     mutable recovery progress/statistics to populate
 * @throws IndexShardGatewayRecoveryException if the index version cannot be
 *         fetched when it should exist, or the engine fails during recovery
 */
public void recover(boolean indexShouldExists, RecoveryState recoveryState)
        throws IndexShardGatewayRecoveryException {
    indexShard.prepareForIndexRecovery();
    long version = -1; // -1 means "no committed segments found"
    final Map<String, Mapping> typesToUpdate;
    SegmentInfos si = null;
    // Pin the store for the duration of recovery so it cannot be closed underneath us;
    // released in the finally block below.
    indexShard.store().incRef();
    try {
        try {
            indexShard.store().failIfCorrupted();
            try {
                si = Lucene.readSegmentInfos(indexShard.store().directory());
            } catch (Throwable e) {
                // Could not read segment infos; capture the directory listing for diagnostics.
                String files = "_unknown_";
                try {
                    files = Arrays.toString(indexShard.store().directory().listAll());
                } catch (Throwable e1) {
                    files += " (failure=" + ExceptionsHelper.detailedMessage(e1) + ")";
                }
                if (indexShouldExists) {
                    // The shard was allocated here expecting data on disk — treat as fatal.
                    throw new IndexShardGatewayRecoveryException(shardId(),
                            "shard allocated for local recovery (post api), should exist, but doesn't, current files: "
                                    + files,
                            e);
                }
            }
            if (si != null) {
                if (indexShouldExists) {
                    version = si.getVersion();
                } else {
                    // it exists in the directory, but shouldn't exist on the FS: it's a leftover (possibly dangling).
                    // This is a "new index create" API call, so it is safer to clean it than to reuse stale data.
                    logger.trace("cleaning existing shard, shouldn't exists");
                    // Opening an IndexWriter with OpenMode.CREATE wipes the existing index contents.
                    IndexWriter writer = new IndexWriter(indexShard.store().directory(),
                            new IndexWriterConfig(Lucene.STANDARD_ANALYZER)
                                    .setOpenMode(IndexWriterConfig.OpenMode.CREATE));
                    writer.close();
                    recoveryState.getTranslog().totalOperations(0);
                }
            }
        } catch (Throwable e) {
            throw new IndexShardGatewayRecoveryException(shardId(),
                    "failed to fetch index version after copying it over", e);
        }
        recoveryState.getIndex().updateVersion(version);

        // since we recover from local, just fill in the file names and sizes
        try {
            final RecoveryState.Index index = recoveryState.getIndex();
            if (si != null) {
                final Directory directory = indexShard.store().directory();
                for (String name : Lucene.files(si)) {
                    long length = directory.fileLength(name);
                    index.addFileDetail(name, length, true);
                }
            }
        } catch (IOException e) {
            // Statistics are best-effort only — do not fail recovery over a listing error.
            logger.debug("failed to list file details", e);
        }
        if (indexShouldExists == false) {
            // Fresh index: there is no translog to replay.
            recoveryState.getTranslog().totalOperations(0);
            recoveryState.getTranslog().totalOperationsOnStart(0);
        }
        typesToUpdate = indexShard.performTranslogRecovery();

        indexShard.finalizeRecovery();
        // Mapping updates discovered during translog replay must be validated before
        // the shard is declared recovered.
        for (Map.Entry<String, Mapping> entry : typesToUpdate.entrySet()) {
            validateMappingUpdate(entry.getKey(), entry.getValue());
        }
        indexShard.postRecovery("post recovery from gateway");
    } catch (EngineException e) {
        throw new IndexShardGatewayRecoveryException(shardId, "failed to recovery from gateway", e);
    } finally {
        indexShard.store().decRef();
    }
}

From source file:org.elasticsearch.index.gateway.local.LocalIndexShardGateway.java

License:Apache License

@Override
/**
 * Recovers the shard from the local gateway: reads the last committed Lucene
 * segments to determine the index version and translog id, then replays the
 * matching translog file (if one exists) operation by operation.
 *
 * <p>Fix: the original {@code finally} block called {@code fs.close()}
 * unconditionally; if {@code new FileInputStream(...)} itself threw, {@code fs}
 * was still {@code null} and the NPE from {@code close()} masked the real
 * failure. The close is now guarded by a null check.
 *
 * @param indexShouldExists whether index data is expected to already exist on disk
 * @param recoveryStatus    mutable recovery progress/statistics to populate
 * @throws IndexShardGatewayRecoveryException if the index version cannot be
 *         fetched when it should exist, or translog replay fails
 */
public void recover(boolean indexShouldExists, RecoveryStatus recoveryStatus)
        throws IndexShardGatewayRecoveryException {
    recoveryStatus.index().startTime(System.currentTimeMillis());
    recoveryStatus.updateStage(RecoveryStatus.Stage.INDEX);
    long version = -1;    // -1 means "no committed segments found"
    long translogId = -1; // -1 means "no translog to replay"
    try {
        SegmentInfos si = null;
        try {
            si = Lucene.readSegmentInfos(indexShard.store().directory());
        } catch (Throwable e) {
            // Could not read segment infos; capture the directory listing for diagnostics.
            String files = "_unknown_";
            try {
                files = Arrays.toString(indexShard.store().directory().listAll());
            } catch (Throwable e1) {
                files += " (failure=" + ExceptionsHelper.detailedMessage(e1) + ")";
            }
            if (indexShouldExists && indexShard.store().indexStore().persistent()) {
                throw new IndexShardGatewayRecoveryException(shardId(),
                        "shard allocated for local recovery (post api), should exist, but doesn't, current files: "
                                + files,
                        e);
            }
        }
        if (si != null) {
            if (indexShouldExists) {
                version = si.getVersion();
                // Newer commits record the translog id in the commit user data;
                // older ones used the segments version as the translog id.
                if (si.getUserData().containsKey(Translog.TRANSLOG_ID_KEY)) {
                    translogId = Long.parseLong(si.getUserData().get(Translog.TRANSLOG_ID_KEY));
                } else {
                    translogId = version;
                }
                logger.trace("using existing shard data, translog id [{}]", translogId);
            } else {
                // it exists in the directory, but shouldn't exist on the FS: it's a leftover (possibly dangling).
                // This is a "new index create" API call, so it is safer to clean it than to reuse stale data.
                logger.trace("cleaning existing shard, shouldn't exists");
                // Opening an IndexWriter with OpenMode.CREATE wipes the existing index contents.
                IndexWriter writer = new IndexWriter(indexShard.store().directory(),
                        new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER)
                                .setOpenMode(IndexWriterConfig.OpenMode.CREATE));
                writer.close();
            }
        }
    } catch (Throwable e) {
        throw new IndexShardGatewayRecoveryException(shardId(),
                "failed to fetch index version after copying it over", e);
    }
    recoveryStatus.index().updateVersion(version);
    recoveryStatus.index().time(System.currentTimeMillis() - recoveryStatus.index().startTime());

    // since we recover from local, just fill in the file counts and sizes (best effort)
    try {
        int numberOfFiles = 0;
        long totalSizeInBytes = 0;
        for (String name : indexShard.store().directory().listAll()) {
            numberOfFiles++;
            totalSizeInBytes += indexShard.store().directory().fileLength(name);
        }
        recoveryStatus.index().files(numberOfFiles, totalSizeInBytes, numberOfFiles, totalSizeInBytes);
    } catch (Exception e) {
        // statistics only — ignore
    }

    recoveryStatus.start().startTime(System.currentTimeMillis());
    recoveryStatus.updateStage(RecoveryStatus.Stage.START);
    if (translogId == -1) {
        // no translog files: just start the shard and bail
        indexShard.postRecovery("post recovery from gateway, no translog");
        recoveryStatus.start().time(System.currentTimeMillis() - recoveryStatus.start().startTime());
        recoveryStatus.start().checkIndexTime(indexShard.checkIndexTook());
        return;
    }

    // Move an existing translog, if one exists, to the "recovering" state and start reading from it.
    // A pre-existing ".recovering" file means a previous recovery attempt was interrupted — reuse it.
    FsTranslog translog = (FsTranslog) indexShard.translog();
    String translogName = "translog-" + translogId;
    String recoverTranslogName = translogName + ".recovering";

    File recoveringTranslogFile = null;
    for (File translogLocation : translog.locations()) {
        File tmpRecoveringFile = new File(translogLocation, recoverTranslogName);
        if (!tmpRecoveringFile.exists()) {
            File tmpTranslogFile = new File(translogLocation, translogName);
            if (tmpTranslogFile.exists()) {
                // renameTo can fail transiently (e.g. on Windows); retry a few times.
                for (int i = 0; i < 3; i++) {
                    if (tmpTranslogFile.renameTo(tmpRecoveringFile)) {
                        recoveringTranslogFile = tmpRecoveringFile;
                        break;
                    }
                }
            }
        } else {
            recoveringTranslogFile = tmpRecoveringFile;
            break;
        }
    }

    if (recoveringTranslogFile == null || !recoveringTranslogFile.exists()) {
        // no translog to recover from: just start the shard and bail
        indexShard.postRecovery("post recovery from gateway, no translog");
        recoveryStatus.start().time(System.currentTimeMillis() - recoveryStatus.start().startTime());
        recoveryStatus.start().checkIndexTime(indexShard.checkIndexTook());
        return;
    }

    // recover from the translog file
    indexShard.performRecoveryPrepareForTranslog();
    recoveryStatus.start().time(System.currentTimeMillis() - recoveryStatus.start().startTime());
    recoveryStatus.start().checkIndexTime(indexShard.checkIndexTook());

    recoveryStatus.translog().startTime(System.currentTimeMillis());
    recoveryStatus.updateStage(RecoveryStatus.Stage.TRANSLOG);
    FileInputStream fs = null;
    try {
        fs = new FileInputStream(recoveringTranslogFile);
        InputStreamStreamInput si = new InputStreamStreamInput(fs);
        while (true) {
            Translog.Operation operation;
            try {
                // The size header must be consumed to advance the stream to the operation payload.
                int opSize = si.readInt();
                operation = TranslogStreams.readTranslogOperation(si);
            } catch (EOFException e) {
                // the last operation was not fully written — stop replaying
                break;
            } catch (IOException e) {
                // the last operation was not properly written — stop replaying
                break;
            }
            try {
                indexShard.performRecoveryOperation(operation);
                recoveryStatus.translog().addTranslogOperations(1);
            } catch (ElasticsearchException e) {
                if (e.status() == RestStatus.BAD_REQUEST) {
                    // mainly for MapperParsingException and failure to detect xcontent:
                    // a corrupt entry is skipped rather than aborting the whole recovery
                    logger.info("ignoring recovery of a corrupt translog entry", e);
                } else {
                    throw e;
                }
            }
        }
    } catch (Throwable e) {
        // we failed to recover: delete the live translog (keeping the recovering one for forensics)
        indexShard.translog().closeWithDelete();
        throw new IndexShardGatewayRecoveryException(shardId, "failed to recover shard", e);
    } finally {
        // fs is null when the FileInputStream constructor threw; guard so we
        // don't replace the original exception with an NPE.
        if (fs != null) {
            try {
                fs.close();
            } catch (IOException e) {
                // ignore
            }
        }
    }
    indexShard.performRecoveryFinalization(true);

    recoveringTranslogFile.delete();

    recoveryStatus.translog().time(System.currentTimeMillis() - recoveryStatus.translog().startTime());
}

From source file:org.elasticsearch.index.shard.StoreRecovery.java

License:Apache License

/**
 * Recovers the state of the shard from its local store: reads the last
 * committed Lucene segments (unless this is an empty-store recovery), records
 * recovery statistics, then replays the translog (or skips it for a
 * local-shards recovery source) and finalizes recovery.
 *
 * @param indexShard the shard to recover; its {@code recoveryState()} supplies
 *                   the recovery source and collects progress
 * @throws IndexShardRecoveryException if the index version cannot be fetched
 *         when data should exist, or the engine/store fails during recovery
 */
private void internalRecoverFromStore(IndexShard indexShard) throws IndexShardRecoveryException {
    final RecoveryState recoveryState = indexShard.recoveryState();
    // An EMPTY_STORE source means we are creating a brand-new shard; any
    // pre-existing data on disk is a leftover to be cleaned.
    final boolean indexShouldExists = recoveryState.getRecoverySource()
            .getType() != RecoverySource.Type.EMPTY_STORE;
    indexShard.prepareForIndexRecovery();
    long version = -1; // -1 means "no committed segments found"
    SegmentInfos si = null;
    final Store store = indexShard.store();
    // Pin the store for the duration of recovery; released in the finally block.
    store.incRef();
    try {
        try {
            store.failIfCorrupted();
            try {
                si = store.readLastCommittedSegmentsInfo();
            } catch (Exception e) {
                // Could not read segment infos; capture the directory listing for diagnostics.
                String files = "_unknown_";
                try {
                    files = Arrays.toString(store.directory().listAll());
                } catch (Exception inner) {
                    // Keep the original failure attached so neither exception is lost.
                    inner.addSuppressed(e);
                    files += " (failure=" + ExceptionsHelper.detailedMessage(inner) + ")";
                }
                if (indexShouldExists) {
                    throw new IndexShardRecoveryException(shardId,
                            "shard allocated for local recovery (post api), should exist, but doesn't, current files: "
                                    + files,
                            e);
                }
            }
            if (si != null) {
                if (indexShouldExists) {
                    version = si.getVersion();
                } else {
                    // it exists in the directory, but shouldn't exist on the FS: it's a leftover (possibly dangling).
                    // This is a "new index create" API call, so it is safer to clean it than to reuse stale data.
                    logger.trace("cleaning existing shard, shouldn't exists");
                    Lucene.cleanLuceneIndex(store.directory());
                    si = null; // segments were wiped; do not report their files below
                }
            }
        } catch (Exception e) {
            throw new IndexShardRecoveryException(shardId,
                    "failed to fetch index version after copying it over", e);
        }
        recoveryState.getIndex().updateVersion(version);
        if (recoveryState.getRecoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS) {
            // Local-shards recovery (shrink/split) copies data directly; there is no translog to replay.
            assert indexShouldExists;
            indexShard.skipTranslogRecovery();
        } else {
            // since we recover from local, just fill in the file names and sizes (best effort)
            try {
                final RecoveryState.Index index = recoveryState.getIndex();
                if (si != null) {
                    addRecoveredFileDetails(si, store, index);
                }
            } catch (IOException e) {
                logger.debug("failed to list file details", e);
            }
            indexShard.performTranslogRecovery(indexShouldExists);
        }
        indexShard.finalizeRecovery();
        indexShard.postRecovery("post recovery from shard_store");
    } catch (EngineException | IOException e) {
        throw new IndexShardRecoveryException(shardId, "failed to recovery from gateway", e);
    } finally {
        store.decRef();
    }
}

From source file:org.elasticsearch.index.shard.StoreRecoveryService.java

License:Apache License

/**
 * Recovers the state of the shard from its local store.
 *
 * <p>NOTE(review): this variant deliberately diverges from the upstream
 * implementation — the commented-out blocks below are the original behavior.
 * Here, a failure to read the segments never aborts recovery; instead the
 * {@code indexShouldExists} parameter is forced to {@code false} and recovery
 * proceeds as if the index were new. Confirm this relaxation is intentional
 * before "fixing" it back.
 *
 * @param indexShard        the shard to recover
 * @param indexShouldExists whether index data is expected to already exist on
 *                          disk; may be overridden to {@code false} if the
 *                          segments cannot be read (see note above)
 * @param recoveryState     mutable recovery progress/statistics to populate
 * @throws IndexShardRecoveryException if fetching the version or the engine
 *         fails during recovery
 */
private void recoverFromStore(IndexShard indexShard, boolean indexShouldExists, RecoveryState recoveryState)
        throws IndexShardRecoveryException {
    indexShard.prepareForIndexRecovery();
    long version = -1; // -1 means "no committed segments found"
    final Map<String, Mapping> typesToUpdate;
    SegmentInfos si = null;
    final Store store = indexShard.store();
    // Pin the store for the duration of recovery; released in the finally block.
    store.incRef();
    try {
        try {
            store.failIfCorrupted();
            try {
                si = store.readLastCommittedSegmentsInfo();
            } catch (Throwable e) {
                // Could not read segment infos; capture the directory listing for diagnostics.
                String files = "_unknown_";
                try {
                    files = Arrays.toString(store.directory().listAll());
                } catch (Throwable e1) {
                    files += " (failure=" + ExceptionsHelper.detailedMessage(e1) + ")";
                }
                // Upstream behavior (disabled here — see class note above):
                /*
                if (indexShouldExists) {
                throw new IndexShardRecoveryException(shardId(), "shard allocated for local recovery (post api), should exist, but doesn't, current files: " + files, e);
                }
                */
                // Treat an unreadable index as "no index": downgrade instead of failing.
                indexShouldExists = false;
            }
            if (si != null) {
                // Unlike upstream, the version is always taken from the segments, and
                // leftover data is never wiped.
                version = si.getVersion();
                // Upstream behavior (disabled here — see class note above):
                /*
                if (indexShouldExists) {
                version = si.getVersion();
                } else {
                // it exists on the directory, but shouldn't exist on the FS, its a leftover (possibly dangling)
                // its a "new index create" API, we have to do something, so better to clean it than use same data
                logger.trace("cleaning existing shard, shouldn't exists");
                IndexWriter writer = new IndexWriter(store.directory(), new IndexWriterConfig(Lucene.STANDARD_ANALYZER).setOpenMode(IndexWriterConfig.OpenMode.CREATE));
                writer.close();
                recoveryState.getTranslog().totalOperations(0);
                }
                */
            }
        } catch (Throwable e) {
            throw new IndexShardRecoveryException(shardId(),
                    "failed to fetch index version after copying it over", e);
        }
        recoveryState.getIndex().updateVersion(version);

        // since we recover from local, just fill in the file names and sizes (best effort)
        try {
            final RecoveryState.Index index = recoveryState.getIndex();
            if (si != null) {
                final Directory directory = store.directory();
                for (String name : Lucene.files(si)) {
                    long length = directory.fileLength(name);
                    index.addFileDetail(name, length, true);
                }
            }
        } catch (IOException e) {
            logger.debug("failed to list file details", e);
        }
        if (indexShouldExists == false) {
            // Fresh (or unreadable — see above) index: there is no translog to replay.
            recoveryState.getTranslog().totalOperations(0);
            recoveryState.getTranslog().totalOperationsOnStart(0);
        }
        typesToUpdate = indexShard.performTranslogRecovery(indexShouldExists);

        indexShard.finalizeRecovery();
        String indexName = indexShard.shardId().index().name();
        // Mapping updates discovered during translog replay must be validated before
        // the shard is declared recovered.
        for (Map.Entry<String, Mapping> entry : typesToUpdate.entrySet()) {
            validateMappingUpdate(indexName, entry.getKey(), entry.getValue());
        }
        indexShard.postRecovery("post recovery from shard_store");
    } catch (EngineException e) {
        throw new IndexShardRecoveryException(shardId, "failed to recovery from gateway", e);
    } finally {
        store.decRef();
    }
}