Example usage for org.apache.lucene.store IndexInput readBytes

List of usage examples for org.apache.lucene.store IndexInput readBytes

Introduction

On this page you can find example usage of org.apache.lucene.store IndexInput readBytes.

Prototype

public void readBytes(byte[] b, int offset, int len, boolean useBuffer) throws IOException 

Source Link

Document

Reads a specified number of bytes into an array at the specified offset with control over whether the read should be buffered (callers who have their own buffer should pass in "false" for useBuffer).

Usage

From source file:com.browseengine.bobo.geosearch.solo.impl.IDGeoRecordSerializer.java

License:Apache License

/**
 * Deserializes a single {@code IDGeoRecord} from the given input.
 *
 * <p>The on-disk layout read here is: an 8-byte high-order word, a 4-byte
 * low-order word, then the id payload, whose length is the record size minus
 * the interlaced words ({@code INTERLACE_BYTES}).
 *
 * @param input           the Lucene input positioned at the start of a record
 * @param recordByteCount total size of one serialized record in bytes
 * @return the reconstructed {@code IDGeoRecord}
 * @throws IOException if the underlying input fails
 */
@Override
public IDGeoRecord readGeoRecord(IndexInput input, int recordByteCount) throws IOException {
    final long hi = input.readLong();
    final int lo = input.readInt();
    // everything after the interlaced high/low words is the id payload
    final byte[] idBytes = new byte[recordByteCount - INTERLACE_BYTES];
    input.readBytes(idBytes, 0, idBytes.length, false);
    return new IDGeoRecord(hi, lo, idBytes);
}

From source file:org.elasticsearch.common.compress.lzf.LZFCompressedIndexInput.java

License:Apache License

/**
 * Reads the fixed-size LZF header from the input and validates it against
 * {@code LZFCompressor.LUCENE_HEADER}.
 *
 * @param in the input positioned at the start of the compressed stream
 * @throws IOException if the bytes read do not match the expected header
 */
@Override
protected void readHeader(IndexInput in) throws IOException {
    final byte[] actual = new byte[LZFCompressor.LUCENE_HEADER.length];
    in.readBytes(actual, 0, actual.length, false);
    final boolean matches = Arrays.equals(actual, LZFCompressor.LUCENE_HEADER);
    if (!matches) {
        throw new IOException("wrong lzf compressed header [" + Arrays.toString(actual) + "]");
    }
}

From source file:org.elasticsearch.index.shard.recovery.RecoverySource.java

License:Apache License

/**
 * Runs the source-node side of a shard recovery for the given request's target node.
 *
 * <p>Recovery is driven through {@code Engine.RecoveryHandler} in three phases:
 * phase1 streams index files, phase2 replays translog operations, and phase3
 * replays the remaining translog, finalizes, and optionally marks the shard
 * as relocated.
 *
 * @param request identifies the shard being recovered and the target node
 * @return the {@code RecoveryResponse} populated with per-phase statistics
 */
private RecoveryResponse recover(final StartRecoveryRequest request) {
    final InternalIndexShard shard = (InternalIndexShard) indicesService
            .indexServiceSafe(request.shardId().index().name()).shardSafe(request.shardId().id());
    logger.trace("[{}][{}] starting recovery to {}, mark_as_relocated {}", request.shardId().index().name(),
            request.shardId().id(), request.targetNode(), request.markAsRelocated());
    final RecoveryResponse response = new RecoveryResponse();
    shard.recover(new Engine.RecoveryHandler() {
        // Phase 1: decide which index files the target actually needs, then
        // stream them over in chunks, concurrently, one task per file.
        @Override
        public void phase1(final SnapshotIndexCommit snapshot) throws ElasticSearchException {
            long totalSize = 0;
            long existingTotalSize = 0;
            try {
                StopWatch stopWatch = new StopWatch().start();

                // Classify every file of the snapshot: reusable on the target
                // (same metadata/checksum) vs. must be recovered.
                for (String name : snapshot.getFiles()) {
                    StoreFileMetaData md = shard.store().metaData(name);
                    boolean useExisting = false;
                    if (request.existingFiles().containsKey(name)) {
                        // we don't compute checksum for segments, so always recover them
                        if (!name.startsWith("segments") && md.isSame(request.existingFiles().get(name))) {
                            response.phase1ExistingFileNames.add(name);
                            response.phase1ExistingFileSizes.add(md.length());
                            existingTotalSize += md.length();
                            useExisting = true;
                            if (logger.isTraceEnabled()) {
                                logger.trace(
                                        "[{}][{}] recovery [phase1] to {}: not recovering [{}], exists in local store and has checksum [{}], size [{}]",
                                        request.shardId().index().name(), request.shardId().id(),
                                        request.targetNode(), name, md.checksum(), md.length());
                            }
                        }
                    }
                    if (!useExisting) {
                        if (request.existingFiles().containsKey(name)) {
                            logger.trace(
                                    "[{}][{}] recovery [phase1] to {}: recovering [{}], exists in local store, but is different: remote [{}], local [{}]",
                                    request.shardId().index().name(), request.shardId().id(),
                                    request.targetNode(), name, request.existingFiles().get(name), md);
                        } else {
                            logger.trace(
                                    "[{}][{}] recovery [phase1] to {}: recovering [{}], does not exists in remote",
                                    request.shardId().index().name(), request.shardId().id(),
                                    request.targetNode(), name);
                        }
                        response.phase1FileNames.add(name);
                        response.phase1FileSizes.add(md.length());
                    }
                    totalSize += md.length();
                }
                response.phase1TotalSize = totalSize;
                response.phase1ExistingTotalSize = existingTotalSize;

                logger.trace(
                        "[{}][{}] recovery [phase1] to {}: recovering_files [{}] with total_size [{}], reusing_files [{}] with total_size [{}]",
                        request.shardId().index().name(), request.shardId().id(), request.targetNode(),
                        response.phase1FileNames.size(), new ByteSizeValue(totalSize),
                        response.phase1ExistingFileNames.size(), new ByteSizeValue(existingTotalSize));

                // Tell the target what is coming before any file data is sent.
                RecoveryFilesInfoRequest recoveryInfoFilesRequest = new RecoveryFilesInfoRequest(
                        request.shardId(), response.phase1FileNames, response.phase1FileSizes,
                        response.phase1ExistingFileNames, response.phase1ExistingFileSizes,
                        response.phase1TotalSize, response.phase1ExistingTotalSize);
                transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FILES_INFO,
                        recoveryInfoFilesRequest, VoidTransportResponseHandler.INSTANCE_SAME).txGet();

                // One pooled task per file; the latch waits for all of them.
                // Only the last failure is kept and rethrown after the latch.
                final CountDownLatch latch = new CountDownLatch(response.phase1FileNames.size());
                final AtomicReference<Exception> lastException = new AtomicReference<Exception>();
                for (final String name : response.phase1FileNames) {
                    concurrentStreamPool.execute(new Runnable() {
                        @Override
                        public void run() {
                            IndexInput indexInput = null;
                            try {
                                final int BUFFER_SIZE = (int) fileChunkSize.bytes();
                                byte[] buf = new byte[BUFFER_SIZE];
                                StoreFileMetaData md = shard.store().metaData(name);
                                indexInput = snapshot.getDirectory().openInput(name);
                                long len = indexInput.length();
                                long readCount = 0;
                                // Chunked copy: read up to BUFFER_SIZE bytes and
                                // send each chunk synchronously (txGet blocks),
                                // so buf can safely be reused between iterations.
                                while (readCount < len) {
                                    if (shard.state() == IndexShardState.CLOSED) { // check if the shard got closed on us
                                        throw new IndexShardClosedException(shard.shardId());
                                    }
                                    int toRead = readCount + BUFFER_SIZE > len ? (int) (len - readCount)
                                            : BUFFER_SIZE;
                                    long position = indexInput.getFilePointer();
                                    indexInput.readBytes(buf, 0, toRead, false);
                                    transportService.submitRequest(request.targetNode(),
                                            RecoveryTarget.Actions.FILE_CHUNK,
                                            new RecoveryFileChunkRequest(request.shardId(), name, position, len,
                                                    md.checksum(), buf, toRead),
                                            TransportRequestOptions.options().withCompress(compress)
                                                    .withLowType(),
                                            VoidTransportResponseHandler.INSTANCE_SAME).txGet();
                                    readCount += toRead;
                                }
                                // NOTE(review): indexInput is closed again in the
                                // finally block below; this early close is redundant
                                // but harmless.
                                indexInput.close();
                            } catch (Exception e) {
                                lastException.set(e);
                            } finally {
                                if (indexInput != null) {
                                    try {
                                        indexInput.close();
                                    } catch (IOException e) {
                                        // ignore
                                    }
                                }
                                latch.countDown();
                            }
                        }
                    });
                }

                latch.await();

                // Rethrow the last recorded failure; the catch (Throwable)
                // below wraps it into a RecoverFilesRecoveryException.
                if (lastException.get() != null) {
                    throw lastException.get();
                }

                // now, set the clean files request
                Set<String> snapshotFiles = Sets.newHashSet(snapshot.getFiles());
                transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.CLEAN_FILES,
                        new RecoveryCleanFilesRequest(shard.shardId(), snapshotFiles),
                        VoidTransportResponseHandler.INSTANCE_SAME).txGet();

                stopWatch.stop();
                logger.trace("[{}][{}] recovery [phase1] to {}: took [{}]", request.shardId().index().name(),
                        request.shardId().id(), request.targetNode(), stopWatch.totalTime());
                response.phase1Time = stopWatch.totalTime().millis();
            } catch (Throwable e) {
                throw new RecoverFilesRecoveryException(request.shardId(), response.phase1FileNames.size(),
                        new ByteSizeValue(totalSize), e);
            }
        }

        // Phase 2: ask the target to prepare its translog, then stream the
        // current translog snapshot as batched operation requests.
        @Override
        public void phase2(Translog.Snapshot snapshot) throws ElasticSearchException {
            if (shard.state() == IndexShardState.CLOSED) {
                throw new IndexShardClosedException(request.shardId());
            }
            logger.trace("[{}][{}] recovery [phase2] to {}: sending transaction log operations",
                    request.shardId().index().name(), request.shardId().id(), request.targetNode());
            StopWatch stopWatch = new StopWatch().start();

            transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.PREPARE_TRANSLOG,
                    new RecoveryPrepareForTranslogOperationsRequest(request.shardId()),
                    VoidTransportResponseHandler.INSTANCE_SAME).txGet();

            int totalOperations = sendSnapshot(snapshot);

            stopWatch.stop();
            logger.trace("[{}][{}] recovery [phase2] to {}: took [{}]", request.shardId().index().name(),
                    request.shardId().id(), request.targetNode(), stopWatch.totalTime());
            response.phase2Time = stopWatch.totalTime().millis();
            response.phase2Operations = totalOperations;
        }

        // Phase 3: stream the remaining translog operations, finalize the
        // recovery on the target, and optionally mark this shard as relocated.
        @Override
        public void phase3(Translog.Snapshot snapshot) throws ElasticSearchException {
            if (shard.state() == IndexShardState.CLOSED) {
                throw new IndexShardClosedException(request.shardId());
            }
            logger.trace("[{}][{}] recovery [phase3] to {}: sending transaction log operations",
                    request.shardId().index().name(), request.shardId().id(), request.targetNode());
            StopWatch stopWatch = new StopWatch().start();
            int totalOperations = sendSnapshot(snapshot);
            transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FINALIZE,
                    new RecoveryFinalizeRecoveryRequest(request.shardId()),
                    VoidTransportResponseHandler.INSTANCE_SAME).txGet();
            if (request.markAsRelocated()) {
                // TODO what happens if the recovery process fails afterwards, we need to mark this back to started
                try {
                    shard.relocated("to " + request.targetNode());
                } catch (IllegalIndexShardStateException e) {
                    // we can ignore this exception since, on the other node, when it moved to phase3
                    // it will also send shard started, which might cause the index shard we work against
                    // to move be closed by the time we get to the the relocated method
                }
            }
            stopWatch.stop();
            logger.trace("[{}][{}] recovery [phase3] to {}: took [{}]", request.shardId().index().name(),
                    request.shardId().id(), request.targetNode(), stopWatch.totalTime());
            response.phase3Time = stopWatch.totalTime().millis();
            response.phase3Operations = totalOperations;
        }

        /**
         * Streams the translog snapshot to the target in batches bounded by
         * {@code translogOps} (operation count) and {@code translogSize}
         * (estimated byte size), flushing the leftover batch at the end.
         *
         * @return the total number of operations sent
         */
        private int sendSnapshot(Translog.Snapshot snapshot) throws ElasticSearchException {
            int ops = 0;
            long size = 0;
            int totalOperations = 0;
            List<Translog.Operation> operations = Lists.newArrayList();
            while (snapshot.hasNext()) {
                if (shard.state() == IndexShardState.CLOSED) {
                    throw new IndexShardClosedException(request.shardId());
                }
                Translog.Operation operation = snapshot.next();
                operations.add(operation);
                ops += 1;
                size += operation.estimateSize();
                totalOperations++;
                if (ops >= translogOps || size >= translogSize.bytes()) {
                    RecoveryTranslogOperationsRequest translogOperationsRequest = new RecoveryTranslogOperationsRequest(
                            request.shardId(), operations);
                    transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.TRANSLOG_OPS,
                            translogOperationsRequest,
                            TransportRequestOptions.options().withCompress(compress).withLowType(),
                            VoidTransportResponseHandler.INSTANCE_SAME).txGet();
                    ops = 0;
                    size = 0;
                    operations.clear();
                }
            }
            // send the leftover
            if (!operations.isEmpty()) {
                RecoveryTranslogOperationsRequest translogOperationsRequest = new RecoveryTranslogOperationsRequest(
                        request.shardId(), operations);
                transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.TRANSLOG_OPS,
                        translogOperationsRequest,
                        TransportRequestOptions.options().withCompress(compress).withLowType(),
                        VoidTransportResponseHandler.INSTANCE_SAME).txGet();
            }
            return totalOperations;
        }
    });
    return response;
}

From source file:org.elasticsearch.indices.recovery.BlobRecoverySource.java

License:Apache License

/**
 * Runs the source-node side of a shard recovery for the given request's target
 * node, with additional handling for blob indices (a dedicated
 * {@code BlobRecoveryHandler} whose own phase1 runs before the file copy).
 *
 * <p>Before recovery starts, the local cluster state is checked: the target
 * node must be known and the target shard must be listed there in the
 * {@code initializing} state, otherwise a {@code DelayRecoveryException}
 * asks the caller to retry later.
 *
 * @param request identifies the shard being recovered and the target node
 * @return the {@code RecoveryResponse} populated with per-phase statistics
 */
private RecoveryResponse recover(final StartRecoveryRequest request) {
    final InternalIndexShard shard = (InternalIndexShard) indicesService
            .indexServiceSafe(request.shardId().index().name()).shardSafe(request.shardId().id());

    // verify that our (the source) shard state is marking the shard to be in recovery mode as well, otherwise
    // the index operations will not be routed to it properly
    RoutingNode node = clusterService.state().readOnlyRoutingNodes().node(request.targetNode().id());
    if (node == null) {
        throw new DelayRecoveryException(
                "source node does not have the node [" + request.targetNode() + "] in its state yet..");
    }
    // Find the routing entry for the target shard on the target node.
    ShardRouting targetShardRouting = null;
    for (ShardRouting shardRouting : node) {
        if (shardRouting.shardId().equals(request.shardId())) {
            targetShardRouting = shardRouting;
            break;
        }
    }
    if (targetShardRouting == null) {
        throw new DelayRecoveryException(
                "source node does not have the shard listed in its state as allocated on the node");
    }
    if (!targetShardRouting.initializing()) {
        throw new DelayRecoveryException("source node has the state of the target shard to be ["
                + targetShardRouting.state() + "], expecting to be [initializing]");
    }

    logger.trace("[{}][{}] starting recovery to {}, mark_as_relocated {}", request.shardId().index().name(),
            request.shardId().id(), request.targetNode(), request.markAsRelocated());
    final RecoveryResponse response = new RecoveryResponse();

    final BlobRecoveryHandler blobRecoveryHandler;

    // Blob indices get a dedicated handler; its phase1() is invoked first
    // inside phase1 below. For regular indices it stays null.
    if (BlobIndices.isBlobIndex(shard.shardId().getIndex())) {
        blobRecoveryHandler = new BlobRecoveryHandler(transportService, recoverySettings, blobTransferTarget,
                blobIndices, shard, request);
    } else {
        blobRecoveryHandler = null;
    }

    shard.recover(new Engine.RecoveryHandler() {
        // Phase 1: decide which index files the target actually needs, then
        // stream them over in chunks, concurrently, one task per file.
        @Override
        public void phase1(final SnapshotIndexCommit snapshot) throws ElasticsearchException {
            long totalSize = 0;
            long existingTotalSize = 0;
            try {
                if (blobRecoveryHandler != null) {
                    blobRecoveryHandler.phase1();
                }
                StopWatch stopWatch = new StopWatch().start();

                // Classify every file of the snapshot: reusable on the target
                // (same metadata/checksum) vs. must be recovered.
                for (String name : snapshot.getFiles()) {
                    StoreFileMetaData md = shard.store().getMetadata().get(name);
                    boolean useExisting = false;
                    if (request.existingFiles().containsKey(name)) {
                        // we don't compute checksum for segments, so always recover them
                        if (!name.startsWith("segments") && md.isSame(request.existingFiles().get(name))) {
                            response.phase1ExistingFileNames.add(name);
                            response.phase1ExistingFileSizes.add(md.length());
                            existingTotalSize += md.length();
                            useExisting = true;
                            if (logger.isTraceEnabled()) {
                                logger.trace(
                                        "[{}][{}] recovery [phase1] to {}: not recovering [{}], exists in local store and has checksum [{}], size [{}]",
                                        request.shardId().index().name(), request.shardId().id(),
                                        request.targetNode(), name, md.checksum(), md.length());
                            }
                        }
                    }
                    if (!useExisting) {
                        if (request.existingFiles().containsKey(name)) {
                            logger.trace(
                                    "[{}][{}] recovery [phase1] to {}: recovering [{}], exists in local store, but is different: remote [{}], local [{}]",
                                    request.shardId().index().name(), request.shardId().id(),
                                    request.targetNode(), name, request.existingFiles().get(name), md);
                        } else {
                            logger.trace(
                                    "[{}][{}] recovery [phase1] to {}: recovering [{}], does not exists in remote",
                                    request.shardId().index().name(), request.shardId().id(),
                                    request.targetNode(), name);
                        }
                        response.phase1FileNames.add(name);
                        response.phase1FileSizes.add(md.length());
                    }
                    totalSize += md.length();
                }
                response.phase1TotalSize = totalSize;
                response.phase1ExistingTotalSize = existingTotalSize;

                logger.trace(
                        "[{}][{}] recovery [phase1] to {}: recovering_files [{}] with total_size [{}], reusing_files [{}] with total_size [{}]",
                        request.shardId().index().name(), request.shardId().id(), request.targetNode(),
                        response.phase1FileNames.size(), new ByteSizeValue(totalSize),
                        response.phase1ExistingFileNames.size(), new ByteSizeValue(existingTotalSize));

                // Tell the target what is coming before any file data is sent.
                RecoveryFilesInfoRequest recoveryInfoFilesRequest = new RecoveryFilesInfoRequest(
                        request.recoveryId(), request.shardId(), response.phase1FileNames,
                        response.phase1FileSizes, response.phase1ExistingFileNames,
                        response.phase1ExistingFileSizes, response.phase1TotalSize,
                        response.phase1ExistingTotalSize);
                transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FILES_INFO,
                        recoveryInfoFilesRequest,
                        TransportRequestOptions.options().withTimeout(internalActionTimeout),
                        EmptyTransportResponseHandler.INSTANCE_SAME).txGet();

                // One pooled task per file; the latch waits for all of them.
                // Only the last failure is kept and rethrown after the latch.
                final CountDownLatch latch = new CountDownLatch(response.phase1FileNames.size());
                final AtomicReference<Exception> lastException = new AtomicReference<Exception>();
                for (final String name : response.phase1FileNames) {
                    recoverySettings.concurrentStreamPool().execute(new Runnable() {
                        @Override
                        public void run() {
                            IndexInput indexInput = null;
                            try {
                                final int BUFFER_SIZE = (int) recoverySettings.fileChunkSize().bytes();
                                byte[] buf = new byte[BUFFER_SIZE];
                                StoreFileMetaData md = shard.store().getMetadata().get(name);
                                // TODO: maybe use IOContext.READONCE?
                                indexInput = shard.store().directory().openInput(name, IOContext.READ);
                                // Skip request compression for files that are
                                // already compressed on disk.
                                boolean shouldCompressRequest = recoverySettings.compress();
                                if (CompressorFactory.isCompressed(indexInput)) {
                                    shouldCompressRequest = false;
                                }

                                long len = indexInput.length();
                                long readCount = 0;
                                // Chunked copy: read up to BUFFER_SIZE bytes and
                                // send each chunk synchronously (txGet blocks),
                                // so buf can safely be reused between iterations.
                                while (readCount < len) {
                                    if (shard.state() == IndexShardState.CLOSED) { // check if the shard got closed on us
                                        throw new IndexShardClosedException(shard.shardId());
                                    }
                                    int toRead = readCount + BUFFER_SIZE > len ? (int) (len - readCount)
                                            : BUFFER_SIZE;
                                    long position = indexInput.getFilePointer();

                                    // Throttle reads when a rate limiter is configured.
                                    if (recoverySettings.rateLimiter() != null) {
                                        recoverySettings.rateLimiter().pause(toRead);
                                    }

                                    indexInput.readBytes(buf, 0, toRead, false);
                                    BytesArray content = new BytesArray(buf, 0, toRead);
                                    transportService.submitRequest(request.targetNode(),
                                            RecoveryTarget.Actions.FILE_CHUNK,
                                            new RecoveryFileChunkRequest(request.recoveryId(),
                                                    request.shardId(), md, position, content),
                                            TransportRequestOptions.options()
                                                    .withCompress(shouldCompressRequest)
                                                    .withType(TransportRequestOptions.Type.RECOVERY)
                                                    .withTimeout(internalActionTimeout),
                                            EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
                                    readCount += toRead;
                                }
                            } catch (Exception e) {
                                lastException.set(e);
                            } finally {
                                if (indexInput != null) {
                                    try {
                                        indexInput.close();
                                    } catch (IOException e) {
                                        // ignore
                                    }
                                }
                                latch.countDown();
                            }
                        }
                    });
                }

                latch.await();

                // Rethrow the last recorded failure; the catch (Throwable)
                // below wraps it into a RecoverFilesRecoveryException.
                if (lastException.get() != null) {
                    throw lastException.get();
                }

                // now, set the clean files request
                Set<String> snapshotFiles = Sets.newHashSet(snapshot.getFiles());
                transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.CLEAN_FILES,
                        new RecoveryCleanFilesRequest(request.recoveryId(), shard.shardId(), snapshotFiles),
                        TransportRequestOptions.options().withTimeout(internalActionTimeout),
                        EmptyTransportResponseHandler.INSTANCE_SAME).txGet();

                stopWatch.stop();
                logger.trace("[{}][{}] recovery [phase1] to {}: took [{}]", request.shardId().index().name(),
                        request.shardId().id(), request.targetNode(), stopWatch.totalTime());
                response.phase1Time = stopWatch.totalTime().millis();
            } catch (Throwable e) {
                throw new RecoverFilesRecoveryException(request.shardId(), response.phase1FileNames.size(),
                        new ByteSizeValue(totalSize), e);
            }
        }

        // Phase 2: ask the target to prepare its translog (timed separately
        // as startTime), then stream the translog snapshot in batches.
        @Override
        public void phase2(Translog.Snapshot snapshot) throws ElasticsearchException {
            if (shard.state() == IndexShardState.CLOSED) {
                throw new IndexShardClosedException(request.shardId());
            }
            logger.trace("[{}][{}] recovery [phase2] to {}: start", request.shardId().index().name(),
                    request.shardId().id(), request.targetNode());
            StopWatch stopWatch = new StopWatch().start();
            transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.PREPARE_TRANSLOG,
                    new RecoveryPrepareForTranslogOperationsRequest(request.recoveryId(), request.shardId()),
                    TransportRequestOptions.options().withTimeout(internalActionTimeout),
                    EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
            stopWatch.stop();
            response.startTime = stopWatch.totalTime().millis();
            logger.trace("[{}][{}] recovery [phase2] to {}: start took [{}]", request.shardId().index().name(),
                    request.shardId().id(), request.targetNode(), stopWatch.totalTime());

            logger.trace("[{}][{}] recovery [phase2] to {}: sending transaction log operations",
                    request.shardId().index().name(), request.shardId().id(), request.targetNode());
            stopWatch = new StopWatch().start();
            int totalOperations = sendSnapshot(snapshot);
            stopWatch.stop();
            logger.trace("[{}][{}] recovery [phase2] to {}: took [{}]", request.shardId().index().name(),
                    request.shardId().id(), request.targetNode(), stopWatch.totalTime());
            response.phase2Time = stopWatch.totalTime().millis();
            response.phase2Operations = totalOperations;
        }

        // Phase 3: stream the remaining translog operations, finalize the
        // recovery on the target, and optionally mark this shard as relocated.
        @Override
        public void phase3(Translog.Snapshot snapshot) throws ElasticsearchException {
            if (shard.state() == IndexShardState.CLOSED) {
                throw new IndexShardClosedException(request.shardId());
            }
            logger.trace("[{}][{}] recovery [phase3] to {}: sending transaction log operations",
                    request.shardId().index().name(), request.shardId().id(), request.targetNode());
            StopWatch stopWatch = new StopWatch().start();
            int totalOperations = sendSnapshot(snapshot);
            transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FINALIZE,
                    new RecoveryFinalizeRecoveryRequest(request.recoveryId(), request.shardId()),
                    TransportRequestOptions.options().withTimeout(internalActionLongTimeout),
                    EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
            if (request.markAsRelocated()) {
                // TODO what happens if the recovery process fails afterwards, we need to mark this back to started
                try {
                    shard.relocated("to " + request.targetNode());
                } catch (IllegalIndexShardStateException e) {
                    // we can ignore this exception since, on the other node, when it moved to phase3
                    // it will also send shard started, which might cause the index shard we work against
                    // to move be closed by the time we get to the the relocated method
                }
            }
            stopWatch.stop();
            logger.trace("[{}][{}] recovery [phase3] to {}: took [{}]", request.shardId().index().name(),
                    request.shardId().id(), request.targetNode(), stopWatch.totalTime());
            response.phase3Time = stopWatch.totalTime().millis();
            response.phase3Operations = totalOperations;
        }

        /**
         * Streams the translog snapshot to the target in batches bounded by
         * {@code recoverySettings.translogOps()} (operation count) and
         * {@code recoverySettings.translogSize()} (estimated byte size),
         * flushing the leftover batch at the end. Full batches are rate
         * limited when a rate limiter is configured.
         *
         * @return the total number of operations sent
         */
        private int sendSnapshot(Translog.Snapshot snapshot) throws ElasticsearchException {
            int ops = 0;
            long size = 0;
            int totalOperations = 0;
            List<Translog.Operation> operations = Lists.newArrayList();
            while (snapshot.hasNext()) {
                if (shard.state() == IndexShardState.CLOSED) {
                    throw new IndexShardClosedException(request.shardId());
                }
                Translog.Operation operation = snapshot.next();
                operations.add(operation);
                ops += 1;
                size += operation.estimateSize();
                totalOperations++;
                if (ops >= recoverySettings.translogOps() || size >= recoverySettings.translogSize().bytes()) {

                    if (recoverySettings.rateLimiter() != null) {
                        recoverySettings.rateLimiter().pause(size);
                    }

                    RecoveryTranslogOperationsRequest translogOperationsRequest = new RecoveryTranslogOperationsRequest(
                            request.recoveryId(), request.shardId(), operations);
                    transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.TRANSLOG_OPS,
                            translogOperationsRequest,
                            TransportRequestOptions.options().withCompress(recoverySettings.compress())
                                    .withType(TransportRequestOptions.Type.RECOVERY)
                                    .withTimeout(internalActionLongTimeout),
                            EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
                    ops = 0;
                    size = 0;
                    operations.clear();
                }
            }
            // send the leftover
            if (!operations.isEmpty()) {
                RecoveryTranslogOperationsRequest translogOperationsRequest = new RecoveryTranslogOperationsRequest(
                        request.recoveryId(), request.shardId(), operations);
                transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.TRANSLOG_OPS,
                        translogOperationsRequest,
                        TransportRequestOptions.options().withCompress(recoverySettings.compress())
                                .withType(TransportRequestOptions.Type.RECOVERY)
                                .withTimeout(internalActionLongTimeout),
                        EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
            }
            return totalOperations;
        }
    });
    return response;
}

From source file:org.elasticsearch.indices.recovery.RecoverySource.java

License:Apache License

/**
 * Source-side entry point for peer recovery: streams this node's copy of the shard named in
 * {@code request} to the target node, in three phases (index files, translog, finalize), and
 * returns per-phase timing and size statistics in the {@link RecoveryResponse}.
 *
 * Before starting, it checks the local cluster state: the target node must be known and the
 * target's copy of the shard must be listed as [initializing]; otherwise a
 * {@code DelayRecoveryException} is thrown so the caller can retry once the state converges.
 */
private RecoveryResponse recover(final StartRecoveryRequest request) {
    final InternalIndexShard shard = (InternalIndexShard) indicesService
            .indexServiceSafe(request.shardId().index().name()).shardSafe(request.shardId().id());

    // verify that our (the source) shard state is marking the shard to be in recovery mode as well, otherwise
    // the index operations will not be routed to it properly
    RoutingNode node = clusterService.state().readOnlyRoutingNodes().node(request.targetNode().id());
    if (node == null) {
        throw new DelayRecoveryException(
                "source node does not have the node [" + request.targetNode() + "] in its state yet..");
    }
    // find the target's copy of this shard in the routing entries for the target node
    ShardRouting targetShardRouting = null;
    for (ShardRouting shardRouting : node) {
        if (shardRouting.shardId().equals(request.shardId())) {
            targetShardRouting = shardRouting;
            break;
        }
    }
    if (targetShardRouting == null) {
        throw new DelayRecoveryException(
                "source node does not have the shard listed in its state as allocated on the node");
    }
    if (!targetShardRouting.initializing()) {
        throw new DelayRecoveryException("source node has the state of the target shard to be ["
                + targetShardRouting.state() + "], expecting to be [initializing]");
    }

    logger.trace("[{}][{}] starting recovery to {}, mark_as_relocated {}", request.shardId().index().name(),
            request.shardId().id(), request.targetNode(), request.markAsRelocated());
    final RecoveryResponse response = new RecoveryResponse();
    shard.recover(new Engine.RecoveryHandler() {
        // Phase 1: copy the Lucene files captured by the snapshot commit to the target,
        // skipping files whose metadata says the target already holds an identical copy.
        @Override
        public void phase1(final SnapshotIndexCommit snapshot) throws ElasticsearchException {
            long totalSize = 0;
            long existingTotalSize = 0;
            try {
                StopWatch stopWatch = new StopWatch().start();

                // decide per file whether the target's existing copy can be reused
                for (String name : snapshot.getFiles()) {
                    StoreFileMetaData md = shard.store().metaData(name);
                    boolean useExisting = false;
                    if (request.existingFiles().containsKey(name)) {
                        // we don't compute checksum for segments, so always recover them
                        if (!name.startsWith("segments") && md.isSame(request.existingFiles().get(name))) {
                            response.phase1ExistingFileNames.add(name);
                            response.phase1ExistingFileSizes.add(md.length());
                            existingTotalSize += md.length();
                            useExisting = true;
                            if (logger.isTraceEnabled()) {
                                logger.trace(
                                        "[{}][{}] recovery [phase1] to {}: not recovering [{}], exists in local store and has checksum [{}], size [{}]",
                                        request.shardId().index().name(), request.shardId().id(),
                                        request.targetNode(), name, md.checksum(), md.length());
                            }
                        }
                    }
                    if (!useExisting) {
                        if (request.existingFiles().containsKey(name)) {
                            logger.trace(
                                    "[{}][{}] recovery [phase1] to {}: recovering [{}], exists in local store, but is different: remote [{}], local [{}]",
                                    request.shardId().index().name(), request.shardId().id(),
                                    request.targetNode(), name, request.existingFiles().get(name), md);
                        } else {
                            logger.trace(
                                    "[{}][{}] recovery [phase1] to {}: recovering [{}], does not exists in remote",
                                    request.shardId().index().name(), request.shardId().id(),
                                    request.targetNode(), name);
                        }
                        response.phase1FileNames.add(name);
                        response.phase1FileSizes.add(md.length());
                    }
                    totalSize += md.length();
                }
                response.phase1TotalSize = totalSize;
                response.phase1ExistingTotalSize = existingTotalSize;

                logger.trace(
                        "[{}][{}] recovery [phase1] to {}: recovering_files [{}] with total_size [{}], reusing_files [{}] with total_size [{}]",
                        request.shardId().index().name(), request.shardId().id(), request.targetNode(),
                        response.phase1FileNames.size(), new ByteSizeValue(totalSize),
                        response.phase1ExistingFileNames.size(), new ByteSizeValue(existingTotalSize));

                // tell the target up front which files (and sizes) it should expect
                RecoveryFilesInfoRequest recoveryInfoFilesRequest = new RecoveryFilesInfoRequest(
                        request.recoveryId(), request.shardId(), response.phase1FileNames,
                        response.phase1FileSizes, response.phase1ExistingFileNames,
                        response.phase1ExistingFileSizes, response.phase1TotalSize,
                        response.phase1ExistingTotalSize);
                transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FILES_INFO,
                        recoveryInfoFilesRequest,
                        TransportRequestOptions.options().withTimeout(internalActionTimeout),
                        EmptyTransportResponseHandler.INSTANCE_SAME).txGet();

                // stream each file concurrently; the latch lets us wait for all of them,
                // and lastException records a failure from any worker (last writer wins)
                final CountDownLatch latch = new CountDownLatch(response.phase1FileNames.size());
                final AtomicReference<Throwable> lastException = new AtomicReference<Throwable>();
                int fileIndex = 0;
                for (final String name : response.phase1FileNames) {
                    ThreadPoolExecutor pool;
                    // big files go to a separate pool so they don't starve the small-file streams
                    long fileSize = response.phase1FileSizes.get(fileIndex);
                    if (fileSize > recoverySettings.SMALL_FILE_CUTOFF_BYTES) {
                        pool = recoverySettings.concurrentStreamPool();
                    } else {
                        pool = recoverySettings.concurrentSmallFileStreamPool();
                    }

                    pool.execute(new Runnable() {
                        @Override
                        public void run() {
                            IndexInput indexInput = null;
                            try {
                                final int BUFFER_SIZE = (int) recoverySettings.fileChunkSize().bytes();
                                byte[] buf = new byte[BUFFER_SIZE];
                                StoreFileMetaData md = shard.store().metaData(name);
                                // TODO: maybe use IOContext.READONCE?
                                indexInput = shard.store().openInputRaw(name, IOContext.READ);
                                // don't re-compress data that is already stored compressed
                                boolean shouldCompressRequest = recoverySettings.compress();
                                if (CompressorFactory.isCompressed(indexInput)) {
                                    shouldCompressRequest = false;
                                }

                                // send the file in chunks of at most BUFFER_SIZE bytes
                                long len = indexInput.length();
                                long readCount = 0;
                                while (readCount < len) {
                                    if (shard.state() == IndexShardState.CLOSED) { // check if the shard got closed on us
                                        throw new IndexShardClosedException(shard.shardId());
                                    }
                                    int toRead = readCount + BUFFER_SIZE > len ? (int) (len - readCount)
                                            : BUFFER_SIZE;
                                    long position = indexInput.getFilePointer();

                                    if (recoverySettings.rateLimiter() != null) {
                                        recoverySettings.rateLimiter().pause(toRead);
                                    }

                                    indexInput.readBytes(buf, 0, toRead, false);
                                    BytesArray content = new BytesArray(buf, 0, toRead);
                                    transportService.submitRequest(request.targetNode(),
                                            RecoveryTarget.Actions.FILE_CHUNK,
                                            new RecoveryFileChunkRequest(request.recoveryId(),
                                                    request.shardId(), name, position, len, md.checksum(),
                                                    content),
                                            TransportRequestOptions.options()
                                                    .withCompress(shouldCompressRequest)
                                                    .withType(TransportRequestOptions.Type.RECOVERY)
                                                    .withTimeout(internalActionTimeout),
                                            EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
                                    readCount += toRead;
                                }
                            } catch (Throwable e) {
                                lastException.set(e);
                            } finally {
                                // always release the input and count down, even on failure,
                                // so the awaiting thread can't hang
                                IOUtils.closeWhileHandlingException(indexInput);
                                latch.countDown();
                            }
                        }
                    });
                    fileIndex++;
                }

                latch.await();

                if (lastException.get() != null) {
                    throw lastException.get();
                }

                // now, set the clean files request
                Set<String> snapshotFiles = Sets.newHashSet(snapshot.getFiles());
                transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.CLEAN_FILES,
                        new RecoveryCleanFilesRequest(request.recoveryId(), shard.shardId(), snapshotFiles),
                        TransportRequestOptions.options().withTimeout(internalActionTimeout),
                        EmptyTransportResponseHandler.INSTANCE_SAME).txGet();

                stopWatch.stop();
                logger.trace("[{}][{}] recovery [phase1] to {}: took [{}]", request.shardId().index().name(),
                        request.shardId().id(), request.targetNode(), stopWatch.totalTime());
                response.phase1Time = stopWatch.totalTime().millis();
            } catch (Throwable e) {
                throw new RecoverFilesRecoveryException(request.shardId(), response.phase1FileNames.size(),
                        new ByteSizeValue(totalSize), e);
            }
        }

        // Phase 2: ask the target to prepare its translog, then replay the translog snapshot.
        @Override
        public void phase2(Translog.Snapshot snapshot) throws ElasticsearchException {
            if (shard.state() == IndexShardState.CLOSED) {
                throw new IndexShardClosedException(request.shardId());
            }
            logger.trace("[{}][{}] recovery [phase2] to {}: start", request.shardId().index().name(),
                    request.shardId().id(), request.targetNode());
            StopWatch stopWatch = new StopWatch().start();
            transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.PREPARE_TRANSLOG,
                    new RecoveryPrepareForTranslogOperationsRequest(request.recoveryId(), request.shardId()),
                    TransportRequestOptions.options().withTimeout(internalActionTimeout),
                    EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
            stopWatch.stop();
            response.startTime = stopWatch.totalTime().millis();
            logger.trace("[{}][{}] recovery [phase2] to {}: start took [{}]", request.shardId().index().name(),
                    request.shardId().id(), request.targetNode(), stopWatch.totalTime());

            logger.trace("[{}][{}] recovery [phase2] to {}: sending transaction log operations",
                    request.shardId().index().name(), request.shardId().id(), request.targetNode());
            stopWatch = new StopWatch().start();
            int totalOperations = sendSnapshot(snapshot);
            stopWatch.stop();
            logger.trace("[{}][{}] recovery [phase2] to {}: took [{}]", request.shardId().index().name(),
                    request.shardId().id(), request.targetNode(), stopWatch.totalTime());
            response.phase2Time = stopWatch.totalTime().millis();
            response.phase2Operations = totalOperations;
        }

        // Phase 3: send any remaining translog operations, finalize the recovery on the
        // target, and optionally mark this (source) shard as relocated.
        @Override
        public void phase3(Translog.Snapshot snapshot) throws ElasticsearchException {
            if (shard.state() == IndexShardState.CLOSED) {
                throw new IndexShardClosedException(request.shardId());
            }
            logger.trace("[{}][{}] recovery [phase3] to {}: sending transaction log operations",
                    request.shardId().index().name(), request.shardId().id(), request.targetNode());
            StopWatch stopWatch = new StopWatch().start();
            int totalOperations = sendSnapshot(snapshot);
            transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FINALIZE,
                    new RecoveryFinalizeRecoveryRequest(request.recoveryId(), request.shardId()),
                    TransportRequestOptions.options().withTimeout(internalActionLongTimeout),
                    EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
            if (request.markAsRelocated()) {
                // TODO what happens if the recovery process fails afterwards, we need to mark this back to started
                try {
                    shard.relocated("to " + request.targetNode());
                } catch (IllegalIndexShardStateException e) {
                    // we can ignore this exception since, on the other node, when it moved to phase3
                    // it will also send shard started, which might cause the index shard we work against
                    // to move be closed by the time we get to the the relocated method
                }
            }
            stopWatch.stop();
            logger.trace("[{}][{}] recovery [phase3] to {}: took [{}]", request.shardId().index().name(),
                    request.shardId().id(), request.targetNode(), stopWatch.totalTime());
            response.phase3Time = stopWatch.totalTime().millis();
            response.phase3Operations = totalOperations;
        }

        /**
         * Replays a translog snapshot to the target in batches, flushing a batch once either
         * the operation-count or byte-size threshold from the recovery settings is reached.
         * Returns the total number of operations sent.
         */
        private int sendSnapshot(Translog.Snapshot snapshot) throws ElasticsearchException {
            int ops = 0;
            long size = 0;
            int totalOperations = 0;
            List<Translog.Operation> operations = Lists.newArrayList();
            while (snapshot.hasNext()) {
                if (shard.state() == IndexShardState.CLOSED) {
                    throw new IndexShardClosedException(request.shardId());
                }
                Translog.Operation operation = snapshot.next();
                operations.add(operation);
                ops += 1;
                size += operation.estimateSize();
                totalOperations++;
                if (ops >= recoverySettings.translogOps() || size >= recoverySettings.translogSize().bytes()) {

                    // don't throttle translog, since we lock for phase3 indexing, so we need to move it as
                    // fast as possible. Note, sine we index docs to replicas while the index files are recovered
                    // the lock can potentially be removed, in which case, it might make sense to re-enable
                    // throttling in this phase
                    //                        if (recoverySettings.rateLimiter() != null) {
                    //                            recoverySettings.rateLimiter().pause(size);
                    //                        }

                    RecoveryTranslogOperationsRequest translogOperationsRequest = new RecoveryTranslogOperationsRequest(
                            request.recoveryId(), request.shardId(), operations);
                    transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.TRANSLOG_OPS,
                            translogOperationsRequest,
                            TransportRequestOptions.options().withCompress(recoverySettings.compress())
                                    .withType(TransportRequestOptions.Type.RECOVERY)
                                    .withTimeout(internalActionLongTimeout),
                            EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
                    ops = 0;
                    size = 0;
                    operations.clear();
                }
            }
            // send the leftover
            if (!operations.isEmpty()) {
                RecoveryTranslogOperationsRequest translogOperationsRequest = new RecoveryTranslogOperationsRequest(
                        request.recoveryId(), request.shardId(), operations);
                transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.TRANSLOG_OPS,
                        translogOperationsRequest,
                        TransportRequestOptions.options().withCompress(recoverySettings.compress())
                                .withType(TransportRequestOptions.Type.RECOVERY)
                                .withTimeout(internalActionLongTimeout),
                        EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
            }
            return totalOperations;
        }
    });
    return response;
}

From source file:org.elasticsearch.util.lucene.Directories.java

License:Apache License

/**
 * Copies the entire contents of {@code ii} to {@code os} in chunks of at most 16 KiB.
 *
 * Both the input and the output stream are closed when the copy finishes (or fails);
 * exceptions raised while closing are deliberately swallowed so they cannot mask an
 * exception from the copy itself.
 */
public static void copyFromDirectory(IndexInput ii, OutputStream os) throws IOException {
    final long total = ii.length();
    // never allocate more than 16 KiB, and no more than the input actually holds
    final int chunkCapacity = total < 16384 ? (int) total : 16384;
    final byte[] chunk = new byte[chunkCapacity];
    try {
        long copied = 0;
        while (copied < total) {
            final long remaining = total - copied;
            final int chunkLen = remaining < chunkCapacity ? (int) remaining : chunkCapacity;
            ii.readBytes(chunk, 0, chunkLen, false);
            copied += chunkLen;
            os.write(chunk, 0, chunkLen);
        }
    } finally {
        // best-effort close of both ends; close failures are intentionally ignored
        if (os != null) {
            try {
                os.close();
            } catch (Exception e) {
                // ignore
            }
        }
        if (ii != null) {
            try {
                ii.close();
            } catch (Exception e) {
                // ignore
            }
        }
    }
}

From source file:org.sonatype.nexus.index.packer.DefaultIndexPacker.java

License:Open Source License

/**
 * Appends the directory file {@code name} as a new entry to the given zip stream,
 * copying its bytes through the caller-supplied scratch buffer {@code buf}.
 *
 * The opened {@link IndexInput} is always closed; the zip stream is flushed and the
 * entry closed on success, but the stream itself is left open for further entries.
 */
static void writeFile(String name, ZipOutputStream zos, Directory directory, byte[] buf) throws IOException {
    zos.putNextEntry(new ZipEntry(name));

    IndexInput in = directory.openInput(name);
    try {
        // stream the file through buf, one buffer-sized chunk at a time
        int remaining = (int) in.length();
        while (remaining > 0) {
            int chunkLen = Math.min(buf.length, remaining);
            in.readBytes(buf, 0, chunkLen, false);
            zos.write(buf, 0, chunkLen);
            remaining -= chunkLen;
        }
    } finally {
        IndexUtils.close(in);
    }

    zos.flush();
    zos.closeEntry();
}