Example usage for org.apache.commons.lang3.concurrent LazyInitializer LazyInitializer

List of usage examples for org.apache.commons.lang3.concurrent LazyInitializer LazyInitializer

Introduction

On this page you can find example usages of the org.apache.commons.lang3.concurrent.LazyInitializer constructor, LazyInitializer().

Prototype

LazyInitializer()
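
LazyInitializer is abstract, so its no-argument constructor is only ever called when subclassing it, usually anonymously, and overriding initialize(). A minimal sketch of that pattern, where ConnectionPool is just a placeholder for some object that is expensive to create:

import org.apache.commons.lang3.concurrent.ConcurrentException;
import org.apache.commons.lang3.concurrent.LazyInitializer;

public class LazyPoolHolder {

    // Placeholder for something expensive to build; not part of Commons Lang.
    static class ConnectionPool {
    }

    // The LazyInitializer constructor takes no arguments; the behaviour comes from initialize().
    private final LazyInitializer<ConnectionPool> pool = new LazyInitializer<ConnectionPool>() {
        @Override
        protected ConnectionPool initialize() throws ConcurrentException {
            return new ConnectionPool(); // runs at most once, on the first get()
        }
    };

    public ConnectionPool getPool() throws ConcurrentException {
        // get() triggers initialize() on first use; concurrent callers all see the same instance.
        return pool.get();
    }
}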

Source Link

Usage

From source file:com.spotify.reaper.service.SegmentRunner.java

private void runRepair() {
    LOG.debug("Run repair for segment #{}", segmentId);
    final RepairSegment segment = context.storage.getRepairSegment(segmentId).get();
    try (JmxProxy coordinator = context.jmxConnectionFactory.connectAny(Optional.<RepairStatusHandler>of(this),
            potentialCoordinators)) {

        if (segmentRunners.containsKey(segmentId)) {
            LOG.error("SegmentRunner already exists for segment with ID: " + segmentId);
            throw new ReaperException("SegmentRunner already exists for segment with ID: " + segmentId);
        }
        segmentRunners.put(segmentId, this);

        RepairUnit repairUnit = context.storage.getRepairUnit(segment.getRepairUnitId()).get();
        String keyspace = repairUnit.getKeyspaceName();

        // If this segment is blocked by other repairs on the hosts involved, we will want to double-
        // check with storage, whether those hosts really should be busy with repairs. This listing of
        // busy hosts isn't a cheap operation, so only do it (once) when repairs block the segment.
        LazyInitializer<Set<String>> busyHosts = new LazyInitializer<Set<String>>() {
            @Override
            protected Set<String> initialize() {
                Collection<RepairParameters> ongoingRepairs = context.storage
                        .getOngoingRepairsInCluster(clusterName);
                Set<String> busyHosts = Sets.newHashSet();
                for (RepairParameters ongoingRepair : ongoingRepairs) {
                    busyHosts.addAll(coordinator.tokenRangeToEndpoint(ongoingRepair.keyspaceName,
                            ongoingRepair.tokenRange));
                }
                return busyHosts;
            }
        };
        if (!canRepair(segment, keyspace, coordinator, busyHosts)) {
            postponeCurrentSegment();
            return;
        }

        LOG.debug("Enter synchronized section with segment ID {}", segmentId);
        synchronized (condition) {
            commandId = coordinator.triggerRepair(segment.getStartToken(), segment.getEndToken(), keyspace,
                    validationParallelism, repairUnit.getColumnFamilies());

            if (commandId == 0) {
                // From cassandra source in "forceRepairAsync":
                //if (ranges.isEmpty() || Keyspace.open(keyspace).getReplicationStrategy().getReplicationFactor() < 2)
                //  return 0;
                LOG.info("Nothing to repair for keyspace {}", keyspace);
                context.storage.updateRepairSegment(segment.with().coordinatorHost(coordinator.getHost())
                        .state(RepairSegment.State.DONE).build(segmentId));
                segmentRunners.remove(segment.getId());
                return;
            }

            LOG.debug("Triggered repair with command id {}", commandId);
            context.storage.updateRepairSegment(segment.with().coordinatorHost(coordinator.getHost())
                    .repairCommandId(commandId).build(segmentId));
            String eventMsg = String.format("Triggered repair of segment %d via host %s", segment.getId(),
                    coordinator.getHost());
            repairRunner.updateLastEvent(eventMsg);
            LOG.info("Repair for segment {} started, status wait will timeout in {} millis", segmentId,
                    timeoutMillis);
            try {
                condition.await(timeoutMillis, TimeUnit.MILLISECONDS);
            } catch (InterruptedException e) {
                LOG.warn("Repair command {} on segment {} interrupted", commandId, segmentId);
            } finally {
                RepairSegment resultingSegment = context.storage.getRepairSegment(segmentId).get();
                LOG.info("Repair command {} on segment {} returned with state {}", commandId, segmentId,
                        resultingSegment.getState());
                if (resultingSegment.getState() == RepairSegment.State.RUNNING) {
                    LOG.info("Repair command {} on segment {} has been cancelled while running", commandId,
                            segmentId);
                    abort(resultingSegment, coordinator);
                } else if (resultingSegment.getState() == RepairSegment.State.DONE) {
                    LOG.debug("Repair segment with id '{}' was repaired in {} seconds",
                            resultingSegment.getId(), Seconds.secondsBetween(resultingSegment.getStartTime(),
                                    resultingSegment.getEndTime()).getSeconds());
                    segmentRunners.remove(resultingSegment.getId());
                }
            }
        }
    } catch (ReaperException e) {
        LOG.warn("Failed to connect to a coordinator node for segment {}", segmentId);
        String msg = "Postponed a segment because no coordinator was reachable";
        repairRunner.updateLastEvent(msg);
        postponeCurrentSegment();
        LOG.warn("Open files amount for process: " + getOpenFilesAmount());
    }
    LOG.debug("Exiting synchronized section with segment ID {}", segmentId);
}
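
In the example above the LazyInitializer is handed to canRepair(segment, keyspace, coordinator, busyHosts), whose body is not part of this listing. A hedged sketch of how such a consumer could use it; the cheap pre-check someHostAppearsBusy is purely hypothetical, and only get() and ConcurrentException come from the Commons Lang API:

private boolean canRepair(RepairSegment segment, String keyspace, JmxProxy coordinator,
        LazyInitializer<Set<String>> busyHosts) {
    try {
        // Cheap check first; the expensive storage/JMX lookup in initialize() only runs if get() is reached.
        if (!someHostAppearsBusy(coordinator, keyspace)) { // hypothetical pre-check
            return true;
        }
        // The first get() triggers initialize(); later calls return the cached set of busy hosts.
        return busyHosts.get().isEmpty();
    } catch (ConcurrentException e) {
        LOG.warn("Could not determine busy hosts for segment {}", segmentId, e);
        return false;
    }
}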

From source file:org.knime.ext.textprocessing.nodes.misc.markuptagfilter.MarkupTagFilterCellFactory.java

/**
 * Creates an instance of {@code MarkupTagFilterCellFactory}.
 *
 * @param colIndexToFilter The indices of the columns with the Strings to filter.
 * @param newColSpecs The specs of the new columns (replaced or appended).
 * @param tokenizerName The name of the tokenizer used to retokenize filtered documents.
 */
MarkupTagFilterCellFactory(final int[] colIndexToFilter, final DataColumnSpec[] newColSpecs,
        final String tokenizerName) {
    super(newColSpecs);
    m_tokenizerName = tokenizerName;

    m_colIndexToFilter = colIndexToFilter;

    m_cacheInitializer = new LazyInitializer<DataCellCache>() {
        @Override
        protected DataCellCache initialize() throws ConcurrentException {
            return initializeDataCellCache();
        }
    };

}
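
The constructor above only wires up the initializer; the cache itself is presumably pulled out later with get(). A hedged sketch of such an accessor, where only get() and ConcurrentException are part of the Commons Lang API and the helper name is our own:

// Hypothetical accessor; initializeDataCellCache() is the factory's own method referenced above.
private DataCellCache getDataCellCache() {
    try {
        // The first caller triggers initializeDataCellCache(); all later callers get the same cache.
        return m_cacheInitializer.get();
    } catch (ConcurrentException e) {
        throw new IllegalStateException("Could not initialize DataCellCache", e);
    }
}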

From source file:org.knime.ext.textprocessing.nodes.transformation.documentdataassigner.DocumentDataAssignerCellFactory.java

/**
 * @param conf The {@code DocumentDataAssignerConfig} that contains information about how to build new Documents.
 * @param dataColumnSpec The {@code DataColumnSpec} containing the information of the new column.
 */
public DocumentDataAssignerCellFactory(final DocumentDataAssignerConfig conf,
        final DataColumnSpec[] dataColumnSpec) {
    super(dataColumnSpec);

    m_conf = conf;
    this.setParallelProcessing(true);
    m_cacheInitializer = new LazyInitializer<DataCellCache>() {

        @Override
        protected DataCellCache initialize() throws ConcurrentException {
            return initializeDataCellCache();
        }
    };
}

From source file:org.knime.ext.textprocessing.nodes.transformation.stringstodocument.StringsToDocumentCellFactory.java

/**
 * Creates a new instance of <code>StringsToDocumentCellFactory</code> with the given configuration.
 *
 * @param config The configuration describing how to build a document.
 * @param newColSpecs The specs of the new columns that are created.
 * @param numberOfThreads The number of parallel threads to use.
 * @param tokenizerName The tokenizer used for word tokenization.
 * @throws IllegalArgumentException If given configuration is <code>null</code>.
 * @since 3.3
 */
public StringsToDocumentCellFactory(final StringsToDocumentConfig config, final DataColumnSpec[] newColSpecs,
        final int numberOfThreads, final String tokenizerName) throws IllegalArgumentException {
    super(newColSpecs);

    this.setParallelProcessing(true, numberOfThreads, 10 * numberOfThreads);

    if (config == null) {
        throw new IllegalArgumentException("Configuration object may not be null!");
    }
    m_cacheInitializer = new LazyInitializer<DataCellCache>() {
        @Override
        protected DataCellCache initialize() throws ConcurrentException {
            DataCellCache dataCellCache = initializeDataCellCache();
            m_cacheCreated = true;
            return dataCellCache;
        }
    };
    m_config = config;
    m_tokenizerName = tokenizerName;
}

From source file:org.knime.ext.textprocessing.nodes.transformation.stringstodocument.StringsToDocumentCellFactory2.java

/**
 * Creates a new instance of {@code StringsToDocumentCellFactory2} with the given configuration.
 *
 * @param config The configuration describing how to build a document.
 * @param newColSpecs The specs of the new columns that are created.
 * @param numberOfThreads The number of parallel threads to use.
 * @param tokenizerName The tokenizer used for word tokenization.
 * @throws IllegalArgumentException If given configuration is {@code null}.
 */
public StringsToDocumentCellFactory2(final StringsToDocumentConfig2 config, final DataColumnSpec[] newColSpecs,
        final int numberOfThreads, final String tokenizerName) throws IllegalArgumentException {
    super(newColSpecs);

    this.setParallelProcessing(true, numberOfThreads, 10 * numberOfThreads);

    if (config == null) {
        throw new IllegalArgumentException("Configuration object may not be null!");
    }
    m_cacheInitializer = new LazyInitializer<DataCellCache>() {
        @Override
        protected DataCellCache initialize() throws ConcurrentException {
            DataCellCache dataCellCache = initializeDataCellCache();
            m_cacheCreated = true;
            return dataCellCache;
        }
    };
    m_config = config;
    m_tokenizerName = tokenizerName;
}
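
The two StringsToDocument factories also flip an m_cacheCreated flag inside initialize(). A plausible reading, and only an assumption since the rest of the class is not shown, is that cleanup code can then skip the cache unless it was actually built, instead of calling get() and forcing initialization just to tear it down:

// Hypothetical cleanup guard; closeCache() is a placeholder, not a known DataCellCache method.
void disposeCacheIfCreated() {
    if (m_cacheCreated) { // true only after initialize() has run
        try {
            m_cacheInitializer.get().closeCache(); // get() is cheap here: the cache already exists
        } catch (ConcurrentException e) {
            throw new IllegalStateException(e);
        }
    }
}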