Example usage for org.apache.commons.lang.time StopWatch StopWatch

List of usage examples for org.apache.commons.lang.time StopWatch StopWatch

Introduction

On this page you can find example usages of the org.apache.commons.lang.time.StopWatch constructor, StopWatch().

Prototype

public StopWatch() 

Source Link

Document

Constructor.
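
A minimal, self-contained sketch of the constructor in use (the Thread.sleep call is just a stand-in for whatever work you want to time): construct the watch, start it, stop it, then read the elapsed milliseconds with getTime() or the formatted value with toString().

import org.apache.commons.lang.time.StopWatch;

public class StopWatchBasics {
    public static void main(String[] args) throws InterruptedException {
        StopWatch stopWatch = new StopWatch(); // the no-argument constructor documented above
        stopWatch.start();

        Thread.sleep(250); // stand-in for the work being measured

        stopWatch.stop();
        // getTime() returns elapsed milliseconds; toString() formats the time as HH:mm:ss.SSS
        System.out.println("elapsed: " + stopWatch.getTime() + " ms (" + stopWatch + ")");
    }
}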

Usage

From source file:org.apache.accumulo.examples.wikisearch.logic.AbstractQueryLogic.java

public Results runQuery(Connector connector, List<String> authorizations, String query, Date beginDate,
        Date endDate, Set<String> types) {

    if (StringUtils.isEmpty(query)) {
        throw new IllegalArgumentException(
                "NULL QueryNode reference passed to " + this.getClass().getSimpleName());
    }

    Set<Range> ranges = new HashSet<Range>();
    Set<String> typeFilter = types;
    String[] array = authorizations.toArray(new String[0]);
    Authorizations auths = new Authorizations(array);
    Results results = new Results();

    // Get the query string
    String queryString = query;

    StopWatch abstractQueryLogic = new StopWatch();
    StopWatch optimizedQuery = new StopWatch();
    StopWatch queryGlobalIndex = new StopWatch();
    StopWatch optimizedEventQuery = new StopWatch();
    StopWatch fullScanQuery = new StopWatch();
    StopWatch processResults = new StopWatch();

    abstractQueryLogic.start();

    StopWatch parseQuery = new StopWatch();
    parseQuery.start();

    QueryParser parser;
    try {
        if (log.isDebugEnabled()) {
            log.debug("ShardQueryLogic calling QueryParser.execute");
        }
        parser = new QueryParser();
        parser.execute(queryString);
    } catch (org.apache.commons.jexl2.parser.ParseException e1) {
        throw new IllegalArgumentException("Error parsing query", e1);
    }
    int hash = parser.getHashValue();
    parseQuery.stop();
    if (log.isDebugEnabled()) {
        log.debug(hash + " Query: " + queryString);
    }

    Set<String> fields = new HashSet<String>();
    for (String f : parser.getQueryIdentifiers()) {
        fields.add(f);
    }
    if (log.isDebugEnabled()) {
        log.debug("getQueryIdentifiers: " + parser.getQueryIdentifiers().toString());
    }
    // Remove any negated fields from the fields list, we don't want to lookup negated fields
    // in the index.
    fields.removeAll(parser.getNegatedTermsForOptimizer());

    if (log.isDebugEnabled()) {
        log.debug("getQueryIdentifiers: " + parser.getQueryIdentifiers().toString());
    }
    // Get the mapping of field name to QueryTerm object from the query. The query term object
    // contains the operator, whether its negated or not, and the literal to test against.
    Multimap<String, QueryTerm> terms = parser.getQueryTerms();

    // Find out which terms are indexed
    // TODO: Should we cache indexed terms or does that not make sense since we are always
    // loading data.
    StopWatch queryMetadata = new StopWatch();
    queryMetadata.start();
    Map<String, Multimap<String, Class<? extends Normalizer>>> metadataResults;
    try {
        metadataResults = findIndexedTerms(connector, auths, fields, typeFilter);
    } catch (Exception e1) {
        throw new RuntimeException("Error in metadata lookup", e1);
    }

    // Create a map of indexed term to set of normalizers for it
    Multimap<String, Normalizer> indexedTerms = HashMultimap.create();
    for (Entry<String, Multimap<String, Class<? extends Normalizer>>> entry : metadataResults.entrySet()) {
        // Get the normalizer from the normalizer cache
        for (Class<? extends Normalizer> clazz : entry.getValue().values()) {
            indexedTerms.put(entry.getKey(), normalizerCacheMap.get(clazz));
        }
    }
    queryMetadata.stop();
    if (log.isDebugEnabled()) {
        log.debug(hash + " Indexed Terms: " + indexedTerms.toString());
    }

    Set<String> orTerms = parser.getOrTermsForOptimizer();

    // Iterate over the query terms to get the operators specified in the query.
    ArrayList<String> unevaluatedExpressions = new ArrayList<String>();
    boolean unsupportedOperatorSpecified = false;
    for (Entry<String, QueryTerm> entry : terms.entries()) {
        if (null == entry.getValue()) {
            continue;
        }

        if (null != this.unevaluatedFields && this.unevaluatedFields.contains(entry.getKey().trim())) {
            unevaluatedExpressions.add(entry.getKey().trim() + " " + entry.getValue().getOperator() + " "
                    + entry.getValue().getValue());
        }

        int operator = JexlOperatorConstants.getJJTNodeType(entry.getValue().getOperator());
        if (!(operator == ParserTreeConstants.JJTEQNODE || operator == ParserTreeConstants.JJTNENODE
                || operator == ParserTreeConstants.JJTLENODE || operator == ParserTreeConstants.JJTLTNODE
                || operator == ParserTreeConstants.JJTGENODE || operator == ParserTreeConstants.JJTGTNODE
                || operator == ParserTreeConstants.JJTERNODE)) {
            unsupportedOperatorSpecified = true;
            break;
        }
    }
    if (null != unevaluatedExpressions)
        unevaluatedExpressions.trimToSize();
    if (log.isDebugEnabled()) {
        log.debug(hash + " unsupportedOperators: " + unsupportedOperatorSpecified + " indexedTerms: "
                + indexedTerms.toString() + " orTerms: " + orTerms.toString() + " unevaluatedExpressions: "
                + unevaluatedExpressions.toString());
    }

    // We can use the intersecting iterator over the field index as an optimization under any of the
    // following conditions:
    //
    // 1. No unsupported operators in the query, no 'or' operators, and at least one term indexed,
    // or
    // 2. No unsupported operators in the query and all terms indexed,
    // or
    // 3. All or'd terms are indexed. NOTE: this can still skip some queries and push them to a full table scan.
    // We should look into a better way to decide whether to run an optimized query or not.
    boolean optimizationSucceeded = false;
    boolean orsAllIndexed = false;
    if (orTerms.isEmpty()) {
        orsAllIndexed = false;
    } else {
        orsAllIndexed = indexedTerms.keySet().containsAll(orTerms);
    }

    if (log.isDebugEnabled()) {
        log.debug("All or'd terms indexed: " + orsAllIndexed);
    }

    if (!unsupportedOperatorSpecified && (((null == orTerms || orTerms.isEmpty()) && indexedTerms.size() > 0)
            || (fields.size() > 0 && indexedTerms.size() == fields.size()) || orsAllIndexed)) {
        optimizedQuery.start();
        // Set up intersecting iterator over field index.

        // Get information from the global index for the indexed terms. The results object will contain the term
        // mapped to an object that contains the total count, and partitions where this term is located.

        // TODO: Should we cache indexed term information or does that not make sense since we are always loading data
        queryGlobalIndex.start();
        IndexRanges termIndexInfo;
        try {
            // If fields is null or zero, then it's probably the case that the user entered a value
            // to search for with no fields. Check for the value in index.
            if (fields.isEmpty()) {
                termIndexInfo = this.getTermIndexInformation(connector, auths, queryString, typeFilter);
                if (null != termIndexInfo && termIndexInfo.getRanges().isEmpty()) {
                    // Then we didn't find anything in the index for this query. This may happen for an indexed term that has wildcards
                    // in unhandled locations.
                    // Break out of here by throwing a named exception and do full scan
                    throw new DoNotPerformOptimizedQueryException();
                }
                // We need to rewrite the query string here so that it's valid.
                if (termIndexInfo instanceof UnionIndexRanges) {
                    UnionIndexRanges union = (UnionIndexRanges) termIndexInfo;
                    StringBuilder buf = new StringBuilder();
                    String sep = "";
                    for (String fieldName : union.getFieldNamesAndValues().keySet()) {
                        buf.append(sep).append(fieldName).append(" == ");
                        if (!(queryString.startsWith("'") && queryString.endsWith("'"))) {
                            buf.append("'").append(queryString).append("'");
                        } else {
                            buf.append(queryString);
                        }
                        sep = " or ";
                    }
                    if (log.isDebugEnabled()) {
                        log.debug("Rewrote query for non-fielded single term query: " + queryString + " to "
                                + buf.toString());
                    }
                    queryString = buf.toString();
                } else {
                    throw new RuntimeException("Unexpected IndexRanges implementation");
                }
            } else {
                RangeCalculator calc = this.getTermIndexInformation(connector, auths, indexedTerms, terms,
                        this.getIndexTableName(), this.getReverseIndexTableName(), queryString,
                        this.queryThreads, typeFilter);
                if (null == calc.getResult() || calc.getResult().isEmpty()) {
                    // Then we didn't find anything in the index for this query. This may happen for an indexed term that has wildcards
                    // in unhandled locations.
                    // Break out of here by throwing a named exception and do full scan
                    throw new DoNotPerformOptimizedQueryException();
                }
                termIndexInfo = new UnionIndexRanges();
                termIndexInfo.setIndexValuesToOriginalValues(calc.getIndexValues());
                termIndexInfo.setFieldNamesAndValues(calc.getIndexEntries());
                termIndexInfo.getTermCardinality().putAll(calc.getTermCardinalities());
                for (Range r : calc.getResult()) {
                    // foo is a placeholder and is ignored.
                    termIndexInfo.add("foo", r);
                }
            }
        } catch (TableNotFoundException e) {
            log.error(this.getIndexTableName() + " not found", e);
            throw new RuntimeException(this.getIndexTableName() + " not found", e);
        } catch (org.apache.commons.jexl2.parser.ParseException e) {
            throw new RuntimeException("Error determining ranges for query: " + queryString, e);
        } catch (DoNotPerformOptimizedQueryException e) {
            log.info("Indexed fields not found in index, performing full scan");
            termIndexInfo = null;
        }
        queryGlobalIndex.stop();

        // Determine if we should proceed with optimized query based on results from the global index
        boolean proceed = false;
        if (null == termIndexInfo || termIndexInfo.getFieldNamesAndValues().values().size() == 0) {
            proceed = false;
        } else if (null != orTerms && orTerms.size() > 0
                && (termIndexInfo.getFieldNamesAndValues().values().size() == indexedTerms.size())) {
            proceed = true;
        } else if (termIndexInfo.getFieldNamesAndValues().values().size() > 0) {
            proceed = true;
        } else if (orsAllIndexed) {
            proceed = true;
        } else {
            proceed = false;
        }
        if (log.isDebugEnabled()) {
            log.debug("Proceed with optimized query: " + proceed);
            if (null != termIndexInfo)
                log.debug("termIndexInfo.getTermsFound().size(): "
                        + termIndexInfo.getFieldNamesAndValues().values().size() + " indexedTerms.size: "
                        + indexedTerms.size() + " fields.size: " + fields.size());
        }
        if (proceed) {

            if (log.isDebugEnabled()) {
                log.debug(hash + " Performing optimized query");
            }
            // Use the scan ranges from the GlobalIndexRanges object as the ranges for the batch scanner
            ranges = termIndexInfo.getRanges();
            if (log.isDebugEnabled()) {
                log.info(hash + " Ranges: count: " + ranges.size() + ", " + ranges.toString());
            }

            // Create BatchScanner, set the ranges, and setup the iterators.
            optimizedEventQuery.start();
            BatchScanner bs = null;
            try {
                bs = connector.createBatchScanner(this.getTableName(), auths, queryThreads);
                bs.setRanges(ranges);
                IteratorSetting si = new IteratorSetting(21, "eval", OptimizedQueryIterator.class);

                if (log.isDebugEnabled()) {
                    log.debug("Setting scan option: " + EvaluatingIterator.QUERY_OPTION + " to " + queryString);
                }
                // Set the query option
                si.addOption(EvaluatingIterator.QUERY_OPTION, queryString);
                // Set the Indexed Terms List option. This is the field name and normalized field value pair separated
                // by a comma.
                StringBuilder buf = new StringBuilder();
                String sep = "";
                for (Entry<String, String> entry : termIndexInfo.getFieldNamesAndValues().entries()) {
                    buf.append(sep);
                    buf.append(entry.getKey());
                    buf.append(":");
                    buf.append(termIndexInfo.getIndexValuesToOriginalValues().get(entry.getValue()));
                    buf.append(":");
                    buf.append(entry.getValue());
                    if (sep.equals("")) {
                        sep = ";";
                    }
                }
                if (log.isDebugEnabled()) {
                    log.debug("Setting scan option: " + FieldIndexQueryReWriter.INDEXED_TERMS_LIST + " to "
                            + buf.toString());
                }
                FieldIndexQueryReWriter rewriter = new FieldIndexQueryReWriter();
                String q = "";
                try {
                    q = queryString;
                    q = rewriter.applyCaseSensitivity(q, true, false);// Set upper/lower case for fieldname/fieldvalue
                    Map<String, String> opts = new HashMap<String, String>();
                    opts.put(FieldIndexQueryReWriter.INDEXED_TERMS_LIST, buf.toString());
                    q = rewriter.removeNonIndexedTermsAndInvalidRanges(q, opts);
                    q = rewriter.applyNormalizedTerms(q, opts);
                    if (log.isDebugEnabled()) {
                        log.debug("runServerQuery, FieldIndex Query: " + q);
                    }
                } catch (org.apache.commons.jexl2.parser.ParseException ex) {
                    log.error("Could not parse query, Jexl ParseException: " + ex);
                } catch (Exception ex) {
                    log.error("Problem rewriting query, Exception: " + ex.getMessage());
                }
                si.addOption(BooleanLogicIterator.FIELD_INDEX_QUERY, q);

                // Set the term cardinality option
                sep = "";
                buf.delete(0, buf.length());
                for (Entry<String, Long> entry : termIndexInfo.getTermCardinality().entrySet()) {
                    buf.append(sep);
                    buf.append(entry.getKey());
                    buf.append(":");
                    buf.append(entry.getValue());
                    sep = ",";
                }
                if (log.isDebugEnabled())
                    log.debug("Setting scan option: " + BooleanLogicIterator.TERM_CARDINALITIES + " to "
                            + buf.toString());
                si.addOption(BooleanLogicIterator.TERM_CARDINALITIES, buf.toString());
                if (this.useReadAheadIterator) {
                    if (log.isDebugEnabled()) {
                        log.debug("Enabling read ahead iterator with queue size: " + this.readAheadQueueSize
                                + " and timeout: " + this.readAheadTimeOut);
                    }
                    si.addOption(ReadAheadIterator.QUEUE_SIZE, this.readAheadQueueSize);
                    si.addOption(ReadAheadIterator.TIMEOUT, this.readAheadTimeOut);

                }

                if (null != unevaluatedExpressions) {
                    StringBuilder unevaluatedExpressionList = new StringBuilder();
                    String sep2 = "";
                    for (String exp : unevaluatedExpressions) {
                        unevaluatedExpressionList.append(sep2).append(exp);
                        sep2 = ",";
                    }
                    if (log.isDebugEnabled())
                        log.debug("Setting scan option: " + EvaluatingIterator.UNEVALUTED_EXPRESSIONS + " to "
                                + unevaluatedExpressionList.toString());
                    si.addOption(EvaluatingIterator.UNEVALUTED_EXPRESSIONS,
                            unevaluatedExpressionList.toString());
                }

                bs.addScanIterator(si);

                processResults.start();
                processResults.suspend();
                long count = 0;
                for (Entry<Key, Value> entry : bs) {
                    count++;
                    // The key that is returned by the EvaluatingIterator is not the same key that is in
                    // the table. The value that is returned by the EvaluatingIterator is a kryo
                    // serialized EventFields object.
                    processResults.resume();
                    Document d = this.createDocument(entry.getKey(), entry.getValue());
                    results.getResults().add(d);
                    processResults.suspend();
                }
                log.info(count + " matching entries found in optimized query.");
                optimizationSucceeded = true;
                processResults.stop();
            } catch (TableNotFoundException e) {
                log.error(this.getTableName() + " not found", e);
                throw new RuntimeException(this.getTableName() + " not found", e);
            } finally {
                if (bs != null) {
                    bs.close();
                }
            }
            optimizedEventQuery.stop();
        }
        optimizedQuery.stop();
    }

    // We should look into a better way to decide whether to run an optimized query or not.
    // This is not an else branch because the optimized path above may have been aborted early.
    if (!optimizationSucceeded || ((null != orTerms && orTerms.size() > 0)
            && (indexedTerms.size() != fields.size()) && !orsAllIndexed)) {
        // if (!optimizationSucceeded || ((null != orTerms && orTerms.size() > 0) && (indexedTerms.size() != fields.size()))) {
        fullScanQuery.start();
        if (log.isDebugEnabled()) {
            log.debug(hash + " Performing full scan query");
        }

        // Set up a full scan using the date ranges from the query
        // Create BatchScanner, set the ranges, and setup the iterators.
        BatchScanner bs = null;
        try {
            // The ranges are the start and end dates
            Collection<Range> r = getFullScanRange(beginDate, endDate, terms);
            ranges.addAll(r);

            if (log.isDebugEnabled()) {
                log.debug(hash + " Ranges: count: " + ranges.size() + ", " + ranges.toString());
            }

            bs = connector.createBatchScanner(this.getTableName(), auths, queryThreads);
            bs.setRanges(ranges);
            IteratorSetting si = new IteratorSetting(22, "eval", EvaluatingIterator.class);
            // Create datatype regex if needed
            if (null != typeFilter) {
                StringBuilder buf = new StringBuilder();
                String s = "";
                for (String type : typeFilter) {
                    buf.append(s).append(type).append(".*");
                    s = "|";
                }
                if (log.isDebugEnabled())
                    log.debug("Setting colf regex iterator to: " + buf.toString());
                IteratorSetting ri = new IteratorSetting(21, "typeFilter", RegExFilter.class);
                RegExFilter.setRegexs(ri, null, buf.toString(), null, null, false);
                bs.addScanIterator(ri);
            }
            if (log.isDebugEnabled()) {
                log.debug("Setting scan option: " + EvaluatingIterator.QUERY_OPTION + " to " + queryString);
            }
            si.addOption(EvaluatingIterator.QUERY_OPTION, queryString);
            if (null != unevaluatedExpressions) {
                StringBuilder unevaluatedExpressionList = new StringBuilder();
                String sep2 = "";
                for (String exp : unevaluatedExpressions) {
                    unevaluatedExpressionList.append(sep2).append(exp);
                    sep2 = ",";
                }
                if (log.isDebugEnabled())
                    log.debug("Setting scan option: " + EvaluatingIterator.UNEVALUTED_EXPRESSIONS + " to "
                            + unevaluatedExpressionList.toString());
                si.addOption(EvaluatingIterator.UNEVALUTED_EXPRESSIONS, unevaluatedExpressionList.toString());
            }
            bs.addScanIterator(si);
            long count = 0;
            processResults.start();
            processResults.suspend();
            for (Entry<Key, Value> entry : bs) {
                count++;
                // The key that is returned by the EvaluatingIterator is not the same key that is in
                // the partition table. The value that is returned by the EvaluatingIterator is a kryo
                // serialized EventFields object.
                processResults.resume();
                Document d = this.createDocument(entry.getKey(), entry.getValue());
                results.getResults().add(d);
                processResults.suspend();
            }
            processResults.stop();
            log.info(count + " matching entries found in full scan query.");
        } catch (TableNotFoundException e) {
            log.error(this.getTableName() + " not found", e);
        } finally {
            if (bs != null) {
                bs.close();
            }
        }
        fullScanQuery.stop();
    }

    log.info("AbstractQueryLogic: " + queryString + " " + timeString(abstractQueryLogic.getTime()));
    log.info("  1) parse query " + timeString(parseQuery.getTime()));
    log.info("  2) query metadata " + timeString(queryMetadata.getTime()));
    log.info("  3) full scan query " + timeString(fullScanQuery.getTime()));
    log.info("  3) optimized query " + timeString(optimizedQuery.getTime()));
    log.info("  1) process results " + timeString(processResults.getTime()));
    log.info("      1) query global index " + timeString(queryGlobalIndex.getTime()));
    log.info(hash + " Query completed.");

    return results;
}
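
One detail worth noting in the example above is the processResults watch: it is started and immediately suspended, then resumed only around the per-entry document conversion, so the accumulated time excludes waiting on the batch scanner. A stripped-down sketch of that suspend/resume pattern, with fetchRows() and processRow() as hypothetical stand-ins for the scanner iteration and the conversion:

import java.util.Arrays;
import java.util.List;
import org.apache.commons.lang.time.StopWatch;

public class SuspendResumeSketch {
    public static void main(String[] args) {
        StopWatch processResults = new StopWatch();
        processResults.start();
        processResults.suspend();                // start suspended so only the work below is counted

        for (String row : fetchRows()) {         // time spent fetching rows is not counted
            processResults.resume();
            processRow(row);                     // only the per-row processing accumulates time
            processResults.suspend();
        }
        processResults.stop();                   // stop() is legal from the suspended state
        System.out.println("processing took " + processResults.getTime() + " ms");
    }

    private static List<String> fetchRows() {    // hypothetical data source
        return Arrays.asList("a", "b", "c");
    }

    private static void processRow(String row) { // hypothetical per-row work
        row.toUpperCase();
    }
}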

From source file:org.apache.archiva.common.filelock.DefaultFileLockManager.java

@Override
public Lock readFileLock(File file) throws FileLockException, FileLockTimeoutException {
    if (skipLocking) {
        return new Lock(file);

    }
    StopWatch stopWatch = new StopWatch();
    boolean acquired = false;
    mkdirs(file.getParentFile());

    Lock lock = null;

    stopWatch.start();

    while (!acquired) {

        if (timeout > 0) {
            long delta = stopWatch.getTime();
            log.debug("delta {}, timeout {}", delta, timeout);
            if (delta > timeout) {
                log.warn("Cannot acquire read lock within {} millis. Will skip the file: {}", timeout, file);
                // we could not get the lock within the timeout period, so  throw  FileLockTimeoutException
                throw new FileLockTimeoutException();
            }
        }

        Lock current = lockFiles.get(file);

        if (current != null) {
            log.debug("read lock file exist continue wait");
            continue;
        }

        try {
            lock = new Lock(file, false);
            createNewFileQuietly(file);
            lock.openLock(false, timeout > 0);
            acquired = true;
        } catch (FileNotFoundException e) {
            // can happen if another thread has deleted the file
            // close RandomAccessFile!!!
            if (lock != null) {
                closeQuietly(lock.getRandomAccessFile());
            }
            log.debug("read Lock skip: {} try to create file", e.getMessage());
            createNewFileQuietly(file);
        } catch (IOException e) {
            throw new FileLockException(e.getMessage(), e);
        } catch (IllegalStateException e) {
            log.debug("openLock {}:{}", e.getClass(), e.getMessage());
        }
    }
    Lock current = lockFiles.putIfAbsent(file, lock);
    if (current != null) {
        lock = current;
    }
    return lock;

}

From source file:org.apache.archiva.common.filelock.DefaultFileLockManager.java

@Override
public Lock writeFileLock(File file) throws FileLockException, FileLockTimeoutException {
    if (skipLocking) {
        return new Lock(file);
    }

    mkdirs(file.getParentFile());

    StopWatch stopWatch = new StopWatch();
    boolean acquired = false;

    Lock lock = null;

    stopWatch.start();

    while (!acquired) {

        if (timeout > 0) {
            long delta = stopWatch.getTime();
            log.debug("delta {}, timeout {}", delta, timeout);
            if (delta > timeout) {
                log.warn("Cannot acquire read lock within {} millis. Will skip the file: {}", timeout, file);
                // we could not get the lock within the timeout period, so throw FileLockTimeoutException
                throw new FileLockTimeoutException();
            }
        }

        Lock current = lockFiles.get(file);

        try {

            if (current != null) {
                log.debug("write lock file exist continue wait");

                continue;
            }
            lock = new Lock(file, true);
            createNewFileQuietly(file);
            lock.openLock(true, timeout > 0);
            acquired = true;
        } catch (FileNotFoundException e) {
            // can happen if another thread has deleted the file
            // close RandomAccessFile!!!
            if (lock != null) {
                closeQuietly(lock.getRandomAccessFile());
            }
            log.debug("write Lock skip: {} try to create file", e.getMessage());
            createNewFileQuietly(file);
        } catch (IOException e) {
            throw new FileLockException(e.getMessage(), e);
        } catch (IllegalStateException e) {
            log.debug("openLock {}:{}", e.getClass(), e.getMessage());
        }
    }

    Lock current = lockFiles.putIfAbsent(file, lock);
    if (current != null) {
        lock = current;
    }

    return lock;

}
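
Both lock methods above use the same idiom: start a StopWatch before the retry loop and compare getTime() against the timeout on every iteration, which works because getTime() can be read while the watch is still running. A reduced sketch of that idiom, where tryAcquire() is a hypothetical stand-in for a single non-blocking lock attempt:

import java.util.concurrent.TimeoutException;
import org.apache.commons.lang.time.StopWatch;

public class PollWithTimeout {

    // hypothetical stand-in for one non-blocking lock attempt
    private static boolean tryAcquire() {
        return Math.random() < 0.1;
    }

    public static void acquireWithin(long timeoutMillis) throws TimeoutException, InterruptedException {
        StopWatch stopWatch = new StopWatch();
        stopWatch.start();                       // getTime() stays readable while the watch runs

        while (!tryAcquire()) {
            if (timeoutMillis > 0 && stopWatch.getTime() > timeoutMillis) {
                throw new TimeoutException("could not acquire lock within " + timeoutMillis + " ms");
            }
            Thread.sleep(10);                    // back off briefly before retrying
        }
    }

    public static void main(String[] args) throws Exception {
        acquireWithin(5000);
        System.out.println("acquired");
    }
}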

From source file:org.apache.archiva.indexer.maven.merger.DefaultIndexMerger.java

@Override
public IndexingContext buildMergedIndex(IndexMergerRequest indexMergerRequest) throws IndexMergerException {
    String groupId = indexMergerRequest.getGroupId();

    if (runningGroups.contains(groupId)) {
        log.info("skip build merge remote indexes for id: '{}' as already running", groupId);
        return null;
    }

    runningGroups.add(groupId);

    StopWatch stopWatch = new StopWatch();
    stopWatch.reset();
    stopWatch.start();

    Path mergedIndexDirectory = indexMergerRequest.getMergedIndexDirectory();

    String tempRepoId = mergedIndexDirectory.getFileName().toString();

    try {
        Path indexLocation = mergedIndexDirectory.resolve(indexMergerRequest.getMergedIndexPath());

        List<IndexingContext> members = indexMergerRequest.getRepositoriesIds().stream()
                .map(id -> repositoryRegistry.getRepository(id))
                .filter(repo -> repo.getType().equals(RepositoryType.MAVEN)).map(repo -> {
                    try {
                        return repo.getIndexingContext().getBaseContext(IndexingContext.class);
                    } catch (UnsupportedBaseContextException e) {
                        return null;
                        // Ignore
                    }
                }).filter(Objects::nonNull).collect(Collectors.toList());
        ContextMemberProvider memberProvider = new StaticContextMemberProvider(members);
        IndexingContext mergedCtx = indexer.createMergedIndexingContext(tempRepoId, tempRepoId,
                mergedIndexDirectory.toFile(), indexLocation.toFile(), true, memberProvider);
        mergedCtx.optimize();

        if (indexMergerRequest.isPackIndex()) {
            IndexPackingRequest request = new IndexPackingRequest(mergedCtx, //
                    mergedCtx.acquireIndexSearcher().getIndexReader(), //
                    indexLocation.toFile());
            indexPacker.packIndex(request);
        }

        if (indexMergerRequest.isTemporary()) {
            temporaryGroupIndexes.add(new TemporaryGroupIndex(mergedIndexDirectory, tempRepoId, groupId,
                    indexMergerRequest.getMergedIndexTtl()));
            temporaryContextes.add(mergedCtx);
        }
        stopWatch.stop();
        log.info("merged index for repos {} in {} s", indexMergerRequest.getRepositoriesIds(),
                stopWatch.getTime());
        return mergedCtx;
    } catch (IOException e) {
        throw new IndexMergerException(e.getMessage(), e);
    } finally {
        runningGroups.remove(groupId);
    }
}

From source file:org.apache.archiva.indexer.merger.DefaultIndexMerger.java

@Override
public IndexingContext buildMergedIndex(IndexMergerRequest indexMergerRequest) throws IndexMergerException {
    String groupId = indexMergerRequest.getGroupId();

    if (runningGroups.contains(groupId)) {
        log.info("skip build merge remote indexes for id: '{}' as already running", groupId);
        return null;
    }

    runningGroups.add(groupId);

    StopWatch stopWatch = new StopWatch();
    stopWatch.reset();
    stopWatch.start();

    File mergedIndexDirectory = indexMergerRequest.getMergedIndexDirectory();

    String tempRepoId = mergedIndexDirectory.getName();

    try {
        File indexLocation = new File(mergedIndexDirectory, indexMergerRequest.getMergedIndexPath());
        IndexingContext indexingContext = indexer.addIndexingContext(tempRepoId, tempRepoId,
                mergedIndexDirectory, indexLocation, null, null, mavenIndexerUtils.getAllIndexCreators());

        for (String repoId : indexMergerRequest.getRepositoriesIds()) {
            IndexingContext idxToMerge = indexer.getIndexingContexts().get(repoId);
            if (idxToMerge != null) {
                indexingContext.merge(idxToMerge.getIndexDirectory());
            }
        }

        indexingContext.optimize();

        if (indexMergerRequest.isPackIndex()) {
            IndexPackingRequest request = new IndexPackingRequest(indexingContext, indexLocation);
            indexPacker.packIndex(request);
        }

        if (indexMergerRequest.isTemporary()) {
            temporaryGroupIndexes.add(new TemporaryGroupIndex(mergedIndexDirectory, tempRepoId, groupId,
                    indexMergerRequest.getMergedIndexTtl()));
        }
        stopWatch.stop();
        log.info("merged index for repos {} in {} s", indexMergerRequest.getRepositoriesIds(),
                stopWatch.getTime());
        return indexingContext;
    } catch (IOException e) {
        throw new IndexMergerException(e.getMessage(), e);
    } catch (UnsupportedExistingLuceneIndexException e) {
        throw new IndexMergerException(e.getMessage(), e);
    } finally {
        runningGroups.remove(groupId);
    }
}
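
Both merger implementations call reset() before start(). On a freshly constructed StopWatch the reset() is redundant, but it is the call that matters when a single instance is reused for several measurements, since start() cannot be called a second time without an intervening reset(). A small sketch of that reuse pattern:

import org.apache.commons.lang.time.StopWatch;

public class ReuseWithReset {
    public static void main(String[] args) throws InterruptedException {
        StopWatch stopWatch = new StopWatch();

        for (int i = 0; i < 3; i++) {
            stopWatch.reset();                   // return to the unstarted state before each run
            stopWatch.start();
            Thread.sleep(100 * (i + 1));         // stand-in for the work being timed
            stopWatch.stop();
            System.out.println("run " + i + ": " + stopWatch.getTime() + " ms");
        }
    }
}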

From source file:org.apache.archiva.metadata.repository.jcr.JcrRepositorySessionFactory.java

@PostConstruct
public void initialize() throws Exception {

    // skip initialization if the session factory is not configured for jcr
    if (!StringUtils.equals(repositorySessionFactoryBean.getId(), "jcr")) {
        return;
    }

    StopWatch stopWatch = new StopWatch();
    stopWatch.start();

    metadataFacetFactories = applicationContext.getBeansOfType(MetadataFacetFactory.class);
    // with Spring the bean "id" is now "metadataFacetFactory#hint", whereas with Plexus it was
    // only the hint, so strip the "metadataFacetFactory#" prefix
    Map<String, MetadataFacetFactory> cleanedMetadataFacetFactories = new HashMap<>(
            metadataFacetFactories.size());

    for (Map.Entry<String, MetadataFacetFactory> entry : metadataFacetFactories.entrySet()) {
        cleanedMetadataFacetFactories.put(StringUtils.substringAfterLast(entry.getKey(), "#"),
                entry.getValue());
    }

    metadataFacetFactories = cleanedMetadataFacetFactories;

    JcrMetadataRepository metadataRepository = null;
    try {
        metadataRepository = new JcrMetadataRepository(metadataFacetFactories, repository);
        JcrMetadataRepository.initialize(metadataRepository.getJcrSession());
    } catch (RepositoryException e) {
        throw new RuntimeException(e.getMessage(), e);
    } finally {
        if (metadataRepository != null) {
            metadataRepository.close();
        }
    }

    stopWatch.stop();
    logger.info("time to initialize JcrRepositorySessionFactory: {}", stopWatch.getTime());
}

From source file:org.apache.archiva.metadata.repository.jcr.RepositoryFactory.java

public Repository createRepository() throws IOException, InvalidFileStoreVersionException {
    createExecutor();

    if (SEGMENT_FILE_TYPE == storeType) {
        fileStore = FileStoreBuilder.fileStoreBuilder(repositoryPath.toFile()).build();
        nodeStore = SegmentNodeStoreBuilders.builder(fileStore) //
                .withStatisticsProvider(StatisticsProvider.NOOP) //
                .build();
    } else if (IN_MEMORY_TYPE == storeType) {
        nodeStore = null;
    } else {
        throw new IllegalArgumentException("Store type " + storeType + " not recognized");
    }

    Oak oak = nodeStore == null ? new Oak() : new Oak(nodeStore);
    oak.with(new RepositoryInitializer() {
        @Override
        public void initialize(@Nonnull NodeBuilder root) {
            log.info("Creating index ");

            NodeBuilder lucene = IndexUtils.getOrCreateOakIndex(root).child("lucene");
            lucene.setProperty(JcrConstants.JCR_PRIMARYTYPE, "oak:QueryIndexDefinition", Type.NAME);

            lucene.setProperty("compatVersion", 2);
            lucene.setProperty("type", "lucene");
            // lucene.setProperty("async", "async");
            lucene.setProperty(INCLUDE_PROPERTY_TYPES, ImmutableSet.of("String"), Type.STRINGS);
            // lucene.setProperty("refresh",true);
            lucene.setProperty("async", ImmutableSet.of("async", "sync"), Type.STRINGS);
            NodeBuilder rules = lucene.child("indexRules").setProperty(JcrConstants.JCR_PRIMARYTYPE,
                    JcrConstants.NT_UNSTRUCTURED, Type.NAME);
            rules.setProperty(":childOrder", ImmutableSet.of("archiva:projectVersion", //
                    "archiva:artifact", //
                    "archiva:facet", //
                    "archiva:namespace", //
                    "archiva:project"), //
                    Type.STRINGS);
            NodeBuilder allProps = rules.child("archiva:projectVersion") //
                    .child("properties") //
                    .setProperty(JcrConstants.JCR_PRIMARYTYPE, "nt:unstructured", Type.NAME) //
                    .setProperty(":childOrder", ImmutableSet.of("allProps"), Type.STRINGS) //
                    .setProperty("indexNodeName", true) //
                    .child("allProps") //
                    .setProperty(JcrConstants.JCR_PRIMARYTYPE, JcrConstants.NT_UNSTRUCTURED, Type.NAME);
            allProps.setProperty("name", ".*");
            allProps.setProperty("isRegexp", true);
            allProps.setProperty("nodeScopeIndex", true);
            allProps.setProperty("index", true);
            allProps.setProperty("analyzed", true);
            // allProps.setProperty("propertyIndex",true);
            allProps = rules.child("archiva:artifact") //
                    .child("properties") //
                    .setProperty(JcrConstants.JCR_PRIMARYTYPE, "nt:unstructured", Type.NAME) //
                    .setProperty(":childOrder", ImmutableSet.of("allProps"), Type.STRINGS) //
                    .setProperty("indexNodeName", true).child("allProps") //
                    .setProperty(JcrConstants.JCR_PRIMARYTYPE, JcrConstants.NT_UNSTRUCTURED, Type.NAME);
            allProps.setProperty("name", ".*");
            allProps.setProperty("isRegexp", true);
            allProps.setProperty("nodeScopeIndex", true);
            allProps.setProperty("index", true);
            allProps.setProperty("analyzed", true);
            allProps = rules.child("archiva:facet") //
                    .child("properties") //
                    .setProperty(JcrConstants.JCR_PRIMARYTYPE, "nt:unstructured", Type.NAME) //
                    .setProperty(":childOrder", ImmutableSet.of("allProps"), Type.STRINGS) //
                    .setProperty("indexNodeName", true) //
                    .child("allProps") //
                    .setProperty(JcrConstants.JCR_PRIMARYTYPE, JcrConstants.NT_UNSTRUCTURED, Type.NAME);
            allProps.setProperty("name", ".*");
            allProps.setProperty("isRegexp", true);
            allProps.setProperty("nodeScopeIndex", true);
            allProps.setProperty("index", true);
            allProps.setProperty("analyzed", true);
            allProps = rules.child("archiva:namespace") //
                    .child("properties") //
                    .setProperty(JcrConstants.JCR_PRIMARYTYPE, "nt:unstructured", Type.NAME) //
                    .setProperty(":childOrder", ImmutableSet.of("allProps"), Type.STRINGS) //
                    .setProperty("indexNodeName", true) //
                    .child("allProps") //
                    .setProperty(JcrConstants.JCR_PRIMARYTYPE, JcrConstants.NT_UNSTRUCTURED, Type.NAME);
            allProps.setProperty("name", ".*");
            allProps.setProperty("isRegexp", true);
            allProps.setProperty("nodeScopeIndex", true);
            allProps.setProperty("index", true);
            allProps.setProperty("analyzed", true);
            allProps = rules.child("archiva:project") //
                    .child("properties") //
                    .setProperty(JcrConstants.JCR_PRIMARYTYPE, "nt:unstructured", Type.NAME) //
                    .setProperty(":childOrder", ImmutableSet.of("allProps"), Type.STRINGS) //
                    .setProperty("indexNodeName", true) //
                    .child("allProps") //
                    .setProperty(JcrConstants.JCR_PRIMARYTYPE, JcrConstants.NT_UNSTRUCTURED, Type.NAME);
            allProps.setProperty("name", ".*");
            allProps.setProperty("isRegexp", true);
            allProps.setProperty("nodeScopeIndex", true);
            allProps.setProperty("index", true);
            allProps.setProperty("analyzed", true);

            log.info("Index: {} myIndex {}", lucene, lucene.getChildNode("myIndex"));
            log.info("myIndex {}", lucene.getChildNode("myIndex").getProperties());
            // IndexUtils.createIndexDefinition(  )

        }
    });

    StatisticsProvider statsProvider = StatisticsProvider.NOOP;
    int queueSize = Integer.getInteger("queueSize", 10000);
    Path indexDir = Files.createTempDirectory("archiva_index");
    log.info("Queue Index {}", indexDir.toString());
    IndexCopier indexCopier = new IndexCopier(executorService, indexDir.toFile(), true);
    NRTIndexFactory nrtIndexFactory = new NRTIndexFactory(indexCopier, statsProvider);
    MountInfoProvider mountInfoProvider = Mounts.defaultMountInfoProvider();
    IndexTracker tracker = new IndexTracker(new DefaultIndexReaderFactory(mountInfoProvider, indexCopier),
            nrtIndexFactory);
    DocumentQueue queue = new DocumentQueue(queueSize, tracker, executorService, statsProvider);
    LocalIndexObserver localIndexObserver = new LocalIndexObserver(queue, statsProvider);
    LuceneIndexProvider provider = new LuceneIndexProvider(tracker);

    //        ExternalObserverBuilder builder = new ExternalObserverBuilder(queue, tracker, statsProvider,
    //            executorService, queueSize);
    //        Observer observer = builder.build();
    //        builder.getBackgroundObserver();

    LuceneIndexEditorProvider editorProvider = //
            new LuceneIndexEditorProvider(null, tracker, //
                    new ExtractedTextCache(0, 0), //
                    null, mountInfoProvider);
    editorProvider.setIndexingQueue(queue);

    log.info("Oak: {} with nodeStore {}", oak, nodeStore);
    Jcr jcr = new Jcr(oak).with(editorProvider) //
            .with((Observer) provider) //
            .with(localIndexObserver)
            // .with(observer)
            .with((QueryIndexProvider) provider); //
    //.withAsyncIndexing( "async", 5 );
    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    Repository r = jcr.createRepository();
    stopWatch.stop();
    log.info("time to create jcr repository: {} ms", stopWatch.getTime());
    //        try
    //        {
    //            Thread.currentThread().sleep( 1000 );
    //        }
    //        catch ( InterruptedException e )
    //        {
    //            log.error( e.getMessage(), e );
    //        }
    return r;

}

From source file:org.apache.archiva.metadata.repository.stats.DefaultRepositoryStatisticsManager.java

@Override
public RepositoryStatistics getLastStatistics(MetadataRepository metadataRepository, String repositoryId)
        throws MetadataRepositoryException {
    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    // TODO: consider a more efficient implementation that directly gets the last one from the content repository
    List<String> scans = metadataRepository.getMetadataFacets(repositoryId, RepositoryStatistics.FACET_ID);
    if (scans == null) {
        return null;
    }
    Collections.sort(scans);
    if (!scans.isEmpty()) {
        String name = scans.get(scans.size() - 1);
        RepositoryStatistics repositoryStatistics = RepositoryStatistics.class
                .cast(metadataRepository.getMetadataFacet(repositoryId, RepositoryStatistics.FACET_ID, name));
        stopWatch.stop();
        log.debug("time to find last RepositoryStatistics: {} ms", stopWatch.getTime());
        return repositoryStatistics;
    } else {
        return null;
    }
}

From source file:org.apache.archiva.redback.rest.services.utils.EnvironmentChecker.java

@Inject
public EnvironmentChecker(ApplicationContext applicationContext) {
    Collection<EnvironmentCheck> checkers = applicationContext.getBeansOfType(EnvironmentCheck.class).values();

    StopWatch stopWatch = new StopWatch();
    stopWatch.reset();
    stopWatch.start();

    if (checkers != null) {
        List<String> violations = new ArrayList<String>();

        for (EnvironmentCheck check : checkers) {
            check.validateEnvironment(violations);
        }

        if (!violations.isEmpty()) {
            StringBuilder msg = new StringBuilder();
            msg.append("EnvironmentCheck Failure.\n");
            msg.append("======================================================================\n");
            msg.append(" ENVIRONMENT FAILURE !! \n");
            msg.append("\n");

            for (String v : violations) {
                msg.append(v).append("\n");
            }

            msg.append("\n");
            msg.append("======================================================================");
            log.error(msg.toString());
        }
    }

    stopWatch.stop();
    log.info("time to execute all EnvironmentCheck: {} ms", stopWatch.getTime());
}

From source file:org.apache.archiva.redback.role.DefaultRoleManager.java

@PostConstruct
public void initialize() {

    knownResources = new HashMap<String, ModelApplication>();
    this.unblessedModel = new RedbackRoleModel();
    StopWatch stopWatch = new StopWatch();
    stopWatch.start();

    try {
        URL baseResource = RoleManager.class.getResource("/META-INF/redback/redback-core.xml");

        if (baseResource == null) {
            throw new RuntimeException("unable to initialize role manager, missing redback-core.xml");
        }

        loadRoleModel(baseResource);

        Enumeration<URL> enumerator = RoleManager.class.getClassLoader()
                .getResources("META-INF/redback/redback.xml");

        while (enumerator.hasMoreElements()) {
            URL redbackResource = enumerator.nextElement();

            loadRoleModel(redbackResource);
        }
    } catch (RoleManagerException e) {
        throw new RuntimeException("unable to initialize RoleManager", e);
    } catch (IOException e) {
        throw new RuntimeException("unable to initialize RoleManager, problem with redback.xml loading", e);
    }

    stopWatch.stop();
    log.info("DefaultRoleManager initialize time {}", stopWatch.getTime());
}