Example usage for com.google.common.collect Iterators peekingIterator

Introduction

On this page you can find example usages of com.google.common.collect.Iterators.peekingIterator.

Prototype

public static <T> PeekingIterator<T> peekingIterator(Iterator<? extends T> iterator)

Document

Returns a PeekingIterator backed by the given iterator.

Usage

From source file:r.base.Calls.java

/**
 * Argument matching is done by a three-pass process:
 * <ol>
 * <li><strong>Exact matching on tags.</strong> For each named supplied argument the list of formal arguments
 *  is searched for an item whose name matches exactly. It is an error to have the same formal
 * argument match several actuals or vice versa.</li>
 *
 * <li><strong>Partial matching on tags.</strong> Each remaining named supplied argument is compared to the
 * remaining formal arguments using partial matching. If the name of the supplied argument
 * matches exactly with the first part of a formal argument then the two arguments are considered
 * to be matched. It is an error to have multiple partial matches.
 *  Notice that if f <- function(fumble, fooey) fbody, then f(f = 1, fo = 2) is illegal,
 * even though the 2nd actual argument only matches fooey. f(f = 1, fooey = 2) is legal
 * though since the second argument matches exactly and is removed from consideration for
 * partial matching. If the formal arguments contain ... then partial matching is only applied to
 * arguments that precede it.</li>
 *
 * <li><strong>Positional matching.</strong> Any unmatched formal arguments are bound to unnamed supplied arguments,
 * in order. If there is a ... argument, it will take up the remaining arguments, tagged or not.
 * If any arguments remain unmatched, an error is declared.</li>
 * </ol>
 *
 * @param actuals the actual arguments supplied to the call
 */
public static PairList matchArguments(PairList formals, PairList actuals) {

    PairList.Builder result = new PairList.Builder();

    List<PairList.Node> unmatchedActuals = Lists.newArrayList();
    for (PairList.Node argNode : actuals.nodes()) {
        unmatchedActuals.add(argNode);
    }

    List<PairList.Node> unmatchedFormals = Lists.newArrayList(formals.nodes());

    // do exact matching
    for (ListIterator<PairList.Node> formalIt = unmatchedFormals.listIterator(); formalIt.hasNext();) {
        PairList.Node formal = formalIt.next();
        if (formal.hasTag()) {
            Symbol name = (Symbol) formal.getTag();
            Collection<PairList.Node> matches = Collections2.filter(unmatchedActuals,
                    PairList.Predicates.matches(name));

            if (matches.size() == 1) {
                PairList.Node match = first(matches);
                result.add(name, match.getValue());
                formalIt.remove();
                unmatchedActuals.remove(match);

            } else if (matches.size() > 1) {
                throw new EvalException(
                        String.format("Multiple named values provided for argument '%s'", name.getPrintName()));
            }
        }
    }

    // do partial matching
    Collection<PairList.Node> remainingNamedFormals = filter(unmatchedFormals, PairList.Predicates.hasTag());
    for (Iterator<PairList.Node> actualIt = unmatchedActuals.iterator(); actualIt.hasNext();) {
        PairList.Node actual = actualIt.next();
        if (actual.hasTag()) {
            Collection<PairList.Node> matches = Collections2.filter(remainingNamedFormals,
                    PairList.Predicates.startsWith(actual.getTag()));

            if (matches.size() == 1) {
                PairList.Node match = first(matches);
                result.add(match.getTag(), actual.getValue());
                actualIt.remove();
                unmatchedFormals.remove(match);

            } else if (matches.size() > 1) {
                throw new EvalException(
                        String.format("Provided argument '%s' matches multiple named formal arguments: %s",
                                actual.getTag().getPrintName(), argumentTagList(matches)));
            }
        }
    }

    // match any unnamed args positionally

    Iterator<PairList.Node> formalIt = unmatchedFormals.iterator();
    PeekingIterator<PairList.Node> actualIt = Iterators.peekingIterator(unmatchedActuals.iterator());
    while (formalIt.hasNext()) {
        PairList.Node formal = formalIt.next();
        if (Symbols.ELLIPSES.equals(formal.getTag())) {
            PromisePairList.Builder promises = new PromisePairList.Builder();
            while (actualIt.hasNext()) {
                PairList.Node actual = actualIt.next();
                promises.add(actual.getRawTag(), actual.getValue());
            }
            result.add(formal.getTag(), promises.build());

        } else if (hasNextUnTagged(actualIt)) {
            result.add(formal.getTag(), nextUnTagged(actualIt).getValue());

        } else {
            result.add(formal.getTag(), Symbol.MISSING_ARG);
        }
    }
    if (actualIt.hasNext()) {
        throw new EvalException(String.format("Unmatched positional arguments"));
    }

    return result.build();
}

From source file:com.ikanow.infinit.e.api.knowledge.federated.SimpleFederatedQueryEngine.java

@Override
public void postQueryActivities(ObjectId queryId, List<BasicDBObject> docs, ResponsePojo response) {
    boolean grabbedScores = false;
    double aggregateSignif = 100.0;
    double queryRelevance = 100.0;
    double score = 100.0;

    if (null != _asyncRequestsPerQuery) {
        int added = 0;
        BasicDBList bsonArray = new BasicDBList();
        PeekingIterator<FederatedRequest> it = Iterators.peekingIterator(_asyncRequestsPerQuery.iterator());
        while (it.hasNext()) {
            // loop state:
            BasicDBObject[] docOrDocs = new BasicDBObject[1];
            docOrDocs[0] = null;

            FederatedRequest request = it.next();
            boolean isComplexSource = isComplexSource(request.endpointInfo.parentSource);
            if (null == request.cachedDoc) { // no cached doc, simple source processing (OR ANY COMPLEX CASE BY CONSTRUCTION)
                try {
                    if ((null == request.cachedResult) || isComplexSource) { // no cached api response, or complex         
                        if (null != request.importThread) {
                            // 1) wait for the thread to finish
                            if (null == request.endpointInfo.queryTimeout_secs) {
                                request.endpointInfo.queryTimeout_secs = 300;
                            }
                            for (int timer = 0; timer < request.endpointInfo.queryTimeout_secs; timer++) {
                                try {
                                    request.importThread.join(1000L);
                                    if (!request.importThread.isAlive()) {
                                        break;
                                    }
                                } //TESTED (by hand)
                                catch (Exception e) {
                                    //(carry on)
                                }
                            }
                            if (request.importThread.isAlive()) {
                                request.errorMessage = new RuntimeException("Script timed out");
                            } //TESTED (by hand)

                            // 2) Get the results
                            if (null != request.errorMessage) {
                                if (_testMode) {
                                    throw new RuntimeException(request.errorMessage);
                                }
                            } else if (isComplexSource) {
                                //DEBUG 
                                if (_DEBUG)
                                    _logger.debug("DEB: postQA0: " + request.complexSourceProcResults.size());

                                handleComplexDocCaching(request, _cacheMode, _scoreStats);

                                // Get a list of docs
                                docOrDocs = ((BasicDBList) DocumentPojo
                                        .listToDb(request.complexSourceProcResults, DocumentPojo.listType()))
                                                .toArray(new BasicDBObject[0]);

                                // (_API_ caching is exactly the same between cache and non-cache cases)
                                // (note that if null != complexSourceProcResults then follows that null != scriptResult)
                                String url = buildScriptUrl(request.mergeKey, request.queryIndex);

                                if (!(request.importThread instanceof FederatedSimpleHarvest) && _cacheMode) { // (don't cache python federated queries in test mode)
                                    // (simple harvest caching is done separately)
                                    this.cacheApiResponse(url, request.scriptResult, request.endpointInfo);
                                }
                            } //TESTED (by hand - single and multiple doc mode)               
                            else if (null == request.scriptResult) {
                                if (_testMode) {
                                    throw new RuntimeException("Script mode: no cached result found from: "
                                            + request.requestParameter);
                                }
                            } else {
                                // (_API_ caching is exactly the same between cache and non-cache cases)
                                String url = buildScriptUrl(request.mergeKey, request.queryIndex);
                                if (_cacheMode) { // (don't cache python federated queries in test mode)
                                    this.cacheApiResponse(url, request.scriptResult, request.endpointInfo);
                                }
                                bsonArray.add(request.scriptResult);
                            }
                        } // end script mode
                        else { // HTTP mode (also: must be simple source builder)
                            Response endpointResponse = request.responseFuture.get();
                            request.asyncClient.close();
                            request.asyncClient = null;

                            String jsonStr = endpointResponse.getResponseBody();
                            String url = endpointResponse.getUri().toURL().toString();

                            Object bsonUnknownType = com.mongodb.util.JSON.parse(jsonStr);
                            BasicDBObject bson = null;
                            if (bsonUnknownType instanceof BasicDBObject) {
                                bson = (BasicDBObject) bsonUnknownType;
                            } else if (bsonUnknownType instanceof BasicDBList) {
                                bson = new BasicDBObject(SimpleFederatedCache.array_, bsonUnknownType);
                            } else if (bsonUnknownType instanceof String) {
                                bson = new BasicDBObject(SimpleFederatedCache.value_, bsonUnknownType);
                            }

                            //DEBUG
                            if (_DEBUG)
                                _logger.debug("DEB: postQA1: " + url + ": " + jsonStr);

                            if (null != bson) {
                                MongoDbUtil.enforceTypeNamingPolicy(bson, 0);
                                this.cacheApiResponse(url, bson, request.endpointInfo);
                                bsonArray.add(bson);
                            }
                        } //(end script vs request method)
                    } //TESTED (3.1, 4.2)
                    else { // (just used cached value)
                        //DEBUG 
                        if (_DEBUG)
                            _logger.debug("DEB: postQA2: " + request.cachedResult.toString());

                        bsonArray.add(
                                (BasicDBObject) request.cachedResult.get(SimpleFederatedCache.cachedJson_));
                    } //TESTED (4.1, 4.3)
                } catch (Exception e) {
                    //DEBUG
                    if (null == request.subRequest) {
                        _logger.error("Error with script: " + e.getMessage());
                        if (_testMode) {
                            throw new RuntimeException("Error with script: " + e.getMessage(), e);
                        }
                    } else {
                        _logger.error("Error with " + request.subRequest.endPointUrl + ": " + e.getMessage());
                        if (_testMode) {
                            throw new RuntimeException(
                                    "Error with " + request.subRequest.endPointUrl + ": " + e.getMessage(), e);
                        }
                    }
                }

                if (null == docOrDocs[0]) {
                    // (this next bit of logic can only occur in simple source cases by construction, phew)
                    if (!it.hasNext() || (request.mergeKey != it.peek().mergeKey)) { // deliberate ptr arithmetic
                        String url = buildScriptUrl(request.mergeKey, request.queryIndex);

                        //DEBUG
                        if (_DEBUG)
                            _logger.debug("DEB: postQA3: " + url + ": " + bsonArray);

                        docOrDocs[0] = createDocFromJson(bsonArray, url, request, request.endpointInfo);
                    }
                }
            } // (end if no cached doc)
            else { // cached doc, bypass lots of processing because no merging and doc already built (simple source processing)
                docOrDocs[0] = request.cachedDoc;
            } //TESTED (by hand)

            if (null != docOrDocs[0])
                for (BasicDBObject doc : docOrDocs) {

                    // Cache the document unless already cached (or caching disabled)
                    if ((null == request.cachedDoc) && _cacheMode && !isComplexSource
                            && ((null == request.endpointInfo.cacheTime_days)
                                    || (request.endpointInfo.cacheTime_days >= 0))) {
                        simpleDocCache(request, doc);
                    } //TESTED (by hand, 3 cases: cached not expired, cached expired first time, cached expired multiple times)

                    if (!grabbedScores) {
                        if (!docs.isEmpty()) {
                            BasicDBObject topDoc = docs.get(0);
                            aggregateSignif = topDoc.getDouble(DocumentPojo.aggregateSignif_, aggregateSignif);
                            queryRelevance = topDoc.getDouble(DocumentPojo.queryRelevance_, queryRelevance);
                            score = topDoc.getDouble(DocumentPojo.score_, score);
                            grabbedScores = true;

                            // OK would also like to grab the original matching entity, if it exists
                            if (!isComplexSource) {
                                BasicDBList ents = (BasicDBList) topDoc.get(DocumentPojo.entities_);
                                if (null != ents) {
                                    for (Object entObj : ents) {
                                        BasicDBObject ent = (BasicDBObject) entObj;
                                        String entIndex = ent.getString(EntityPojo.index_, "");
                                        if (entIndex.equals(request.queryIndex)) {
                                            ents = (BasicDBList) doc.get(DocumentPojo.entities_);
                                            if (null != ents) {
                                                ents.add(ent);
                                            }
                                            break;
                                        }
                                    }
                                } //TESTED (by hand)
                            }
                        }
                    }
                    doc.put(DocumentPojo.aggregateSignif_, aggregateSignif);
                    doc.put(DocumentPojo.queryRelevance_, queryRelevance);
                    doc.put(DocumentPojo.score_, score);

                    // Swap id and updateId, everything's been cached now:
                    // Handle update ids vs normal ids:
                    ObjectId updateId = (ObjectId) doc.get(DocumentPojo.updateId_);
                    if (null != updateId) { // swap the 2...
                        doc.put(DocumentPojo.updateId_, doc.get(DocumentPojo._id_));
                        doc.put(DocumentPojo._id_, updateId);
                    } //TESTED (by hand)            

                    // If we're returning to a query then we'll adjust the doc format (some of the atomic fields become arrays)
                    if (!_testMode) {
                        convertDocToQueryFormat(doc, request.communityIdStrs);
                    } //TESTED (by hand)

                    docs.add(0, doc);
                    added++;
                    //(doc auto reset at top of loop)

                    //(end if built a doc from the last request/set of requests)
                } //TESTED (3.1)      

        } //(end loop over federated requests)

        if (null != response.getStats()) {
            response.getStats().found += added;
        } //TESTED (by hand)         
    }
}

From source file:org.apache.cassandra.db.LegacyLayout.java

private static UnfilteredRowIterator toUnfilteredRowIterator(CFMetaData metadata, DecoratedKey key,
        LegacyDeletionInfo delInfo, Iterator<LegacyCell> cells, boolean reversed, SerializationHelper helper) {
    // A reducer that basically does nothing, we know the 2 merged iterators can't have conflicting atoms (since we merge cells with range tombstones).
    MergeIterator.Reducer<LegacyAtom, LegacyAtom> reducer = new MergeIterator.Reducer<LegacyAtom, LegacyAtom>() {
        private LegacyAtom atom;

        public void reduce(int idx, LegacyAtom current) {
            // We're merging cell with range tombstones, so we should always only have a single atom to reduce.
            assert atom == null;
            atom = current;
        }

        protected LegacyAtom getReduced() {
            return atom;
        }

        protected void onKeyChange() {
            atom = null;
        }
    };
    List<Iterator<LegacyAtom>> iterators = Arrays.asList(asLegacyAtomIterator(cells),
            asLegacyAtomIterator(delInfo.inRowRangeTombstones()));
    PeekingIterator<LegacyAtom> atoms = Iterators
            .peekingIterator(MergeIterator.get(iterators, legacyAtomComparator(metadata), reducer));

    // Check if we have some static
    Row staticRow = atoms.hasNext() && atoms.peek().isStatic()
            ? getNextRow(CellGrouper.staticGrouper(metadata, helper), atoms)
            : Rows.EMPTY_STATIC_ROW;

    Iterator<Row> rows = convertToRows(new CellGrouper(metadata, helper), atoms);
    Iterator<RangeTombstone> ranges = delInfo.deletionInfo.rangeIterator(reversed);
    return new RowAndDeletionMergeIterator(metadata, key, delInfo.deletionInfo.getPartitionDeletion(),
            ColumnFilter.all(metadata), staticRow, reversed, EncodingStats.NO_STATS, rows, ranges, true);
}

From source file:org.nuxeo.ecm.core.storage.ExpressionEvaluator.java

protected static boolean fulltext(Set<String> fulltext, List<String> query) {
    boolean andMatch = true;
    for (PeekingIterator<String> it = Iterators.peekingIterator(query.iterator()); it.hasNext();) {
        String word = it.next();
        boolean match;
        if (word.endsWith("*") || word.endsWith("%")) {
            // prefix match
            match = false;
            String prefix = word.substring(0, word.length() - 1); // strip the trailing wildcard
            for (String candidate : fulltext) {
                if (candidate.startsWith(prefix)) {
                    match = true;
                    break;
                }
            }
        } else {
            if (word.startsWith("-")) {
                word = word.substring(1);
                match = !fulltext.contains(word);
            } else {
                match = fulltext.contains(word);
            }
        }
        if (!match) {
            andMatch = false;
        }
        if (it.hasNext() && it.peek().equals(OR)) {
            // end of AND group
            // swallow OR
            it.next();
            // return if the previous AND group matched
            if (andMatch) {
                return true;
            }
            // else start next AND group
            andMatch = true;
        }
    }
    return andMatch;
}

From source file:org.diqube.flatten.Flattener.java

/**
 * Flattens a single {@link TableShard}.
 *
 * <p>
 * This works as follows:
 * 
 * <ol>
 * <li>Find all patterns the flatten-by-field pattern matches to. These are then the prefixes of the column names of
 * which a new row will be created.
 * <li>Also find the names of the length columns of these patterns.
 * <li>Produce a to-do list: What is the name of the output columns and what input columns is that output column
 * created from?
 * <ul>
 * <li>Is the new column a "multiplicating col"? These cols are cols that are outside of the path of the repeated
 * column that is flattened over. Nevertheless each input col contains a value for that row: A single row-value of the
 * input columns needs to be available for multiple cols on the output table.
 * <li>Remove previously found length-columns from the to-be-created col list (when flattening over a[*] we do not
 * want an a[length] column to appear in the output!).
 * </ul>
 * <li>Iterate over all rows of the input columns and identify for each row (1) how many output rows that row
 * will create (taking into account the length columns of the flatten-by field in that row) and (2) whether this
 * row is missing any child-fields (i.e. given an array a[*].c[*], when flattening over a[*], there are output cols
 * a.c[0], a.c[1], a.c[2], but it could be that a specific row does not contain a.c[2], because that row simply does
 * not have that many entries in the array).
 * <li>Build the new columns - each new column can be either "multiplicating" (see above), in which case the col
 * pages are repeated accordingly (and no-longer repeated rows are removed from the repeated colpages), or it can be
 * "flattened" - in which case the col is a sub-field of the flattened one and we only need to remove rows that do
 * not contain any value.
 * </ol>
 * 
 * We need to ensure that we do not mess up the row-ordering of the various output columns: Each output column
 * needs to have the same number of rows and the rowIds need to match correctly. Therefore, when creating a column
 * based on input columns that are not all realized, we need to insert "constant" column pages into the output
 * which will then resolve to default values. Example:
 * 
 * Source table:
 * 
 * <pre>
 * {a:[ { b:[1] },
 *      { b:[2, 3] }]},
 * {a:[ { b:[4] },
 *      { b:[5, 6] }]}
 * </pre>
 * 
 * In this example, there will be no column a[0].b[1] in the input (as each a[0] has at most a single entry in .b).
 * If we now mapped new columns to col pages of old columns in the following way (flattened over a[*]; displayed
 * is the list of col pages that are consecutively accessed for a new column):
 * 
 * <pre>
 * a.b[0] = [ all col pages of a[0].b[0] ]
 * a.b[1] = [ all col pages of a[0].b[1], all col pages of a[1].b[1] ]
 * a.b[length] = [ all col pages of a[0].b[length], all col pages of a[1].b[length] ]
 * </pre>
 * 
 * .. in that way we would mess up, as a.b[0] would have fewer rows than a.b[1] -> we need to add a "constant"
 * colPage to a.b[0] that resolves to a default value. Note that we will nevertheless probably never resolve those
 * default values (at least in this example), as the a.b[length] value will not allow us to iterate that far in the
 * corresponding rows.
 * 
 * <p>
 * Note that the resulting TableShard will have the same first Row ID as the input TableShard. If multiple TableShards
 * of the same table are flattened (this is usually the case), then after flattening them, the row IDs might overlap
 * (since every TableShard has the original firstRow ID, but each table shard contains more rows). The rowIds need to
 * be adjusted afterwards!
 */
private TableShard flattenTableShard(String resultTableName, TableShard inputTableShard, String flattenByField)
        throws PatternException, LengthColumnMissingException, IllegalStateException {
    String[] flattenFieldSplit = flattenByField
            .split(Pattern.quote(repeatedColNameGen.allEntriesIdentifyingSubstr() + "."));
    List<String> repeatedFieldsAlongPath = new ArrayList<>();
    String prev = "";
    for (String splitPart : flattenFieldSplit) {
        if (!"".equals(prev))
            prev += ".";

        prev += splitPart;
        if (!splitPart.endsWith(repeatedColNameGen.allEntriesIdentifyingSubstr()))
            prev += repeatedColNameGen.allEntriesIdentifyingSubstr();

        repeatedFieldsAlongPath.add(prev);
    }

    // calculate the most specific patterns first - colPatternUtil will return its lists in the same ordering!
    repeatedFieldsAlongPath = Lists.reverse(repeatedFieldsAlongPath);

    Set<String> allInputLengthColsOfFlattenedFields = new HashSet<>();

    ColumnPatternContainer patterns = colPatternUtil.findColNamesForColNamePattern(lengthColName -> {
        allInputLengthColsOfFlattenedFields.add(lengthColName);
        return new QueryableLongColumnShardFacade(inputTableShard.getLongColumns().get(lengthColName));
    }, repeatedFieldsAlongPath);

    // transpose result of colPatternUtil: Collect all the most specific patterns in a set, then the second-most
    // specific patterns etc.
    // Later we want to first check if a colname matches one of the most specific patterns as prefix and replace that,
    // before checking if it matches some less-specific patterns.
    List<Set<String>> prefixesToReplace = new ArrayList<>();
    for (int i = 0; i < repeatedFieldsAlongPath.size(); i++)
        prefixesToReplace.add(new HashSet<>());
    for (List<String> patternList : patterns.getMaximumColumnPatterns()) {
        for (int i = 0; i < patternList.size(); i++)
            prefixesToReplace.get(i).add(patternList.get(i));
    }

    // Prefix replacements based on index in prefixesToReplace: If a prefix of prefixesToReplace.get(0) is found, that
    // prefix needs to be replaced by replacements.get(0).
    List<String> replacements = repeatedFieldsAlongPath.stream().map(
            pattern -> pattern.replaceAll(Pattern.quote(repeatedColNameGen.allEntriesIdentifyingSubstr()), ""))
            .collect(Collectors.toList());

    // map from new column name to input column names that column is based upon. Note that input col names might not
    // exist in inputTableShard, see comments below when newColumn is filled.
    Map<String, SortedSet<String>> newColumns = new HashMap<>();
    // output cols whose row-values are based on using input cols values and each row value of those inputs is the value
    // of multiple output cols
    Set<String> multiplicatingOutputCols = new HashSet<>();

    Set<String> allInputColNames = inputTableShard.getColumns().keySet();

    for (String inputColName : allInputColNames) {
        if (allInputLengthColsOfFlattenedFields.contains(inputColName))
            // Remove certain length columns from the set of to-be-created columns. For example, when flattening over a[*],
            // we do not want to create a[length] column, as it simply does not make sense any more as each of the entries
            // in a[*] is now a separate row.
            continue;

        String newColName = null;
        String foundPrefix = null;
        int foundPatternIdx = -1;
        for (int patternIdx = 0; patternIdx < prefixesToReplace.size(); patternIdx++) {
            Set<String> prefixes = prefixesToReplace.get(patternIdx);
            for (String prefix : prefixes) {
                if (inputColName.startsWith(prefix)) {
                    newColName = inputColName.replaceFirst(Pattern.quote(prefix), replacements.get(patternIdx));
                    foundPrefix = prefix;
                    foundPatternIdx = patternIdx;
                    if (patternIdx > 0)
                        // not the first list of prefixes matched (= created from the pattern equalling the "flatten-by"), but a
                        // less-specific pattern matched. That means that the value of one input row needs to be
                        // projected to multiple rows on the output side.
                        // Example: matched: a[0], but flattened over a[*].b[*]
                        multiplicatingOutputCols.add(newColName);
                    break;
                }
            }
            if (newColName != null)
                break;
        }

        if (newColName == null) {
            // no replacement found, this column is on different path than the flattened one, do not flatten, do not
            // replace.
            newColName = inputColName;
            // At the same time, this column needs to be multiplied: One row of the input col needs to be available in
            // multiple rows in the output.
            multiplicatingOutputCols.add(newColName);
        }

        if (!newColumns.containsKey(newColName))
            newColumns.put(newColName, new TreeSet<>());

        // Add all "potentially available" input columns to the newColName. It could be that for a specific repetition, a
        // child-field is missing, e.g. a[0].c does not exist, but a[1].c does. Nevertheless, we need to reserve some
        // "space" for a[0].c in the new column a.c, because otherwise the rows of an existing a[0].d would get out of
        // sync with the rows of a[1].c: a.c contains the values of the rows of a[1].c first, while a.d contains those
        // of a[0].d first.
        if (foundPatternIdx == -1)
            newColumns.get(newColName).add(inputColName);
        else {
            // add all eg. a[*].c as input columns, no matter if they exist or not.
            for (String inputPref : prefixesToReplace.get(foundPatternIdx))
                newColumns.get(newColName)
                        .add(inputColName.replaceFirst(Pattern.quote(foundPrefix), inputPref));
        }
    }

    logger.trace("Will flatten following columns using following input cols (limit): {}",
            Iterables.limit(newColumns.entrySet(), 100));
    logger.trace("Following columns will be multiplicating (limit): {}",
            Iterables.limit(multiplicatingOutputCols, 100));

    // prepare information of single rows:

    Map<Long, Integer> multiplicationFactorByRowId = new HashMap<>();
    // map from input col prefix to rowIds that are not available for all cols starting with that prefix.
    NavigableMap<String, NavigableSet<Long>> rowIdsNotAvailableForInputCols = new TreeMap<>();

    // number of rows that are generated for one of the prefixes created based on the flatten-by value. Example: When
    // flattening over a[*], this will contain: a[0] -> generates X rows, a[1] -> generates Y rows.
    Map<String, Integer> numberOfRowsByFlattenedPrefix = new HashMap<>();

    for (long inputRowId = inputTableShard.getLowestRowId(); inputRowId < inputTableShard.getLowestRowId()
            + inputTableShard.getNumberOfRowsInShard(); inputRowId++) {

        // find the cols of the "flatten-by" field that actually exist for this row.
        Set<List<String>> colPatterns = patterns.getColumnPatterns(inputRowId);
        Set<String> mostSpecificColPatterns = // most-specific = the flatten-by field!
                colPatterns.stream().flatMap(l -> Stream.of(l.get(0))).collect(Collectors.toSet());

        // This row will produce this many rows in the output.
        int numberOfNewRows = mostSpecificColPatterns.size();
        multiplicationFactorByRowId.put(inputRowId, numberOfNewRows);
        mostSpecificColPatterns
                .forEach(colPattern -> numberOfRowsByFlattenedPrefix.merge(colPattern, 1, Integer::sum));

        // This row might not have valid values for all those repeated cols that are available in the Table for the
        // flatten-by field. Find those columns that are missing.
        for (String notAvailableColName : Sets.difference(prefixesToReplace.get(0), mostSpecificColPatterns)) {
            if (!rowIdsNotAvailableForInputCols.containsKey(notAvailableColName))
                rowIdsNotAvailableForInputCols.put(notAvailableColName, new TreeSet<>());
            rowIdsNotAvailableForInputCols.get(notAvailableColName).add(inputRowId);
        }
    }

    logger.trace("Multiplication factors are the following for all rows (limit): {}",
            Iterables.limit(multiplicationFactorByRowId.entrySet(), 100));

    int maxMultiplicationFactor = multiplicationFactorByRowId.values().stream().mapToInt(Integer::intValue)
            .max().getAsInt();

    // Build new col shards
    List<StandardColumnShard> flattenedColShards = new ArrayList<>();
    for (String newColName : newColumns.keySet()) {
        long nextFirstRowId = inputTableShard.getLowestRowId();

        // find colType by searching an input col that exists and taking the coltype of that one.
        ColumnType colType = newColumns.get(newColName).stream()
                .filter(inputColName -> inputTableShard.getColumns().containsKey(inputColName))
                .map(inputColName -> inputTableShard.getColumns().get(inputColName).getColumnType()).findAny()
                .get();

        // Collect all the col dictionaries of the input columns:
        // map from an artificial ID to the dictionary of an input column. The artificial ID is built the following way:
        // The first dict has artificial ID 0.
        // The second dict has artificial ID = number of entries in first dict
        // The third dict has artificial ID = number of entries in second dict
        // and so on
        // -> basically every entry in the dict has its own artificial ID. These must not be overlapping!
        // The artificial ID is defined in a way so it can be fed to #mergeDicts(.)
        Map<Long, Dictionary<?>> origColDicts = new HashMap<>();
        long nextColAndColDictId = 0L;
        for (String inputColName : newColumns.get(newColName)) {
            Dictionary<?> dict;
            if (inputTableShard.getColumns().containsKey(inputColName))
                dict = inputTableShard.getColumns().get(inputColName).getColumnShardDictionary();
            else {
                // assume we had an input col dict for this non-existing col.
                if (inputColName.endsWith(repeatedColNameGen.lengthIdentifyingSuffix()))
                    // length cols get "0" as default.
                    dict = new ConstantLongDictionary(0L);
                else
                    dict = createDictionaryWithOnlyDefaultValue(colType);
            }

            origColDicts.put(nextColAndColDictId, dict);
            nextColAndColDictId += dict.getMaxId() + 1;
        }

        // merge the input column dicts into the new column dict.
        Pair<Dictionary<?>, Map<Long, Map<Long, Long>>> mergeDictInfo = mergeDicts(newColName, colType,
                origColDicts);
        Dictionary<?> colDict = mergeDictInfo.getLeft();

        // new col pages.
        List<ColumnPage> flattenedColPages = new ArrayList<>();

        // we'll use the same counting mechanism that we used for origColDicts.
        nextColAndColDictId = 0L;

        long[] nextPageValues = new long[ColumnShardBuilder.PROPOSAL_ROWS];
        int nextPageValueNextIdx = 0;

        // build col pages
        for (String inputColName : newColumns.get(newColName)) {
            long curColId = nextColAndColDictId;

            Map<Long, Long> columnValueIdChangeMap = mergeDictInfo.getRight().get(curColId);

            if (!inputTableShard.getColumns().containsKey(inputColName)) {
                // This col does not exist, therefore we add an "empty" colPage, which resolves statically to the colType's
                // default value.

                // The size of the page is identified by the number of rows that flattened prefix would have.
                int noOfRows = -1;
                for (String prefix : numberOfRowsByFlattenedPrefix.keySet()) {
                    if (inputColName.startsWith(prefix)) {
                        noOfRows = numberOfRowsByFlattenedPrefix.get(prefix);
                        break;
                    }
                }
                if (noOfRows == -1)
                    throw new IllegalStateException("Could not find number of rows for empty values.");

                for (int i = 0; i < noOfRows; i++) {
                    if (nextPageValueNextIdx == nextPageValues.length) {
                        flattenedColPages.add(
                                buildColPageFromValueArray(nextPageValues, -1, nextFirstRowId, newColName));
                        nextPageValueNextIdx = 0;
                        nextFirstRowId += nextPageValues.length;
                    }
                    nextPageValues[nextPageValueNextIdx++] = columnValueIdChangeMap.get(0L); // constant dict -> always id 0L.
                }

                nextColAndColDictId++; // single entry dict!

                continue;
            }

            Dictionary<?> colShardDict = inputTableShard.getColumns().get(inputColName)
                    .getColumnShardDictionary();
            nextColAndColDictId += colShardDict.getMaxId() + 1;

            if (multiplicatingOutputCols.contains(newColName)) {
                // decompress whole column at once, so we can access it quickly later on.
                StandardColumnShard inputCol = inputTableShard.getColumns().get(inputColName);
                Map<Long, Long[]> colValueIds = new HashMap<>();
                for (ColumnPage inputPage : inputCol.getPages().values()) {
                    long[] pageValueIds = inputPage.getValues().decompressedArray();
                    Long[] colValueIdsByRow = inputPage.getColumnPageDict()
                            .decompressValues(LongStream.of(pageValueIds).boxed().toArray(l -> new Long[l]));
                    colValueIds.put(inputPage.getFirstRowId(), colValueIdsByRow);
                }

                for (int multiplication = 0; multiplication < maxMultiplicationFactor; multiplication++)
                    for (ColumnPage inputPage : inputTableShard.getColumns().get(inputColName).getPages()
                            .values()) {
                        final int curMultiplicationNo = multiplication;
                        for (int i = 0; i < inputPage.getValues().size(); i++) {
                            Integer thisIndexMultiplicationFactor = multiplicationFactorByRowId
                                    .get(inputPage.getFirstRowId() + i);
                            if (thisIndexMultiplicationFactor == null)
                                thisIndexMultiplicationFactor = 1;

                            if (thisIndexMultiplicationFactor > curMultiplicationNo) {
                                // we need to multiplicate this row!
                                if (nextPageValueNextIdx == nextPageValues.length) {
                                    flattenedColPages.add(buildColPageFromValueArray(nextPageValues, -1,
                                            nextFirstRowId, newColName));
                                    nextPageValueNextIdx = 0;
                                    nextFirstRowId += nextPageValues.length;
                                }
                                long origColValueId = colValueIds.get(inputPage.getFirstRowId())[i];
                                nextPageValues[nextPageValueNextIdx++] = (columnValueIdChangeMap != null)
                                        ? columnValueIdChangeMap.get(origColValueId)
                                        : origColValueId;
                            }
                        }
                    }
            } else {
                for (ColumnPage inputPage : inputTableShard.getColumns().get(inputColName).getPages()
                        .values()) {
                    // decompress whole column page at once, so we can access it quickly later on.
                    long[] pageValueIds = inputPage.getValues().decompressedArray();
                    Long[] colValueIdsByRow = inputPage.getColumnPageDict()
                            .decompressValues(LongStream.of(pageValueIds).boxed().toArray(l -> new Long[l]));

                    Set<Long> sortedNotAvailableIndices;
                    String interestingPrefix = rowIdsNotAvailableForInputCols.floorKey(inputColName);
                    if (interestingPrefix != null && inputColName.startsWith(interestingPrefix)) {
                        sortedNotAvailableIndices = rowIdsNotAvailableForInputCols.get(interestingPrefix)
                                .subSet(inputPage.getFirstRowId(),
                                        inputPage.getFirstRowId() + inputPage.getValues().size());
                    } else
                        sortedNotAvailableIndices = new HashSet<>();

                    // peek next unavailable index, works because indices are sorted.
                    PeekingIterator<Long> notAvailableIndicesIt = Iterators
                            .peekingIterator(sortedNotAvailableIndices.iterator());
                    for (int i = 0; i < inputPage.getValues().size(); i++) {
                        if (notAvailableIndicesIt.hasNext()
                                && notAvailableIndicesIt.peek() == inputPage.getFirstRowId() + i) {
                            notAvailableIndicesIt.next();
                            continue;
                        }

                        if (nextPageValueNextIdx == nextPageValues.length) {
                            flattenedColPages.add(
                                    buildColPageFromValueArray(nextPageValues, -1, nextFirstRowId, newColName));
                            nextPageValueNextIdx = 0;
                            nextFirstRowId += nextPageValues.length;
                        }
                        long origColValueId = colValueIdsByRow[i];
                        nextPageValues[nextPageValueNextIdx++] = (columnValueIdChangeMap != null)
                                ? columnValueIdChangeMap.get(origColValueId)
                                : origColValueId;
                    }
                }
            }
        }

        if (nextPageValueNextIdx > 0) {
            flattenedColPages.add(buildColPageFromValueArray(nextPageValues, nextPageValueNextIdx,
                    nextFirstRowId, newColName));
            nextFirstRowId += nextPageValueNextIdx;
            nextPageValueNextIdx = 0;
        }

        NavigableMap<Long, ColumnPage> navigableFlattenedColPages = new TreeMap<>();
        for (ColumnPage flattendColPage : flattenedColPages)
            navigableFlattenedColPages.put(flattendColPage.getFirstRowId(), flattendColPage);

        StandardColumnShard flattenedColShard = null;
        switch (colType) {
        case STRING:
            flattenedColShard = columnShardFactory.createStandardStringColumnShard(newColName,
                    navigableFlattenedColPages, (StringDictionary<?>) colDict);
            break;
        case LONG:
            flattenedColShard = columnShardFactory.createStandardLongColumnShard(newColName,
                    navigableFlattenedColPages, (LongDictionary<?>) colDict);
            break;
        case DOUBLE:
            flattenedColShard = columnShardFactory.createStandardDoubleColumnShard(newColName,
                    navigableFlattenedColPages, (DoubleDictionary<?>) colDict);
            break;
        }

        flattenedColShards.add(flattenedColShard);

        logger.trace("Created flattened column {}", newColName);
    }

    TableShard flattenedTableShard = tableFactory.createDefaultTableShard(resultTableName, flattenedColShards);

    logger.trace("Created flattened table shard " + resultTableName);

    return flattenedTableShard;
}

From source file:org.nuxeo.ecm.core.storage.ExpressionEvaluator.java

protected static boolean fulltext1(Set<String> fulltext, List<String> query) {
    boolean inOr = false; // if we're in an OR group
    boolean orMatch = false; // value of the OR group
    for (PeekingIterator<String> it = Iterators.peekingIterator(query.iterator()); it.hasNext();) {
        String word = it.next();
        if (it.hasNext() && it.peek().equals(OR)) {
            inOr = true;
            orMatch = false;
        }
        boolean match;
        if (word.endsWith("*") || word.endsWith("%")) {
            // prefix match
            match = false;
            String prefix = word.substring(0, word.length() - 1); // strip the trailing wildcard
            for (String candidate : fulltext) {
                if (candidate.startsWith(prefix)) {
                    match = true;
                    break;
                }
            }
        } else {
            if (word.startsWith("-")) {
                word = word.substring(1);
                match = !fulltext.contains(word);
            } else {
                match = fulltext.contains(word);
            }
        }
        if (inOr) {
            if (match) {
                orMatch = true;
            }
            if (it.hasNext() && it.peek().equals(OR)) {
                // swallow OR and keep going in OR group
                it.next();
                continue;
            }
            // finish OR group
            match = orMatch;
            inOr = false;
        }
        if (!match) {
            return false;
        }
    }
    if (inOr) {
        // trailing OR, ignore and finish previous group
        if (!orMatch) {
            return false;
        }
    }
    return true;
}

From source file:org.diqube.flatten.Flattener.java

/**
 * Merges multiple col dicts into one.
 * 
 * <p>
 * The input dictionaries are expected to be of type T. T must be {@link Comparable} (which though is no problem for
 * our values of String, Long, Double).
 * 
 * @param inputDicts
 *          The col dicts of the input cols, indexed by an artificial "dictionary id" which can be chosen arbitrarily.
 * @return Pair of merged dictionary and for each input dict ID a mapping map. That map maps from old col dict ID of a
 *         value to the new col dict ID in the merged dict. Map can be empty.
 */
@SuppressWarnings("unchecked")
private <T extends Comparable<T>> Pair<Dictionary<?>, Map<Long, Map<Long, Long>>> mergeDicts(String colName,
        ColumnType colType, Map<Long, Dictionary<?>> inputDicts) throws IllegalStateException {
    Map<Long, Map<Long, Long>> resMappingMap = new HashMap<>();

    if (inputDicts.size() == 1) {
        return new Pair<>(inputDicts.values().iterator().next(), resMappingMap);
    }

    Map<Long, PeekingIterator<Pair<Long, T>>> iterators = new HashMap<>();
    for (Entry<Long, Dictionary<?>> e : inputDicts.entrySet()) {
        if (e.getValue().getMaxId() == null)
            continue;
        iterators.put(e.getKey(), Iterators.peekingIterator(((Dictionary<T>) e.getValue()).iterator()));
    }

    // order the next elements of all dicts by their value.
    // Pair of (Pair of ID in dict and value) and dictId
    PriorityQueue<Pair<Pair<Long, T>, Long>> nextElements = new PriorityQueue<>(
            (p1, p2) -> p1.getLeft().getRight().compareTo(p2.getLeft().getRight()));

    for (Entry<Long, PeekingIterator<Pair<Long, T>>> e : iterators.entrySet())
        nextElements.add(new Pair<>(e.getValue().peek(), e.getKey()));

    // map from value to new ID which will be fed into the dictionary builder.
    NavigableMap<T, Long> entityMap = new TreeMap<>();
    long nextEntityId = 0L;

    Pair<T, Long> previous = null;

    // traverse all dictionaries and build mapping list
    while (!nextElements.isEmpty()) {
        Pair<Pair<Long, T>, Long> p = nextElements.poll();
        Long dictId = p.getRight();
        Pair<Long, T> valuePair = p.getLeft();

        // move iterator forward
        iterators.get(dictId).next();
        if (iterators.get(dictId).hasNext())
            nextElements.add(new Pair<>(iterators.get(dictId).peek(), dictId));

        long idInInputDict = valuePair.getLeft();
        if (previous == null || valuePair.getRight().compareTo(previous.getLeft()) > 0) {
            long resultNewId = nextEntityId++;

            entityMap.put(valuePair.getRight(), resultNewId);

            previous = new Pair<>(valuePair.getRight(), resultNewId);
        }

        if (!resMappingMap.containsKey(dictId))
            resMappingMap.put(dictId, new HashMap<>());
        resMappingMap.get(dictId).put(idInInputDict, previous.getRight());
    }

    Dictionary<?> resDict = null;
    Map<Long, Long> builderAdjustMap = null;
    switch (colType) {
    case LONG:
        CompressedLongDictionaryBuilder longBuilder = new CompressedLongDictionaryBuilder();
        longBuilder.withDictionaryName(colName).fromEntityMap((NavigableMap<Long, Long>) entityMap);
        Pair<LongDictionary<?>, Map<Long, Long>> longPair = longBuilder.build();
        builderAdjustMap = longPair.getRight();
        resDict = longPair.getLeft();
        break;
    case STRING:
        CompressedStringDictionaryBuilder stringBuilder = new CompressedStringDictionaryBuilder();
        stringBuilder.fromEntityMap((NavigableMap<String, Long>) entityMap);
        Pair<StringDictionary<?>, Map<Long, Long>> stringPair = stringBuilder.build();
        builderAdjustMap = stringPair.getRight();
        resDict = stringPair.getLeft();
        break;
    case DOUBLE:
        CompressedDoubleDictionaryBuilder doubleBuilder = new CompressedDoubleDictionaryBuilder();
        doubleBuilder.fromEntityMap((NavigableMap<Double, Long>) entityMap);
        Pair<DoubleDictionary<?>, Map<Long, Long>> doublePair = doubleBuilder.build();
        builderAdjustMap = doublePair.getRight();
        resDict = doublePair.getLeft();
        break;
    }

    if (!builderAdjustMap.isEmpty())
        throw new IllegalStateException(
                "IDs of new col dict for col " + colName + " were adjusted although that was not expected!");

    return new Pair<Dictionary<?>, Map<Long, Map<Long, Long>>>(resDict, resMappingMap);
}

From source file:com.google.googlejavaformat.java.JavaInputAstVisitor.java

@Override
public Void visitForLoop(ForLoopTree node, Void unused) {
    sync(node);
    token("for");
    builder.space();
    token("(");
    builder.open(plusFour);
    builder.open(node.getInitializer().size() > 1
            && node.getInitializer().get(0).getKind() == Tree.Kind.EXPRESSION_STATEMENT ? plusFour : ZERO);
    if (!node.getInitializer().isEmpty()) {
        if (node.getInitializer().get(0).getKind() == VARIABLE) {
            PeekingIterator<StatementTree> it = Iterators.peekingIterator(node.getInitializer().iterator());
            visitVariables(variableFragments(it, it.next()), DeclarationKind.NONE, Direction.HORIZONTAL);
        } else {
            boolean first = true;
            builder.open(ZERO);
            for (StatementTree t : node.getInitializer()) {
                if (!first) {
                    token(",");
                    builder.breakOp(" ");
                }
                scan(((ExpressionStatementTree) t).getExpression(), null);
                first = false;
            }
            token(";");
            builder.close();
        }
    } else {
        token(";");
    }
    builder.close();
    builder.breakOp(" ");
    if (node.getCondition() != null) {
        scan(node.getCondition(), null);
    }
    token(";");
    if (!node.getUpdate().isEmpty()) {
        builder.breakOp(" ");
        builder.open(node.getUpdate().size() <= 1 ? ZERO : plusFour);
        boolean firstUpdater = true;
        for (ExpressionStatementTree updater : node.getUpdate()) {
            if (!firstUpdater) {
                token(",");
                builder.breakToFill(" ");
            }
            scan(updater.getExpression(), null);
            firstUpdater = false;
        }
        builder.guessToken(";");
        builder.close();
    } else {
        builder.space();
    }
    builder.close();
    token(")");
    visitStatement(node.getStatement(), CollapseEmptyOrNot.YES, AllowLeadingBlankLine.YES,
            AllowTrailingBlankLine.NO);
    return null;
}

From source file:org.bitcoinj_extra.wallet.DeterministicKeyChain.java

/**
 * Returns all the key chains found in the given list of keys. Typically there will only be one, but in the case of
 * key rotation it can happen that there are multiple chains found.
 */
public static List<DeterministicKeyChain> fromProtobuf(List<Protos.Key> keys, @Nullable KeyCrypter crypter,
        KeyChainFactory factory) throws UnreadableWalletException {
    List<DeterministicKeyChain> chains = newLinkedList();
    DeterministicSeed seed = null;
    DeterministicKeyChain chain = null;

    int lookaheadSize = -1;
    int sigsRequiredToSpend = 1;

    PeekingIterator<Protos.Key> iter = Iterators.peekingIterator(keys.iterator());
    while (iter.hasNext()) {
        Protos.Key key = iter.next();
        final Protos.Key.Type t = key.getType();
        if (t == Protos.Key.Type.DETERMINISTIC_MNEMONIC) {
            if (chain != null) {
                checkState(lookaheadSize >= 0);
                chain.setLookaheadSize(lookaheadSize);
                chain.setSigsRequiredToSpend(sigsRequiredToSpend);
                chain.maybeLookAhead();
                chains.add(chain);
                chain = null;
            }
            long timestamp = key.getCreationTimestamp() / 1000;
            String passphrase = DEFAULT_PASSPHRASE_FOR_MNEMONIC; // FIXME allow non-empty passphrase
            if (key.hasSecretBytes()) {
                if (key.hasEncryptedDeterministicSeed())
                    throw new UnreadableWalletException("Malformed key proto: " + key.toString());
                byte[] seedBytes = null;
                if (key.hasDeterministicSeed()) {
                    seedBytes = key.getDeterministicSeed().toByteArray();
                }
                seed = new DeterministicSeed(key.getSecretBytes().toStringUtf8(), seedBytes, passphrase,
                        timestamp);
            } else if (key.hasEncryptedData()) {
                if (key.hasDeterministicSeed())
                    throw new UnreadableWalletException("Malformed key proto: " + key.toString());
                EncryptedData data = new EncryptedData(
                        key.getEncryptedData().getInitialisationVector().toByteArray(),
                        key.getEncryptedData().getEncryptedPrivateKey().toByteArray());
                EncryptedData encryptedSeedBytes = null;
                if (key.hasEncryptedDeterministicSeed()) {
                    Protos.EncryptedData encryptedSeed = key.getEncryptedDeterministicSeed();
                    encryptedSeedBytes = new EncryptedData(
                            encryptedSeed.getInitialisationVector().toByteArray(),
                            encryptedSeed.getEncryptedPrivateKey().toByteArray());
                }
                seed = new DeterministicSeed(data, encryptedSeedBytes, timestamp);
            } else {
                throw new UnreadableWalletException("Malformed key proto: " + key.toString());
            }
            if (log.isDebugEnabled())
                log.debug("Deserializing: DETERMINISTIC_MNEMONIC: {}", seed);
        } else if (t == Protos.Key.Type.DETERMINISTIC_KEY) {
            if (!key.hasDeterministicKey())
                throw new UnreadableWalletException("Deterministic key missing extra data: " + key.toString());
            byte[] chainCode = key.getDeterministicKey().getChainCode().toByteArray();
            // Deserialize the path through the tree.
            LinkedList<ChildNumber> path = newLinkedList();
            for (int i : key.getDeterministicKey().getPathList())
                path.add(new ChildNumber(i));
            // Deserialize the public key and path.
            LazyECPoint pubkey = new LazyECPoint(ECKey.CURVE.getCurve(), key.getPublicKey().toByteArray());
            final ImmutableList<ChildNumber> immutablePath = ImmutableList.copyOf(path);
            // Possibly create the chain, if we didn't already do so.
            boolean isWatchingAccountKey = false;
            boolean isFollowingKey = false;
            // Save the previous chain, if any, when the key is marked as following. The current
            // key and the ones after it are to be placed in a new following key chain.
            if (key.getDeterministicKey().getIsFollowing()) {
                if (chain != null) {
                    checkState(lookaheadSize >= 0);
                    chain.setLookaheadSize(lookaheadSize);
                    chain.setSigsRequiredToSpend(sigsRequiredToSpend);
                    chain.maybeLookAhead();
                    chains.add(chain);
                    chain = null;
                    seed = null;
                }
                isFollowingKey = true;
            }
            if (chain == null) {
                // If this is not a following chain and the previous one was, this chain must be married
                boolean isMarried = !isFollowingKey && !chains.isEmpty()
                        && chains.get(chains.size() - 1).isFollowing();
                if (seed == null) {
                    DeterministicKey accountKey = new DeterministicKey(immutablePath, chainCode, pubkey, null,
                            null);
                    accountKey.setCreationTimeSeconds(key.getCreationTimestamp() / 1000);
                    chain = factory.makeWatchingKeyChain(key, iter.peek(), accountKey, isFollowingKey,
                            isMarried);
                    isWatchingAccountKey = true;
                } else {
                    chain = factory.makeKeyChain(key, iter.peek(), seed, crypter, isMarried);
                    chain.lookaheadSize = LAZY_CALCULATE_LOOKAHEAD;
                    // If the seed is encrypted, then the chain is incomplete at this point. However, we will load
                    // it up below as we parse in the keys. We just need to check at the end that we've loaded
                    // everything afterwards.
                }
            }
            // Find the parent key assuming this is not the root key, and not an account key for a watching chain.
            DeterministicKey parent = null;
            if (!path.isEmpty() && !isWatchingAccountKey) {
                ChildNumber index = path.removeLast();
                parent = chain.hierarchy.get(path, false, false);
                path.add(index);
            }
            DeterministicKey detkey;
            if (key.hasSecretBytes()) {
                // Not encrypted: private key is available.
                final BigInteger priv = new BigInteger(1, key.getSecretBytes().toByteArray());
                detkey = new DeterministicKey(immutablePath, chainCode, pubkey, priv, parent);
            } else {
                if (key.hasEncryptedData()) {
                    Protos.EncryptedData proto = key.getEncryptedData();
                    EncryptedData data = new EncryptedData(proto.getInitialisationVector().toByteArray(),
                            proto.getEncryptedPrivateKey().toByteArray());
                    checkNotNull(crypter, "Encountered an encrypted key but no key crypter provided");
                    detkey = new DeterministicKey(immutablePath, chainCode, crypter, pubkey, data, parent);
                } else {
                    // No secret key bytes and key is not encrypted: either a watching key or private key bytes
                    // will be rederived on the fly from the parent.
                    detkey = new DeterministicKey(immutablePath, chainCode, pubkey, null, parent);
                }
            }
            if (key.hasCreationTimestamp())
                detkey.setCreationTimeSeconds(key.getCreationTimestamp() / 1000);
            if (log.isDebugEnabled())
                log.debug("Deserializing: DETERMINISTIC_KEY: {}", detkey);
            if (!isWatchingAccountKey) {
                // In the non-encrypted case, the non-leaf keys (account, internal, external) have already
                // been rederived and inserted at this point. In the encrypted case though,
                // we can't rederive and must reinsert, potentially building the hierarchy object
                // if need be.
                if (path.isEmpty()) {
                    // Master key.
                    if (chain.rootKey == null) {
                        chain.rootKey = detkey;
                        chain.hierarchy = new DeterministicHierarchy(detkey);
                    }
                } else if (path.size() == chain.getAccountPath().size() + 1) {
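                    // Constant 0 is used for the external chain and constant 1 for the internal
                    // (change) chain: https://github.com/bitcoin/bips/blob/master/bip-0044.mediawiki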
                    if (detkey.getChildNumber().num() == 0) {
                        chain.externalParentKey = detkey;
                        chain.issuedExternalKeys = key.getDeterministicKey().getIssuedSubkeys();
                        lookaheadSize = Math.max(lookaheadSize, key.getDeterministicKey().getLookaheadSize());
                        sigsRequiredToSpend = key.getDeterministicKey().getSigsRequiredToSpend();
                    } else if (detkey.getChildNumber().num() == 1) {
                        chain.internalParentKey = detkey;
                        chain.issuedInternalKeys = key.getDeterministicKey().getIssuedSubkeys();
                    }
                }
            }
            chain.hierarchy.putKey(detkey);
            chain.basicKeyChain.importKey(detkey);
        }
    }
    if (chain != null) {
        checkState(lookaheadSize >= 0);
        chain.setLookaheadSize(lookaheadSize);
        chain.setSigsRequiredToSpend(sigsRequiredToSpend);
        chain.maybeLookAhead();
        chains.add(chain);
    }
    return chains;
}
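
The call to Iterators.peekingIterator above is what lets the deserializer hand the factory the
serialized key that follows the current one (iter.peek()) without consuming it. Below is a minimal,
self-contained sketch of that peek-before-consume pattern; the class name and element values are
made up purely for illustration:

import com.google.common.collect.Iterators;
import com.google.common.collect.PeekingIterator;

import java.util.Arrays;
import java.util.List;

public class PeekingIteratorSketch {
    public static void main(String[] args) {
        List<String> keys = Arrays.asList("DETERMINISTIC_MNEMONIC", "KEY_0", "KEY_1");
        PeekingIterator<String> iter = Iterators.peekingIterator(keys.iterator());
        while (iter.hasNext()) {
            String current = iter.next();
            // Peek at the following element without advancing the iterator. Guard with hasNext(),
            // since peek() throws NoSuchElementException once the iteration is exhausted.
            String next = iter.hasNext() ? iter.peek() : null;
            System.out.println(current + " -> next: " + next);
        }
    }
}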

From source file:org.bitcoinj.wallet.DeterministicKeyChain.java

/**
 * Returns all the key chains found in the given list of keys. Typically there will only be one, but in the case of
 * key rotation it can happen that there are multiple chains found.
 */
public static List<DeterministicKeyChain> fromProtobuf(List<Protos.Key> keys, @Nullable KeyCrypter crypter,
        KeyChainFactory factory) throws UnreadableWalletException {
    List<DeterministicKeyChain> chains = newLinkedList();
    DeterministicSeed seed = null;
    DeterministicKeyChain chain = null;

    int lookaheadSize = -1;
    int sigsRequiredToSpend = 1;

    List<ChildNumber> accountPath = newArrayList();
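    // The account path is read from the DETERMINISTIC_MNEMONIC entry below; it defaults to
    // ACCOUNT_ZERO_PATH when the serialized mnemonic carries no path.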
    PeekingIterator<Protos.Key> iter = Iterators.peekingIterator(keys.iterator());
    while (iter.hasNext()) {
        Protos.Key key = iter.next();
        final Protos.Key.Type t = key.getType();
        if (t == Protos.Key.Type.DETERMINISTIC_MNEMONIC) {
            accountPath = newArrayList();
            for (int i : key.getAccountPathList()) {
                accountPath.add(new ChildNumber(i));
            }
            if (accountPath.isEmpty())
                accountPath = ACCOUNT_ZERO_PATH;
            if (chain != null) {
                checkState(lookaheadSize >= 0);
                chain.setLookaheadSize(lookaheadSize);
                chain.setSigsRequiredToSpend(sigsRequiredToSpend);
                chain.maybeLookAhead();
                chains.add(chain);
                chain = null;
            }
            long timestamp = key.getCreationTimestamp() / 1000;
            String passphrase = DEFAULT_PASSPHRASE_FOR_MNEMONIC; // FIXME allow non-empty passphrase
            if (key.hasSecretBytes()) {
                if (key.hasEncryptedDeterministicSeed())
                    throw new UnreadableWalletException("Malformed key proto: " + key.toString());
                byte[] seedBytes = null;
                if (key.hasDeterministicSeed()) {
                    seedBytes = key.getDeterministicSeed().toByteArray();
                }
                seed = new DeterministicSeed(key.getSecretBytes().toStringUtf8(), seedBytes, passphrase,
                        timestamp);
            } else if (key.hasEncryptedData()) {
                if (key.hasDeterministicSeed())
                    throw new UnreadableWalletException("Malformed key proto: " + key.toString());
                EncryptedData data = new EncryptedData(
                        key.getEncryptedData().getInitialisationVector().toByteArray(),
                        key.getEncryptedData().getEncryptedPrivateKey().toByteArray());
                EncryptedData encryptedSeedBytes = null;
                if (key.hasEncryptedDeterministicSeed()) {
                    Protos.EncryptedData encryptedSeed = key.getEncryptedDeterministicSeed();
                    encryptedSeedBytes = new EncryptedData(
                            encryptedSeed.getInitialisationVector().toByteArray(),
                            encryptedSeed.getEncryptedPrivateKey().toByteArray());
                }
                seed = new DeterministicSeed(data, encryptedSeedBytes, timestamp);
            } else {
                throw new UnreadableWalletException("Malformed key proto: " + key.toString());
            }
            if (log.isDebugEnabled())
                log.debug("Deserializing: DETERMINISTIC_MNEMONIC: {}", seed);
        } else if (t == Protos.Key.Type.DETERMINISTIC_KEY) {
            if (!key.hasDeterministicKey())
                throw new UnreadableWalletException("Deterministic key missing extra data: " + key.toString());
            byte[] chainCode = key.getDeterministicKey().getChainCode().toByteArray();
            // Deserialize the path through the tree.
            LinkedList<ChildNumber> path = newLinkedList();
            for (int i : key.getDeterministicKey().getPathList())
                path.add(new ChildNumber(i));
            // Deserialize the public key and path.
            LazyECPoint pubkey = new LazyECPoint(ECKey.CURVE.getCurve(), key.getPublicKey().toByteArray());
            final ImmutableList<ChildNumber> immutablePath = ImmutableList.copyOf(path);
            // Possibly create the chain, if we didn't already do so.
            boolean isWatchingAccountKey = false;
            boolean isFollowingKey = false;
            // Save the previous chain, if any, when the key is marked as following. The current
            // key and the ones after it are to be placed in a new following key chain.
            if (key.getDeterministicKey().getIsFollowing()) {
                if (chain != null) {
                    checkState(lookaheadSize >= 0);
                    chain.setLookaheadSize(lookaheadSize);
                    chain.setSigsRequiredToSpend(sigsRequiredToSpend);
                    chain.maybeLookAhead();
                    chains.add(chain);
                    chain = null;
                    seed = null;
                }
                isFollowingKey = true;
            }
            if (chain == null) {
                // If this is not a following chain and the previous one was, this chain must be married
                boolean isMarried = !isFollowingKey && !chains.isEmpty()
                        && chains.get(chains.size() - 1).isFollowing();
                if (seed == null) {
                    DeterministicKey accountKey = new DeterministicKey(immutablePath, chainCode, pubkey, null,
                            null);
                    accountKey.setCreationTimeSeconds(key.getCreationTimestamp() / 1000);
                    chain = factory.makeWatchingKeyChain(key, iter.peek(), accountKey, isFollowingKey,
                            isMarried);
                    isWatchingAccountKey = true;
                } else {
                    chain = factory.makeKeyChain(key, iter.peek(), seed, crypter, isMarried,
                            ImmutableList.<ChildNumber>builder().addAll(accountPath).build());
                    chain.lookaheadSize = LAZY_CALCULATE_LOOKAHEAD;
                    // If the seed is encrypted, then the chain is incomplete at this point. However, we will load
                    // it up below as we parse in the keys. We just need to check at the end that we've loaded
                    // everything afterwards.
                }
            }
            // Find the parent key assuming this is not the root key, and not an account key for a watching chain.
            DeterministicKey parent = null;
            if (!path.isEmpty() && !isWatchingAccountKey) {
                ChildNumber index = path.removeLast();
                parent = chain.hierarchy.get(path, false, false);
                path.add(index);
            }
            DeterministicKey detkey;
            if (key.hasSecretBytes()) {
                // Not encrypted: private key is available.
                final BigInteger priv = new BigInteger(1, key.getSecretBytes().toByteArray());
                detkey = new DeterministicKey(immutablePath, chainCode, pubkey, priv, parent);
            } else {
                if (key.hasEncryptedData()) {
                    Protos.EncryptedData proto = key.getEncryptedData();
                    EncryptedData data = new EncryptedData(proto.getInitialisationVector().toByteArray(),
                            proto.getEncryptedPrivateKey().toByteArray());
                    checkNotNull(crypter, "Encountered an encrypted key but no key crypter provided");
                    detkey = new DeterministicKey(immutablePath, chainCode, crypter, pubkey, data, parent);
                } else {
                    // No secret key bytes and key is not encrypted: either a watching key or private key bytes
                    // will be rederived on the fly from the parent.
                    detkey = new DeterministicKey(immutablePath, chainCode, pubkey, null, parent);
                }
            }
            if (key.hasCreationTimestamp())
                detkey.setCreationTimeSeconds(key.getCreationTimestamp() / 1000);
            if (log.isDebugEnabled())
                log.debug("Deserializing: DETERMINISTIC_KEY: {}", detkey);
            if (!isWatchingAccountKey) {
                // In the non-encrypted case, the non-leaf keys (account, internal, external) have already
                // been rederived and inserted at this point. In the encrypted case though,
                // we can't rederive and we must reinsert, potentially building the hierarchy object
                // if need be.
                if (path.isEmpty()) {
                    // Master key.
                    if (chain.rootKey == null) {
                        chain.rootKey = detkey;
                        chain.hierarchy = new DeterministicHierarchy(detkey);
                    }
                } else if (path.size() == chain.getAccountPath().size() + 1) {
                    // Constant 0 is used for external chain and constant 1 for internal chain
                    // (also known as change addresses). https://github.com/bitcoin/bips/blob/master/bip-0044.mediawiki
                    if (detkey.getChildNumber().num() == 0) {
                        // External chain is used for addresses that are meant to be visible outside of the wallet
                        // (e.g. for receiving payments).
                        chain.externalParentKey = detkey;
                        chain.issuedExternalKeys = key.getDeterministicKey().getIssuedSubkeys();
                        lookaheadSize = Math.max(lookaheadSize, key.getDeterministicKey().getLookaheadSize());
                        sigsRequiredToSpend = key.getDeterministicKey().getSigsRequiredToSpend();
                    } else if (detkey.getChildNumber().num() == 1) {
                        // Internal chain is used for addresses which are not meant to be visible outside of the
                        // wallet and is used for return transaction change.
                        chain.internalParentKey = detkey;
                        chain.issuedInternalKeys = key.getDeterministicKey().getIssuedSubkeys();
                    }
                }
            }
            chain.hierarchy.putKey(detkey);
            chain.basicKeyChain.importKey(detkey);
        }
    }
    if (chain != null) {
        checkState(lookaheadSize >= 0);
        chain.setLookaheadSize(lookaheadSize);
        chain.setSigsRequiredToSpend(sigsRequiredToSpend);
        chain.maybeLookAhead();
        chains.add(chain);
    }
    return chains;
}
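
For context, a hedged sketch of how this deserializer might be driven from a parsed wallet proto.
The Protos.Wallet.getKeyList() accessor and the DefaultKeyChainFactory class are assumptions for
illustration, not taken from the listing above:

static List<DeterministicKeyChain> loadChains(Protos.Wallet walletProto, @Nullable KeyCrypter crypter)
        throws UnreadableWalletException {
    // Assumed: the wallet proto exposes its serialized keys via getKeyList(), and
    // DefaultKeyChainFactory is the stock KeyChainFactory implementation.
    return DeterministicKeyChain.fromProtobuf(walletProto.getKeyList(), crypter, new DefaultKeyChainFactory());
}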