Example usage for com.fasterxml.jackson.core JsonParser getFloatValue

List of usage examples for com.fasterxml.jackson.core JsonParser getFloatValue

Introduction

On this page you can find an example usage of com.fasterxml.jackson.core JsonParser getFloatValue.

Prototype

public abstract float getFloatValue() throws IOException, JsonParseException;

Source Link

Document

Numeric accessor that can be called when the current token is of type JsonToken#VALUE_NUMBER_FLOAT and it can be expressed as a Java float primitive type.

Usage

From source file:com.netflix.hollow.jsonadapter.HollowJsonAdapter.java

/**
 * Reads the current JSON field value and records it into the write record
 * associated with the supplied field mapping.
 *
 * <p>Unmapped fields ({@code mappedFieldPath == null}) are consumed and
 * discarded. If the mapping carries a custom {@code FieldProcessor} and the
 * value is non-null, the processor takes over entirely. Otherwise the value
 * is converted according to the schema's declared field type; a scalar value
 * mapped onto a REFERENCE-typed field is wrapped in the referenced type's
 * single-field record and linked by ordinal.
 *
 * @param parser          the JSON parser, positioned at the field's value token
 * @param token           the current token (the field's value)
 * @param mappedFieldPath mapping of the JSON field onto the target schema,
 *                        or {@code null} when the field is not in the schema
 * @throws IOException on underlying parse errors
 */
private void addObjectField(JsonParser parser, JsonToken token, ObjectMappedFieldPath mappedFieldPath)
        throws IOException {
    if (mappedFieldPath == null) {
        skipObjectField(parser, token); // field not in schema: consume and discard
        return;
    }

    HollowObjectWriteRecord writeRec = mappedFieldPath.getWriteRecord();
    HollowObjectSchema schema = writeRec.getSchema();
    String fieldName = mappedFieldPath.getFieldName();
    int fieldPosition = mappedFieldPath.getFieldPosition();

    // A registered processor overrides default handling for non-null values.
    FieldProcessor processor = mappedFieldPath.getFieldProcessor();
    if (processor != null && token != JsonToken.VALUE_NULL) {
        processor.processField(parser, stateEngine, writeRec);
        return;
    }

    switch (token) {
    case START_ARRAY:
    case START_OBJECT:
        // Nested structure: parse it as its own type, then link by ordinal.
        int refOrdinal = parseSubType(parser, token, schema.getReferencedType(fieldPosition));
        writeRec.setReference(fieldName, refOrdinal);
        break;
    case VALUE_FALSE:
    case VALUE_TRUE:
    case VALUE_NUMBER_INT:
    case VALUE_NUMBER_FLOAT:
    case VALUE_STRING:
        // Scalar value: convert based on the schema's declared field type,
        // not the JSON token type.
        switch (schema.getFieldType(fieldPosition)) {
        case BOOLEAN:
            writeRec.setBoolean(fieldName, parser.getBooleanValue());
            break;
        case INT:
            writeRec.setInt(fieldName, parser.getIntValue());
            break;
        case LONG:
            writeRec.setLong(fieldName, parser.getLongValue());
            break;
        case DOUBLE:
            writeRec.setDouble(fieldName, parser.getDoubleValue());
            break;
        case FLOAT:
            writeRec.setFloat(fieldName, parser.getFloatValue());
            break;
        case STRING:
            writeRec.setString(fieldName, parser.getValueAsString());
            break;
        case REFERENCE:
            // Scalar mapped onto a reference type: write the value into the
            // referenced type's single-field record and link by ordinal.
            HollowObjectWriteRecord referencedRec = (HollowObjectWriteRecord) getWriteRecord(
                    schema.getReferencedType(fieldPosition));
            referencedRec.reset();
            String refFieldName = referencedRec.getSchema().getFieldName(0);
            switch (referencedRec.getSchema().getFieldType(0)) {
            case BOOLEAN:
                referencedRec.setBoolean(refFieldName, parser.getBooleanValue());
                break;
            case INT:
                referencedRec.setInt(refFieldName, parser.getIntValue());
                break;
            case LONG:
                referencedRec.setLong(refFieldName, parser.getLongValue());
                break;
            case DOUBLE:
                referencedRec.setDouble(refFieldName, parser.getDoubleValue());
                break;
            case FLOAT:
                referencedRec.setFloat(refFieldName, parser.getFloatValue());
                break;
            case STRING:
                referencedRec.setString(refFieldName, parser.getValueAsString());
                break;
            default:
                break;
            }

            int referencedOrdinal = stateEngine.add(schema.getReferencedType(fieldPosition), referencedRec);
            writeRec.setReference(fieldName, referencedOrdinal);
            break;
        default:
            break;
        }
        break; // explicit: previously fell through into VALUE_NULL's break
    case VALUE_NULL:
        break; // null values are simply not recorded
    default:
        break;
    }
}

From source file:com.netflix.hollow.jsonadapter.HollowJsonAdapterPrimaryKeyFinder.java

/**
 * Inspects a single JSON object field while scanning for primary key
 * elements. When the field's dotted path matches a configured key field
 * position, its (boxed) value is stored into {@code keyElementArray}.
 *
 * <p>The dotted path in {@code currentFieldPath} is extended with this
 * field's name for the duration of the call and restored before returning.
 *
 * @param parser           the JSON parser, positioned at the field's value token
 * @param token            the current token
 * @param schema           schema of the enclosing object
 * @param fieldName        name of the field being inspected
 * @param currentFieldPath mutable dotted path of the enclosing object
 * @throws IOException           on underlying parse errors
 * @throws IllegalStateException if a key element resolves to a REFERENCE type
 */
private void addObjectField(JsonParser parser, JsonToken token, HollowObjectSchema schema, String fieldName,
        StringBuilder currentFieldPath) throws IOException {
    if (token == JsonToken.FIELD_NAME)
        return; // a field name itself carries no value to record

    int fieldPosition = schema.getPosition(fieldName);
    if (fieldPosition == -1) {
        skipObjectField(parser, token); // field not in schema: consume and discard
        return;
    }

    // Extend the dotted path for this field; restored at the end of the method.
    int parentFieldPathLength = currentFieldPath.length();
    if (parentFieldPathLength > 0)
        currentFieldPath.append(".");
    currentFieldPath.append(fieldName);
    Integer keyFieldPosition = keyFieldPathPositions.get(currentFieldPath.toString());

    switch (token) {
    case START_ARRAY:
        skipSubArray(parser); // key elements never live inside arrays
        break;
    case START_OBJECT:
        String referencedType = schema.getReferencedType(fieldName);
        HollowSchema referencedSchema = hollowSchemas.get(referencedType);

        // Only OBJECT sub-schemas can contribute key elements; skip the rest.
        if (referencedSchema.getSchemaType() == SchemaType.OBJECT)
            addObject(parser, (HollowObjectSchema) referencedSchema, currentFieldPath);
        else
            skipObject(parser);

        break;
    case VALUE_FALSE:
    case VALUE_TRUE:
    case VALUE_NUMBER_INT:
    case VALUE_NUMBER_FLOAT:
    case VALUE_STRING:
        switch (schema.getFieldType(fieldPosition)) {
        case BOOLEAN:
            if (keyFieldPosition != null)
                keyElementArray[keyFieldPosition.intValue()] = Boolean.valueOf(parser.getBooleanValue());
            break;
        case INT:
            if (keyFieldPosition != null)
                keyElementArray[keyFieldPosition.intValue()] = Integer.valueOf(parser.getIntValue());
            break;
        case LONG:
            if (keyFieldPosition != null)
                keyElementArray[keyFieldPosition.intValue()] = Long.valueOf(parser.getLongValue());
            break;
        case DOUBLE:
            if (keyFieldPosition != null)
                keyElementArray[keyFieldPosition.intValue()] = Double.valueOf(parser.getDoubleValue());
            break;
        case FLOAT:
            if (keyFieldPosition != null)
                keyElementArray[keyFieldPosition.intValue()] = Float.valueOf(parser.getFloatValue());
            break;
        case STRING:
            if (keyFieldPosition != null)
                keyElementArray[keyFieldPosition.intValue()] = parser.getValueAsString();
            break;
        case REFERENCE:
            if (keyFieldPosition != null)
                throw new IllegalStateException("Key elements must not be REFERENCE");
            // Scalar wrapped in a single-field referenced type: probe whether
            // the wrapped field itself is a key element.
            HollowObjectSchema subSchema = (HollowObjectSchema) hollowSchemas
                    .get(schema.getReferencedType(fieldPosition));
            currentFieldPath.append(".").append(subSchema.getFieldName(0));
            keyFieldPosition = keyFieldPathPositions.get(currentFieldPath.toString());
            if (keyFieldPosition != null) {
                // Already null-checked above; the per-case re-checks of the
                // original were redundant and have been removed.
                switch (subSchema.getFieldType(0)) {
                case BOOLEAN:
                    keyElementArray[keyFieldPosition.intValue()] = Boolean.valueOf(parser.getBooleanValue());
                    break;
                case INT:
                    keyElementArray[keyFieldPosition.intValue()] = Integer.valueOf(parser.getIntValue());
                    break;
                case LONG:
                    keyElementArray[keyFieldPosition.intValue()] = Long.valueOf(parser.getLongValue());
                    break;
                case DOUBLE:
                    keyElementArray[keyFieldPosition.intValue()] = Double.valueOf(parser.getDoubleValue());
                    break;
                case FLOAT:
                    keyElementArray[keyFieldPosition.intValue()] = Float.valueOf(parser.getFloatValue());
                    break;
                case STRING:
                    keyElementArray[keyFieldPosition.intValue()] = parser.getValueAsString();
                    break;
                case REFERENCE:
                    throw new IllegalStateException("Key elements must not be REFERENCE");
                default:
                    break;
                }
            }
            break; // explicit: previously fell through into the empty default
        default:
            break;
        }
        break; // explicit: previously fell through into VALUE_NULL's break
    case VALUE_NULL:
        break; // nulls never contribute key values
    default:
        break;
    }

    currentFieldPath.setLength(parentFieldPathLength);
}

From source file:eu.project.ttc.models.index.JsonTermIndexIO.java

/**
 * Loads a json-serialized term index into a new {@link TermIndex} object.
 *
 * <p>This is a hand-rolled streaming parser: each section of the top-level
 * object ({@code metadata}, {@code words}, {@code terms},
 * {@code input_sources}, {@code term_variations}) is consumed with explicit
 * {@link JsonParser#nextToken()} calls, so the token-consumption order is
 * significant throughout.
 *
 * @param reader
 *          source of the json-serialized term index
 * @param options
 *          The deserialization {@link IOOptions}.
 * @return the populated term index (metadata-only if
 *         {@code options.isMetadataOnly()})
 * @throws JsonParseException on malformed json
 * @throws IOException on underlying read errors
 */
public static TermIndex load(Reader reader, JsonOptions options) throws JsonParseException, IOException {
    TermIndex termIndex = null;
    JsonFactory jsonFactory = new JsonFactory();
    JsonParser jp = jsonFactory.createParser(reader); // or Stream, Reader
    jp.enable(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES);
    jp.enable(JsonParser.Feature.STRICT_DUPLICATE_DETECTION);
    String fieldname;
    String compLemma = null;
    int fileSource = -1;
    String wordLemma = null;
    String syntacticLabel = null;
    int begin = -1;
    int end = -1;
    int nbWordAnnos = -1;
    int nbSpottedTerms = -1;
    Term b;
    Term v;
    String text;
    String base;
    String variant;
    //      String rule;
    String infoToken;
    String variantType;
    double variantScore;

    // file-source id -> document path, populated by the input_sources section
    Map<Integer, String> inputSources = Maps.newTreeMap();

    // term id -> raw context vector entries; resolved to Terms after the
    // whole terms section has been read (co-terms may appear later in the file)
    Map<Integer, List<TempVecEntry>> contextVectors = Maps.newHashMap();

    OccurrenceStore occurrenceStore = null;

    // useful var for debug
    JsonToken tok;

    // Top-level object: dispatch on each section's field name.
    while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {

        fieldname = jp.getCurrentName();
        if (METADATA.equals(fieldname)) {
            jp.nextToken();
            String termIndexName = null;
            Lang lang = null;
            String corpusID = null;
            String occurrenceStorage = null;
            String occurrenceStoreURI = null;

            while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                fieldname = jp.getCurrentName();
                if (LANG.equals(fieldname)) {
                    lang = Lang.forName(jp.nextTextValue());
                } else if (NAME.equals(fieldname)) {
                    termIndexName = jp.nextTextValue();
                } else if (NB_WORD_ANNOTATIONS.equals(fieldname)) {
                    nbWordAnnos = jp.nextIntValue(-1);
                } else if (NB_SPOTTED_TERMS.equals(fieldname)) {
                    nbSpottedTerms = jp.nextIntValue(-1);
                } else if (CORPUS_ID.equals(fieldname)) {
                    corpusID = jp.nextTextValue();
                } else if (OCCURRENCE_STORAGE.equals(fieldname)) {
                    occurrenceStorage = jp.nextTextValue();
                } else if (OCCURRENCE_MONGODB_STORE_URI.equals(fieldname)) {
                    occurrenceStoreURI = jp.nextTextValue();
                }
            }
            Preconditions.checkState(lang != null, "The property meta.lang must be defined");
            Preconditions.checkState(termIndexName != null, "The property meta.name must be defined");

            // Occurrences live either in MongoDB or in memory, per metadata.
            if (occurrenceStorage != null && occurrenceStorage.equals(OCCURRENCE_STORAGE_MONGODB)) {
                Preconditions.checkNotNull(occurrenceStoreURI,
                        "Missing attribute " + OCCURRENCE_MONGODB_STORE_URI);
                occurrenceStore = new MongoDBOccurrenceStore(occurrenceStoreURI, OccurrenceStore.State.INDEXED);
            } else
                occurrenceStore = new MemoryOccurrenceStore();

            termIndex = new MemoryTermIndex(termIndexName, lang, occurrenceStore);
            if (corpusID != null)
                termIndex.setCorpusId(corpusID);
            if (nbWordAnnos != -1)
                termIndex.setWordAnnotationsNum(nbWordAnnos);
            if (nbSpottedTerms != -1)
                termIndex.setSpottedTermsNum(nbSpottedTerms);

            // Early exit: caller only wanted the metadata header.
            // NOTE(review): the parser is not closed on this path — confirm
            // whether the caller is expected to close the reader.
            if (options.isMetadataOnly())
                return termIndex;

        } else if (WORDS.equals(fieldname)) {
            jp.nextToken();
            // Array of word objects, each possibly with compound components.
            while ((tok = jp.nextToken()) != JsonToken.END_ARRAY) {
                WordBuilder wordBuilder = WordBuilder.start();
                while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                    fieldname = jp.getCurrentName();
                    if (LEMMA.equals(fieldname))
                        wordBuilder.setLemma(jp.nextTextValue());
                    else if (COMPOUND_TYPE.equals(fieldname))
                        wordBuilder.setCompoundType(CompoundType.fromName(jp.nextTextValue()));
                    else if (STEM.equals(fieldname))
                        wordBuilder.setStem(jp.nextTextValue());
                    else if (COMPONENTS.equals(fieldname)) {
                        while ((tok = jp.nextToken()) != JsonToken.END_ARRAY) {
                            while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                                fieldname = jp.getCurrentName();
                                if (LEMMA.equals(fieldname))
                                    compLemma = jp.nextTextValue();
                                else if (BEGIN.equals(fieldname))
                                    begin = jp.nextIntValue(-2);
                                else if (END.equals(fieldname))
                                    end = jp.nextIntValue(-2);
                            }
                            wordBuilder.addComponent(begin, end, compLemma);
                        }
                    }
                }
                try {
                    termIndex.addWord(wordBuilder.create());
                } catch (Exception e) {
                    // Deliberate best-effort: a bad word must not abort the load.
                    LOGGER.error("Could not add word " + wordBuilder.getLemma() + " to term index", e);
                    LOGGER.warn("Error ignored, trying ton continue the loading of TermIndex");
                }
            }
        } else if (TERMS.equals(fieldname)) {
            jp.nextToken();
            // Array of term objects: core properties, member words,
            // occurrences, and (optionally) a context vector.
            while ((tok = jp.nextToken()) != JsonToken.END_ARRAY) {
                TermBuilder builder = TermBuilder.start(termIndex);
                List<TempVecEntry> currentContextVector = Lists.newArrayList();
                int currentTermId = -1;
                while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                    fieldname = jp.getCurrentName();
                    if (GROUPING_KEY.equals(fieldname))
                        builder.setGroupingKey(jp.nextTextValue());
                    else if (SPOTTING_RULE.equals(fieldname))
                        builder.setSpottingRule(jp.nextTextValue());
                    else if (ID.equals(fieldname)) {
                        currentTermId = jp.nextIntValue(-2);
                        builder.setId(currentTermId);
                    } else if (RANK.equals(fieldname)) {
                        builder.setRank(jp.nextIntValue(-1));
                    } else if (FREQUENCY.equals(fieldname)) {
                        builder.setFrequency(jp.nextIntValue(-1));
                    } else {
                        if (FREQ_NORM.equals(fieldname)) {
                            jp.nextToken();
                            // NOTE(review): read as float then widened to
                            // double — precision beyond float is lost; confirm
                            // this matches the serialization side.
                            builder.setFrequencyNorm((double) jp.getFloatValue());
                        } else if (SPECIFICITY.equals(fieldname)) {
                            jp.nextToken();
                            builder.setSpecificity((double) jp.getDoubleValue());
                        } else if (GENERAL_FREQ_NORM.equals(fieldname)) {
                            jp.nextToken();
                            // NOTE(review): same float-precision caveat as FREQ_NORM.
                            builder.setGeneralFrequencyNorm((double) jp.getFloatValue());
                        } else if (WORDS.equals(fieldname)) {
                            while ((tok = jp.nextToken()) != JsonToken.END_ARRAY) {
                                wordLemma = null;
                                syntacticLabel = null;
                                while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                                    fieldname = jp.getCurrentName();
                                    if (LEMMA.equals(fieldname))
                                        wordLemma = jp.nextTextValue();
                                    else if (SYN.equals(fieldname))
                                        syntacticLabel = jp.nextTextValue();
                                }
                                Preconditions.checkArgument(wordLemma != null, MSG_EXPECT_PROP_FOR_TERM_WORD,
                                        LEMMA);
                                Preconditions.checkArgument(syntacticLabel != null,
                                        MSG_EXPECT_PROP_FOR_TERM_WORD, SYN);
                                builder.addWord(termIndex.getWord(wordLemma), syntacticLabel);
                            } // end words

                        } else if (OCCURRENCES.equals(fieldname)) {
                            tok = jp.nextToken();
                            if (tok == JsonToken.START_ARRAY) {

                                while ((tok = jp.nextToken()) != JsonToken.END_ARRAY) {
                                    begin = -1;
                                    end = -1;
                                    fileSource = -1;
                                    text = null;
                                    while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                                        fieldname = jp.getCurrentName();
                                        if (BEGIN.equals(fieldname))
                                            begin = jp.nextIntValue(-1);
                                        else if (TEXT.equals(fieldname))
                                            text = jp.nextTextValue();
                                        else if (END.equals(fieldname))
                                            end = jp.nextIntValue(-1);
                                        else if (FILE.equals(fieldname)) {
                                            fileSource = jp.nextIntValue(-1);
                                        }
                                    }

                                    // Every occurrence must be fully specified and
                                    // reference a known input source.
                                    Preconditions.checkArgument(begin != -1, MSG_EXPECT_PROP_FOR_OCCURRENCE,
                                            BEGIN);
                                    Preconditions.checkArgument(end != -1, MSG_EXPECT_PROP_FOR_OCCURRENCE, END);
                                    Preconditions.checkArgument(fileSource != -1,
                                            MSG_EXPECT_PROP_FOR_OCCURRENCE, FILE);
                                    Preconditions.checkNotNull(inputSources.get(fileSource),
                                            "No file source with id: %s", fileSource);
                                    Preconditions.checkNotNull(text, MSG_EXPECT_PROP_FOR_OCCURRENCE, TEXT);
                                    if (occurrenceStore.getStoreType() == OccurrenceStore.Type.MEMORY)
                                        builder.addOccurrence(begin, end,
                                                termIndex.getDocument(inputSources.get(fileSource)), text);
                                }
                            }
                            // end occurrences
                        } else if (CONTEXT.equals(fieldname)) {
                            @SuppressWarnings("unused")
                            int totalCooccs = 0;
                            while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                                fieldname = jp.getCurrentName();
                                if (TOTAL_COOCCURRENCES.equals(fieldname))
                                    /*
                                     * value never used since the total will 
                                     * be reincremented in the contextVector
                                     */
                                    totalCooccs = jp.nextIntValue(-1);
                                else if (CO_OCCURRENCES.equals(fieldname)) {
                                    jp.nextToken();
                                    while ((tok = jp.nextToken()) != JsonToken.END_ARRAY) {
                                        TempVecEntry entry = new TempVecEntry();
                                        while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                                            fieldname = jp.getCurrentName();
                                            if (NB_COCCS.equals(fieldname))
                                                entry.setNbCooccs(jp.nextIntValue(-1));
                                            else if (ASSOC_RATE.equals(fieldname)) {
                                                jp.nextToken();
                                                entry.setAssocRate(jp.getFloatValue());
                                            } else if (CO_TERM.equals(fieldname))
                                                entry.setTermGroupingKey(jp.nextTextValue());
                                            else if (FILE.equals(fieldname)) {
                                                fileSource = jp.nextIntValue(-1);
                                            }
                                        }
                                        currentContextVector.add(entry);
                                    }
                                }
                            }
                        }
                    } //end if fieldname

                } // end term object
                try {
                    builder.createAndAddToIndex();
                } catch (Exception e) {
                    // Deliberate best-effort: a bad term must not abort the load.
                    LOGGER.error("Could not add term " + builder.getGroupingKey() + " to term index", e);
                    LOGGER.warn("Error ignored, trying ton continue the loading of TermIndex");
                }

                if (options.isWithContexts())
                    contextVectors.put(currentTermId, currentContextVector);

            } // end array of terms

        } else if (INPUT_SOURCES.equals(fieldname)) {
            jp.nextToken();
            while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                String id = jp.getCurrentName();
                try {
                    inputSources.put(Integer.parseInt(id), jp.nextTextValue());
                } catch (NumberFormatException e) {
                    IOUtils.closeQuietly(jp);
                    throw new IllegalArgumentException("Bad format for input source key: " + id);
                }
            }
        } else if (TERM_VARIATIONS.equals(fieldname)) {
            jp.nextToken();
            // Variations reference terms by grouping key, so this section is
            // resolved against the already-loaded terms.
            while ((tok = jp.nextToken()) != JsonToken.END_ARRAY) {
                base = null;
                variant = null;
                infoToken = null;
                variantType = null;
                variantScore = 0;
                while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                    fieldname = jp.getCurrentName();
                    if (BASE.equals(fieldname))
                        base = jp.nextTextValue();
                    else if (VARIANT.equals(fieldname))
                        variant = jp.nextTextValue();
                    else if (VARIANT_TYPE.equals(fieldname))
                        variantType = jp.nextTextValue();
                    else if (VARIANT_SCORE.equals(fieldname)) {
                        jp.nextToken();
                        variantScore = jp.getDoubleValue();
                    } else if (INFO.equals(fieldname))
                        infoToken = jp.nextTextValue();
                } // end syntactic variant object
                Preconditions.checkNotNull(base, MSG_EXPECT_PROP_FOR_VAR, BASE);
                Preconditions.checkNotNull(variant, MSG_EXPECT_PROP_FOR_VAR, VARIANT);
                Preconditions.checkNotNull(infoToken, MSG_EXPECT_PROP_FOR_VAR, INFO);
                b = termIndex.getTermByGroupingKey(base);
                v = termIndex.getTermByGroupingKey(variant);
                if (b != null && v != null) {

                    VariationType vType = VariationType.fromShortName(variantType);

                    // GRAPHICAL variations carry a numeric info payload.
                    TermVariation tv = new TermVariation(vType, b, v,
                            vType == VariationType.GRAPHICAL ? Double.parseDouble(infoToken) : infoToken);
                    tv.setScore(variantScore);
                    b.addTermVariation(tv);
                } else {
                    // Unresolvable endpoints are logged and skipped, not fatal.
                    if (b == null)
                        LOGGER.warn("Could not build variant because term \"{}\" was not found.", base);
                    if (v == null)
                        LOGGER.warn("Could not build variant because term \"{}\" was not found.", variant);
                }

                //               Preconditions.checkNotNull(b, MSG_TERM_DOES_NOT_EXIST, base);
                //               Preconditions.checkNotNull(v, MSG_TERM_DOES_NOT_EXIST, variant);

            } // end syntactic variations array
        }
    }
    jp.close();

    if (options.isWithContexts()) {
        /*
         *  map term ids with terms in context vectors and
         *  set context vectors
         */
        List<TempVecEntry> currentTempVecList;
        Term term = null;
        Term coTerm = null;
        ContextVector contextVector;
        for (int termId : contextVectors.keySet()) {
            currentTempVecList = contextVectors.get(termId);
            term = termIndex.getTermById(termId);
            contextVector = new ContextVector(term);
            for (TempVecEntry tempVecEntry : currentTempVecList) {
                coTerm = termIndex.getTermByGroupingKey(tempVecEntry.getTermGroupingKey());
                contextVector.addEntry(coTerm, tempVecEntry.getNbCooccs(), tempVecEntry.getAssocRate());
            }
            if (!contextVector.getEntries().isEmpty())
                term.setContextVector(contextVector);
        }
    }

    return termIndex;
}

From source file:org.hippoecm.frontend.service.restproxy.custom.json.deserializers.AnnotationJsonDeserializer.java

/**
 * Reads a JSON array of numbers into a boxed {@code Float[]}.
 *
 * @param jsonParser parser positioned just before the array's elements
 * @return the array elements, in document order
 * @throws IOException on underlying parse errors
 */
protected Float[] deserializeFloatArrayAnnotationAttribute(JsonParser jsonParser)
        throws JsonParseException, IOException {
    List<Float> values = new ArrayList<Float>();

    // Consume elements until the closing ']' token of the array.
    for (JsonToken token = jsonParser.nextToken(); token != JsonToken.END_ARRAY; token = jsonParser.nextToken()) {
        values.add(jsonParser.getFloatValue());
    }

    return values.toArray(new Float[values.size()]);
}