List of usage examples for com.fasterxml.jackson.core JsonToken END_ARRAY
JsonToken END_ARRAY
To view the source code for com.fasterxml.jackson.core JsonToken END_ARRAY, click the Source Link.
From source file:org.oscim.utils.overpass.OverpassAPIReader.java
private void parseWay(JsonParser jp) throws JsonParseException, IOException { long id = 0;/*from w w w.j a va 2 s . c om*/ TagSet tags = null; ArrayList<OsmNode> wayNodes = new ArrayList<OsmNode>(); while (jp.nextToken() != JsonToken.END_OBJECT) { String name = jp.getCurrentName(); jp.nextToken(); if ("id".equals(name)) id = jp.getLongValue(); else if ("nodes".equals(name)) { while (jp.nextToken() != JsonToken.END_ARRAY) { Long nodeId = Long.valueOf(jp.getLongValue()); OsmNode node = nodesById.get(nodeId); if (node != null) // log("missing node " + nodeId); // else wayNodes.add(node); } } else if ("tags".equals(name)) tags = parseTags(jp); } // log("way: "+ id + " " + wayNodes.size()); OsmWay way = new OsmWay(tags, id, wayNodes); ownWays.add(way); waysById.put(Long.valueOf(id), way); }
From source file:com.github.heuermh.personalgenome.client.converter.JacksonPersonalGenomeConverter.java
@Override public UserName parseNames(final InputStream inputStream) { checkNotNull(inputStream);//from ww w . j a v a 2 s .c o m JsonParser parser = null; try { parser = jsonFactory.createParser(inputStream); parser.nextToken(); String id = null; String firstName = null; String lastName = null; String profileId = null; String profileFirstName = null; String profileLastName = null; List<ProfileName> profileNames = new ArrayList<ProfileName>(); while (parser.nextToken() != JsonToken.END_OBJECT) { String field = parser.getCurrentName(); parser.nextToken(); if ("id".equals(field)) { id = parser.getText(); } else if ("first_name".equals(field)) { firstName = parser.getText(); } else if ("last_name".equals(field)) { lastName = parser.getText(); } else if ("profiles".equals(field)) { while (parser.nextToken() != JsonToken.END_ARRAY) { while (parser.nextToken() != JsonToken.END_OBJECT) { String profileNameField = parser.getCurrentName(); parser.nextToken(); if ("id".equals(profileNameField)) { profileId = parser.getText(); } else if ("first_name".equals(profileNameField)) { profileFirstName = parser.getText(); } else if ("last_name".equals(profileNameField)) { profileLastName = parser.getText(); } } profileNames.add(new ProfileName(profileId, profileFirstName, profileLastName)); } } } return new UserName(id, firstName, lastName, profileNames); } catch (IOException e) { logger.warn("could not parse names", e); } finally { try { inputStream.close(); } catch (Exception e) { // ignored } try { parser.close(); } catch (Exception e) { // ignored } } return null; }
From source file:io.apiman.manager.api.exportimport.json.JsonImportReader.java
public void readClients() throws Exception { current = nextToken();//from w ww. j a v a2s. co m if (current == JsonToken.END_ARRAY) { return; } while (nextToken() != JsonToken.END_ARRAY) { // Traverse each api definition while (nextToken() != JsonToken.END_OBJECT) { if (jp.getCurrentName().equals(ClientBean.class.getSimpleName())) { current = nextToken(); ClientBean apiBean = jp.readValueAs(ClientBean.class); dispatcher.client(apiBean); } else { OrgElementsEnum fieldName = OrgElementsEnum.valueOf(jp.getCurrentName()); current = nextToken(); switch (fieldName) { case Versions: readClientVersions(); break; default: throw new RuntimeException("Unhandled entity " + fieldName + " with token " + current); } } } } }
From source file:com.quinsoft.zeidon.standardoe.ActivateOisFromJsonStream.java
/**
 * Reads one object instance (OI) from the enclosing JSON array.
 *
 * Expects the element to be an object whose first field is ".oimeta",
 * optionally followed by the root entity of the LOD. After loading,
 * cursors are set (or the view reset when nothing was selected) and
 * read-only / total-root-count flags are applied to the view.
 *
 * @return {@code false} when END_ARRAY is hit (no more OIs),
 *         {@code true} after successfully reading one OI
 * @throws ZeidonException if the stream structure is not as expected
 */
private boolean readOi() throws Exception {
    JsonToken token = jp.nextToken();

    // If we find the end of the OI array then that's the end of OIs.
    if (token == JsonToken.END_ARRAY)
        return false; // No more OIs in the stream.

    if (token != JsonToken.START_OBJECT)
        throw new ZeidonException("OI JSON stream doesn't start with object.");

    token = jp.nextToken();
    String fieldName = jp.getCurrentName();
    if (StringUtils.equals(fieldName, ".oimeta"))
        token = readOiMeta();
    else
        throw new ZeidonException(".oimeta object not specified in JSON stream");

    // If the token after reading the .oimeta is END_OBJECT then the OI is empty.
    if (token != JsonToken.END_OBJECT) {
        fieldName = jp.getCurrentName();
        // The first entity after .oimeta must match the LOD's root entity.
        if (!StringUtils.equalsIgnoreCase(fieldName, lodDef.getRoot().getName()))
            throw new ZeidonException("First entity specified in OI (%s) is not the root (%s)", fieldName,
                    lodDef.getRoot().getName());

        readEntity(fieldName);
        token = jp.nextToken();
    }

    if (selectedInstances.size() > 0)
        setCursors();
    else
        view.reset();

    if (token != JsonToken.END_OBJECT)
        throw new ZeidonException("OI JSON stream doesn't end with object.");

    if (readOnlyOi)
        ((InternalView) view).getViewImpl().getObjectInstance().setReadOnly(true);

    if (readOnly)
        view.setReadOnly(true);

    if (totalRootCount != null)
        view.setTotalRootCount(totalRootCount);

    return true; // Keep looking for OIs in the stream.
}
From source file:com.adobe.communities.ugc.migration.importer.UGCImportHelper.java
public static void extractTally(final Resource post, final JsonParser jsonParser, final ModifyingResourceProvider srp, final TallyOperationsService tallyOperationsService) throws IOException { jsonParser.nextToken(); // should be start object, but would be end array if no objects were present while (!jsonParser.getCurrentToken().equals(JsonToken.END_ARRAY)) { Long timestamp = null;/*from ww w . j a va2 s.co m*/ String userIdentifier = null; String response = null; String tallyType = null; jsonParser.nextToken(); // should make current token by "FIELD_NAME" but could be END_OBJECT if this were // an empty object while (!jsonParser.getCurrentToken().equals(JsonToken.END_OBJECT)) { final String label = jsonParser.getCurrentName(); jsonParser.nextToken(); // should be FIELD_VALUE if (label.equals(TallyConstants.TIMESTAMP_PROPERTY)) { timestamp = jsonParser.getValueAsLong(); } else { final String responseValue = jsonParser.getValueAsString(); if (label.equals("response")) { response = URLDecoder.decode(responseValue, "UTF-8"); } else if (label.equals("userIdentifier")) { userIdentifier = URLDecoder.decode(responseValue, "UTF-8"); } else if (label.equals("tallyType")) { tallyType = responseValue; } } jsonParser.nextToken(); // should make current token be "FIELD_NAME" unless we're at the end of our // loop and it's now "END_OBJECT" instead } if (timestamp != null && userIdentifier != null && response != null && tallyType != null) { createTally(srp, post, tallyType, userIdentifier, timestamp, response, tallyOperationsService); } jsonParser.nextToken(); // may advance to "START_OBJECT" if we're not finished yet, but might be // "END_ARRAY" now } }
From source file:com.netflix.hollow.jsonadapter.discover.HollowJsonAdapterSchemaDiscoverer.java
private void discoverSubArraySchemas(JsonParser parser, HollowDiscoveredSchema objectSchema) throws IOException { JsonToken token = parser.nextToken(); while (token != JsonToken.END_ARRAY) { if (token == JsonToken.START_OBJECT) { discoverSchemas(parser, objectSchema); } else {//from ww w . j a v a2 s.c om discoverSchemaField(parser, token, "value", objectSchema); } token = parser.nextToken(); } }
From source file:eu.project.ttc.models.index.JsonTermIndexIO.java
/**
 * Loads the json-serialized term index into the param {@link TermIndex} object.
 *
 * The top-level document is walked field by field; the sections handled are
 * metadata, words, terms, input_sources and term_variations (see the
 * constants referenced below). When {@code options.isMetadataOnly()} the
 * method returns right after the metadata section.
 *
 * @param reader
 *            source of the serialized term index
 * @param options
 *            The deserialization {@link IOOptions}.
 * @return the reconstructed {@link TermIndex}
 * @throws JsonParseException
 * @throws IOException
 */
public static TermIndex load(Reader reader, JsonOptions options) throws JsonParseException, IOException {
    TermIndex termIndex = null;
    JsonFactory jsonFactory = new JsonFactory();
    JsonParser jp = jsonFactory.createParser(reader); // or Stream, Reader
    jp.enable(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES);
    jp.enable(JsonParser.Feature.STRICT_DUPLICATE_DETECTION);
    String fieldname;
    String compLemma = null;
    int fileSource = -1;
    String wordLemma = null;
    String syntacticLabel = null;
    int begin = -1;
    int end = -1;
    int nbWordAnnos = -1;
    int nbSpottedTerms = -1;
    Term b;
    Term v;
    String text;
    String base;
    String variant;
    // String rule;
    String infoToken;
    String variantType;
    double variantScore;
    // file-source id -> file path/name, collected from the input_sources section
    Map<Integer, String> inputSources = Maps.newTreeMap();
    // term id -> raw context-vector entries, resolved after jp.close()
    Map<Integer, List<TempVecEntry>> contextVectors = Maps.newHashMap();
    OccurrenceStore occurrenceStore = null;

    // useful var for debug
    JsonToken tok;

    while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
        fieldname = jp.getCurrentName();
        if (METADATA.equals(fieldname)) {
            // ---- metadata section: builds the (empty) TermIndex itself ----
            jp.nextToken();
            String termIndexName = null;
            Lang lang = null;
            String corpusID = null;
            String occurrenceStorage = null;
            String occurrenceStoreURI = null;
            while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                fieldname = jp.getCurrentName();
                if (LANG.equals(fieldname)) {
                    lang = Lang.forName(jp.nextTextValue());
                } else if (NAME.equals(fieldname)) {
                    termIndexName = jp.nextTextValue();
                } else if (NB_WORD_ANNOTATIONS.equals(fieldname)) {
                    nbWordAnnos = jp.nextIntValue(-1);
                } else if (NB_SPOTTED_TERMS.equals(fieldname)) {
                    nbSpottedTerms = jp.nextIntValue(-1);
                } else if (CORPUS_ID.equals(fieldname)) {
                    corpusID = jp.nextTextValue();
                } else if (OCCURRENCE_STORAGE.equals(fieldname)) {
                    occurrenceStorage = jp.nextTextValue();
                } else if (OCCURRENCE_MONGODB_STORE_URI.equals(fieldname)) {
                    occurrenceStoreURI = jp.nextTextValue();
                }
            }
            Preconditions.checkState(lang != null, "The property meta.lang must be defined");
            Preconditions.checkState(termIndexName != null, "The property meta.name must be defined");
            // Choose the occurrence store backend declared in the metadata.
            if (occurrenceStorage != null && occurrenceStorage.equals(OCCURRENCE_STORAGE_MONGODB)) {
                Preconditions.checkNotNull(occurrenceStoreURI,
                        "Missing attribute " + OCCURRENCE_MONGODB_STORE_URI);
                occurrenceStore = new MongoDBOccurrenceStore(occurrenceStoreURI, OccurrenceStore.State.INDEXED);
            } else
                occurrenceStore = new MemoryOccurrenceStore();
            termIndex = new MemoryTermIndex(termIndexName, lang, occurrenceStore);
            if (corpusID != null)
                termIndex.setCorpusId(corpusID);
            if (nbWordAnnos != -1)
                termIndex.setWordAnnotationsNum(nbWordAnnos);
            if (nbSpottedTerms != -1)
                termIndex.setSpottedTermsNum(nbSpottedTerms);

            // Early exit when the caller only wants the metadata.
            if (options.isMetadataOnly())
                return termIndex;

        } else if (WORDS.equals(fieldname)) {
            // ---- words section: one Word (with optional components) per array element ----
            jp.nextToken();
            while ((tok = jp.nextToken()) != JsonToken.END_ARRAY) {
                WordBuilder wordBuilder = WordBuilder.start();
                while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                    fieldname = jp.getCurrentName();
                    if (LEMMA.equals(fieldname))
                        wordBuilder.setLemma(jp.nextTextValue());
                    else if (COMPOUND_TYPE.equals(fieldname))
                        wordBuilder.setCompoundType(CompoundType.fromName(jp.nextTextValue()));
                    else if (STEM.equals(fieldname))
                        wordBuilder.setStem(jp.nextTextValue());
                    else if (COMPONENTS.equals(fieldname)) {
                        while ((tok = jp.nextToken()) != JsonToken.END_ARRAY) {
                            while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                                fieldname = jp.getCurrentName();
                                if (LEMMA.equals(fieldname))
                                    compLemma = jp.nextTextValue();
                                else if (BEGIN.equals(fieldname))
                                    begin = jp.nextIntValue(-2);
                                else if (END.equals(fieldname))
                                    end = jp.nextIntValue(-2);
                            }
                            wordBuilder.addComponent(begin, end, compLemma);
                        }
                    }
                }
                // Word-level failures are logged and skipped, not fatal.
                try {
                    termIndex.addWord(wordBuilder.create());
                } catch (Exception e) {
                    LOGGER.error("Could not add word " + wordBuilder.getLemma() + " to term index", e);
                    LOGGER.warn("Error ignored, trying ton continue the loading of TermIndex");
                }
            }
        } else if (TERMS.equals(fieldname)) {
            // ---- terms section: one Term per array element ----
            jp.nextToken();
            while ((tok = jp.nextToken()) != JsonToken.END_ARRAY) {
                TermBuilder builder = TermBuilder.start(termIndex);
                List<TempVecEntry> currentContextVector = Lists.newArrayList();
                int currentTermId = -1;
                while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                    fieldname = jp.getCurrentName();
                    if (GROUPING_KEY.equals(fieldname))
                        builder.setGroupingKey(jp.nextTextValue());
                    else if (SPOTTING_RULE.equals(fieldname))
                        builder.setSpottingRule(jp.nextTextValue());
                    else if (ID.equals(fieldname)) {
                        currentTermId = jp.nextIntValue(-2);
                        builder.setId(currentTermId);
                    } else if (RANK.equals(fieldname)) {
                        builder.setRank(jp.nextIntValue(-1));
                    } else if (FREQUENCY.equals(fieldname)) {
                        builder.setFrequency(jp.nextIntValue(-1));
                    } else {
                        if (FREQ_NORM.equals(fieldname)) {
                            jp.nextToken();
                            builder.setFrequencyNorm((double) jp.getFloatValue());
                        } else if (SPECIFICITY.equals(fieldname)) {
                            jp.nextToken();
                            builder.setSpecificity((double) jp.getDoubleValue());
                        } else if (GENERAL_FREQ_NORM.equals(fieldname)) {
                            jp.nextToken();
                            builder.setGeneralFrequencyNorm((double) jp.getFloatValue());
                        } else if (WORDS.equals(fieldname)) {
                            // component words of this term (lemma + syntactic label)
                            while ((tok = jp.nextToken()) != JsonToken.END_ARRAY) {
                                wordLemma = null;
                                syntacticLabel = null;
                                while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                                    fieldname = jp.getCurrentName();
                                    if (LEMMA.equals(fieldname))
                                        wordLemma = jp.nextTextValue();
                                    else if (SYN.equals(fieldname))
                                        syntacticLabel = jp.nextTextValue();
                                }
                                Preconditions.checkArgument(wordLemma != null, MSG_EXPECT_PROP_FOR_TERM_WORD,
                                        LEMMA);
                                Preconditions.checkArgument(syntacticLabel != null, MSG_EXPECT_PROP_FOR_TERM_WORD,
                                        SYN);
                                builder.addWord(termIndex.getWord(wordLemma), syntacticLabel);
                            } // end words
                        } else if (OCCURRENCES.equals(fieldname)) {
                            tok = jp.nextToken();
                            if (tok == JsonToken.START_ARRAY) {
                                while ((tok = jp.nextToken()) != JsonToken.END_ARRAY) {
                                    begin = -1;
                                    end = -1;
                                    fileSource = -1;
                                    text = null;
                                    while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                                        fieldname = jp.getCurrentName();
                                        if (BEGIN.equals(fieldname))
                                            begin = jp.nextIntValue(-1);
                                        else if (TEXT.equals(fieldname))
                                            text = jp.nextTextValue();
                                        else if (END.equals(fieldname))
                                            end = jp.nextIntValue(-1);
                                        else if (FILE.equals(fieldname)) {
                                            fileSource = jp.nextIntValue(-1);
                                        }
                                    }
                                    Preconditions.checkArgument(begin != -1, MSG_EXPECT_PROP_FOR_OCCURRENCE,
                                            BEGIN);
                                    Preconditions.checkArgument(end != -1, MSG_EXPECT_PROP_FOR_OCCURRENCE, END);
                                    Preconditions.checkArgument(fileSource != -1, MSG_EXPECT_PROP_FOR_OCCURRENCE,
                                            FILE);
                                    Preconditions.checkNotNull(inputSources.get(fileSource),
                                            "No file source with id: %s", fileSource);
                                    Preconditions.checkNotNull(text, MSG_EXPECT_PROP_FOR_OCCURRENCE, TEXT);
                                    // Occurrences are only materialized for the in-memory store.
                                    if (occurrenceStore.getStoreType() == OccurrenceStore.Type.MEMORY)
                                        builder.addOccurrence(begin, end,
                                                termIndex.getDocument(inputSources.get(fileSource)), text);
                                }
                            } // end occurrences
                        } else if (CONTEXT.equals(fieldname)) {
                            @SuppressWarnings("unused")
                            int totalCooccs = 0;
                            while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                                fieldname = jp.getCurrentName();
                                if (TOTAL_COOCCURRENCES.equals(fieldname))
                                    /*
                                     * value never used since the total will
                                     * be reincremented in the contextVector
                                     */
                                    totalCooccs = jp.nextIntValue(-1);
                                else if (CO_OCCURRENCES.equals(fieldname)) {
                                    jp.nextToken();
                                    while ((tok = jp.nextToken()) != JsonToken.END_ARRAY) {
                                        TempVecEntry entry = new TempVecEntry();
                                        while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                                            fieldname = jp.getCurrentName();
                                            if (NB_COCCS.equals(fieldname))
                                                entry.setNbCooccs(jp.nextIntValue(-1));
                                            else if (ASSOC_RATE.equals(fieldname)) {
                                                jp.nextToken();
                                                entry.setAssocRate(jp.getFloatValue());
                                            } else if (CO_TERM.equals(fieldname))
                                                entry.setTermGroupingKey(jp.nextTextValue());
                                            else if (FILE.equals(fieldname)) {
                                                fileSource = jp.nextIntValue(-1);
                                            }
                                        }
                                        currentContextVector.add(entry);
                                    }
                                }
                            }
                        }
                    } // end if fieldname
                } // end term object
                // Term-level failures are logged and skipped, not fatal.
                try {
                    builder.createAndAddToIndex();
                } catch (Exception e) {
                    LOGGER.error("Could not add term " + builder.getGroupingKey() + " to term index", e);
                    LOGGER.warn("Error ignored, trying ton continue the loading of TermIndex");
                }
                if (options.isWithContexts())
                    contextVectors.put(currentTermId, currentContextVector);
            } // end array of terms
        } else if (INPUT_SOURCES.equals(fieldname)) {
            // ---- input_sources section: numeric-id -> source name map ----
            jp.nextToken();
            while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                String id = jp.getCurrentName();
                try {
                    inputSources.put(Integer.parseInt(id), jp.nextTextValue());
                } catch (NumberFormatException e) {
                    IOUtils.closeQuietly(jp);
                    throw new IllegalArgumentException("Bad format for input source key: " + id);
                }
            }
        } else if (TERM_VARIATIONS.equals(fieldname)) {
            // ---- term_variations section: links between base and variant terms ----
            jp.nextToken();
            while ((tok = jp.nextToken()) != JsonToken.END_ARRAY) {
                base = null;
                variant = null;
                infoToken = null;
                variantType = null;
                variantScore = 0;
                while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                    fieldname = jp.getCurrentName();
                    if (BASE.equals(fieldname))
                        base = jp.nextTextValue();
                    else if (VARIANT.equals(fieldname))
                        variant = jp.nextTextValue();
                    else if (VARIANT_TYPE.equals(fieldname))
                        variantType = jp.nextTextValue();
                    else if (VARIANT_SCORE.equals(fieldname)) {
                        jp.nextToken();
                        variantScore = jp.getDoubleValue();
                    } else if (INFO.equals(fieldname))
                        infoToken = jp.nextTextValue();
                } // end syntactic variant object
                Preconditions.checkNotNull(base, MSG_EXPECT_PROP_FOR_VAR, BASE);
                Preconditions.checkNotNull(variant, MSG_EXPECT_PROP_FOR_VAR, VARIANT);
                Preconditions.checkNotNull(infoToken, MSG_EXPECT_PROP_FOR_VAR, INFO);
                b = termIndex.getTermByGroupingKey(base);
                v = termIndex.getTermByGroupingKey(variant);
                if (b != null && v != null) {
                    VariationType vType = VariationType.fromShortName(variantType);
                    // For GRAPHICAL variations the info token encodes a numeric value.
                    TermVariation tv = new TermVariation(vType, b, v,
                            vType == VariationType.GRAPHICAL ? Double.parseDouble(infoToken) : infoToken);
                    tv.setScore(variantScore);
                    b.addTermVariation(tv);
                } else {
                    // Unresolvable endpoints are warned about and skipped.
                    if (b == null)
                        LOGGER.warn("Could not build variant because term \"{}\" was not found.", base);
                    if (v == null)
                        LOGGER.warn("Could not build variant because term \"{}\" was not found.", variant);
                }

                // Preconditions.checkNotNull(b, MSG_TERM_DOES_NOT_EXIST, base);
                // Preconditions.checkNotNull(v, MSG_TERM_DOES_NOT_EXIST, variant);
            } // end syntactic variations array
        }
    }
    jp.close();

    if (options.isWithContexts()) {
        /*
         * map term ids with terms in context vectors and
         * set context vectors
         */
        List<TempVecEntry> currentTempVecList;
        Term term = null;
        Term coTerm = null;
        ContextVector contextVector;
        for (int termId : contextVectors.keySet()) {
            currentTempVecList = contextVectors.get(termId);
            term = termIndex.getTermById(termId);
            contextVector = new ContextVector(term);
            for (TempVecEntry tempVecEntry : currentTempVecList) {
                coTerm = termIndex.getTermByGroupingKey(tempVecEntry.getTermGroupingKey());
                contextVector.addEntry(coTerm, tempVecEntry.getNbCooccs(), tempVecEntry.getAssocRate());
            }
            if (!contextVector.getEntries().isEmpty())
                term.setContextVector(contextVector);
        }
    }

    return termIndex;
}
From source file:org.oscim.utils.overpass.OverpassAPIReader.java
private void parseRelation(JsonParser jp) throws JsonParseException, IOException { long id = 0;//from ww w . ja v a2 s .c o m TagSet tags = null; ArrayList<TmpRelation> members = new ArrayList<TmpRelation>(); while (jp.nextToken() != JsonToken.END_OBJECT) { String name = jp.getCurrentName(); jp.nextToken(); if ("id".equals(name)) id = jp.getLongValue(); else if ("members".equals(name)) { while (jp.nextToken() != JsonToken.END_ARRAY) { TmpRelation member = new TmpRelation(); while (jp.nextToken() != JsonToken.END_OBJECT) { name = jp.getCurrentName(); jp.nextToken(); if ("type".equals(name)) member.type = jp.getText(); else if ("ref".equals(name)) member.id = Long.valueOf(jp.getLongValue()); else if ("role".equals(name)) member.role = jp.getText(); } members.add(member); } } else if ("tags".equals(name)) tags = parseTags(jp); } OsmRelation relation = new OsmRelation(tags, id, members.size()); ownRelations.add(relation); relationsById.put(Long.valueOf(id), relation); relationMembersForRelation.put(relation, members); }
From source file:de.undercouch.bson4jackson.BsonParser.java
/**
 * Advances the parser to the next BSON token.
 *
 * Drives a small state machine held in {@code _currentContext}: on the
 * very first call a new document is opened; afterwards the method either
 * reads the next field name (possibly closing the current document/array)
 * or reads the value for the field name returned by the previous call.
 *
 * @return the next {@link JsonToken}, or {@code null} at end of input
 * @throws JsonParseException on an element outside a document or an
 *         unknown BSON element type
 */
@Override
public JsonToken nextToken() throws IOException, JsonParseException {
    Context ctx = _currentContext;
    if (_currToken == null && ctx == null) {
        // First call: open the top-level document.
        try {
            _currToken = handleNewDocument(false);
        } catch (EOFException e) {
            //there is nothing more to read. indicate EOF
            return null;
        }
    } else {
        _tokenPos = _counter.getPosition();
        if (ctx == null) {
            if (_currToken == JsonToken.END_OBJECT) {
                //end of input
                return null;
            }
            throw new JsonParseException("Found element outside the document", getTokenLocation());
        }

        if (ctx.state == State.DONE) {
            //next field
            ctx.reset();
        }

        boolean readValue = true;
        if (ctx.state == State.FIELDNAME) {
            readValue = false;
            while (true) {
                //read field name or end of document
                ctx.type = _in.readByte();
                if (ctx.type == BsonConstants.TYPE_END) {
                    //end of document -- pop back to the parent context
                    _currToken = (ctx.array ? JsonToken.END_ARRAY : JsonToken.END_OBJECT);
                    _currentContext = _currentContext.parent;
                } else if (ctx.type == BsonConstants.TYPE_UNDEFINED) {
                    //skip field name and then ignore this token
                    skipCString();
                    continue;
                } else {
                    ctx.state = State.VALUE;
                    _currToken = JsonToken.FIELD_NAME;
                    if (ctx.array) {
                        //immediately read value of array element (discard field name)
                        readValue = true;
                        skipCString();
                        ctx.fieldName = null;
                    } else {
                        //read field name
                        ctx.fieldName = readCString();
                    }
                }
                break;
            }
        }

        if (readValue) {
            //parse element's value according to its BSON type byte
            switch (ctx.type) {
            case BsonConstants.TYPE_DOUBLE:
                ctx.value = _in.readDouble();
                _currToken = JsonToken.VALUE_NUMBER_FLOAT;
                break;

            case BsonConstants.TYPE_STRING:
                ctx.value = readString();
                _currToken = JsonToken.VALUE_STRING;
                break;

            case BsonConstants.TYPE_DOCUMENT:
                _currToken = handleNewDocument(false);
                break;

            case BsonConstants.TYPE_ARRAY:
                _currToken = handleNewDocument(true);
                break;

            case BsonConstants.TYPE_BINARY:
                _currToken = handleBinary();
                break;

            case BsonConstants.TYPE_OBJECTID:
                ctx.value = readObjectId();
                _currToken = JsonToken.VALUE_EMBEDDED_OBJECT;
                break;

            case BsonConstants.TYPE_BOOLEAN:
                boolean b = _in.readBoolean();
                ctx.value = b;
                _currToken = (b ? JsonToken.VALUE_TRUE : JsonToken.VALUE_FALSE);
                break;

            case BsonConstants.TYPE_DATETIME:
                ctx.value = new Date(_in.readLong());
                _currToken = JsonToken.VALUE_EMBEDDED_OBJECT;
                break;

            case BsonConstants.TYPE_NULL:
                _currToken = JsonToken.VALUE_NULL;
                break;

            case BsonConstants.TYPE_REGEX:
                _currToken = handleRegEx();
                break;

            case BsonConstants.TYPE_DBPOINTER:
                _currToken = handleDBPointer();
                break;

            case BsonConstants.TYPE_JAVASCRIPT:
                ctx.value = new JavaScript(readString());
                _currToken = JsonToken.VALUE_EMBEDDED_OBJECT;
                break;

            case BsonConstants.TYPE_SYMBOL:
                ctx.value = readSymbol();
                _currToken = JsonToken.VALUE_EMBEDDED_OBJECT;
                break;

            case BsonConstants.TYPE_JAVASCRIPT_WITH_SCOPE:
                _currToken = handleJavascriptWithScope();
                break;

            case BsonConstants.TYPE_INT32:
                ctx.value = _in.readInt();
                _currToken = JsonToken.VALUE_NUMBER_INT;
                break;

            case BsonConstants.TYPE_TIMESTAMP:
                ctx.value = readTimestamp();
                _currToken = JsonToken.VALUE_EMBEDDED_OBJECT;
                break;

            case BsonConstants.TYPE_INT64:
                ctx.value = _in.readLong();
                _currToken = JsonToken.VALUE_NUMBER_INT;
                break;

            case BsonConstants.TYPE_MINKEY:
                ctx.value = "MinKey";
                _currToken = JsonToken.VALUE_STRING;
                break;

            case BsonConstants.TYPE_MAXKEY:
                ctx.value = "MaxKey";
                _currToken = JsonToken.VALUE_STRING;
                break;

            default:
                throw new JsonParseException("Unknown element type " + ctx.type, getTokenLocation());
            }
            ctx.state = State.DONE;
        }
    }
    return _currToken;
}
From source file:com.netflix.hollow.jsonadapter.HollowJsonAdapter.java
/**
 * Records one passthrough field into the write records: the field name is
 * added as a "MapKey"; a scalar value becomes a "String" entry in the
 * single-value passthrough map, while an array of scalars becomes a
 * "ListOfString" entry in the multi-value passthrough map. Null values
 * are dropped and nested objects are skipped entirely.
 *
 * @param parser    source parser, positioned on the field's value
 * @param token     the token the parser is currently on
 * @param fieldName the passthrough field's name
 * @param rec       reusable write records receiving the entries
 */
private void addPassthroughField(JsonParser parser, JsonToken token, String fieldName,
        PassthroughWriteRecords rec) throws IOException {
    rec.passthroughMapKeyWriteRecord.reset();
    rec.passthroughMapKeyWriteRecord.setString("value", fieldName);
    int keyOrdinal = stateEngine.add("MapKey", rec.passthroughMapKeyWriteRecord);

    switch (token) {
    case START_ARRAY:
        rec.multiValuePassthroughListRec.reset();
        // Note: the first iteration sees START_ARRAY itself, which falls
        // through to the inner default and is skipped before advancing.
        while (token != JsonToken.END_ARRAY) {
            switch (token) {
            case VALUE_FALSE:
            case VALUE_TRUE:
            case VALUE_NUMBER_INT:
            case VALUE_NUMBER_FLOAT:
            case VALUE_STRING:
                // Every scalar element is stored as its string form.
                rec.passthroughMapValueWriteRecord.reset();
                rec.passthroughMapValueWriteRecord.setString("value", parser.getValueAsString());
                int elementOrdinal = stateEngine.add("String", rec.passthroughMapValueWriteRecord);
                rec.multiValuePassthroughListRec.addElement(elementOrdinal);
                break;
            default:
                break;
            }
            token = parser.nextToken();
        }
        int valueListOrdinal = stateEngine.add("ListOfString", rec.multiValuePassthroughListRec);
        rec.multiValuePassthroughMapRec.addEntry(keyOrdinal, valueListOrdinal);
        break;

    case VALUE_FALSE:
    case VALUE_TRUE:
    case VALUE_NUMBER_INT:
    case VALUE_NUMBER_FLOAT:
    case VALUE_STRING:
        // Scalar field: one entry in the single-value passthrough map.
        rec.passthroughMapValueWriteRecord.reset();
        rec.passthroughMapValueWriteRecord.setString("value", parser.getValueAsString());
        int valueOrdinal = stateEngine.add("String", rec.passthroughMapValueWriteRecord);
        rec.singleValuePassthroughMapRec.addEntry(keyOrdinal, valueOrdinal);
        break;

    case VALUE_NULL:
        // Null passthrough values are not recorded (key ordinal unused).
        break;

    case START_OBJECT:
        // Nested objects are not passed through; consume and discard.
        skipObject(parser);
        break;

    default:
        break;
    }
}