List of usage examples for com.fasterxml.jackson.core JsonToken END_OBJECT
JsonToken END_OBJECT
The examples below show how com.fasterxml.jackson.core.JsonToken.END_OBJECT is used when reading JSON with a streaming JsonParser; the source file each snippet comes from is named above it.
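Before the project examples, here is a minimal self-contained sketch (not taken from any of the projects below) of the pattern they all share: advancing a streaming JsonParser until END_OBJECT closes the current JSON object. The class and method names (EndObjectExample, parseFlatObject) are illustrative only.

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonToken;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

public class EndObjectExample {

    // Reads the top-level fields of a JSON object into a map, stopping when
    // END_OBJECT closes it. Nested objects and arrays are skipped for brevity.
    public static Map<String, String> parseFlatObject(String json) throws IOException {
        Map<String, String> fields = new HashMap<>();
        try (JsonParser parser = new JsonFactory().createParser(json)) {
            parser.nextToken(); // consume the opening START_OBJECT
            while (parser.nextToken() != JsonToken.END_OBJECT) {
                String name = parser.getCurrentName();
                JsonToken value = parser.nextToken(); // advance from FIELD_NAME to the value
                if (value == JsonToken.START_OBJECT || value == JsonToken.START_ARRAY) {
                    parser.skipChildren(); // ignore nested structures in this sketch
                } else {
                    fields.put(name, parser.getText());
                }
            }
        }
        return fields;
    }

    public static void main(String[] args) throws IOException {
        // prints the two fields, e.g. {name=demo, count=3}
        System.out.println(parseFlatObject("{\"name\":\"demo\",\"count\":3}"));
    }
}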
From source file:com.amazonaws.services.cloudtrail.processinglibrary.serializer.AbstractEventSerializer.java
/**
 * Get the next event from the CloudTrail log and parse it.
 *
 * @return a {@link com.amazonaws.services.cloudtrail.processinglibrary.model.CloudTrailEvent} that represents the
 *         parsed event.
 * @throws IOException if the event could not be parsed.
 */
public CloudTrailEvent getNextEvent() throws IOException {
    CloudTrailEventData eventData = new CloudTrailEventData();
    String key = null;

    /* Get the next CloudTrailEvent from the log file. If an event cannot be parsed,
     * an IOException is thrown. In that case, charEnd indexes the place where the
     * parsing error was encountered. */

    // Return the starting location of the current token, i.e. the position of the first
    // character from the input that starts the current token.
    int charStart = (int) this.jsonParser.getTokenLocation().getCharOffset();

    while (this.jsonParser.nextToken() != JsonToken.END_OBJECT) {
        key = jsonParser.getCurrentName();

        switch (key) {
        case "eventVersion":
            String eventVersion = this.jsonParser.nextTextValue();
            if (Double.parseDouble(eventVersion) > SUPPORTED_EVENT_VERSION) {
                logger.warn(String.format("EventVersion %s is not supported by CloudTrail.", eventVersion));
            }
            eventData.add(key, eventVersion);
            break;
        case "userIdentity":
            this.parseUserIdentity(eventData);
            break;
        case "eventTime":
            eventData.add(CloudTrailEventField.eventTime.name(),
                    this.convertToDate(this.jsonParser.nextTextValue()));
            break;
        case "eventID":
        case "requestID":
            eventData.add(key, this.convertToUUID(this.jsonParser.nextTextValue()));
            break;
        case "readOnly":
            this.parseReadOnly(eventData);
            break;
        case "resources":
            this.parseResources(eventData);
            break;
        default:
            eventData.add(key, this.parseDefaultValue(key));
            break;
        }
    }
    this.setAccountId(eventData);

    // The event's last character position in the log file.
    int charEnd = (int) this.jsonParser.getTokenLocation().getCharOffset();

    CloudTrailEventMetadata metaData = this.getMetadata(charStart, charEnd);

    return new CloudTrailEvent(eventData, metaData);
}
From source file:com.adobe.communities.ugc.migration.importer.ScoresImportServlet.java
private void importFile(final JsonParser jsonParser, final UserPropertiesManager userManager,
        final ResourceResolver resolver) throws ServletException, IOException, RepositoryException {
    Map<String, Boolean> scoreTypes = getScoreTypes(resolver);
    JsonToken token = jsonParser.nextToken();
    while (!token.equals(JsonToken.END_OBJECT)) {
        final String authId = jsonParser.getCurrentName();
        token = jsonParser.nextToken();
        if (!token.equals(JsonToken.START_OBJECT)) {
            throw new ServletException("Expected to see start object, got " + token);
        }
        final Map<String, Long> scores = new HashMap<String, Long>();
        token = jsonParser.nextToken();
        while (!token.equals(JsonToken.END_OBJECT)) {
            final String scoreName = jsonParser.getCurrentName();
            jsonParser.nextToken();
            final Long scoreValue = jsonParser.getLongValue();
            scores.put(scoreName, scoreValue);
            if (!scoreTypes.containsKey(scoreName)) {
                LOG.warn("A score of type [{}] was imported for [{}], but that score type hasn't been configured "
                        + "on this server", scoreName, authId);
            }
            token = jsonParser.nextToken();
        }
        updateProfileScore(authId, scores, userManager, resolver);
        token = jsonParser.nextToken();
    }
}
From source file:com.floragunn.searchguard.dlic.rest.validation.AbstractConfigurationValidator.java
private boolean checkDatatypes() throws Exception {
    String contentAsJson = XContentHelper.convertToJson(content, false);
    JsonParser parser = factory.createParser(contentAsJson);
    JsonToken token = null;
    while ((token = parser.nextToken()) != null) {
        if (token.equals(JsonToken.FIELD_NAME)) {
            String currentName = parser.getCurrentName();
            DataType dataType = allowedKeys.get(currentName);
            if (dataType != null) {
                JsonToken valueToken = parser.nextToken();
                switch (dataType) {
                case STRING:
                    if (!valueToken.equals(JsonToken.VALUE_STRING)) {
                        wrongDatatypes.put(currentName, "String expected");
                    }
                    break;
                case ARRAY:
                    if (!valueToken.equals(JsonToken.START_ARRAY) && !valueToken.equals(JsonToken.END_ARRAY)) {
                        wrongDatatypes.put(currentName, "Array expected");
                    }
                    break;
                case OBJECT:
                    if (!valueToken.equals(JsonToken.START_OBJECT) && !valueToken.equals(JsonToken.END_OBJECT)) {
                        wrongDatatypes.put(currentName, "Object expected");
                    }
                    break;
                }
            }
        }
    }
    return wrongDatatypes.isEmpty();
}
From source file:com.github.heuermh.personalgenome.client.converter.JacksonPersonalGenomeConverter.java
/**
 * Parse the specified input stream and return a user.
 *
 * @param inputStream input stream
 * @return the specified input stream parsed into a user
 */
@Override
public User parseUser(final InputStream inputStream) {
    checkNotNull(inputStream);
    JsonParser parser = null;
    try {
        parser = jsonFactory.createParser(inputStream);
        parser.nextToken();

        String id = null;
        String profileId = null;
        boolean genotyped = false;
        List<Profile> profiles = new ArrayList<Profile>();
        while (parser.nextToken() != JsonToken.END_OBJECT) {
            String field = parser.getCurrentName();
            parser.nextToken();
            if ("id".equals(field)) {
                id = parser.getText();
            } else if ("profiles".equals(field)) {
                while (parser.nextToken() != JsonToken.END_ARRAY) {
                    while (parser.nextToken() != JsonToken.END_OBJECT) {
                        String profileField = parser.getCurrentName();
                        parser.nextToken();
                        if ("id".equals(profileField)) {
                            profileId = parser.getText();
                        } else if ("genotyped".equals(profileField)) {
                            genotyped = parser.getBooleanValue();
                        }
                    }
                    profiles.add(new Profile(profileId, genotyped));
                }
            }
        }
        return new User(id, profiles);
    } catch (IOException e) {
        logger.warn("could not parse user", e);
    } finally {
        try {
            inputStream.close();
        } catch (Exception e) {
            // ignored
        }
        try {
            parser.close();
        } catch (Exception e) {
            // ignored
        }
    }
    return null;
}
From source file:org.elasticsearch.client.sniff.ElasticsearchNodesSniffer.java
private static Node readNode(String nodeId, JsonParser parser, Scheme scheme) throws IOException {
    HttpHost publishedHost = null;
    /*
     * We sniff the bound hosts so we can look up the node based on any
     * address on which it is listening. This is useful in Elasticsearch's
     * test framework where we sometimes publish ipv6 addresses but the
     * tests contact the node on ipv4.
     */
    Set<HttpHost> boundHosts = new HashSet<>();
    String name = null;
    String version = null;
    /*
     * Multi-valued attributes come with key = `real_key.index` and we
     * unflip them after reading them because we can't rely on the order
     * in which they arrive.
     */
    final Map<String, String> protoAttributes = new HashMap<String, String>();

    boolean sawRoles = false;
    boolean master = false;
    boolean data = false;
    boolean ingest = false;

    String fieldName = null;
    while (parser.nextToken() != JsonToken.END_OBJECT) {
        if (parser.getCurrentToken() == JsonToken.FIELD_NAME) {
            fieldName = parser.getCurrentName();
        } else if (parser.getCurrentToken() == JsonToken.START_OBJECT) {
            if ("http".equals(fieldName)) {
                while (parser.nextToken() != JsonToken.END_OBJECT) {
                    if (parser.getCurrentToken() == JsonToken.VALUE_STRING
                            && "publish_address".equals(parser.getCurrentName())) {
                        URI publishAddressAsURI = URI.create(scheme + "://" + parser.getValueAsString());
                        publishedHost = new HttpHost(publishAddressAsURI.getHost(), publishAddressAsURI.getPort(),
                                publishAddressAsURI.getScheme());
                    } else if (parser.currentToken() == JsonToken.START_ARRAY
                            && "bound_address".equals(parser.getCurrentName())) {
                        while (parser.nextToken() != JsonToken.END_ARRAY) {
                            URI boundAddressAsURI = URI.create(scheme + "://" + parser.getValueAsString());
                            boundHosts.add(new HttpHost(boundAddressAsURI.getHost(), boundAddressAsURI.getPort(),
                                    boundAddressAsURI.getScheme()));
                        }
                    } else if (parser.getCurrentToken() == JsonToken.START_OBJECT) {
                        parser.skipChildren();
                    }
                }
            } else if ("attributes".equals(fieldName)) {
                while (parser.nextToken() != JsonToken.END_OBJECT) {
                    if (parser.getCurrentToken() == JsonToken.VALUE_STRING) {
                        String oldValue = protoAttributes.put(parser.getCurrentName(), parser.getValueAsString());
                        if (oldValue != null) {
                            throw new IOException("repeated attribute key [" + parser.getCurrentName() + "]");
                        }
                    } else {
                        parser.skipChildren();
                    }
                }
            } else {
                parser.skipChildren();
            }
        } else if (parser.currentToken() == JsonToken.START_ARRAY) {
            if ("roles".equals(fieldName)) {
                sawRoles = true;
                while (parser.nextToken() != JsonToken.END_ARRAY) {
                    switch (parser.getText()) {
                    case "master":
                        master = true;
                        break;
                    case "data":
                        data = true;
                        break;
                    case "ingest":
                        ingest = true;
                        break;
                    default:
                        logger.warn("unknown role [" + parser.getText() + "] on node [" + nodeId + "]");
                    }
                }
            } else {
                parser.skipChildren();
            }
        } else if (parser.currentToken().isScalarValue()) {
            if ("version".equals(fieldName)) {
                version = parser.getText();
            } else if ("name".equals(fieldName)) {
                name = parser.getText();
            }
        }
    }
    // The http section is not present if http is not enabled on the node; ignore such nodes.
    if (publishedHost == null) {
        logger.debug("skipping node [" + nodeId + "] with http disabled");
        return null;
    }

    Map<String, List<String>> realAttributes = new HashMap<>(protoAttributes.size());
    List<String> keys = new ArrayList<>(protoAttributes.keySet());
    for (String key : keys) {
        if (key.endsWith(".0")) {
            String realKey = key.substring(0, key.length() - 2);
            List<String> values = new ArrayList<>();
            int i = 0;
            while (true) {
                String value = protoAttributes.remove(realKey + "." + i);
                if (value == null) {
                    break;
                }
                values.add(value);
                i++;
            }
            realAttributes.put(realKey, unmodifiableList(values));
        }
    }
    for (Map.Entry<String, String> entry : protoAttributes.entrySet()) {
        realAttributes.put(entry.getKey(), singletonList(entry.getValue()));
    }

    if (version.startsWith("2.")) {
        /*
         * 2.x doesn't send roles, instead we try to read them from
         * attributes.
         */
        boolean clientAttribute = v2RoleAttributeValue(realAttributes, "client", false);
        Boolean masterAttribute = v2RoleAttributeValue(realAttributes, "master", null);
        Boolean dataAttribute = v2RoleAttributeValue(realAttributes, "data", null);
        master = masterAttribute == null ? false == clientAttribute : masterAttribute;
        data = dataAttribute == null ? false == clientAttribute : dataAttribute;
    } else {
        assert sawRoles : "didn't see roles for [" + nodeId + "]";
    }
    assert boundHosts.contains(publishedHost)
            : "[" + nodeId + "] doesn't make sense! publishedHost should be in boundHosts";
    logger.trace("adding node [" + nodeId + "]");
    return new Node(publishedHost, boundHosts, name, version, new Roles(master, data, ingest),
            unmodifiableMap(realAttributes));
}
From source file:com.amazonaws.services.sns.util.SignatureChecker.java
private Map<String, String> parseJSON(String jsonmessage) {
    Map<String, String> parsed = new HashMap<String, String>();
    JsonFactory jf = new JsonFactory();
    try {
        JsonParser parser = jf.createJsonParser(jsonmessage);
        parser.nextToken(); // shift past the START_OBJECT that begins the JSON
        while (parser.nextToken() != JsonToken.END_OBJECT) {
            String fieldname = parser.getCurrentName();
            parser.nextToken(); // move to value, or START_OBJECT/START_ARRAY
            String value;
            if (parser.getCurrentToken() == JsonToken.START_ARRAY) {
                value = "";
                boolean first = true;
                while (parser.nextToken() != JsonToken.END_ARRAY) {
                    if (!first)
                        value += ",";
                    first = false;
                    value += parser.getText();
                }
            } else {
                value = parser.getText();
            }
            parsed.put(fieldname, value);
        }
    } catch (JsonParseException e) {
        // JSON could not be parsed
        e.printStackTrace();
    } catch (IOException e) {
        // Rare exception
    }
    return parsed;
}
From source file:com.cedarsoft.couchdb.io.CouchDocSerializer.java
@Nonnull
private static List<? extends CouchDoc.Attachment> deserializeAttachments(
        @Nonnull JacksonParserWrapper parserWrapper) throws IOException {
    List<CouchDoc.Attachment> attachments = new ArrayList<>();

    // check for attachments
    if (parserWrapper.getCurrentToken() == JsonToken.FIELD_NAME
            && parserWrapper.getCurrentName().equals(PROPERTY_ATTACHMENTS)) {
        parserWrapper.nextToken(JsonToken.START_OBJECT);

        while (parserWrapper.nextToken() != JsonToken.END_OBJECT) {
            String attachmentId = parserWrapper.getCurrentName();
            parserWrapper.nextToken(JsonToken.START_OBJECT);

            parserWrapper.nextFieldValue(PROPERTY_CONTENT_TYPE);
            String contentType = parserWrapper.getText();

            parserWrapper.nextFieldValue("revpos");
            parserWrapper.nextFieldValue("digest");
            parserWrapper.nextFieldValue("length");
            long length = parserWrapper.getNumberValue().longValue();
            parserWrapper.nextFieldValue("stub");

            attachments.add(new CouchDoc.StubbedAttachment(new AttachmentId(attachmentId),
                    MediaType.valueOf(contentType), length));
            parserWrapper.nextToken(JsonToken.END_OBJECT);
        }
        parserWrapper.nextToken(JsonToken.END_OBJECT);
    }
    return attachments;
}
From source file:eu.project.ttc.models.index.JsonTermIndexIO.java
/**
 * Loads the json-serialized term index into the param {@link TermIndex} object.
 *
 * @param reader
 * @param options
 *            The deserialization {@link IOOptions}.
 * @return
 * @throws JsonParseException
 * @throws IOException
 */
public static TermIndex load(Reader reader, JsonOptions options) throws JsonParseException, IOException {
    TermIndex termIndex = null;
    JsonFactory jsonFactory = new JsonFactory();
    JsonParser jp = jsonFactory.createParser(reader); // or Stream, Reader
    jp.enable(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES);
    jp.enable(JsonParser.Feature.STRICT_DUPLICATE_DETECTION);
    String fieldname;
    String compLemma = null;
    int fileSource = -1;
    String wordLemma = null;
    String syntacticLabel = null;
    int begin = -1;
    int end = -1;
    int nbWordAnnos = -1;
    int nbSpottedTerms = -1;
    Term b;
    Term v;
    String text;
    String base;
    String variant;
    // String rule;
    String infoToken;
    String variantType;
    double variantScore;

    Map<Integer, String> inputSources = Maps.newTreeMap();

    Map<Integer, List<TempVecEntry>> contextVectors = Maps.newHashMap();

    OccurrenceStore occurrenceStore = null;

    // useful var for debug
    JsonToken tok;

    while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
        fieldname = jp.getCurrentName();
        if (METADATA.equals(fieldname)) {
            jp.nextToken();
            String termIndexName = null;
            Lang lang = null;
            String corpusID = null;
            String occurrenceStorage = null;
            String occurrenceStoreURI = null;

            while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                fieldname = jp.getCurrentName();
                if (LANG.equals(fieldname)) {
                    lang = Lang.forName(jp.nextTextValue());
                } else if (NAME.equals(fieldname)) {
                    termIndexName = jp.nextTextValue();
                } else if (NB_WORD_ANNOTATIONS.equals(fieldname)) {
                    nbWordAnnos = jp.nextIntValue(-1);
                } else if (NB_SPOTTED_TERMS.equals(fieldname)) {
                    nbSpottedTerms = jp.nextIntValue(-1);
                } else if (CORPUS_ID.equals(fieldname)) {
                    corpusID = jp.nextTextValue();
                } else if (OCCURRENCE_STORAGE.equals(fieldname)) {
                    occurrenceStorage = jp.nextTextValue();
                } else if (OCCURRENCE_MONGODB_STORE_URI.equals(fieldname)) {
                    occurrenceStoreURI = jp.nextTextValue();
                }
            }
            Preconditions.checkState(lang != null, "The property meta.lang must be defined");
            Preconditions.checkState(termIndexName != null, "The property meta.name must be defined");

            if (occurrenceStorage != null && occurrenceStorage.equals(OCCURRENCE_STORAGE_MONGODB)) {
                Preconditions.checkNotNull(occurrenceStoreURI,
                        "Missing attribute " + OCCURRENCE_MONGODB_STORE_URI);
                occurrenceStore = new MongoDBOccurrenceStore(occurrenceStoreURI, OccurrenceStore.State.INDEXED);
            } else
                occurrenceStore = new MemoryOccurrenceStore();

            termIndex = new MemoryTermIndex(termIndexName, lang, occurrenceStore);
            if (corpusID != null)
                termIndex.setCorpusId(corpusID);
            if (nbWordAnnos != -1)
                termIndex.setWordAnnotationsNum(nbWordAnnos);
            if (nbSpottedTerms != -1)
                termIndex.setSpottedTermsNum(nbSpottedTerms);

            if (options.isMetadataOnly())
                return termIndex;

        } else if (WORDS.equals(fieldname)) {
            jp.nextToken();
            while ((tok = jp.nextToken()) != JsonToken.END_ARRAY) {
                WordBuilder wordBuilder = WordBuilder.start();
                while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                    fieldname = jp.getCurrentName();
                    if (LEMMA.equals(fieldname))
                        wordBuilder.setLemma(jp.nextTextValue());
                    else if (COMPOUND_TYPE.equals(fieldname))
                        wordBuilder.setCompoundType(CompoundType.fromName(jp.nextTextValue()));
                    else if (STEM.equals(fieldname))
                        wordBuilder.setStem(jp.nextTextValue());
                    else if (COMPONENTS.equals(fieldname)) {
                        while ((tok = jp.nextToken()) != JsonToken.END_ARRAY) {
                            while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                                fieldname = jp.getCurrentName();
                                if (LEMMA.equals(fieldname))
                                    compLemma = jp.nextTextValue();
                                else if (BEGIN.equals(fieldname))
                                    begin = jp.nextIntValue(-2);
                                else if (END.equals(fieldname))
                                    end = jp.nextIntValue(-2);
                            }
                            wordBuilder.addComponent(begin, end, compLemma);
                        }
                    }
                }
                try {
                    termIndex.addWord(wordBuilder.create());
                } catch (Exception e) {
                    LOGGER.error("Could not add word " + wordBuilder.getLemma() + " to term index", e);
                    LOGGER.warn("Error ignored, trying to continue the loading of TermIndex");
                }
            }
        } else if (TERMS.equals(fieldname)) {
            jp.nextToken();
            while ((tok = jp.nextToken()) != JsonToken.END_ARRAY) {
                TermBuilder builder = TermBuilder.start(termIndex);
                List<TempVecEntry> currentContextVector = Lists.newArrayList();
                int currentTermId = -1;
                while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                    fieldname = jp.getCurrentName();
                    if (GROUPING_KEY.equals(fieldname))
                        builder.setGroupingKey(jp.nextTextValue());
                    else if (SPOTTING_RULE.equals(fieldname))
                        builder.setSpottingRule(jp.nextTextValue());
                    else if (ID.equals(fieldname)) {
                        currentTermId = jp.nextIntValue(-2);
                        builder.setId(currentTermId);
                    } else if (RANK.equals(fieldname)) {
                        builder.setRank(jp.nextIntValue(-1));
                    } else if (FREQUENCY.equals(fieldname)) {
                        builder.setFrequency(jp.nextIntValue(-1));
                    } else {
                        if (FREQ_NORM.equals(fieldname)) {
                            jp.nextToken();
                            builder.setFrequencyNorm((double) jp.getFloatValue());
                        } else if (SPECIFICITY.equals(fieldname)) {
                            jp.nextToken();
                            builder.setSpecificity((double) jp.getDoubleValue());
                        } else if (GENERAL_FREQ_NORM.equals(fieldname)) {
                            jp.nextToken();
                            builder.setGeneralFrequencyNorm((double) jp.getFloatValue());
                        } else if (WORDS.equals(fieldname)) {
                            while ((tok = jp.nextToken()) != JsonToken.END_ARRAY) {
                                wordLemma = null;
                                syntacticLabel = null;
                                while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                                    fieldname = jp.getCurrentName();
                                    if (LEMMA.equals(fieldname))
                                        wordLemma = jp.nextTextValue();
                                    else if (SYN.equals(fieldname))
                                        syntacticLabel = jp.nextTextValue();
                                }
                                Preconditions.checkArgument(wordLemma != null, MSG_EXPECT_PROP_FOR_TERM_WORD,
                                        LEMMA);
                                Preconditions.checkArgument(syntacticLabel != null, MSG_EXPECT_PROP_FOR_TERM_WORD,
                                        SYN);
                                builder.addWord(termIndex.getWord(wordLemma), syntacticLabel);
                            } // end words
                        } else if (OCCURRENCES.equals(fieldname)) {
                            tok = jp.nextToken();
                            if (tok == JsonToken.START_ARRAY) {
                                while ((tok = jp.nextToken()) != JsonToken.END_ARRAY) {
                                    begin = -1;
                                    end = -1;
                                    fileSource = -1;
                                    text = null;
                                    while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                                        fieldname = jp.getCurrentName();
                                        if (BEGIN.equals(fieldname))
                                            begin = jp.nextIntValue(-1);
                                        else if (TEXT.equals(fieldname))
                                            text = jp.nextTextValue();
                                        else if (END.equals(fieldname))
                                            end = jp.nextIntValue(-1);
                                        else if (FILE.equals(fieldname)) {
                                            fileSource = jp.nextIntValue(-1);
                                        }
                                    }
                                    Preconditions.checkArgument(begin != -1, MSG_EXPECT_PROP_FOR_OCCURRENCE,
                                            BEGIN);
                                    Preconditions.checkArgument(end != -1, MSG_EXPECT_PROP_FOR_OCCURRENCE, END);
                                    Preconditions.checkArgument(fileSource != -1, MSG_EXPECT_PROP_FOR_OCCURRENCE,
                                            FILE);
                                    Preconditions.checkNotNull(inputSources.get(fileSource),
                                            "No file source with id: %s", fileSource);
                                    Preconditions.checkNotNull(text, MSG_EXPECT_PROP_FOR_OCCURRENCE, TEXT);
                                    if (occurrenceStore.getStoreType() == OccurrenceStore.Type.MEMORY)
                                        builder.addOccurrence(begin, end,
                                                termIndex.getDocument(inputSources.get(fileSource)), text);
                                }
                            } // end occurrences
                        } else if (CONTEXT.equals(fieldname)) {
                            @SuppressWarnings("unused")
                            int totalCooccs = 0;
                            while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                                fieldname = jp.getCurrentName();
                                if (TOTAL_COOCCURRENCES.equals(fieldname))
                                    /*
                                     * value never used since the total will
                                     * be reincremented in the contextVector
                                     */
                                    totalCooccs = jp.nextIntValue(-1);
                                else if (CO_OCCURRENCES.equals(fieldname)) {
                                    jp.nextToken();
                                    while ((tok = jp.nextToken()) != JsonToken.END_ARRAY) {
                                        TempVecEntry entry = new TempVecEntry();
                                        while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                                            fieldname = jp.getCurrentName();
                                            if (NB_COCCS.equals(fieldname))
                                                entry.setNbCooccs(jp.nextIntValue(-1));
                                            else if (ASSOC_RATE.equals(fieldname)) {
                                                jp.nextToken();
                                                entry.setAssocRate(jp.getFloatValue());
                                            } else if (CO_TERM.equals(fieldname))
                                                entry.setTermGroupingKey(jp.nextTextValue());
                                            else if (FILE.equals(fieldname)) {
                                                fileSource = jp.nextIntValue(-1);
                                            }
                                        }
                                        currentContextVector.add(entry);
                                    }
                                }
                            }
                        }
                    } // end if fieldname
                } // end term object
                try {
                    builder.createAndAddToIndex();
                } catch (Exception e) {
                    LOGGER.error("Could not add term " + builder.getGroupingKey() + " to term index", e);
                    LOGGER.warn("Error ignored, trying to continue the loading of TermIndex");
                }
                if (options.isWithContexts())
                    contextVectors.put(currentTermId, currentContextVector);
            } // end array of terms
        } else if (INPUT_SOURCES.equals(fieldname)) {
            jp.nextToken();
            while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                String id = jp.getCurrentName();
                try {
                    inputSources.put(Integer.parseInt(id), jp.nextTextValue());
                } catch (NumberFormatException e) {
                    IOUtils.closeQuietly(jp);
                    throw new IllegalArgumentException("Bad format for input source key: " + id);
                }
            }
        } else if (TERM_VARIATIONS.equals(fieldname)) {
            jp.nextToken();
            while ((tok = jp.nextToken()) != JsonToken.END_ARRAY) {
                base = null;
                variant = null;
                infoToken = null;
                variantType = null;
                variantScore = 0;
                while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                    fieldname = jp.getCurrentName();
                    if (BASE.equals(fieldname))
                        base = jp.nextTextValue();
                    else if (VARIANT.equals(fieldname))
                        variant = jp.nextTextValue();
                    else if (VARIANT_TYPE.equals(fieldname))
                        variantType = jp.nextTextValue();
                    else if (VARIANT_SCORE.equals(fieldname)) {
                        jp.nextToken();
                        variantScore = jp.getDoubleValue();
                    } else if (INFO.equals(fieldname))
                        infoToken = jp.nextTextValue();
                } // end syntactic variant object
                Preconditions.checkNotNull(base, MSG_EXPECT_PROP_FOR_VAR, BASE);
                Preconditions.checkNotNull(variant, MSG_EXPECT_PROP_FOR_VAR, VARIANT);
                Preconditions.checkNotNull(infoToken, MSG_EXPECT_PROP_FOR_VAR, INFO);
                b = termIndex.getTermByGroupingKey(base);
                v = termIndex.getTermByGroupingKey(variant);
                if (b != null && v != null) {
                    VariationType vType = VariationType.fromShortName(variantType);
                    TermVariation tv = new TermVariation(vType, b, v,
                            vType == VariationType.GRAPHICAL ? Double.parseDouble(infoToken) : infoToken);
                    tv.setScore(variantScore);
                    b.addTermVariation(tv);
                } else {
                    if (b == null)
                        LOGGER.warn("Could not build variant because term \"{}\" was not found.", base);
                    if (v == null)
                        LOGGER.warn("Could not build variant because term \"{}\" was not found.", variant);
                }
                // Preconditions.checkNotNull(b, MSG_TERM_DOES_NOT_EXIST, base);
                // Preconditions.checkNotNull(v, MSG_TERM_DOES_NOT_EXIST, variant);
            } // end syntactic variations array
        }
    }
    jp.close();

    if (options.isWithContexts()) {
        /*
         * map term ids with terms in context vectors and
         * set context vectors
         */
        List<TempVecEntry> currentTempVecList;
        Term term = null;
        Term coTerm = null;
        ContextVector contextVector;
        for (int termId : contextVectors.keySet()) {
            currentTempVecList = contextVectors.get(termId);
            term = termIndex.getTermById(termId);
            contextVector = new ContextVector(term);
            for (TempVecEntry tempVecEntry : currentTempVecList) {
                coTerm = termIndex.getTermByGroupingKey(tempVecEntry.getTermGroupingKey());
                contextVector.addEntry(coTerm, tempVecEntry.getNbCooccs(), tempVecEntry.getAssocRate());
            }
            if (!contextVector.getEntries().isEmpty())
                term.setContextVector(contextVector);
        }
    }

    return termIndex;
}
From source file:com.addthis.codec.config.ConfigTraversingParser.java
@Override
public JsonParser skipChildren() throws IOException, JsonParseException {
    if (_currToken == JsonToken.START_OBJECT) {
        _startContainer = false;
        _currToken = JsonToken.END_OBJECT;
    } else if (_currToken == JsonToken.START_ARRAY) {
        _startContainer = false;
        _currToken = JsonToken.END_ARRAY;
    }
    return this;
}
From source file:org.mongojack.internal.object.BsonObjectTraversingParser.java
@Override
public JsonParser skipChildren() throws IOException {
    if (_currToken == JsonToken.START_OBJECT) {
        startContainer = false;
        _currToken = JsonToken.END_OBJECT;
    } else if (_currToken == JsonToken.START_ARRAY) {
        startContainer = false;
        _currToken = JsonToken.END_ARRAY;
    }
    return this;
}