Usage examples for com.fasterxml.jackson.core.JsonParser#getCurrentName()
public abstract String getCurrentName() throws IOException, JsonParseException;
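Before the source-file examples, a minimal self-contained sketch of the basic contract (the JSON literal and class name are illustrative, not taken from any of the sources below): getCurrentName() returns the name of the field the parser is positioned on, both on the FIELD_NAME token itself and on the value token that follows it.

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonToken;

public class GetCurrentNameDemo {
    public static void main(String[] args) throws Exception {
        JsonFactory factory = new JsonFactory();
        try (JsonParser jp = factory.createParser("{\"name\":\"widget\",\"size\":2}")) {
            while (jp.nextToken() != null) {
                if (jp.getCurrentToken() == JsonToken.FIELD_NAME) {
                    // Name of the field the parser is currently positioned on;
                    // it remains available on the following value token as well.
                    System.out.println("field: " + jp.getCurrentName());
                }
            }
        }
    }
}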
From source file:org.mongojack.internal.object.BsonObjectGenerator.java
@Override
public void copyCurrentEvent(JsonParser jp) throws IOException {
    JsonToken t = jp.getCurrentToken();
    switch (t) {
    case START_OBJECT:
        writeStartObject();
        break;
    case END_OBJECT:
        writeEndObject();
        break;
    case START_ARRAY:
        writeStartArray();
        break;
    case END_ARRAY:
        writeEndArray();
        break;
    case FIELD_NAME:
        writeFieldName(jp.getCurrentName());
        break;
    case VALUE_STRING:
        if (jp.hasTextCharacters()) {
            writeString(jp.getTextCharacters(), jp.getTextOffset(), jp.getTextLength());
        } else {
            writeString(jp.getText());
        }
        break;
    case VALUE_NUMBER_INT:
        switch (jp.getNumberType()) {
        case INT:
            writeNumber(jp.getIntValue());
            break;
        case BIG_INTEGER:
            writeNumber(jp.getBigIntegerValue());
            break;
        default:
            writeNumber(jp.getLongValue());
        }
        break;
    case VALUE_NUMBER_FLOAT:
        switch (jp.getNumberType()) {
        case BIG_DECIMAL:
            writeNumber(jp.getDecimalValue());
            break;
        case FLOAT:
            writeNumber(jp.getFloatValue());
            break;
        default:
            writeNumber(jp.getDoubleValue());
        }
        break;
    case VALUE_TRUE:
        writeBoolean(true);
        break;
    case VALUE_FALSE:
        writeBoolean(false);
        break;
    case VALUE_NULL:
        writeNull();
        break;
    case VALUE_EMBEDDED_OBJECT:
        writeObject(jp.getEmbeddedObject());
        break;
    }
}
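A minimal driver sketch for the pattern above, using plain jackson-core with an in-memory target (the input literal and the StringWriter are illustrative): a copy loop advances the parser one token at a time and replays each event on a generator, which is where FIELD_NAME tokens flow through getCurrentName().

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.core.JsonParser;

import java.io.StringWriter;

public class CopyEventsDemo {
    public static void main(String[] args) throws Exception {
        JsonFactory factory = new JsonFactory();
        StringWriter out = new StringWriter();
        try (JsonParser jp = factory.createParser("{\"a\":[1,2],\"b\":true}");
                JsonGenerator gen = factory.createGenerator(out)) {
            // Replay every parser event on the generator; for FIELD_NAME
            // events the name is carried over via getCurrentName().
            while (jp.nextToken() != null) {
                gen.copyCurrentEvent(jp);
            }
        }
        System.out.println(out); // {"a":[1,2],"b":true}
    }
}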
From source file:eu.project.ttc.models.index.JsonTermIndexIO.java
/**
 * Loads a json-serialized {@link TermIndex} from the given reader.
 *
 * @param reader
 *            the source to read from
 * @param options
 *            The deserialization {@link JsonOptions}.
 * @return the deserialized {@link TermIndex}
 * @throws JsonParseException
 * @throws IOException
 */
public static TermIndex load(Reader reader, JsonOptions options) throws JsonParseException, IOException {
    TermIndex termIndex = null;
    JsonFactory jsonFactory = new JsonFactory();
    JsonParser jp = jsonFactory.createParser(reader); // or Stream, Reader
    jp.enable(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES);
    jp.enable(JsonParser.Feature.STRICT_DUPLICATE_DETECTION);
    String fieldname;
    String compLemma = null;
    int fileSource = -1;
    String wordLemma = null;
    String syntacticLabel = null;
    int begin = -1;
    int end = -1;
    int nbWordAnnos = -1;
    int nbSpottedTerms = -1;
    Term b;
    Term v;
    String text;
    String base;
    String variant;
    // String rule;
    String infoToken;
    String variantType;
    double variantScore;
    Map<Integer, String> inputSources = Maps.newTreeMap();
    Map<Integer, List<TempVecEntry>> contextVectors = Maps.newHashMap();
    OccurrenceStore occurrenceStore = null;

    // useful var for debug
    JsonToken tok;
    while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
        fieldname = jp.getCurrentName();
        if (METADATA.equals(fieldname)) {
            jp.nextToken();
            String termIndexName = null;
            Lang lang = null;
            String corpusID = null;
            String occurrenceStorage = null;
            String occurrenceStoreURI = null;
            while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                fieldname = jp.getCurrentName();
                if (LANG.equals(fieldname)) {
                    lang = Lang.forName(jp.nextTextValue());
                } else if (NAME.equals(fieldname)) {
                    termIndexName = jp.nextTextValue();
                } else if (NB_WORD_ANNOTATIONS.equals(fieldname)) {
                    nbWordAnnos = jp.nextIntValue(-1);
                } else if (NB_SPOTTED_TERMS.equals(fieldname)) {
                    nbSpottedTerms = jp.nextIntValue(-1);
                } else if (CORPUS_ID.equals(fieldname)) {
                    corpusID = jp.nextTextValue();
                } else if (OCCURRENCE_STORAGE.equals(fieldname)) {
                    occurrenceStorage = jp.nextTextValue();
                } else if (OCCURRENCE_MONGODB_STORE_URI.equals(fieldname)) {
                    occurrenceStoreURI = jp.nextTextValue();
                }
            }
            Preconditions.checkState(lang != null, "The property meta.lang must be defined");
            Preconditions.checkState(termIndexName != null, "The property meta.name must be defined");
            if (occurrenceStorage != null && occurrenceStorage.equals(OCCURRENCE_STORAGE_MONGODB)) {
                Preconditions.checkNotNull(occurrenceStoreURI,
                        "Missing attribute " + OCCURRENCE_MONGODB_STORE_URI);
                occurrenceStore = new MongoDBOccurrenceStore(occurrenceStoreURI, OccurrenceStore.State.INDEXED);
            } else
                occurrenceStore = new MemoryOccurrenceStore();
            termIndex = new MemoryTermIndex(termIndexName, lang, occurrenceStore);
            if (corpusID != null)
                termIndex.setCorpusId(corpusID);
            if (nbWordAnnos != -1)
                termIndex.setWordAnnotationsNum(nbWordAnnos);
            if (nbSpottedTerms != -1)
                termIndex.setSpottedTermsNum(nbSpottedTerms);
            if (options.isMetadataOnly())
                return termIndex;
        } else if (WORDS.equals(fieldname)) {
            jp.nextToken();
            while ((tok = jp.nextToken()) != JsonToken.END_ARRAY) {
                WordBuilder wordBuilder = WordBuilder.start();
                while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                    fieldname = jp.getCurrentName();
                    if (LEMMA.equals(fieldname))
                        wordBuilder.setLemma(jp.nextTextValue());
                    else if (COMPOUND_TYPE.equals(fieldname))
                        wordBuilder.setCompoundType(CompoundType.fromName(jp.nextTextValue()));
                    else if (STEM.equals(fieldname))
                        wordBuilder.setStem(jp.nextTextValue());
                    else if (COMPONENTS.equals(fieldname)) {
                        while ((tok = jp.nextToken()) != JsonToken.END_ARRAY) {
                            while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                                fieldname = jp.getCurrentName();
                                if (LEMMA.equals(fieldname))
                                    compLemma = jp.nextTextValue();
                                else if (BEGIN.equals(fieldname))
                                    begin = jp.nextIntValue(-2);
                                else if (END.equals(fieldname))
                                    end = jp.nextIntValue(-2);
                            }
                            wordBuilder.addComponent(begin, end, compLemma);
                        }
                    }
                }
                try {
                    termIndex.addWord(wordBuilder.create());
                } catch (Exception e) {
                    LOGGER.error("Could not add word " + wordBuilder.getLemma() + " to term index", e);
                    LOGGER.warn("Error ignored, trying to continue the loading of TermIndex");
                }
            }
        } else if (TERMS.equals(fieldname)) {
            jp.nextToken();
            while ((tok = jp.nextToken()) != JsonToken.END_ARRAY) {
                TermBuilder builder = TermBuilder.start(termIndex);
                List<TempVecEntry> currentContextVector = Lists.newArrayList();
                int currentTermId = -1;
                while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                    fieldname = jp.getCurrentName();
                    if (GROUPING_KEY.equals(fieldname))
                        builder.setGroupingKey(jp.nextTextValue());
                    else if (SPOTTING_RULE.equals(fieldname))
                        builder.setSpottingRule(jp.nextTextValue());
                    else if (ID.equals(fieldname)) {
                        currentTermId = jp.nextIntValue(-2);
                        builder.setId(currentTermId);
                    } else if (RANK.equals(fieldname)) {
                        builder.setRank(jp.nextIntValue(-1));
                    } else if (FREQUENCY.equals(fieldname)) {
                        builder.setFrequency(jp.nextIntValue(-1));
                    } else {
                        if (FREQ_NORM.equals(fieldname)) {
                            jp.nextToken();
                            builder.setFrequencyNorm((double) jp.getFloatValue());
                        } else if (SPECIFICITY.equals(fieldname)) {
                            jp.nextToken();
                            builder.setSpecificity((double) jp.getDoubleValue());
                        } else if (GENERAL_FREQ_NORM.equals(fieldname)) {
                            jp.nextToken();
                            builder.setGeneralFrequencyNorm((double) jp.getFloatValue());
                        } else if (WORDS.equals(fieldname)) {
                            while ((tok = jp.nextToken()) != JsonToken.END_ARRAY) {
                                wordLemma = null;
                                syntacticLabel = null;
                                while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                                    fieldname = jp.getCurrentName();
                                    if (LEMMA.equals(fieldname))
                                        wordLemma = jp.nextTextValue();
                                    else if (SYN.equals(fieldname))
                                        syntacticLabel = jp.nextTextValue();
                                }
                                Preconditions.checkArgument(wordLemma != null, MSG_EXPECT_PROP_FOR_TERM_WORD, LEMMA);
                                Preconditions.checkArgument(syntacticLabel != null, MSG_EXPECT_PROP_FOR_TERM_WORD, SYN);
                                builder.addWord(termIndex.getWord(wordLemma), syntacticLabel);
                            } // end words
                        } else if (OCCURRENCES.equals(fieldname)) {
                            tok = jp.nextToken();
                            if (tok == JsonToken.START_ARRAY) {
                                while ((tok = jp.nextToken()) != JsonToken.END_ARRAY) {
                                    begin = -1;
                                    end = -1;
                                    fileSource = -1;
                                    text = null;
                                    while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                                        fieldname = jp.getCurrentName();
                                        if (BEGIN.equals(fieldname))
                                            begin = jp.nextIntValue(-1);
                                        else if (TEXT.equals(fieldname))
                                            text = jp.nextTextValue();
                                        else if (END.equals(fieldname))
                                            end = jp.nextIntValue(-1);
                                        else if (FILE.equals(fieldname)) {
                                            fileSource = jp.nextIntValue(-1);
                                        }
                                    }
                                    Preconditions.checkArgument(begin != -1, MSG_EXPECT_PROP_FOR_OCCURRENCE, BEGIN);
                                    Preconditions.checkArgument(end != -1, MSG_EXPECT_PROP_FOR_OCCURRENCE, END);
                                    Preconditions.checkArgument(fileSource != -1, MSG_EXPECT_PROP_FOR_OCCURRENCE, FILE);
                                    Preconditions.checkNotNull(inputSources.get(fileSource),
                                            "No file source with id: %s", fileSource);
                                    Preconditions.checkNotNull(text, MSG_EXPECT_PROP_FOR_OCCURRENCE, TEXT);
                                    if (occurrenceStore.getStoreType() == OccurrenceStore.Type.MEMORY)
                                        builder.addOccurrence(begin, end,
                                                termIndex.getDocument(inputSources.get(fileSource)), text);
                                }
                            }
                            // end occurrences
                        } else if (CONTEXT.equals(fieldname)) {
                            @SuppressWarnings("unused")
                            int totalCooccs = 0;
                            while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                                fieldname = jp.getCurrentName();
                                if (TOTAL_COOCCURRENCES.equals(fieldname))
                                    /*
                                     * value never used since the total will
                                     * be reincremented in the contextVector
                                     */
                                    totalCooccs = jp.nextIntValue(-1);
                                else if (CO_OCCURRENCES.equals(fieldname)) {
                                    jp.nextToken();
                                    while ((tok = jp.nextToken()) != JsonToken.END_ARRAY) {
                                        TempVecEntry entry = new TempVecEntry();
                                        while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                                            fieldname = jp.getCurrentName();
                                            if (NB_COCCS.equals(fieldname))
                                                entry.setNbCooccs(jp.nextIntValue(-1));
                                            else if (ASSOC_RATE.equals(fieldname)) {
                                                jp.nextToken();
                                                entry.setAssocRate(jp.getFloatValue());
                                            } else if (CO_TERM.equals(fieldname))
                                                entry.setTermGroupingKey(jp.nextTextValue());
                                            else if (FILE.equals(fieldname)) {
                                                fileSource = jp.nextIntValue(-1);
                                            }
                                        }
                                        currentContextVector.add(entry);
                                    }
                                }
                            }
                        }
                    } // end if fieldname
                } // end term object
                try {
                    builder.createAndAddToIndex();
                } catch (Exception e) {
                    LOGGER.error("Could not add term " + builder.getGroupingKey() + " to term index", e);
                    LOGGER.warn("Error ignored, trying to continue the loading of TermIndex");
                }
                if (options.isWithContexts())
                    contextVectors.put(currentTermId, currentContextVector);
            } // end array of terms
        } else if (INPUT_SOURCES.equals(fieldname)) {
            jp.nextToken();
            while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                String id = jp.getCurrentName();
                try {
                    inputSources.put(Integer.parseInt(id), jp.nextTextValue());
                } catch (NumberFormatException e) {
                    IOUtils.closeQuietly(jp);
                    throw new IllegalArgumentException("Bad format for input source key: " + id);
                }
            }
        } else if (TERM_VARIATIONS.equals(fieldname)) {
            jp.nextToken();
            while ((tok = jp.nextToken()) != JsonToken.END_ARRAY) {
                base = null;
                variant = null;
                infoToken = null;
                variantType = null;
                variantScore = 0;
                while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                    fieldname = jp.getCurrentName();
                    if (BASE.equals(fieldname))
                        base = jp.nextTextValue();
                    else if (VARIANT.equals(fieldname))
                        variant = jp.nextTextValue();
                    else if (VARIANT_TYPE.equals(fieldname))
                        variantType = jp.nextTextValue();
                    else if (VARIANT_SCORE.equals(fieldname)) {
                        jp.nextToken();
                        variantScore = jp.getDoubleValue();
                    } else if (INFO.equals(fieldname))
                        infoToken = jp.nextTextValue();
                } // end syntactic variant object
                Preconditions.checkNotNull(base, MSG_EXPECT_PROP_FOR_VAR, BASE);
                Preconditions.checkNotNull(variant, MSG_EXPECT_PROP_FOR_VAR, VARIANT);
                Preconditions.checkNotNull(infoToken, MSG_EXPECT_PROP_FOR_VAR, INFO);
                b = termIndex.getTermByGroupingKey(base);
                v = termIndex.getTermByGroupingKey(variant);
                if (b != null && v != null) {
                    VariationType vType = VariationType.fromShortName(variantType);
                    TermVariation tv = new TermVariation(vType, b, v,
                            vType == VariationType.GRAPHICAL ? Double.parseDouble(infoToken) : infoToken);
                    tv.setScore(variantScore);
                    b.addTermVariation(tv);
                } else {
                    if (b == null)
                        LOGGER.warn("Could not build variant because term \"{}\" was not found.", base);
                    if (v == null)
                        LOGGER.warn("Could not build variant because term \"{}\" was not found.", variant);
                }
                // Preconditions.checkNotNull(b, MSG_TERM_DOES_NOT_EXIST, base);
                // Preconditions.checkNotNull(v, MSG_TERM_DOES_NOT_EXIST, variant);
            } // end syntactic variations array
        }
    }
    jp.close();

    if (options.isWithContexts()) {
        /*
         * map term ids with terms in context vectors and
         * set context vectors
         */
        List<TempVecEntry> currentTempVecList;
        Term term = null;
        Term coTerm = null;
        ContextVector contextVector;
        for (int termId : contextVectors.keySet()) {
            currentTempVecList = contextVectors.get(termId);
            term = termIndex.getTermById(termId);
            contextVector = new ContextVector(term);
            for (TempVecEntry tempVecEntry : currentTempVecList) {
                coTerm = termIndex.getTermByGroupingKey(tempVecEntry.getTermGroupingKey());
                contextVector.addEntry(coTerm, tempVecEntry.getNbCooccs(), tempVecEntry.getAssocRate());
            }
            if (!contextVector.getEntries().isEmpty())
                term.setContextVector(contextVector);
        }
    }
    return termIndex;
}
From source file:com.bazaarvoice.jsonpps.PrettyPrintJson.java
private void copyCurrentStructure(JsonParser parser, ObjectMapper mapper, int depth, JsonGenerator generator)
        throws IOException {
    // Avoid using the mapper to parse the entire input until we absolutely must. This allows pretty
    // printing huge top-level arrays (that wouldn't fit in memory) containing smaller objects (that
    // individually do fit in memory) where the objects are printed with sorted keys.
    JsonToken t = parser.getCurrentToken();
    if (t == null) {
        generator.copyCurrentStructure(parser); // Will report the error of a null token.
        return;
    }
    int id = t.id();
    if (id == ID_FIELD_NAME) {
        if (depth > flatten) {
            generator.writeFieldName(parser.getCurrentName());
        }
        t = parser.nextToken();
        id = t.id();
    }
    switch (id) {
    case ID_START_OBJECT:
        if (sortKeys && depth >= flatten) {
            // Load the entire object in memory so we can sort its keys and serialize it back out.
            mapper.writeValue(generator, parser.readValueAs(Map.class));
        } else {
            // Don't load the whole object into memory. Copy it in a memory-efficient streaming fashion.
            if (depth >= flatten) {
                generator.writeStartObject();
            }
            while (parser.nextToken() != JsonToken.END_OBJECT) {
                copyCurrentStructure(parser, mapper, depth + 1, generator);
            }
            if (depth >= flatten) {
                generator.writeEndObject();
            }
        }
        break;
    case ID_START_ARRAY:
        // Don't load the whole array into memory. Copy it in a memory-efficient streaming fashion.
        if (depth >= flatten) {
            generator.writeStartArray();
        }
        while (parser.nextToken() != JsonToken.END_ARRAY) {
            copyCurrentStructure(parser, mapper, depth + 1, generator);
        }
        if (depth >= flatten) {
            generator.writeEndArray();
        }
        break;
    default:
        generator.copyCurrentEvent(parser);
        break;
    }
}
From source file:de.alexkamp.sandbox.ChrootSandbox.java
@Override
public void run() {
    try {
        JsonParser parser = factory.createParser(socket.getInputStream());
        String caller = null;
        String channel = null;
        String message = null;
        JsonToken jt = null;
        while (null != (jt = parser.nextToken())) {
            if (jt.isStructStart()) {
                caller = null;
                channel = null;
                message = null;
            } else if (jt.isStructEnd()) {
                try {
                    handle(caller, channel, message);
                } catch (Exception ex) {
                    handleAsyncError(ex);
                }
            } else {
                String name = parser.getCurrentName();
                parser.nextToken();
                switch (name) {
                case "Caller":
                    caller = parser.getText();
                    break;
                case "Channel":
                    channel = parser.getText();
                    break;
                case "Message":
                    message = parser.getText();
                    break;
                }
            }
        }
    } catch (Exception e) {
        handleAsyncError(e);
    }
}
From source file:no.ssb.jsonstat.v2.deser.DatasetDeserializer.java
@Override
public DatasetBuildable deserialize(JsonParser p, DeserializationContext ctxt) throws IOException {
    if (p.getCurrentToken() == JsonToken.START_OBJECT) {
        p.nextToken();
    }
    Set<String> ids = Collections.emptySet();
    List<Integer> sizes = Collections.emptyList();
    Multimap<String, String> roles = ArrayListMultimap.create();
    Map<String, Dimension.Builder> dims = Collections.emptyMap();
    List<Number> values = Collections.emptyList();

    DatasetBuilder builder = Dataset.create();
    Optional<String> version = Optional.empty();
    Optional<String> clazz = Optional.empty();
    Optional<ObjectNode> extension = Optional.empty();
    while (p.nextValue() != JsonToken.END_OBJECT) {
        switch (p.getCurrentName()) {
        case "label":
            builder.withLabel(_parseString(p, ctxt));
            break;
        case "source":
            builder.withSource(_parseString(p, ctxt));
            break;
        case "href":
            break;
        case "updated":
            Instant updated = parseEcmaDate(_parseString(p, ctxt));
            builder.updatedAt(updated);
            break;
        case "value":
            values = parseValues(p, ctxt);
            break;
        case "dimension":
            if (!version.orElse("1.x").equals("2.0")) {
                dims = Maps.newHashMap();
                // Deal with the id, size and role inside dimension.
                while (p.nextValue() != JsonToken.END_OBJECT) {
                    switch (p.getCurrentName()) {
                    case "id":
                        ids = p.readValueAs(ID_SET);
                        break;
                    case "size":
                        sizes = p.readValueAs(SIZE_LIST);
                        break;
                    case "role":
                        roles = p.readValueAs(ROLE_MULTIMAP);
                        break;
                    default:
                        dims.put(p.getCurrentName(), ctxt.readValue(p, Dimension.Builder.class));
                    }
                }
            } else {
                dims = p.readValueAs(DIMENSION_MAP);
            }
            break;
        case "id":
            ids = p.readValueAs(ID_SET);
            break;
        case "size":
            sizes = p.readValueAs(SIZE_LIST);
            break;
        case "role":
            roles = p.readValueAs(ROLE_MULTIMAP);
            break;
        case "extension":
            extension = Optional.of(ctxt.readValue(p, ObjectNode.class));
            break;
        case "link":
        case "status":
            // TODO
            p.skipChildren();
            break;
        case "version":
            version = Optional.of(_parseString(p, ctxt));
            break;
        case "class":
            // TODO
            clazz = Optional.of(_parseString(p, ctxt));
            break;
        default:
            boolean handled = ctxt.handleUnknownProperty(p, this, Dimension.Builder.class, p.getCurrentName());
            if (!handled)
                p.skipChildren();
            break;
        }
    }

    // Setup roles
    for (Map.Entry<String, String> dimRole : roles.entries()) {
        Dimension.Roles role = Dimension.Roles.valueOf(dimRole.getKey().toUpperCase());
        Dimension.Builder dimension = checkNotNull(dims.get(dimRole.getValue()),
                "could not assign the role {} to the dimension {}. The dimension did not exist", role,
                dimRole.getValue());
        dimension.withRole(role);
    }

    List<Dimension.Builder> orderedDimensions = Lists.newArrayList();
    for (String dimensionName : ids) {
        orderedDimensions.add(dims.get(dimensionName));
    }

    // TODO: Check size?
    // Check ids and add to the data set.
    checkArgument(ids.size() == dims.size(), "dimension and size did not match");

    if (extension.isPresent()) {
        builder.withExtension(extension.get());
    }

    return builder.withDimensions(orderedDimensions).withValues(values);
}
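A hypothetical wiring sketch for a custom deserializer like the one above (not from the source; it assumes DatasetDeserializer extends JsonDeserializer<DatasetBuildable>, as its deserialize() signature suggests, and jsonStatString is a placeholder for the JSON-stat input):

// Custom deserializers are typically registered through a SimpleModule.
ObjectMapper mapper = new ObjectMapper();
SimpleModule module = new SimpleModule();
module.addDeserializer(DatasetBuildable.class, new DatasetDeserializer());
mapper.registerModule(module);
DatasetBuildable dataset = mapper.readValue(jsonStatString, DatasetBuildable.class);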
From source file:com.basistech.rosette.dm.jackson.ListAttributeDeserializer.java
@Override
@SuppressWarnings("unchecked")
public ListAttribute deserialize(JsonParser jp, DeserializationContext ctxt) throws IOException {
    if (jp.getCurrentToken() == JsonToken.START_OBJECT) {
        // this is what we expect.
        // we advance to be in the same place the 'else' will be -- the first FIELD_NAME.
        jp.nextToken();
    } else {
        /* In a full AnnotatedText, which is already doing some polymorphism shuffling, we end up here. */
        /* We find a FIELD_NAME for the first field of the object */
        if (jp.getCurrentToken() != JsonToken.FIELD_NAME) {
            throw ctxt.wrongTokenException(jp, JsonToken.START_OBJECT,
                    "ListAttributeDeserializer called not at START_OBJECT or FIELD_NAME of first field");
        }
        /* We are at the field name, ready for the loop. */
    }
    TokenBuffer tb = null;
    for (JsonToken t = jp.getCurrentToken(); t == JsonToken.FIELD_NAME; t = jp.nextToken()) {
        String name = jp.getCurrentName();
        if ("itemType".equals(name)) { // gotcha!
            return deserialize(jp, ctxt, tb);
        }
        if (tb == null) {
            tb = new TokenBuffer(null, false);
        }
        tb.copyCurrentStructure(jp);
    }
    throw ctxt.mappingException("No itemType provided in a list");
}
From source file:com.github.shyiko.jackson.module.advice.AdvisedBeanDeserializer.java
protected Object deserializeWithView(JsonParser jp, DeserializationContext ctxt, Object bean, Class<?> activeView)
        throws IOException {
    beanDeserializerAdvice.before(bean, jp, ctxt);
    JsonToken t = jp.getCurrentToken();
    for (; t == JsonToken.FIELD_NAME; t = jp.nextToken()) {
        String propName = jp.getCurrentName();
        // Skip field name:
        jp.nextToken();
        if (beanDeserializerAdvice.intercept(bean, propName, jp, ctxt)) {
            continue;
        }
        SettableBeanProperty prop = _beanProperties.find(propName);
        if (prop != null) {
            if (!prop.visibleInView(activeView)) {
                jp.skipChildren();
                continue;
            }
            try {
                prop.deserializeAndSet(jp, ctxt, bean);
            } catch (Exception e) {
                wrapAndThrow(e, bean, propName, ctxt);
            }
            continue;
        }
        handleUnknownVanilla(jp, ctxt, bean, propName);
    }
    beanDeserializerAdvice.after(bean, jp, ctxt);
    return bean;
}
From source file:com.github.shyiko.jackson.module.advice.AdvisedBeanDeserializer.java
/**
 * Streamlined version that is only used when no "special"
 * features are enabled.
 */
private Object vanillaDeserialize(JsonParser jp, DeserializationContext ctxt, JsonToken t) throws IOException {
    final Object bean = _valueInstantiator.createUsingDefault(ctxt);
    beanDeserializerAdvice.before(bean, jp, ctxt);
    for (; jp.getCurrentToken() != JsonToken.END_OBJECT; jp.nextToken()) {
        String propName = jp.getCurrentName();
        // Skip field name:
        jp.nextToken();
        if (beanDeserializerAdvice.intercept(bean, propName, jp, ctxt)) {
            continue;
        }
        SettableBeanProperty prop = _beanProperties.find(propName);
        if (prop != null) { // normal case
            try {
                prop.deserializeAndSet(jp, ctxt, bean);
            } catch (Exception e) {
                wrapAndThrow(e, bean, propName, ctxt);
            }
        } else {
            handleUnknownVanilla(jp, ctxt, bean, propName);
        }
    }
    beanDeserializerAdvice.after(bean, jp, ctxt);
    return bean;
}
From source file:data.DefaultExchanger.java
public void importData(String dbName, JsonParser parser, JdbcTemplate jdbcTemplate) throws IOException {
    PlatformTransactionManager tm = new DataSourceTransactionManager(jdbcTemplate.getDataSource());
    TransactionStatus ts = tm.getTransaction(new DefaultTransactionDefinition());
    try {
        if (dbName.equals("MySQL")) {
            jdbcTemplate.update("SET FOREIGN_KEY_CHECKS = 0");
            jdbcTemplate.update("SET NAMES 'utf8mb4'");
        }

        final Configuration config = Configuration.root();
        int batchSize = config.getInt(DATA_BATCH_SIZE_KEY, DEFAULT_BATCH_SIZE);

        if (parser.nextToken() != JsonToken.END_OBJECT) {
            String fieldName = parser.getCurrentName();
            play.Logger.debug("importing {}", fieldName);
            if (fieldName.equalsIgnoreCase(getTable())) {
                truncateTable(jdbcTemplate);
                JsonToken current = parser.nextToken();
                if (current == JsonToken.START_ARRAY) {
                    importDataFromArray(parser, jdbcTemplate, batchSize);
                    importSequence(dbName, parser, jdbcTemplate);
                } else {
                    play.Logger.info("Error: records should be an array: skipping.");
                    parser.skipChildren();
                }
            }
        }
        tm.commit(ts);
    } catch (Exception e) {
        e.printStackTrace();
        tm.rollback(ts);
    } finally {
        if (dbName.equals("MySQL")) {
            jdbcTemplate.update("SET FOREIGN_KEY_CHECKS = 1");
        }
    }
}
From source file:io.pdef.json.JsonJacksonFormat.java
private Map<String, Object> readMap(final JsonParser parser) throws IOException {
    JsonToken current = parser.getCurrentToken();
    if (current != JsonToken.START_OBJECT) {
        throw new JsonFormatException("Bad JSON string, failed to read an object");
    }

    Map<String, Object> map = new LinkedHashMap<String, Object>();
    while (true) {
        JsonToken next = parser.nextToken();
        if (next == null) {
            throw new JsonFormatException("End of file");
        } else if (next == JsonToken.END_OBJECT) {
            break;
        } else if (next != JsonToken.FIELD_NAME) {
            throw new JsonFormatException("Failed to read a field name from " + next);
        }

        String field = parser.getCurrentName();
        parser.nextToken();
        Object value = read(parser);
        map.put(field, value);
    }
    return map;
}
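The same FIELD_NAME/value stepping protocol as readMap() above, in a self-contained sketch that reads values as plain strings instead of going through the class's read() dispatcher (the input literal and class name are illustrative): after each FIELD_NAME token, getCurrentName() yields the key and one more nextToken() lands on the matching value.

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonToken;

import java.util.LinkedHashMap;
import java.util.Map;

public class ReadFlatMapDemo {
    public static void main(String[] args) throws Exception {
        Map<String, String> map = new LinkedHashMap<>();
        JsonFactory factory = new JsonFactory();
        try (JsonParser parser = factory.createParser("{\"a\":\"1\",\"b\":\"two\"}")) {
            if (parser.nextToken() != JsonToken.START_OBJECT) {
                throw new IllegalStateException("Expected a JSON object");
            }
            while (parser.nextToken() != JsonToken.END_OBJECT) {
                // Parser is on FIELD_NAME here ...
                String field = parser.getCurrentName();
                // ... and nextToken() advances to the matching value.
                parser.nextToken();
                map.put(field, parser.getValueAsString());
            }
        }
        System.out.println(map); // {a=1, b=two}
    }
}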