List of usage examples for com.fasterxml.jackson.core JsonFactory createParser
public JsonParser createParser(String content) throws IOException, JsonParseException
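Before the project examples below, a minimal self-contained sketch of this overload (class name and input are illustrative, not from any of the projects): create a parser from a String, stream its tokens, and let try-with-resources close it.

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonToken;

public class CreateParserSketch {
    public static void main(String[] args) throws Exception {
        JsonFactory jsonFactory = new JsonFactory();
        // try-with-resources closes the parser and releases its buffers
        try (JsonParser parser = jsonFactory.createParser("{\"name\":\"value\"}")) {
            JsonToken token;
            while ((token = parser.nextToken()) != null) {
                System.out.println(token + " -> " + parser.getText());
            }
        }
    }
}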
From source file:org.lambdamatic.internal.elasticsearch.clientdsl.Client.java
public static <T> T readResponse(final JsonFactory jsonFactory, final String responseBody,
        final Class<T> responseType) {
    try {
        return jsonFactory.createParser(responseBody).readValueAs(responseType);
    } catch (UnsupportedOperationException | IOException e) {
        throw new ResponseParsingException("Failed to parse response body", e);
    }
}
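A caveat worth noting for this example: JsonParser.readValueAs(...) delegates to the parser's ObjectCodec and throws IllegalStateException when the parser comes from a plain new JsonFactory(), which has no codec. A hedged sketch of one way to supply one; MyResponse is a hypothetical POJO, not part of the project above.

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.databind.ObjectMapper;

// Taking the factory from an ObjectMapper makes the mapper the codec of
// every parser that factory creates, so readValueAs(...) can do data binding.
ObjectMapper mapper = new ObjectMapper();
JsonFactory jsonFactory = mapper.getFactory();
// MyResponse is hypothetical; readResponse is the method shown above.
MyResponse parsed = readResponse(jsonFactory, "{\"acknowledged\":true}", MyResponse.class);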
From source file:com.github.heuermh.ensemblrestclient.JacksonVariationConverter.java
static Variation parseVariation(final JsonFactory jsonFactory, final InputStream inputStream)
        throws IOException {
    JsonParser parser = null;
    try {
        parser = jsonFactory.createParser(inputStream);
        parser.nextToken();

        String identifier = null;
        String referenceAllele = null;
        List<String> alternateAlleles = new ArrayList<String>();
        String locationName = null;
        String coordinateSystem = "chromosome";
        int start = -1;
        int end = -1;
        int strand = -1;

        while (parser.nextToken() != JsonToken.END_OBJECT) {
            String field = parser.getCurrentName();
            parser.nextToken();
            if ("name".equals(field)) {
                identifier = parser.getText();
            } else if ("mappings".equals(field)) {
                // todo: will only catch last mapping
                while (parser.nextToken() != JsonToken.END_ARRAY) {
                    while (parser.nextToken() != JsonToken.END_OBJECT) {
                        String mappingsField = parser.getCurrentName();
                        parser.nextToken();
                        if ("seq_region_name".equals(mappingsField)) {
                            locationName = parser.getText();
                        } else if ("start".equals(mappingsField)) {
                            start = Integer.parseInt(parser.getText());
                        } else if ("end".equals(mappingsField)) {
                            end = Integer.parseInt(parser.getText());
                        } else if ("strand".equals(mappingsField)) {
                            strand = Integer.parseInt(parser.getText());
                        } else if ("allele_string".equals(mappingsField)) {
                            String[] tokens = parser.getText().split("/");
                            // todo: check ref here against ancestral_allele
                            referenceAllele = tokens[0];
                            for (int i = 1; i < tokens.length; i++) {
                                alternateAlleles.add(tokens[i]);
                            }
                        }
                    }
                }
            } else if ("synonyms".equals(field)) {
                while (parser.nextToken() != JsonToken.END_ARRAY) {
                    // ignore
                }
            } else if ("evidence".equals(field)) {
                while (parser.nextToken() != JsonToken.END_ARRAY) {
                    // ignore
                }
            }
        }
        return new Variation(identifier, referenceAllele, alternateAlleles,
                new Location(locationName, coordinateSystem, start, end, strand));
    } finally {
        try {
            inputStream.close();
        } catch (Exception e) {
            // ignored
        }
        try {
            parser.close();
        } catch (Exception e) {
            // ignored
        }
    }
}
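The manual loops that drain the "synonyms" and "evidence" arrays above can also be written with JsonParser.skipChildren(), which consumes everything up to the matching END_ARRAY or END_OBJECT. A minimal sketch of that idiom (class name and input are illustrative):

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonToken;

public class SkipChildrenSketch {
    public static void main(String[] args) throws Exception {
        try (JsonParser parser = new JsonFactory()
                .createParser("{\"keep\":1,\"ignore\":[1,2,3]}")) {
            parser.nextToken(); // move to START_OBJECT
            while (parser.nextToken() != JsonToken.END_OBJECT) {
                String field = parser.getCurrentName();
                parser.nextToken(); // move to the value (or START_ARRAY)
                if ("ignore".equals(field)) {
                    parser.skipChildren(); // consume the whole array in one call
                } else if ("keep".equals(field)) {
                    System.out.println("keep = " + parser.getIntValue());
                }
            }
        }
    }
}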
From source file:eu.project.ttc.readers.TermSuiteJsonCasDeserializer.java
public static void deserialize(InputStream inputStream, CAS cas, String encoding) {
    Preconditions.checkNotNull(inputStream, "Parameter input stream is null");
    Preconditions.checkNotNull(cas, "Parameter CAS is null");
    try {
        JsonFactory factory = new JsonFactory();
        // parser and token are fields declared in the enclosing class
        parser = factory.createParser(inputStream);
        SourceDocumentInformation sdi = (SourceDocumentInformation) cas
                .createAnnotation(cas.getJCas().getCasType(SourceDocumentInformation.type), 0, 0);
        WordAnnotation wa = (WordAnnotation) cas
                .createAnnotation(cas.getJCas().getCasType(WordAnnotation.type), 0, 0);
        TermOccAnnotation toa = (TermOccAnnotation) cas
                .createAnnotation(cas.getJCas().getCasType(TermOccAnnotation.type), 0, 0);
        FixedExpression fe = (FixedExpression) cas
                .createAnnotation(cas.getJCas().getCasType(FixedExpression.type), 0, 0);
        boolean inSdi = false;
        boolean inWa = false;
        boolean inToa = false;
        boolean inFe = false;
        boolean inCoveredText = false;

        while ((token = parser.nextToken()) != null) {
            if (inSdi) {
                if (token == JsonToken.END_OBJECT) {
                    inSdi = false;
                } else {
                    fillSdi(parser, token, sdi);
                }
            } else if (inWa) {
                if (token == JsonToken.END_ARRAY) {
                    inWa = false;
                } else if (token == JsonToken.END_OBJECT) {
                    wa.addToIndexes();
                    wa = (WordAnnotation) cas
                            .createAnnotation(cas.getJCas().getCasType(WordAnnotation.type), 0, 0);
                }
                fillWordAnnotations(parser, token, wa);
            } else if (inToa) {
                if (token == JsonToken.END_ARRAY && Objects
                        .equals(parser.getParsingContext().getCurrentName(), "term_occ_annotations")) {
                    inToa = false;
                } else if (token == JsonToken.END_OBJECT) {
                    toa.addToIndexes();
                    toa = (TermOccAnnotation) cas
                            .createAnnotation(cas.getJCas().getCasType(TermOccAnnotation.type), 0, 0);
                }
                FillTermOccAnnotations(parser, token, toa, cas);
            } else if (inFe) {
                if (token == JsonToken.END_ARRAY && Objects
                        .equals(parser.getParsingContext().getCurrentName(), "fixed_expressions")) {
                    inFe = false;
                } else if (token == JsonToken.END_OBJECT) {
                    fe.addToIndexes();
                    fe = (FixedExpression) cas
                            .createAnnotation(cas.getJCas().getCasType(FixedExpression.type), 0, 0);
                }
                FillFixedExpressions(parser, token, fe, cas);
            } else if (inCoveredText) {
                if (token == JsonToken.VALUE_STRING) {
                    String text = parser.getText();
                    cas.setDocumentText(text);
                }
            } else if ("sdi".equals(parser.getParsingContext().getCurrentName())) {
                inSdi = true;
            } else if ("word_annotations".equals(parser.getParsingContext().getCurrentName())) {
                inWa = true;
            } else if ("term_occ_annotations".equals(parser.getParsingContext().getCurrentName())) {
                inToa = true;
            } else if ("fixed_expressions".equals(parser.getParsingContext().getCurrentName())) {
                inFe = true;
            } else if ("covered_text".equals(parser.getParsingContext().getCurrentName())) {
                inCoveredText = true;
            }
        }
        sdi.addToIndexes();
    } catch (IOException | CASException e) {
        logger.error("An error occurred during TermSuite Json Cas parsing", e);
    }
}
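This deserializer repeatedly asks parser.getParsingContext().getCurrentName() to find out which top-level section the token stream is currently inside. A small sketch of that idiom on made-up input:

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonToken;

public class ParsingContextSketch {
    public static void main(String[] args) throws Exception {
        try (JsonParser p = new JsonFactory()
                .createParser("{\"sdi\":{\"uri\":\"doc.txt\"},\"covered_text\":\"...\"}")) {
            JsonToken t;
            while ((t = p.nextToken()) != null) {
                // getCurrentName() reports the field name of the enclosing context
                System.out.println(t + " inside '" + p.getParsingContext().getCurrentName() + "'");
            }
        }
    }
}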
From source file:com.github.heuermh.ensemblrestclient.JacksonVariationConverter.java
static VariationConsequences parseVariationConsequences(final JsonFactory jsonFactory,
        final InputStream inputStream) throws IOException {
    JsonParser parser = null;
    try {
        parser = jsonFactory.createParser(inputStream);
        parser.nextToken();

        String identifier = null;
        String referenceAllele = null;
        List<String> alternateAlleles = new ArrayList<String>();
        String locationName = null;
        String coordinateSystem = "chromosome";
        int start = -1;
        int end = -1;
        int strand = -1;
        List<TranscriptConsequences> transcriptConsequences = new ArrayList<TranscriptConsequences>();
        String alternateAllele = null;
        int transcriptStrand = -1;
        boolean canonical = false;
        String geneId = null;
        String transcriptId = null;
        String translationId = null;
        String transcriptAlleleString = null;
        String codons = null;
        String hgvsc = null;
        String aminoAcids = null;
        String hgvsp = null;
        List<String> consequenceTerms = new ArrayList<String>();

        while (parser.nextToken() != JsonToken.END_ARRAY) {
            while (parser.nextToken() != JsonToken.END_OBJECT) {
                String field = parser.getCurrentName();
                parser.nextToken();
                if ("id".equals(field)) {
                    identifier = parser.getText();
                } else if ("seq_region_name".equals(field)) {
                    locationName = parser.getText();
                } else if ("start".equals(field)) {
                    start = parser.getIntValue();
                } else if ("end".equals(field)) {
                    end = parser.getIntValue();
                } else if ("strand".equals(field)) {
                    strand = parser.getIntValue();
                } else if ("allele_string".equals(field)) {
                    String[] tokens = parser.getText().split("/");
                    referenceAllele = tokens[0];
                    for (int i = 1; i < tokens.length; i++) {
                        alternateAlleles.add(tokens[i]);
                    }
                } else if ("transcript_consequences".equals(field)) {
                    while (parser.nextToken() != JsonToken.END_ARRAY) {
                        while (parser.nextToken() != JsonToken.END_OBJECT) {
                            String transcriptField = parser.getCurrentName();
                            parser.nextToken();
                            if ("variant_allele".equals(transcriptField)) {
                                alternateAllele = parser.getText();
                            } else if ("strand".equals(transcriptField)) {
                                transcriptStrand = parser.getIntValue();
                            } else if ("canonical".equals(transcriptField)) {
                                canonical = (Integer.parseInt(parser.getText()) > 0);
                            } else if ("gene_id".equals(transcriptField)) {
                                geneId = parser.getText();
                            } else if ("transcript_id".equals(transcriptField)) {
                                transcriptId = parser.getText();
                            } else if ("protein_id".equals(transcriptField)) {
                                translationId = parser.getText();
                            } else if ("codons".equals(transcriptField)) {
                                codons = parser.getText();
                            } else if ("hgvsc".equals(transcriptField)) {
                                hgvsc = parser.getText();
                            } else if ("amino_acids".equals(transcriptField)) {
                                aminoAcids = parser.getText();
                            } else if ("hgvsp".equals(transcriptField)) {
                                hgvsp = parser.getText();
                            } else if ("consequence_terms".equals(transcriptField)) {
                                while (parser.nextToken() != JsonToken.END_ARRAY) {
                                    consequenceTerms.add(parser.getText());
                                }
                            }
                        }
                        transcriptConsequences.add(new TranscriptConsequences(alternateAllele,
                                transcriptStrand, canonical, geneId, transcriptId, translationId,
                                codons, hgvsc, aminoAcids, hgvsp, consequenceTerms));
                        alternateAllele = null;
                        transcriptStrand = -1;
                        canonical = false;
                        geneId = null;
                        transcriptId = null;
                        translationId = null;
                        transcriptAlleleString = null;
                        codons = null;
                        hgvsc = null;
                        aminoAcids = null;
                        hgvsp = null;
                        consequenceTerms.clear();
                    }
                } else if ("colocated_variants".equals(field)) {
                    while (parser.nextToken() != JsonToken.END_ARRAY) {
                        while (parser.nextToken() != JsonToken.END_OBJECT) {
                            // ignore
                        }
                    }
                }
            }
        }
        Location location = new Location(locationName, coordinateSystem, start, end, strand);
        return new VariationConsequences(identifier, referenceAllele, alternateAlleles, location,
                transcriptConsequences);
    } finally {
        try {
            inputStream.close();
        } catch (Exception e) {
            // ignored
        }
        try {
            parser.close();
        } catch (Exception e) {
            // ignored
        }
    }
}
From source file:org.lambdamatic.internal.elasticsearch.clientdsl.Client.java
public static <T> T readResponse(final JsonFactory jsonFactory, final Response response,
        final Class<T> responseType) {
    try {
        try (final InputStream responseBodyStream = response.getEntity().getContent()) {
            if (LOGGER.isTraceEnabled()) {
                // formatJsonDocument(...) consumes the one-shot entity stream,
                // so parse the formatted string it returned instead
                final String responseBody = formatJsonDocument(responseBodyStream);
                LOGGER.trace("Parsing response body:\n{}", responseBody);
                return jsonFactory.createParser(responseBody).readValueAs(responseType);
            }
            return jsonFactory.createParser(responseBodyStream).readValueAs(responseType);
        }
    } catch (UnsupportedOperationException | IOException e) {
        throw new ResponseParsingException("Failed to parse response body", e);
    }
}
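The trace branch above works around the fact that an HTTP entity stream can be consumed only once. A minimal sketch of the same buffer-then-parse idea; logAndParse is a made-up name, readAllBytes() needs Java 9+, and the factory is assumed to carry an ObjectCodec as discussed earlier.

import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import com.fasterxml.jackson.core.JsonFactory;

static <T> T logAndParse(JsonFactory jsonFactory, InputStream in, Class<T> type) throws IOException {
    byte[] body = in.readAllBytes(); // buffer the one-shot stream ...
    System.out.println(new String(body, StandardCharsets.UTF_8)); // ... log it ...
    return jsonFactory.createParser(body).readValueAs(type); // ... then parse the buffered copy
}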
From source file:eu.project.ttc.models.index.JsonTermIndexIO.java
/**
 * Loads a JSON-serialized term index into a {@link TermIndex} object.
 *
 * @param reader
 * @param options
 *            The deserialization {@link IOOptions}.
 * @return
 * @throws JsonParseException
 * @throws IOException
 */
public static TermIndex load(Reader reader, JsonOptions options) throws JsonParseException, IOException {
    TermIndex termIndex = null;
    JsonFactory jsonFactory = new JsonFactory();
    JsonParser jp = jsonFactory.createParser(reader); // or Stream, Reader
    jp.enable(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES);
    jp.enable(JsonParser.Feature.STRICT_DUPLICATE_DETECTION);
    String fieldname;
    String compLemma = null;
    int fileSource = -1;
    String wordLemma = null;
    String syntacticLabel = null;
    int begin = -1;
    int end = -1;
    int nbWordAnnos = -1;
    int nbSpottedTerms = -1;
    Term b;
    Term v;
    String text;
    String base;
    String variant;
    // String rule;
    String infoToken;
    String variantType;
    double variantScore;
    Map<Integer, String> inputSources = Maps.newTreeMap();
    Map<Integer, List<TempVecEntry>> contextVectors = Maps.newHashMap();
    OccurrenceStore occurrenceStore = null;

    // useful var for debug
    JsonToken tok;

    while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
        fieldname = jp.getCurrentName();
        if (METADATA.equals(fieldname)) {
            jp.nextToken();
            String termIndexName = null;
            Lang lang = null;
            String corpusID = null;
            String occurrenceStorage = null;
            String occurrenceStoreURI = null;
            while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                fieldname = jp.getCurrentName();
                if (LANG.equals(fieldname)) {
                    lang = Lang.forName(jp.nextTextValue());
                } else if (NAME.equals(fieldname)) {
                    termIndexName = jp.nextTextValue();
                } else if (NB_WORD_ANNOTATIONS.equals(fieldname)) {
                    nbWordAnnos = jp.nextIntValue(-1);
                } else if (NB_SPOTTED_TERMS.equals(fieldname)) {
                    nbSpottedTerms = jp.nextIntValue(-1);
                } else if (CORPUS_ID.equals(fieldname)) {
                    corpusID = jp.nextTextValue();
                } else if (OCCURRENCE_STORAGE.equals(fieldname)) {
                    occurrenceStorage = jp.nextTextValue();
                } else if (OCCURRENCE_MONGODB_STORE_URI.equals(fieldname)) {
                    occurrenceStoreURI = jp.nextTextValue();
                }
            }
            Preconditions.checkState(lang != null, "The property meta.lang must be defined");
            Preconditions.checkState(termIndexName != null, "The property meta.name must be defined");
            if (occurrenceStorage != null && occurrenceStorage.equals(OCCURRENCE_STORAGE_MONGODB)) {
                Preconditions.checkNotNull(occurrenceStoreURI,
                        "Missing attribute " + OCCURRENCE_MONGODB_STORE_URI);
                occurrenceStore = new MongoDBOccurrenceStore(occurrenceStoreURI,
                        OccurrenceStore.State.INDEXED);
            } else
                occurrenceStore = new MemoryOccurrenceStore();
            termIndex = new MemoryTermIndex(termIndexName, lang, occurrenceStore);
            if (corpusID != null)
                termIndex.setCorpusId(corpusID);
            if (nbWordAnnos != -1)
                termIndex.setWordAnnotationsNum(nbWordAnnos);
            if (nbSpottedTerms != -1)
                termIndex.setSpottedTermsNum(nbSpottedTerms);
            if (options.isMetadataOnly())
                return termIndex;
        } else if (WORDS.equals(fieldname)) {
            jp.nextToken();
            while ((tok = jp.nextToken()) != JsonToken.END_ARRAY) {
                WordBuilder wordBuilder = WordBuilder.start();
                while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                    fieldname = jp.getCurrentName();
                    if (LEMMA.equals(fieldname))
                        wordBuilder.setLemma(jp.nextTextValue());
                    else if (COMPOUND_TYPE.equals(fieldname))
                        wordBuilder.setCompoundType(CompoundType.fromName(jp.nextTextValue()));
                    else if (STEM.equals(fieldname))
                        wordBuilder.setStem(jp.nextTextValue());
                    else if (COMPONENTS.equals(fieldname)) {
                        while ((tok = jp.nextToken()) != JsonToken.END_ARRAY) {
                            while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                                fieldname = jp.getCurrentName();
                                if (LEMMA.equals(fieldname))
                                    compLemma = jp.nextTextValue();
                                else if (BEGIN.equals(fieldname))
                                    begin = jp.nextIntValue(-2);
                                else if (END.equals(fieldname))
                                    end = jp.nextIntValue(-2);
                            }
                            wordBuilder.addComponent(begin, end, compLemma);
                        }
                    }
                }
                try {
                    termIndex.addWord(wordBuilder.create());
                } catch (Exception e) {
                    LOGGER.error("Could not add word " + wordBuilder.getLemma() + " to term index", e);
                    LOGGER.warn("Error ignored, trying to continue the loading of TermIndex");
                }
            }
        } else if (TERMS.equals(fieldname)) {
            jp.nextToken();
            while ((tok = jp.nextToken()) != JsonToken.END_ARRAY) {
                TermBuilder builder = TermBuilder.start(termIndex);
                List<TempVecEntry> currentContextVector = Lists.newArrayList();
                int currentTermId = -1;
                while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                    fieldname = jp.getCurrentName();
                    if (GROUPING_KEY.equals(fieldname))
                        builder.setGroupingKey(jp.nextTextValue());
                    else if (SPOTTING_RULE.equals(fieldname))
                        builder.setSpottingRule(jp.nextTextValue());
                    else if (ID.equals(fieldname)) {
                        currentTermId = jp.nextIntValue(-2);
                        builder.setId(currentTermId);
                    } else if (RANK.equals(fieldname)) {
                        builder.setRank(jp.nextIntValue(-1));
                    } else if (FREQUENCY.equals(fieldname)) {
                        builder.setFrequency(jp.nextIntValue(-1));
                    } else {
                        if (FREQ_NORM.equals(fieldname)) {
                            jp.nextToken();
                            builder.setFrequencyNorm((double) jp.getFloatValue());
                        } else if (SPECIFICITY.equals(fieldname)) {
                            jp.nextToken();
                            builder.setSpecificity((double) jp.getDoubleValue());
                        } else if (GENERAL_FREQ_NORM.equals(fieldname)) {
                            jp.nextToken();
                            builder.setGeneralFrequencyNorm((double) jp.getFloatValue());
                        } else if (WORDS.equals(fieldname)) {
                            while ((tok = jp.nextToken()) != JsonToken.END_ARRAY) {
                                wordLemma = null;
                                syntacticLabel = null;
                                while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                                    fieldname = jp.getCurrentName();
                                    if (LEMMA.equals(fieldname))
                                        wordLemma = jp.nextTextValue();
                                    else if (SYN.equals(fieldname))
                                        syntacticLabel = jp.nextTextValue();
                                }
                                Preconditions.checkArgument(wordLemma != null,
                                        MSG_EXPECT_PROP_FOR_TERM_WORD, LEMMA);
                                Preconditions.checkArgument(syntacticLabel != null,
                                        MSG_EXPECT_PROP_FOR_TERM_WORD, SYN);
                                builder.addWord(termIndex.getWord(wordLemma), syntacticLabel);
                            } // end words
                        } else if (OCCURRENCES.equals(fieldname)) {
                            tok = jp.nextToken();
                            if (tok == JsonToken.START_ARRAY) {
                                while ((tok = jp.nextToken()) != JsonToken.END_ARRAY) {
                                    begin = -1;
                                    end = -1;
                                    fileSource = -1;
                                    text = null;
                                    while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                                        fieldname = jp.getCurrentName();
                                        if (BEGIN.equals(fieldname))
                                            begin = jp.nextIntValue(-1);
                                        else if (TEXT.equals(fieldname))
                                            text = jp.nextTextValue();
                                        else if (END.equals(fieldname))
                                            end = jp.nextIntValue(-1);
                                        else if (FILE.equals(fieldname)) {
                                            fileSource = jp.nextIntValue(-1);
                                        }
                                    }
                                    Preconditions.checkArgument(begin != -1,
                                            MSG_EXPECT_PROP_FOR_OCCURRENCE, BEGIN);
                                    Preconditions.checkArgument(end != -1,
                                            MSG_EXPECT_PROP_FOR_OCCURRENCE, END);
                                    Preconditions.checkArgument(fileSource != -1,
                                            MSG_EXPECT_PROP_FOR_OCCURRENCE, FILE);
                                    Preconditions.checkNotNull(inputSources.get(fileSource),
                                            "No file source with id: %s", fileSource);
                                    Preconditions.checkNotNull(text, MSG_EXPECT_PROP_FOR_OCCURRENCE, TEXT);
                                    if (occurrenceStore.getStoreType() == OccurrenceStore.Type.MEMORY)
                                        builder.addOccurrence(begin, end,
                                                termIndex.getDocument(inputSources.get(fileSource)), text);
                                }
                            } // end occurrences
                        } else if (CONTEXT.equals(fieldname)) {
                            @SuppressWarnings("unused")
                            int totalCooccs = 0;
                            while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                                fieldname = jp.getCurrentName();
                                if (TOTAL_COOCCURRENCES.equals(fieldname))
                                    /*
                                     * value never used since the total will
                                     * be reincremented in the contextVector
                                     */
                                    totalCooccs = jp.nextIntValue(-1);
                                else if (CO_OCCURRENCES.equals(fieldname)) {
                                    jp.nextToken();
                                    while ((tok = jp.nextToken()) != JsonToken.END_ARRAY) {
                                        TempVecEntry entry = new TempVecEntry();
                                        while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                                            fieldname = jp.getCurrentName();
                                            if (NB_COCCS.equals(fieldname))
                                                entry.setNbCooccs(jp.nextIntValue(-1));
                                            else if (ASSOC_RATE.equals(fieldname)) {
                                                jp.nextToken();
                                                entry.setAssocRate(jp.getFloatValue());
                                            } else if (CO_TERM.equals(fieldname))
                                                entry.setTermGroupingKey(jp.nextTextValue());
                                            else if (FILE.equals(fieldname)) {
                                                fileSource = jp.nextIntValue(-1);
                                            }
                                        }
                                        currentContextVector.add(entry);
                                    }
                                }
                            }
                        }
                    } // end if fieldname
                } // end term object
                try {
                    builder.createAndAddToIndex();
                } catch (Exception e) {
                    LOGGER.error("Could not add term " + builder.getGroupingKey() + " to term index", e);
                    LOGGER.warn("Error ignored, trying to continue the loading of TermIndex");
                }
                if (options.isWithContexts())
                    contextVectors.put(currentTermId, currentContextVector);
            } // end array of terms
        } else if (INPUT_SOURCES.equals(fieldname)) {
            jp.nextToken();
            while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                String id = jp.getCurrentName();
                try {
                    inputSources.put(Integer.parseInt(id), jp.nextTextValue());
                } catch (NumberFormatException e) {
                    IOUtils.closeQuietly(jp);
                    throw new IllegalArgumentException("Bad format for input source key: " + id);
                }
            }
        } else if (TERM_VARIATIONS.equals(fieldname)) {
            jp.nextToken();
            while ((tok = jp.nextToken()) != JsonToken.END_ARRAY) {
                base = null;
                variant = null;
                infoToken = null;
                variantType = null;
                variantScore = 0;
                while ((tok = jp.nextToken()) != JsonToken.END_OBJECT) {
                    fieldname = jp.getCurrentName();
                    if (BASE.equals(fieldname))
                        base = jp.nextTextValue();
                    else if (VARIANT.equals(fieldname))
                        variant = jp.nextTextValue();
                    else if (VARIANT_TYPE.equals(fieldname))
                        variantType = jp.nextTextValue();
                    else if (VARIANT_SCORE.equals(fieldname)) {
                        jp.nextToken();
                        variantScore = jp.getDoubleValue();
                    } else if (INFO.equals(fieldname))
                        infoToken = jp.nextTextValue();
                } // end syntactic variant object
                Preconditions.checkNotNull(base, MSG_EXPECT_PROP_FOR_VAR, BASE);
                Preconditions.checkNotNull(variant, MSG_EXPECT_PROP_FOR_VAR, VARIANT);
                Preconditions.checkNotNull(infoToken, MSG_EXPECT_PROP_FOR_VAR, INFO);
                b = termIndex.getTermByGroupingKey(base);
                v = termIndex.getTermByGroupingKey(variant);
                if (b != null && v != null) {
                    VariationType vType = VariationType.fromShortName(variantType);
                    TermVariation tv = new TermVariation(vType, b, v,
                            vType == VariationType.GRAPHICAL ? Double.parseDouble(infoToken) : infoToken);
                    tv.setScore(variantScore);
                    b.addTermVariation(tv);
                } else {
                    if (b == null)
                        LOGGER.warn("Could not build variant because term \"{}\" was not found.", base);
                    if (v == null)
                        LOGGER.warn("Could not build variant because term \"{}\" was not found.", variant);
                }
                // Preconditions.checkNotNull(b, MSG_TERM_DOES_NOT_EXIST, base);
                // Preconditions.checkNotNull(v, MSG_TERM_DOES_NOT_EXIST, variant);
            } // end syntactic variations array
        }
    }
    jp.close();
    if (options.isWithContexts()) {
        /*
         * map term ids with terms in context vectors and
         * set context vectors
         */
        List<TempVecEntry> currentTempVecList;
        Term term = null;
        Term coTerm = null;
        ContextVector contextVector;
        for (int termId : contextVectors.keySet()) {
            currentTempVecList = contextVectors.get(termId);
            term = termIndex.getTermById(termId);
            contextVector = new ContextVector(term);
            for (TempVecEntry tempVecEntry : currentTempVecList) {
                coTerm = termIndex.getTermByGroupingKey(tempVecEntry.getTermGroupingKey());
                contextVector.addEntry(coTerm, tempVecEntry.getNbCooccs(), tempVecEntry.getAssocRate());
            }
            if (!contextVector.getEntries().isEmpty())
                term.setContextVector(contextVector);
        }
    }
    return termIndex;
}
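The loader above leans on the nextTextValue()/nextIntValue(defaultValue) convenience methods: each advances the stream one token and returns the value if the token matches, or null/the default otherwise. A compact sketch, also showing the ALLOW_UNQUOTED_FIELD_NAMES feature the method enables (class name and input are illustrative):

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonToken;

public class ConvenienceValueSketch {
    public static void main(String[] args) throws Exception {
        JsonParser jp = new JsonFactory().createParser("{name:\"corpus\", nbSpottedTerms:42}");
        jp.enable(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES); // accept unquoted keys
        while (jp.nextToken() != JsonToken.END_OBJECT) {
            String field = jp.getCurrentName();
            if ("name".equals(field)) {
                System.out.println("name = " + jp.nextTextValue());
            } else if ("nbSpottedTerms".equals(field)) {
                System.out.println("nbSpottedTerms = " + jp.nextIntValue(-1));
            }
        }
        jp.close();
    }
}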
From source file:pl.edu.pwr.iiar.zak.thermalKit.util.DBConnector.java
public DBConnector() throws IOException {
    // Use an ObjectMapper-backed factory: a bare JsonFactory has no ObjectCodec,
    // so jp.getCodec().readTree(jp) would fail with a NullPointerException.
    ObjectMapper mapper = new ObjectMapper();
    JsonFactory f = mapper.getFactory();
    // "~" is not expanded by the JVM; resolve the home directory explicitly.
    JsonParser jp = f.createParser(new File(System.getProperty("user.home"), ".jgenerilorc"));
    JsonNode node = jp.getCodec().readTree(jp);
    databaseName = node.get("database").asText();
}
From source file:com.anrisoftware.simplerest.json.AbstractParseJsonResponse.java
@Override
public T parse(HttpResponse response) throws IOException {
    HttpEntity entity = response.getEntity();
    ObjectMapper mapper = new ObjectMapper();
    JsonFactory factory = mapper.getFactory();
    JsonParser parser = factory.createParser(entity.getContent());
    T result = mapper.readValue(parser, responseType);
    return result;
}
From source file:com.anrisoftware.simplerest.json.AbstractParseJsonResponseFromMap.java
@Override
public T parse(HttpResponse response) throws IOException {
    HttpEntity entity = response.getEntity();
    ObjectMapper mapper = new ObjectMapper();
    JsonFactory factory = mapper.getFactory();
    JsonParser parser = factory.createParser(entity.getContent());
    Map<String, T> results = mapper.readValue(parser, responseType);
    return results.get(keyName);
}
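When the target of readValue is a generic map, as in this example, the responseType field must carry the full generic type; a hedged sketch of how such a type is usually expressed (MyItem is a hypothetical value class):

import java.util.Map;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;

ObjectMapper mapper = new ObjectMapper();
// TypeReference preserves Map<String, MyItem> through type erasure
Map<String, MyItem> results = mapper.readValue(
        "{\"item\":{}}", new TypeReference<Map<String, MyItem>>() {});
MyItem item = results.get("item");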
From source file:org.o3project.ocnrm.model.bind.OduBindingData.java
@Override
public void bind(String name, String resource)
        throws JsonParseException, JsonMappingException, IOException {
    JsonFactory factory = new JsonFactory();
    JsonParser jp = factory.createParser(resource); // resource is already a String
    jp.nextToken();
    OduMapping terminationPoint = new OduMapping();
    terminationPoint.setName(name);
    while (jp.nextToken() != JsonToken.END_OBJECT) {
        String fieldname = jp.getCurrentName();
        jp.nextToken();
        if ("dpid".equals(fieldname)) {
            terminationPoint.setDpid(jp.getText());
        } else if ("port".equals(fieldname)) {
            terminationPoint.setPort(jp.getText());
        } else if ("odutype".equals(fieldname)) {
            terminationPoint.setOdutype(jp.getText());
        } else if ("ts".equals(fieldname)) {
            String ts = jp.getText();
            terminationPoint.setTs(ts);
        } else if ("tpn".equals(fieldname)) {
            terminationPoint.setTpn(jp.getText());
        } else {
            throw new IllegalStateException("Unrecognized field '" + fieldname + "'!");
        }
        bindMap.put(terminationPoint.getName(), terminationPoint);
    }
    jp.close();
}