Example usage for com.fasterxml.jackson.databind MappingIterator next

List of usage examples for com.fasterxml.jackson.databind MappingIterator next

Introduction

On this page you can find example usage of com.fasterxml.jackson.databind.MappingIterator.next().

Prototype

@Override
public T next()

Usage
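
Before the full listings below, here is a minimal sketch of the basic call pattern. It assumes the jackson-dataformat-csv module is on the classpath, and the data.csv path is a placeholder used only for illustration; next() returns the next deserialized value and is normally guarded by hasNext().

import java.io.File;
import java.io.IOException;

import com.fasterxml.jackson.databind.MappingIterator;
import com.fasterxml.jackson.dataformat.csv.CsvMapper;
import com.fasterxml.jackson.dataformat.csv.CsvParser;

public class MappingIteratorNextExample {
    public static void main(String[] args) throws IOException {
        CsvMapper mapper = new CsvMapper();
        // read each CSV line as a String[] instead of binding to a POJO
        mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);

        // "data.csv" is a placeholder path used only for illustration
        try (MappingIterator<String[]> it = mapper.readerFor(String[].class).readValues(new File("data.csv"))) {
            while (it.hasNext()) {
                // next() deserializes and returns the next row; it throws
                // NoSuchElementException once the input is exhausted
                String[] row = it.next();
                System.out.println(String.join(",", row));
            }
        }
    }
}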

From source file:edu.cmu.cs.lti.discoursedb.annotation.lightside.io.LightSideService.java

/**
 * Imports a file that was previously generated by exportDataForAnnotation() and then annotated by LightSide classifiers
 * @param inputFilePath path to the file that should be imported
 */
public void importAnnotatedData(String inputFilePath) {
    CsvMapper mapper = new CsvMapper();
    mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);
    File csvFile = new File(inputFilePath);
    try {
        MappingIterator<String[]> it = mapper.readerFor(String[].class).readValues(csvFile);

        //process header
        String[] header = it.next();
        Map<String, Integer> headerId = new HashMap<>();
        for (int i = 0; i < header.length; i++) {
            headerId.put(header[i], i);
        }

        //process data
        while (it.hasNext()) {
            String[] row = it.next();
            Contribution curContrib = null;
            List<AnnotationInstance> curAnnos = new ArrayList<>();
            for (int i = 0; i < row.length; i++) {
                String field = row[i];
                if (i == headerId.get(TEXT_COL)) {
                    //we don't need the text column
                } else if (i == headerId.get(ID_COL)) {
                    curContrib = contribService.findOne(Long.parseLong(field)).orElseThrow(
                            () -> new EntityNotFoundException("Cannot find annotated entity in database."));
                } else {
                    //we don't need to create an annotation if it's a binary label set to false
                    if (!field.equalsIgnoreCase(LABEL_MISSING_VAL)) {
                        String label = header[i].split(LIGHTSIDE_PREDICTION_COL_SUFFIX)[0]; //remove suffix from label if it exists                    
                        AnnotationInstance newAnno = annoService.createTypedAnnotation(label);
                        annoService.saveAnnotationInstance(newAnno);
                        curAnnos.add(newAnno);
                        //if we have any value other than true or false, store this value as a feature
                        if (!field.equalsIgnoreCase(LABEL_ASSIGNED_VAL)) {
                            Feature newFeat = annoService.createFeature(field);
                            annoService.saveFeature(newFeat);
                            annoService.addFeature(newAnno, newFeat);
                        }
                    }
                }
            }
            //wipe old annotations  
            //TODO we might not want to delete ALL annotations
            delete(annoService.findAnnotations(curContrib));

            //add new annotations to the contribution it belongs to 
            for (AnnotationInstance newAnno : curAnnos) {
                annoService.addAnnotation(curContrib, newAnno);
            }
        }
    } catch (IOException e) {
        log.error("Error reading and parsing data from csv", e);
    }
}

From source file:net.flutterflies.fwapaderp.game.TeamManager.java

/**
 * Converts a .csv spreadsheet template into a UHCTeam object
 *
 * @param teamsList The existing team list; it is cleared and repopulated from teams.csv
 * @return A list of all UHCTeams
 */
public ArrayList<UHCTeam> createTeamsFromCSV(ArrayList<UHCTeam> teamsList) {
    ArrayList<String[]> rows = new ArrayList<>();
    File teamsFile = new File(plugin.getDataFolder(), "teams.csv");
    CsvMapper mapper = new CsvMapper();

    //Clear any existing teams on the team list
    teamsList.clear();

    mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);

    //Try to load values from teams.csv
    try {
        MappingIterator<String[]> iterator = mapper.readerFor(String[].class).readValues(teamsFile);
        while (iterator.hasNext()) {
            rows.add(rows.size(), iterator.next());
        }
    } catch (IOException e) {
        plugin.getLogger().log(Level.SEVERE, "Could not find the file teams.csv! Please either supply "
                + "a teams.csv file or disable usePreMadeTeams in the plugin's config file.");
        System.exit(0);
    }

    //For each row in the csv file (skipping the header row) create a new team
    for (int i = 1; i < rows.size(); i++) {
        String[] team = rows.get(i);
        List<String> teamPlayerList = new ArrayList<>();
        for (int j = 2; j < team.length; j++) {
            if (!team[j].equals("")) {
                teamPlayerList.add(teamPlayerList.size(), team[j]);
            }
        }
        teamsList.add(teamsList.size(),
                new UHCTeam(team[0], team[1].toUpperCase().replace(' ', '_'), teamPlayerList));
    }

    //Write Teams to a yaml file
    for (int i = 0; i < teamsList.size(); i++) {
        //Get the team
        UHCTeam team = teamsList.get(i);

        //Write the team name
        plugin.getTeamConfig().set("teams.team" + (i + 1) + ".name", team.getTeamName(false));
        //Write the team's color
        plugin.getTeamConfig().set("teams.team" + (i + 1) + ".color", team.getTeamColor());
        //Write all the players in the team
        for (int j = 0; j < team.getTeamSize(); j++) {
            plugin.getTeamConfig().set("teams.team" + (i + 1) + ".players.player" + (j + 1),
                    team.getPlayers().get(j));
        }
    }
    plugin.saveTeamsConfig();

    return teamsList;
}

From source file:com.marklogic.client.test.SPARQLManagerTest.java

private int countLines(MappingIterator<?> iter) {
    int numLines = 0;
    while (iter.hasNext()) {
        iter.next();
        numLines++;
    }
    return numLines;
}

From source file:com.marklogic.client.test.SPARQLManagerTest.java

@Test
public void testInference() throws Exception {
    gmgr.write("/ontology", new StringHandle(ontology).withMimetype("application/n-triples"));
    SPARQLQueryDefinition qdef = smgr.newQueryDefinition("SELECT ?s { ?s a <http://example.org/C1>  }");
    qdef.setIncludeDefaultRulesets(false);
    StringHandle handle = new StringHandle().withMimetype(SPARQLMimeTypes.SPARQL_CSV);
    String results = smgr.executeSelect(qdef, handle).get();
    assertNull(results);

    qdef.setRulesets(SPARQLRuleset.RANGE);
    results = smgr.executeSelect(qdef, handle).get();
    assertEquals(1, countLines(parseCsv(results)));

    qdef.setRulesets(SPARQLRuleset.RANGE, SPARQLRuleset.DOMAIN);
    results = smgr.executeSelect(qdef, handle).get();
    MappingIterator<Map<String, String>> csvRows = parseCsv(results);
    assertTrue(csvRows.hasNext());
    Map<String, String> row = csvRows.next();
    assertEquals("http://example.org/o1", row.get("s"));
    assertTrue(csvRows.hasNext());
    row = csvRows.next();
    assertEquals("http://example.org/s2", row.get("s"));
    assertFalse(csvRows.hasNext());

    gmgr.delete("/ontology");
}

From source file:nl.esciencecenter.ptk.csv.CSVData.java

public void parseText(String csvText) throws IOException {
    // Extended CSV !
    // Pass I: remove comments including the ending newline!
    Pattern pat = Pattern.compile("^#.*\n", Pattern.MULTILINE);
    csvText = pat.matcher(csvText).replaceAll("");

    // todo: check how jackson can parse alternative field separators;
    if (fieldSeparators != null) {
        // csvText=csvText.replaceAll(",","_");

        for (String sep : fieldSeparators) {
            // lazy replace
            csvText = csvText.replaceAll(sep, ",");
        }
    }

    // Not needed: Pass II: remove empty lines as a result of the
    // pat=Pattern.compile("\n\n",Pattern.MULTILINE);
    // newTxt=pat.matcher(newTxt).replaceAll("");

    // ObjectMapper mapper=new ObjectMapper();
    CsvMapper mapper = new CsvMapper();
    mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);

    MappingIterator<Object[]> it = mapper.reader(Object[].class).readValues(csvText);

    if (!it.hasNext()) {
        throw new IOException("Empty text or csv text contains no headers!");
    }

    // read header:
    Object headers[] = it.next();

    StringList list = new StringList();
    for (int i = 0; i < headers.length; i++) {
        list.add(headers[i].toString());
    }

    logger.debugPrintf("Headers=%s\n", list.toString("<>"));
    headerList = list;

    data = new ArrayList<String[]>();

    // read data rows:
    while (it.hasNext()) {
        Object line[] = it.next();
        String row[] = new String[line.length];

        for (int j = 0; j < line.length; j++) {
            Object value = line[j];
            if (value != null) {
                row[j] = value.toString();
            }
        }
        data.add(row);
    }

    logger.debugPrintf("Read %d rows\n", data.size());
}

From source file:de.undercouch.bson4jackson.BsonParserTest.java

/**
 * Tests if a simple BSON file can be read successfully
 * @throws Exception if something went wrong
 */
@Test
public void readBSONFile() throws Exception {
    InputStream is = getClass().getResourceAsStream("test.bson");
    try {
        ObjectMapper mapper = new ObjectMapper(new BsonFactory());
        MappingIterator<BSONObject> iterator = mapper.reader(BasicBSONObject.class).readValues(is);

        BSONObject o = null;
        while (iterator.hasNext()) {
            assertNull(o);
            BSONObject object = iterator.next();
            assertNotNull(object);
            o = object;
        }

        assertEquals("Hello world", o.get("message"));
        assertEquals(10.0, o.get("size"));
        assertTrue(o.keySet().contains("_id"));
        assertEquals(3, o.keySet().size());
    } finally {
        is.close();
    }
}

From source file:org.hexlogic.model.DockerNode.java

@VsoMethod(showInApi = true, name = "pullImage", description = "Pull the image matching the given string from the docker hub repository, saving it on the docker host.")
public String pullImage(String imageName) throws Exception {
    log.debug("Pulling image '" + imageName + "'...");

    @SuppressWarnings("rawtypes")
    MappingIterator<Map> it = null;
    try {
        configureNode();
        DockerClient dockerClient = DockerClientBuilder.getInstance(config).build();
        log.debug("Starting pull operation...");

        /*
         * We will check the final result by comparing the initial image id, which is the first ID provided by the stream such as:
         * 
         * {status=Pulling image (latest) from dockerfile/nodejs, progressDetail={}, id=406eb4a4dcad}
         * 
         * to the image id of the last entity which owns id AND status which will look something like:
         * {status=Download complete, progressDetail={}, id=406eb4a4dcad}
         * 
         * If both IDs match, we know that the latest layer is the same as the requested image layer.
         * So the next step is to compare the download status of that layer
         */
        String firstId = null;
        String lastId = "";
        String lastStatus = "undefined";

        /*
         * In addition to the download status of the layer, we provide additional information about how the process went by
         * returning information to the user using the last entity which has no id and only a status, which looks like this:
         * {status=Status: Image is up to date for dockerfile/nodejs}
         * or
         * {status=Status: Downloaded newer image for dockerfile/nodejs}
         * or
         * {status=Repository dockerfile/nodejs already being pulled by another client. Waiting.}
         */
        String finalStatus = "undefined";

        for (it = new ObjectMapper().readValues(
                new JsonFactory().createJsonParser(dockerClient.pullImageCmd(imageName).exec()), Map.class); it
                        .hasNext();) {
            Map<?, ?> element = it.next();
            String id = "";
            String status = "";
            String progress = "";

            // info OUTPUT
            // log.debug("info: " + element);

            try {
                id = element.get("id").toString();
            } catch (NullPointerException e) {/* catch exception if key was not found */
            }
            try {
                status = element.get("status").toString();
            } catch (NullPointerException e) {/* catch exception if key was not found */
            }
            try {
                progress = element.get("progress").toString();
            } catch (NullPointerException e) {/* catch exception if key was not found */
            }

            // if the key was found and we got some status
            if (!id.isEmpty() && !status.isEmpty()) {
                // remember the first id of the output stream, which is the id of the image we want to pull
                if (firstId == null) {
                    log.debug("Remembering first id: " + id);
                    firstId = id;
                }

                // if the same layer is returned multiple times in a row, don't log everything but just the progress
                if (id.equals(lastId)) {
                    lastId = id;
                    lastStatus = status;
                    if (!progress.isEmpty()) {
                        log.debug("Progress: " + progress);
                    }
                } else {
                    lastId = id;
                    log.debug("Image '" + id + "' status is: " + status + ".");
                    if (!progress.isEmpty()) {
                        log.debug("Progress: " + progress);
                    }
                }
            }

            if (!status.isEmpty()) {
                finalStatus = status;
            }
        }

        // TODO find a more robust way to handle downloadStatus and finalStatus
        String downloadStatus = "undefined";
        if (lastId.equals(firstId)) {
            log.debug("Last download layer id does match the requested image id: " + firstId);
            if (StringUtils.containsIgnoreCase(lastStatus, "Download complete")) {
                downloadStatus = "succeeded";
                log.debug("The requested layer was downloaded successfully.");
            } else {
                downloadStatus = "failed";
                log.error("The requested layer failed to download.");
                // throw exception in order for the workflow to fail
                throw new IllegalStateException("The requested layer failed to download.");
            }
        }

        // reload images from docker node
        this.reloadImages();
        // update inventory - another way to do this would be to update our ArrayList and call notifyElementDeleted on the image object
        notificationHandler.notifyElementInvalidate(toRef());

        log.debug("Pull operation " + downloadStatus + ". " + finalStatus + ".");
        return "Pull operation " + downloadStatus + ". " + finalStatus + ".";

    } catch (InternalServerErrorException e) {
        // image doesn't exist
        log.error("Error: the image was not found.");
        // Throw error detail message so vCO can display it
        throw new Exception("Error: the image was not found.");
    } catch (Exception e) {
        final StringWriter sw = new StringWriter();
        final PrintWriter pw = new PrintWriter(sw, true);
        e.printStackTrace(pw);

        log.error("Error while pulling image: " + sw.getBuffer().toString());
        // Throw error detail message so vCO can display it
        throw new Exception("Error while pulling image: " + sw.getBuffer().toString());
    } finally {
        if (it != null) {
            log.debug("Closing pullImage stream...");
            it.close();
            log.debug("Closed pullImage stream.");
        }
    }

}

From source file:com.datafibers.kafka.connect.SchemaedFileSourceTask.java

private List<SourceRecord> pollFromFile() throws InterruptedException {
    log.trace("pollFromFile");
    CsvSchema bootstrapCsv;
    CsvMapper csvMapper = new CsvMapper();
    ObjectMapper jsonMapper = new ObjectMapper();
    MappingIterator<Map<?, ?>> mappingIterator;
    ArrayList<SourceRecord> records = null;
    long currentTime = System.currentTimeMillis();
    long recordsPerPoll;

    // TODO: Improve ExceptionOnEof logic.
    // The code below only works when each pass through
    // poll() reads all available records (not a given).
    if (config.getExceptionOnEof() && streamOffset != null) {
        throw new ConnectException("No more data available on FileInputStream");
    }

    // Initialize the bootstrapCsv schema if necessary
    if (recordSchema == null || inputType.equalsIgnoreCase("json")) {
        log.trace("Constructing csvSchema from emptySchema");
        bootstrapCsv = config.getCsvHeaders() ? CsvSchema.emptySchema().withHeader()
                : CsvSchema.emptySchema().withoutHeader();
    } else {
        // We've seen a schema, so we'll assume headers from the recordSchema
        log.trace("Constructing csvSchema from recordSchema");
        CsvSchema.Builder builder = new CsvSchema.Builder();
        builder.setUseHeader(false);
        builder.setColumnSeparator(',');
        for (Field f : recordSchema.fields()) {
            log.trace("adding column {}", f.name());
            builder.addColumn(f.name());
        }
        bootstrapCsv = builder.build();
    }
    try {
        if (stream == null)
            openFileStream();
        if (reader == null)
            reader = new BufferedReader(new InputStreamReader(stream));

        if (inputType.equalsIgnoreCase("json")) {
            mappingIterator = jsonMapper.readerFor(Map.class).readValues(reader);
        } else if (inputType.equalsIgnoreCase("csv")) {
            mappingIterator = csvMapper.readerWithSchemaFor(Map.class).with(bootstrapCsv).readValues(reader);
        } else {
            log.error("Unsupported file input type specified ({})", inputType);
            return null;
        }
    } catch (FileNotFoundException fnf) {
        log.warn("Couldn't find file {} for SchemaedFileSourceTask, sleeping to wait for it to be created",
                logFilename());
        synchronized (this) {
            this.wait(1000);
        }
        return null;
    } catch (IOException e) {
        // IOException thrown when no more records in stream
        log.warn("Processed all available data from {}; sleeping to wait for additional records", logFilename());
        // Close reader and stream; swallowing exceptions ... we're about to throw a Retry
        try {
            reader.close();
        } catch (Exception nested) {
        } finally {
            reader = null;
        }

        if (stream != System.in) {
            try {
                stream.close();
            } catch (Exception nested) {
            } finally {
                stream = null;
            }
        }

        synchronized (this) {
            this.wait(1000);
        }
        return null;
    }
    log.debug("mappingIterator of type {} created; begin reading data file",
            mappingIterator.getClass().toString());

    // The csvMapper class is really screwy; can't figure out why it
    // won't return a rational Schema ... so we'll extract it from
    // the first object later.
    if (recordSchema == null && inputType.equalsIgnoreCase("csv") && csvMapper.schema().size() > 0) {
        recordSchema = ConvertMappingSchema(csvMapper.schemaWithHeader());
        log.trace("recordSchema created from csvMapper; type {}", recordSchema.type().toString());
    }
    try {
        FileInputStream fstream = (FileInputStream) stream;
        Long lastElementOffset = streamOffset;
        recordsPerPoll = 3;

        while (mappingIterator.hasNext()) {
            Map<?, ?> element = mappingIterator.next();
            Long elementOffset, iteratorOffset;
            recordCount++;
            recordsPerPoll--;

            iteratorOffset = mappingIterator.getCurrentLocation().getByteOffset(); // never works !!!
            if (iteratorOffset < 0) {
                // The stream channel will CLOSE on the last clean record
                // seen by mappingIterator, so we have to be careful here.
                // Additionally, when parsing CSV files, there seem to be a
                // lot of Bad File Descriptor errors; ignore them.
                try {
                    elementOffset = fstream.getChannel().position();
                } catch (java.nio.channels.ClosedChannelException e) {
                    log.trace("getChannel.position threw {}", e.toString());
                    elementOffset = lastElementOffset;
                } catch (IOException e) {
                    log.trace("getChannel.position threw {}", e.toString());
                    elementOffset = lastElementOffset;
                }
            } else {
                log.trace("mappingIterator.getCurrentLocation() returns {}", iteratorOffset.toString());
                elementOffset = iteratorOffset;
            }
            log.trace("Next input record: {} (class {}) from file position {}", element.toString(),
                    element.getClass().toString(), elementOffset.toString());

            if (recordSchema == null) {
                recordSchema = ConvertMappingSchema(element.keySet());
                log.trace("recordSchema created from element; type {}", recordSchema.type().toString());
            }

            if (records == null)
                records = new ArrayList<>();
            records.add(new SourceRecord(offsetKey(filename), offsetValue(elementOffset), topic, recordSchema,
                    ConvertMappingElement(recordSchema, (HashMap<?, ?>) element)));
            streamOffset = lastElementOffset = elementOffset;
        }
    } catch (Exception e) {
        throw new ConnectException(e);
    }

    lastPollTime = currentTime;
    return records;
}

From source file:org.icgc.dcc.release.test.util.TestFiles.java

@SneakyThrows
public static List<ObjectNode> readInputFile(File source) {
    if (source.isDirectory()) {
        return readInputDirectory(source);
    }

    val reader = MAPPER.reader(ObjectNode.class);

    @Cleanup
    MappingIterator<ObjectNode> iterator = reader.readValues(source);

    val rows = Lists.<ObjectNode>newArrayList();
    while (iterator.hasNext()) {
        val row = iterator.next();
        rows.add(row);
    }

    return rows;
}