Example usage for com.fasterxml.jackson.databind MappingIterator hasNext

Introduction

On this page you can find example usage of com.fasterxml.jackson.databind.MappingIterator.hasNext().

Prototype

@Override
public boolean hasNext()
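
The method reports whether another value is available for deserialization from the underlying input. A minimal sketch of the typical read loop (the file name and the Map value type here are illustrative, not taken from the examples below):

import java.io.File;
import java.util.Map;

import com.fasterxml.jackson.databind.MappingIterator;
import com.fasterxml.jackson.databind.ObjectMapper;

public class HasNextExample {
    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        // rows.json is assumed to hold a sequence of root-level JSON objects
        try (MappingIterator<Map<String, Object>> it =
                mapper.readerFor(Map.class).readValues(new File("rows.json"))) {
            while (it.hasNext()) { // true while another value can be read
                Map<String, Object> row = it.next();
                System.out.println(row);
            }
        }
    }
}

MappingIterator implements Closeable, so try-with-resources releases the underlying parser when the loop finishes.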

Usage

From source file:edu.cmu.cs.lti.discoursedb.annotation.lightside.io.LightSideService.java

/**
 * Imports a file that was previously generated by exportDataForAnnotation() and then annotated by LightSide classifiers.
 *
 * @param inputFilePath path to the file that should be imported
 */
public void importAnnotatedData(String inputFilePath) {
    CsvMapper mapper = new CsvMapper();
    mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);
    File csvFile = new File(inputFilePath);
    try {
        MappingIterator<String[]> it = mapper.readerFor(String[].class).readValues(csvFile);

        //process header
        String[] header = it.next();
        Map<String, Integer> headerId = new HashMap<>();
        for (int i = 0; i < header.length; i++) {
            headerId.put(header[i], i);
        }

        //process data
        while (it.hasNext()) {
            String[] row = it.next();
            Contribution curContrib = null;
            List<AnnotationInstance> curAnnos = new ArrayList<>();
            for (int i = 0; i < row.length; i++) {
                String field = row[i];
                if (i == headerId.get(TEXT_COL)) {
                    //we don't need the text column
                } else if (i == headerId.get(ID_COL)) {
                    curContrib = contribService.findOne(Long.parseLong(field)).orElseThrow(
                            () -> new EntityNotFoundException("Cannot find annotated entity in database."));
                } else {
                    //we don't need to create an annotation if it's a binary label set to false
                    if (!field.equalsIgnoreCase(LABEL_MISSING_VAL)) {
                        String label = header[i].split(LIGHTSIDE_PREDICTION_COL_SUFFIX)[0]; //remove suffix from label if it exists                    
                        AnnotationInstance newAnno = annoService.createTypedAnnotation(label);
                        annoService.saveAnnotationInstance(newAnno);
                        curAnnos.add(newAnno);
                        //if we have any other value than true or false, store this value as a feature
                        if (!field.equalsIgnoreCase(LABEL_ASSIGNED_VAL)) {
                            Feature newFeat = annoService.createFeature(field);
                            annoService.saveFeature(newFeat);
                            annoService.addFeature(newAnno, newFeat);
                        }
                    }
                }
            }
            //wipe old annotations  
            //TODO we might not want to delete ALL annotations
            delete(annoService.findAnnotations(curContrib));

            //add new annotations to the contribution it belongs to 
            for (AnnotationInstance newAnno : curAnnos) {
                annoService.addAnnotation(curContrib, newAnno);
            }
        }
    } catch (IOException e) {
        log.error("Error reading and parsing data from csv");
    }
}
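
The header-indexing pattern above can be reduced to a self-contained sketch; the CSV file name is illustrative. Unlike the service method, this sketch closes the iterator with try-with-resources:

import java.io.File;
import java.util.HashMap;
import java.util.Map;

import com.fasterxml.jackson.databind.MappingIterator;
import com.fasterxml.jackson.dataformat.csv.CsvMapper;
import com.fasterxml.jackson.dataformat.csv.CsvParser;

public class CsvRowLoop {
    public static void main(String[] args) throws Exception {
        CsvMapper mapper = new CsvMapper();
        // WRAP_AS_ARRAY exposes each CSV row as one String[]
        mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);
        try (MappingIterator<String[]> it =
                mapper.readerFor(String[].class).readValues(new File("annotated.csv"))) {
            String[] header = it.next(); // first row holds the column names
            Map<String, Integer> columnIndex = new HashMap<>();
            for (int i = 0; i < header.length; i++) {
                columnIndex.put(header[i], i);
            }
            while (it.hasNext()) { // remaining rows hold the data
                String[] row = it.next();
                System.out.println(row[columnIndex.get(header[0])]);
            }
        }
    }
}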

From source file:org.hexlogic.model.DockerNode.java

@VsoMethod(showInApi = true, name = "pullImage", description = "Pull the image matching the given string from the docker hub repository, saving it on the docker host.")
public String pullImage(String imageName) throws Exception {
    log.debug("Pulling image '" + imageName + "'...");

    @SuppressWarnings("rawtypes")
    MappingIterator<Map> it = null;
    try {
        configureNode();
        DockerClient dockerClient = DockerClientBuilder.getInstance(config).build();
        log.debug("Starting pull operation...");

        /*
         * We will check the final result by comparing the initial image id, which is the first ID provided by the stream such as:
         * 
         * {status=Pulling image (latest) from dockerfile/nodejs, progressDetail={}, id=406eb4a4dcad}
         * 
         * to the image id of the last entity which owns id AND status which will look something like:
         * {status=Download complete, progressDetail={}, id=406eb4a4dcad}
         * 
         * If both IDs match, we know that the latest layer is the same as the requested image layer.
         * So the next step is to compare the download status of that layer
         */
        String firstId = null;
        String lastId = "";
        String lastStatus = "undefined";

        /*
         * In addition to the download status of the layer, we provide additional information about how the process went by
         * returning information to the user using the last entity which has no id and only a status, which looks like this:
         * {status=Status: Image is up to date for dockerfile/nodejs}
         * or
         * {status=Status: Downloaded newer image for dockerfile/nodejs}
         * or
         * {status=Repository dockerfile/nodejs already being pulled by another client. Waiting.}
         */
        String finalStatus = "undefined";

        for (it = new ObjectMapper().readValues(
                new JsonFactory().createJsonParser(dockerClient.pullImageCmd(imageName).exec()), Map.class); it
                        .hasNext();) {
            Map<?, ?> element = it.next();
            String id = "";
            String status = "";
            String progress = "";

            // info OUTPUT
            // log.debug("info: " + element);

            try {
                id = element.get("id").toString();
            } catch (NullPointerException e) {/* catch exception if key was not found */
            }
            try {
                status = element.get("status").toString();
            } catch (NullPointerException e) {/* catch exception if key was not found */
            }
            try {
                progress = element.get("progress").toString();
            } catch (NullPointerException e) {/* catch exception if key was not found */
            }

            // if the key was found and we got some status
            if (!id.isEmpty() && !status.isEmpty()) {
                // remember the first id of the output stream, which is the id of the image we want to pull
                if (firstId == null) {
                    log.debug("Remembering first id: " + id);
                    firstId = id;
                }

                // if the same layer is returned multiple times in a row, don't log everything but just the progress
                if (id.equals(lastId)) {
                    lastId = id;
                    lastStatus = status;
                    if (!progress.isEmpty()) {
                        log.debug("Progress: " + progress);
                    }
                } else {
                    lastId = id;
                    log.debug("Image '" + id + "' status is: " + status + ".");
                    if (!progress.isEmpty()) {
                        log.debug("Progress: " + progress);
                    }
                }
            }

            if (!status.isEmpty()) {
                finalStatus = status;
            }
        }

        // TODO find a more robust way to handle downloadStatus and finalStatus
        String downloadStatus = "undefined";
        if (lastId.equals(firstId)) {
            log.debug("Last download layer id does match the requested image id: " + firstId);
            if (StringUtils.containsIgnoreCase(lastStatus, "Download complete")) {
                downloadStatus = "successed";
                log.debug("The requested layer was downloaded successfuly.");
            } else {
                downloadStatus = "failed";
                log.error("The requested layer failed to download.");
                // throw exception in order for the workflow to fail
                throw new IllegalStateException("The requested layer failed to download.");
            }
        }

        // reload images from docker node
        this.reloadImages();
        // update inventory - another way to do this would be to update our ArrayList and call notifyElementDeleted on the image object
        notificationHandler.notifyElementInvalidate(toRef());

        log.debug("Pull operation " + downloadStatus + ". " + finalStatus + ".");
        return "Pull operation " + downloadStatus + ". " + finalStatus + ".";

    } catch (InternalServerErrorException e) {
        // image doesn't exist
        log.error("Error: the image was not found.");
        // Throw error detail message so vCO can display it
        throw new Exception("Error: the image was not found.");
    } catch (Exception e) {
        final StringWriter sw = new StringWriter();
        final PrintWriter pw = new PrintWriter(sw, true);
        e.printStackTrace(pw);

        log.error("Error while pulling image: " + sw.getBuffer().toString());
        // Throw error detail message so vCO can display it
        throw new Exception("Error while pulling image: " + sw.getBuffer().toString());
    } finally {
        if (it != null) {
            log.debug("Closeing pullImage stream...");
            it.close();
            log.debug("Closed pullImage stream.");
        }
    }

}
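
The pull example drains a stream of root-level JSON status objects and closes the iterator in a finally block. A minimal sketch of the same pattern, with an in-memory stream standing in for the docker-java response and try-with-resources handling the close:

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.Map;

import com.fasterxml.jackson.databind.MappingIterator;
import com.fasterxml.jackson.databind.ObjectMapper;

public class StatusStreamLoop {
    public static void main(String[] args) throws Exception {
        // Stand-in for the pullImageCmd(...).exec() response stream
        InputStream response = new ByteArrayInputStream(
                ("{\"status\":\"Pulling image\",\"id\":\"406eb4a4dcad\"}\n"
                 + "{\"status\":\"Download complete\",\"id\":\"406eb4a4dcad\"}")
                        .getBytes(StandardCharsets.UTF_8));

        ObjectMapper mapper = new ObjectMapper();
        try (MappingIterator<Map<String, Object>> it =
                mapper.readerFor(Map.class).readValues(response)) {
            while (it.hasNext()) { // one Map per JSON status object
                Map<String, Object> status = it.next();
                System.out.println(status.get("status"));
            }
        }
    }
}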

From source file:com.datafibers.kafka.connect.SchemaedFileSourceTask.java

private List<SourceRecord> pollFromFile() throws InterruptedException {
    log.trace("pollFromFile");
    CsvSchema bootstrapCsv;
    CsvMapper csvMapper = new CsvMapper();
    ObjectMapper jsonMapper = new ObjectMapper();
    MappingIterator<Map<?, ?>> mappingIterator;
    ArrayList<SourceRecord> records = null;
    long currentTime = System.currentTimeMillis();
    long recordsPerPoll;

    // TODO: Improve ExceptionOnEof logic.
    // The code below only works when each pass through
    // poll() reads all available records (not a given).
    if (config.getExceptionOnEof() && streamOffset != null) {
        throw new ConnectException("No more deta available on FileInputStream");
    }

    // Initialize the bootstrapCsv schema if necessary
    if (recordSchema == null || inputType.equalsIgnoreCase("json")) {
        log.trace("Constructing csvSchema from emptySchema");
        bootstrapCsv = config.getCsvHeaders() ? CsvSchema.emptySchema().withHeader()
                : CsvSchema.emptySchema().withoutHeader();
    } else {
        // We've seen a schema, so we'll assume headers from the recordSchema
        log.trace("Constructing csvSchema from recordSchema");
        CsvSchema.Builder builder = new CsvSchema.Builder();
        builder.setUseHeader(false);
        builder.setColumnSeparator(',');
        for (Field f : recordSchema.fields()) {
            log.trace("adding column {}", f.name());
            builder.addColumn(f.name());
        }
        bootstrapCsv = builder.build();
    }
    try {
        if (stream == null)
            openFileStream();
        if (reader == null)
            reader = new BufferedReader(new InputStreamReader(stream));

        if (inputType.equalsIgnoreCase("json")) {
            mappingIterator = jsonMapper.readerFor(Map.class).readValues(reader);
        } else if (inputType.equalsIgnoreCase("csv")) {
            mappingIterator = csvMapper.readerWithSchemaFor(Map.class).with(bootstrapCsv).readValues(reader);
        } else {
            log.error("Unsupported file input type specified ({})", inputType);
            return null;
        }
    } catch (FileNotFoundException fnf) {
        log.warn("Couldn't find file {} for SchemaedFileSourceTask, sleeping to wait for it to be created",
                logFilename());
        synchronized (this) {
            this.wait(1000);
        }
        return null;
    } catch (IOException e) {
        // IOException thrown when no more records in stream
        log.warn("Processed all available data from {}; sleeping to wait additional records", logFilename());
        // Close reader and stream; swallowing exceptions ... we're about to throw a Retry
        try {
            reader.close();
        } catch (Exception nested) {
        } finally {
            reader = null;
        }

        if (stream != System.in) {
            try {
                stream.close();
            } catch (Exception nested) {
            } finally {
                stream = null;
            }
        }

        synchronized (this) {
            this.wait(1000);
        }
        return null;
    }
    log.debug("mappingIterator of type {} created; begin reading data file",
            mappingIterator.getClass().toString());

    // The csvMapper class is really screwy; can't figure out why it
    // won't return a rational Schema ... so we'll extract it from
    // the first object later.
    if (recordSchema == null && inputType.equalsIgnoreCase("csv") && csvMapper.schema().size() > 0) {
        recordSchema = ConvertMappingSchema(csvMapper.schemaWithHeader());
        log.trace("recordSchema created from csvMapper; type {}", recordSchema.type().toString());
    }
    try {
        FileInputStream fstream = (FileInputStream) stream;
        Long lastElementOffset = streamOffset;
        recordsPerPoll = 3;

        while (mappingIterator.hasNext()) {
            Map<?, ?> element = mappingIterator.next();
            Long elementOffset, iteratorOffset;
            recordCount++;
            recordsPerPoll--;

            iteratorOffset = mappingIterator.getCurrentLocation().getByteOffset(); // never works !!!
            if (iteratorOffset < 0) {
                // The stream channel will CLOSE on the last clean record
                // seen by mappingIterator, so we have to be careful here
                // Additionally, when parsing CSV files, there seems to be a
                // lot of Bad File Descriptor errors; ignore them.
                try {
                    elementOffset = fstream.getChannel().position();
                } catch (java.nio.channels.ClosedChannelException e) {
                    log.trace("getChannel.position threw {}", e.toString());
                    elementOffset = lastElementOffset;
                } catch (IOException e) {
                    log.trace("getChannel.position threw {}", e.toString());
                    elementOffset = lastElementOffset;
                }
            } else {
                log.trace("mappingIterator.getCurrentLocation() returns {}", iteratorOffset.toString());
                elementOffset = iteratorOffset;
            }
            log.trace("Next input record: {} (class {}) from file position {}", element.toString(),
                    element.getClass().toString(), elementOffset.toString());

            if (recordSchema == null) {
                recordSchema = ConvertMappingSchema(element.keySet());
                log.trace("recordSchema created from element; type {}", recordSchema.type().toString());
            }

            if (records == null)
                records = new ArrayList<>();
            records.add(new SourceRecord(offsetKey(filename), offsetValue(elementOffset), topic, recordSchema,
                    ConvertMappingElement(recordSchema, (HashMap<?, ?>) element)));
            streamOffset = lastElementOffset = elementOffset;
        }
    } catch (Exception e) {
        throw new ConnectException(e);
    }

    lastPollTime = currentTime;
    return records;
}
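
For the CSV-with-header path used above, a stripped-down sketch: binding the reader to a header-driven schema makes each row arrive as a Map keyed by column name (the file name is illustrative):

import java.io.File;
import java.util.Map;

import com.fasterxml.jackson.databind.MappingIterator;
import com.fasterxml.jackson.dataformat.csv.CsvMapper;
import com.fasterxml.jackson.dataformat.csv.CsvSchema;

public class HeaderedCsvLoop {
    public static void main(String[] args) throws Exception {
        CsvMapper csvMapper = new CsvMapper();
        // withHeader() tells the parser to take column names from the first row
        CsvSchema schema = CsvSchema.emptySchema().withHeader();
        try (MappingIterator<Map<String, String>> it =
                csvMapper.readerFor(Map.class).with(schema).readValues(new File("input.csv"))) {
            while (it.hasNext()) {
                Map<String, String> row = it.next(); // keys come from the header row
                System.out.println(row);
            }
        }
    }
}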

From source file:org.icgc.dcc.release.test.util.TestFiles.java

@SneakyThrows
public static List<ObjectNode> readInputFile(File source) {
    if (source.isDirectory()) {
        return readInputDirectory(source);
    }

    val reader = MAPPER.reader(ObjectNode.class);

    @Cleanup
    MappingIterator<ObjectNode> iterator = reader.readValues(source);

    val rows = Lists.<ObjectNode>newArrayList();
    while (iterator.hasNext()) {
        val row = iterator.next();
        rows.add(row);
    }

    return rows;
}
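
When every remaining value is wanted, the hasNext()/next() loop above can be collapsed with MappingIterator.readAll(), which drains the iterator into a List. A minimal sketch (the file name is illustrative):

import java.io.File;
import java.util.List;

import com.fasterxml.jackson.databind.MappingIterator;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;

public class ReadAllExample {
    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        try (MappingIterator<ObjectNode> it =
                mapper.readerFor(ObjectNode.class).readValues(new File("rows.json"))) {
            List<ObjectNode> rows = it.readAll(); // same result as the explicit loop
            System.out.println(rows.size());
        }
    }
}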