Example usage for org.apache.commons.io LineIterator nextLine

Introduction

On this page you can find example usage of org.apache.commons.io.LineIterator.nextLine().

Prototype

public String nextLine() 

Document

Returns the next line in the wrapped Reader.
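
All of the examples below follow the same basic pattern: obtain a LineIterator, call nextLine() inside a hasNext() loop, and close the iterator when done. Here is a minimal, self-contained sketch of that pattern; the file name and charset are illustrative. Note that LineIterator implements Closeable, and recent Commons IO releases deprecate LineIterator.closeQuietly(LineIterator) in favor of closing the iterator directly or via try-with-resources.

import java.io.File;

import org.apache.commons.io.FileUtils;
import org.apache.commons.io.LineIterator;

public class NextLineExample {
    public static void main(String[] args) throws Exception {
        // the file name and charset here are illustrative
        LineIterator it = FileUtils.lineIterator(new File("example.txt"), "UTF-8");
        try {
            while (it.hasNext()) {
                // nextLine() returns the next line without its line terminator
                String line = it.nextLine();
                System.out.println(line);
            }
        } finally {
            it.close();
        }
    }
}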

Usage

From source file:net.sf.logsaw.dialect.pattern.APatternDialect.java

@Override
public void parse(ILogResource log, InputStream input, ILogEntryCollector collector) throws CoreException {
    Assert.isNotNull(log, "log"); //$NON-NLS-1$
    Assert.isNotNull(input, "input"); //$NON-NLS-1$
    Assert.isNotNull(collector, "collector"); //$NON-NLS-1$
    Assert.isTrue(isConfigured(), "Dialect should be configured by now"); //$NON-NLS-1$
    try {
        LogEntry currentEntry = null;
        IHasEncoding enc = (IHasEncoding) log.getAdapter(IHasEncoding.class);
        IHasLocale loc = (IHasLocale) log.getAdapter(IHasLocale.class);
        if (loc != null) {
            // Apply the locale
            getPatternTranslator().applyLocale(loc.getLocale(), rules);
        }
        IHasTimeZone tz = (IHasTimeZone) log.getAdapter(IHasTimeZone.class);
        if (tz != null) {
            // Apply the timezone
            getPatternTranslator().applyTimeZone(tz.getTimeZone(), rules);
        }
        LineIterator iter = IOUtils.lineIterator(input, enc.getEncoding());
        int minLinesPerEntry = getPatternTranslator().getMinLinesPerEntry();
        int lineNo = 0;
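        // moreLinesToCome counts how many continuation lines still belong to
        // the multi-line entry currently being assembled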
        int moreLinesToCome = 0;
        try {
            String line = null;
            while (iter.hasNext()) {
                lineNo++;

                if (minLinesPerEntry == 1) {
                    // Simple case
                    line = iter.nextLine();
                } else {
                    String s = iter.nextLine();
                    if (moreLinesToCome == 0) {
                        Matcher m = getInternalPatternFirstLine().matcher(s);
                        if (m.find()) {
                            // First line
                            line = s;
                            moreLinesToCome = minLinesPerEntry - 1;
                            continue;
                        } else {
                            // Some crazy stuff
                            line = s;
                        }
                    } else if (iter.hasNext() && (moreLinesToCome > 1)) {
                        // Some middle line
                        line += IOUtils.LINE_SEPARATOR + s;
                        moreLinesToCome--;
                        continue;
                    } else {
                        // Last line
                        line += IOUtils.LINE_SEPARATOR + s;
                        if (!iter.hasNext()) {
                            line += IOUtils.LINE_SEPARATOR;
                        }
                        moreLinesToCome = 0;
                    }
                }

                // Error handling
                List<IStatus> statuses = null;
                boolean fatal = false; // determines whether to interrupt parsing

                Matcher m = getInternalPatternFull().matcher(line);
                if (m.find()) {
                    // The next line matches, so flush the previous entry and continue
                    if (currentEntry != null) {
                        collector.collect(currentEntry);
                        currentEntry = null;
                    }
                    currentEntry = new LogEntry();
                    for (int i = 0; i < m.groupCount(); i++) {
                        try {
                            getPatternTranslator().extractField(currentEntry, getRules().get(i),
                                    m.group(i + 1));
                        } catch (CoreException e) {
                            // Mark for interruption
                            fatal = fatal || e.getStatus().matches(IStatus.ERROR);

                            // Messages will be displayed later
                            if (statuses == null) {
                                statuses = new ArrayList<IStatus>();
                            }
                            if (e.getStatus().isMultiStatus()) {
                                Collections.addAll(statuses, e.getStatus().getChildren());
                            } else {
                                statuses.add(e.getStatus());
                            }
                        }
                    }

                    // We encountered errors or warnings
                    if (statuses != null && !statuses.isEmpty()) {
                        currentEntry = null; // Stop propagation
                        IStatus status = new MultiStatus(PatternDialectPlugin.PLUGIN_ID, 0,
                                statuses.toArray(new IStatus[statuses.size()]),
                                NLS.bind(Messages.APatternDialect_error_failedToParseLine, lineNo), null);
                        if (fatal) {
                            // Interrupt parsing in case of error
                            throw new CoreException(status);
                        } else {
                            collector.addMessage(status);
                        }
                    }
                } else if (currentEntry != null) {
                    // Append to message
                    String msg = currentEntry.get(getFieldProvider().getMessageField());
                    currentEntry.put(getFieldProvider().getMessageField(), msg + IOUtils.LINE_SEPARATOR + line);
                }

                if (collector.isCanceled()) {
                    // Cancel parsing
                    break;
                }
            }

            if (currentEntry != null) {
                // Collect left over entry
                collector.collect(currentEntry);
            }
        } finally {
            LineIterator.closeQuietly(iter);
        }
    } catch (Exception e) {
        throw new CoreException(new Status(IStatus.ERROR, PatternDialectPlugin.PLUGIN_ID,
                NLS.bind(Messages.APatternDialect_error_failedToParseFile,
                        new Object[] { log.getName(), e.getLocalizedMessage() }),
                e));
    }
}

From source file:com.daphne.es.showcase.excel.service.ExcelDataService.java

/**
 * Imports CSV data asynchronously.
 * @param user the user to notify when the import finishes
 * @param is the input stream containing the CSV data
 */
@Async
public void importCvs(final User user, final InputStream is) {

    ExcelDataService proxy = ((ExcelDataService) AopContext.currentProxy());
    BufferedInputStream bis = null;
    try {
        long beginTime = System.currentTimeMillis();

        bis = new BufferedInputStream(is);
        String encoding = FileCharset.getCharset(bis);

        LineIterator iterator = IOUtils.lineIterator(bis, encoding);

        String separator = ",";
        int totalSize = 0; // number of data rows read so far

        final List<ExcelData> dataList = Lists.newArrayList();

        if (iterator.hasNext()) {
            iterator.nextLine(); // skip the header row
        }

        while (iterator.hasNext()) {

            totalSize++;

            String line = iterator.nextLine();
            String[] dataArray = StringUtils.split(line, separator);

            ExcelData data = new ExcelData();
            data.setId(Long.valueOf(dataArray[0]));
            data.setContent(dataArray[1]);
            dataList.add(data);

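            // flush each full batch through the AOP proxy so that doBatchSave
            // runs with its Spring advice (for example transaction handling) applied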
            if (totalSize % batchSize == 0) {
                try {
                    proxy.doBatchSave(dataList);
                } catch (Exception e) {
                    Long fromId = dataList.get(0).getId();
                    Long endId = dataList.get(dataList.size() - 1).getId();
                    log.error("from " + fromId + " to " + endId + ", error", e);
                }
                dataList.clear();
            }
        }

        if (dataList.size() > 0) {
            proxy.doBatchSave(dataList);
        }

        long endTime = System.currentTimeMillis();

        Map<String, Object> context = Maps.newHashMap();
        context.put("seconds", (endTime - beginTime) / 1000);
        notificationApi.notify(user.getId(), "excelImportSuccess", context);
    } catch (Exception e) {
        log.error("excel import error", e);
        Map<String, Object> context = Maps.newHashMap();
        context.put("error", e.getMessage());
        notificationApi.notify(user.getId(), "excelImportError", context);
    } finally {
        IOUtils.closeQuietly(bis);
    }
}

From source file:at.ac.ait.ubicity.fileloader.FileLoader.java

/**
 * Loads a log file into Cassandra, skipping any lines that were already ingested.
 * @param _fileInfo A FileInformation object representing usage information on the file we are supposed to load: line count already ingested, last usage time...
 * @param _keySpace Cassandra key space into which to ingest
 * @param _host Cassandra host / server
 * @param _batchSize MutationBatch size
 * @throws Exception Shouldn't happen, although the Disruptor may throw an Exception under duress
 */
@SuppressWarnings("unchecked")
public final static void load(final FileInformation _fileInfo, final String _keySpace, final String _host,
        final int _batchSize) throws Exception {

    if (!cassandraInitialized) {
        keySpace = AstyanaxInitializer.doInit("Test Cluster", _host, _keySpace);
        cassandraInitialized = true;
    }

    LongTimeStampSorter tsSorter = new LongTimeStampSorter();
    Thread tTSSorter = new Thread(tsSorter);
    tTSSorter.setPriority(Thread.MAX_PRIORITY - 1);
    tTSSorter.setName("long timestamp sorter ");
    tTSSorter.start();
    //get the log id from the file's URI
    final String log_id = _fileInfo.getURI().toString();

    final MutationBatch batch = keySpace.prepareMutationBatch();

    logger.info("got keyspace " + keySpace.getKeyspaceName() + " from Astyanax initializer");

    final LineIterator onLines = FileUtils.lineIterator(new File(_fileInfo.getURI()));

    final ExecutorService exec = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors() * 2);

    ColumnFamily crawl_stats = null;

    AggregationJob aggregationJob = new AggregationJob(keySpace, crawl_stats);
    Thread tAggJob = new Thread(aggregationJob);
    tAggJob.setName("Monitrix loader / aggregation job ");
    tAggJob.setPriority(Thread.MIN_PRIORITY + 1);
    tAggJob.start();
    logger.info("[FILELOADER] started aggregation job, ring buffer running");

    final Disruptor<SingleLogLineAsString> disruptor = new Disruptor(SingleLogLineAsString.EVENT_FACTORY,
            (int) Math.pow(TWO, 17), exec);
    SingleLogLineAsStringEventHandler.batch = batch;
    SingleLogLineAsStringEventHandler.keySpace = keySpace;
    SingleLogLineAsStringEventHandler.batchSize = _batchSize;
    SingleLogLineAsStringEventHandler.LOG_ID = log_id;
    SingleLogLineAsStringEventHandler.tsSorter = tsSorter;
    SingleLogLineAsStringEventHandler.aggregationJob = aggregationJob;

    //The EventHandler contains the actual logic for ingesting
    final EventHandler<SingleLogLineAsString> handler = new SingleLogLineAsStringEventHandler();

    disruptor.handleEventsWith(handler);

    //get our Aggregate job in place

    //we are almost ready to start
    final RingBuffer<SingleLogLineAsString> rb = disruptor.start();

    int _lineCount = 0;
    long _start, _lapse;
    _start = System.nanoTime();

    int _linesAlreadyProcessed = _fileInfo.getLineCount();

    //cycle through the lines already processed
    while (_lineCount < _linesAlreadyProcessed) {
        onLines.nextLine();
        _lineCount++;
    }

    //now get down to the work we actually must do, and fill the ring buffer
    logger.info("begin processing of file " + _fileInfo.getURI() + " @line #" + _lineCount);
    while (onLines.hasNext()) {

        final long _seq = rb.next();
        final SingleLogLineAsString event = rb.get(_seq);
        event.setValue(onLines.nextLine());
        rb.publish(_seq);
        _lineCount++;
    }
    _lapse = System.nanoTime() - _start;
    logger.info("ended processing of file " + _fileInfo.getURI() + " @line #" + _lineCount);

    //stop, waiting for last threads still busy to finish their work
    disruptor.shutdown();

    //update the file info, this will  land in the cache
    _fileInfo.setLineCount(_lineCount);
    _fileInfo.setLastAccess(System.currentTimeMillis());
    int _usageCount = _fileInfo.getUsageCount();
    _fileInfo.setUsageCount(_usageCount + 1);

    //make sure we release resources
    onLines.close();

    logger.info(
            "handled " + (_lineCount - _linesAlreadyProcessed) + " log lines in " + _lapse + " nanoseconds");

    //now go to aggregation step
    SortedSet<Long> timeStamps = new TreeSet(tsSorter.timeStamps);

    long _minTs = timeStamps.first();
    long _maxTs = timeStamps.last();
    logger.info("**** min TimeStamp = " + _minTs);
    logger.info("**** max TimeStamp = " + _maxTs);

    StatsTableActualizer.update(_fileInfo.getURI().toString(), _minTs, _maxTs, _lineCount);

    //        AggregationJob aggJob = new AggregationJob( keySpace, _host, _batchSize );
    //        Thread tAgg = new Thread( aggJob );
    //        tAgg.setName( "aggregation job " );
    //        tAgg.setPriority( Thread.MAX_PRIORITY - 1 );
    //        tAgg.start();

}

From source file:edu.ku.brc.util.HelpIndexer.java

protected void processFile(final File file, final Vector<String> lines) {
    // System.out.println("processing file: " + file.getName());

    LineIterator it;
    try {
        it = FileUtils.lineIterator(file, "UTF-8");
    } catch (IOException ex) {
        edu.ku.brc.af.core.UsageTracker.incrHandledUsageCount();
        edu.ku.brc.exceptions.ExceptionTracker.getInstance().capture(HelpIndexer.class, ex);
        System.out.println("error processing file: " + file.getName());
        return;
    }
    String target = getTarget(file);
    String title = getFileTitle(file);
    boolean removeTitleEntry = false;
    if (title != null) {
        String tline = "<indexitem text=\"" + title;
        if (target != null) {
            tline += "\"  target=\"" + target;
        }
        tline += "\">";
        lines.add(tline);
        removeTitleEntry = true;
    }
    if (target != null) {
        try {
            while (it.hasNext()) {
                String line = it.nextLine();
                //System.out.println(line);
                if (isIndexLine(line)) {
                    System.out.println("indexing " + file.getName() + ": " + line);
                    String indexEntry = processIndexLine(line, target);
                    if (indexEntry != null) {
                        lines.add("     " + indexEntry);
                        removeTitleEntry = false;
                    }
                }
            }
        } finally {
            LineIterator.closeQuietly(it);
        }
    }
    if (title != null && !removeTitleEntry) {
        lines.add("</indexitem>");
    }
    if (removeTitleEntry) {
        lines.remove(lines.size() - 1);
    }
}

From source file:itemsetmining.itemset.ItemsetTree.java

/**
 * Build the itemset-tree based on an input file containing transactions
 *
 * @param inputFile
 *            an input file
 */
public void buildTree(final File inputFile) throws IOException {
    // record start time
    startTimestamp = System.currentTimeMillis();

    // reset memory usage statistics
    MemoryLogger.getInstance().reset();

    // create an empty root for the tree
    root = new ItemsetTreeNode(null, 0);

    // Scan the database to read the transactions
    int count = 0;
    final LineIterator it = FileUtils.lineIterator(inputFile, "UTF-8");
    while (it.hasNext()) {

        final String line = it.nextLine();
        // if the line is a comment, is empty or is a
        // kind of metadata
        if (line.isEmpty() || line.charAt(0) == '#' || line.charAt(0) == '%' || line.charAt(0) == '@') {
            continue;
        }

        // add transaction to the tree
        addTransaction(line);
        count++;
    }
    // close the input file
    LineIterator.closeQuietly(it);

    // set the number of transactions
    noTransactions = count;

    // check the memory usage
    MemoryLogger.getInstance().checkMemory();
    endTimestamp = System.currentTimeMillis();
}

From source file:edu.cornell.med.icb.goby.modes.EmpiricalPMode.java

private void scan() throws FileNotFoundException {
    LineIterator iterator = new LineIterator(new FastBufferedReader(new FileReader(inputFilename)));
    int lineNumber = 0;
    ObjectArrayList<String> elementIds = new ObjectArrayList<String>();

    IntArrayList valuesA = new IntArrayList();

    IntArrayList valuesB = new IntArrayList();
    IntArrayList covariatesA = new IntArrayList();
    IntArrayList covariatesB = new IntArrayList();

    counter = new FormatFieldCounter(0, 2, 2, new String[] { "ALL" });
    setupOutput();
    // ignore the header line:
    iterator.next();
    ProgressLogger pg = new ProgressLogger(LOG);
    pg.displayFreeMemory = true;
    pg.itemsName = "pairs";
    pg.expectedUpdates = countLines(inputFilename) - 1;
    pg.start("Starting to scan pairs.");
    while (iterator.hasNext()) {
        String next = iterator.nextLine();
        String[] tokens = next.split("\t");
        boolean pastIds = false;
        boolean pastValues = false;

        String typeOfPairString = tokens[0];
        ObservationWriter.TypeOfPair typeOfPair = ObservationWriter.TypeOfPair.UNDEFINED;

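        // Each input line is laid out as:
        //   TYPE GROUP id... VALUES_A int... VALUES_B int... COVARIATES_A int... COVARIATES_B int...
        // The loops below walk the tokens section by section, using these
        // keywords as delimiters.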
        for (int i = 0; i < tokens.length; i++) {

            try {
                typeOfPair = ObservationWriter.TypeOfPair.valueOf(typeOfPairString);
            } catch (IllegalArgumentException e) {
                System.err.println(
                        "First token of every line should be WITHIN_GROUP_PAIR or BETWEEN_GROUP_PAIR. Found "
                                + typeOfPairString + " on line " + lineNumber);
                System.exit(1);
            }

            elementIds.clear();
            valuesA.clear();
            valuesB.clear();
            covariatesA.clear();
            covariatesB.clear();
            int j;
            String groupComparison = tokens[1];
            elementIds.add(groupComparison);
            for (j = 2; j < tokens.length && !"VALUES_A".equals(tokens[j]); j++) {
                elementIds.add(tokens[j]);
            }
            if (j == tokens.length) {
                System.err.println(
                        "Every line must contain the VALUES_A keyword. Keyword not found on line " + lineNumber);
                System.exit(1);
            }
            j++;
            for (; j < tokens.length && !"VALUES_B".equals(tokens[j]); j++) {
                valuesA.add(Integer.parseInt(tokens[j]));
            }
            j++;
            for (; j < tokens.length && !"COVARIATES_A".equals(tokens[j]); j++) {
                valuesB.add(Integer.parseInt(tokens[j]));
            }
            if (j == tokens.length) {
                System.err
                        .println("Every line must contain the COVARIATES_A keyword. Keyword not found on line "
                                + lineNumber);
                System.exit(1);
            }
            j++;
            for (; j < tokens.length && !"COVARIATES_B".equals(tokens[j]); j++) {
                covariatesA.add(Integer.parseInt(tokens[j]));
            }
            if (j == tokens.length) {
                System.err
                        .println("Every line must contain the COVARIATES_B keyword. Keyword not found on line "
                                + lineNumber);
                System.exit(1);
            }
            j++;
            for (; j < tokens.length; j++) {
                covariatesB.add(Integer.parseInt(tokens[j]));
            }

        }
        lineNumber++;
        final String groupComparison = elementIds.get(0);
        process(typeOfPair, groupComparison, elementIds, valuesA, valuesB, covariatesA, covariatesB);
        pg.lightUpdate();
    }
    pg.done(lineNumber);
}

From source file:au.org.ala.names.search.ALANameIndexer.java

public void init() throws Exception {

    tnse = new TaxonNameSoundEx();
    // init the known homonyms
    LineIterator lines = new LineIterator(
            new BufferedReader(new InputStreamReader(this.getClass().getClassLoader()
                    .getResource("au/org/ala/propertystore/known_homonyms.txt").openStream(), "ISO-8859-1")));
    LineIterator blines = new LineIterator(new BufferedReader(
            new InputStreamReader(this.getClass().getClassLoader().getResource("blacklist.txt").openStream())));
    try {
        //load known homonyms
        while (lines.hasNext()) {
            String line = lines.nextLine().trim();
            knownHomonyms.add(line.toUpperCase());
        }
        //load the blacklist
        while (blines.hasNext()) {
            String line = blines.nextLine().trim();
            if (!line.startsWith("#") && StringUtils.isNotBlank(line))
                blacklist.add(line);
        }
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        lines.close();
        blines.close();
    }
}

From source file:com.datamelt.nifi.processors.ExecuteRuleEngine.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    // map used to store the attribute name and its value from the content of the flow file
    final Map<String, String> propertyMap = new HashMap<>();

    // get a logger instance
    final ComponentLog logger = getLogger();

    // the header row from the flow file content, if present
    final AtomicReference<HeaderRow> header = new AtomicReference<>();

    AtomicBoolean error = new AtomicBoolean();

    // get the flow file
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    // list of rows from splitting the original flow file content
    ArrayList<RuleEngineRow> flowFileRows = new ArrayList<RuleEngineRow>();

    // list of rows containing the detailed results of the ruleengine
    ArrayList<RuleEngineRow> flowFileDetails = new ArrayList<RuleEngineRow>();

    boolean headerPresent = context.getProperty(ATTRIBUTE_HEADER_PRESENT).getValue().equals("true");

    // put the name of the ruleengine zip file in the list of properties
    propertyMap.put(PROPERTY_RULEENGINE_ZIPFILE_NAME,
            context.getProperty(ATTRIBUTE_RULEENGINE_ZIPFILE).getValue());

    final int batchSize = Integer.parseInt(context.getProperty(BATCH_SIZE_NAME).getValue());

    // read flow file into input stream
    session.read(flowFile, new InputStreamCallback() {
        public void process(InputStream in) throws IOException {
            try {
                // iterator over the lines from the input stream
                LineIterator iterator = IOUtils.lineIterator(in, "utf-8");

                // check if configuration indicates that a header row is present in the flow file content
                if (headerPresent) {
                    logger.debug("configuration indicates a header row is present in flow file content");

                    // if there is at least one row of data and the header is not defined yet
                    if (iterator.hasNext() && header.get() == null) {
                        // set the header from the content
                        header.set(new HeaderRow(iterator.nextLine(), separator));
                    }
                }
                // if no header row is present in the flow file content
                else {
                    logger.debug("configuration indicates no header row is present in flow file content");

                    // use the header from the field names
                    header.set(headerFromFieldNames);
                }

                // loop over all rows of data
                while (iterator.hasNext()) {
                    // we handle the error per row of data
                    error.set(false);

                    // get a row to process
                    String row = iterator.nextLine();

                    // check that we have data
                    if (row != null && !row.trim().equals("")) {
                        RowFieldCollection rowFieldCollection = null;
                        try {
                            rowFieldCollection = getRowFieldCollection(row, header.get());

                            logger.debug("RowFieldCollection header contains: "
                                    + rowFieldCollection.getHeader().getNumberOfFields() + " fields");
                            logger.debug("RowFieldCollection contains: "
                                    + rowFieldCollection.getNumberOfFields() + " fields");

                            // run the ruleengine with the given data from the flow file
                            logger.debug("running business ruleengine...");

                            // run the business logic/rules against the data
                            ruleEngine.run("flowfile", rowFieldCollection);

                            // add some debugging output that might be useful
                            logger.debug("number of rulegroups: " + ruleEngine.getNumberOfGroups());
                            logger.debug(
                                    "number of rulegroups passed: " + ruleEngine.getNumberOfGroupsPassed());
                            logger.debug(
                                    "number of rulegroups failed: " + ruleEngine.getNumberOfGroupsFailed());
                            logger.debug(
                                    "number of rulegroups skipped: " + ruleEngine.getNumberOfGroupsSkipped());
                            logger.debug("number of rules: " + ruleEngine.getNumberOfRules());
                            logger.debug("number of rules passed: " + ruleEngine.getNumberOfRulesPassed());
                            logger.debug("number of rules failed: " + ruleEngine.getNumberOfRulesFailed());
                            logger.debug("number of actions: " + ruleEngine.getNumberOfActions());

                            // add some properties of the ruleengine execution to the map
                            addRuleEngineProperties(propertyMap);
                        } catch (Exception ex) {
                            error.set(true);
                            logger.error(ex.getMessage(), ex);
                        }

                        // if no error occurred we save the data for the later creation of the flow files
                        if (!error.get()) {
                            // process only if the collection of fields was changed by
                            // a ruleengine action. this means the data was updated so
                            // we will have to re-write/re-create the flow file content.
                            if (rowFieldCollection.isCollectionUpdated()) {
                                // put an indicator that the data was modified by the ruleengine
                                propertyMap.put(PROPERTY_RULEENGINE_CONTENT_MODIFIED, "true");

                                logger.debug(
                                        "data was modified - updating flow file content with ruleengine results");

                                // the RuleEngineRow instance will contain the row of data and the map of properties
                                // and will later be used when the flow files are created
                                flowFileRows
                                        .add(new RuleEngineRow(getResultRow(rowFieldCollection), propertyMap));
                            } else {
                                // put an indicator that the data was NOT modified by the ruleengine
                                propertyMap.put(PROPERTY_RULEENGINE_CONTENT_MODIFIED, "false");

                                logger.debug("data was not modified - using original content");

                                // the RuleEngineRow instance will contain the row of data and the map of properties
                                // and will later be used when the flow files are created
                                flowFileRows.add(new RuleEngineRow(row, propertyMap));
                            }

                            if (flowFileRows.size() >= batchSize) {
                                // generate flow files from the individual rows
                                List<FlowFile> splitFlowFiles = generateFlowFileSplits(context, session,
                                        flowFileRows, header.get(), headerPresent);
                                // transfer all individual rows to success relationship
                                if (splitFlowFiles.size() > 0) {
                                    session.transfer(splitFlowFiles, SUCCESS);
                                }
                            }

                            // if the user configured detailed results 
                            if (context.getProperty(ATTRIBUTE_OUTPUT_DETAILED_RESULTS).getValue()
                                    .equals("true")) {
                                // get the configured output type
                                String outputType = context.getProperty(ATTRIBUTE_OUTPUT_DETAILED_RESULTS_TYPE)
                                        .getValue();
                                logger.debug("configuration set to output detailed results with type ["
                                        + outputType + "]");

                                // we need to create a flow file only, if the ruleengine results are according to the output type settings
                                if (outputType.equals(OUTPUT_TYPE_ALL_GROUPS_ALL_RULES)
                                        || (outputType.equals(OUTPUT_TYPE_FAILED_GROUPS_ALL_RULES)
                                                && ruleEngine.getNumberOfGroupsFailed() > 0)
                                        || (outputType.equals(OUTPUT_TYPE_FAILED_GROUPS_FAILED_RULES)
                                                && ruleEngine.getNumberOfGroupsFailed() > 0)
                                        || (outputType.equals(OUTPUT_TYPE_FAILED_GROUPS_PASSED_RULES)
                                                && ruleEngine.getNumberOfGroupsFailed() > 0)
                                        || (outputType.equals(OUTPUT_TYPE_PASSED_GROUPS_ALL_RULES)
                                                && ruleEngine.getNumberOfGroupsPassed() > 0)
                                        || (outputType.equals(OUTPUT_TYPE_PASSED_GROUPS_FAILED_RULES)
                                                && ruleEngine.getNumberOfGroupsPassed() > 0
                                                || (outputType.equals(OUTPUT_TYPE_PASSED_GROUPS_PASSED_RULES)
                                                        && ruleEngine.getNumberOfGroupsPassed() > 0))) {
                                    // create the content for the flow file
                                    String content = getFlowFileRuleEngineDetailsContent(header.get(),
                                            headerPresent, outputType, row);

                                    // add results to the list
                                    flowFileDetails.add(new RuleEngineRow(content, propertyMap));

                                    if (flowFileDetails.size() >= batchSize) {
                                        List<FlowFile> detailsFlowFiles = generateFlowFilesRuleEngineDetails(
                                                context, session, flowFileDetails, header.get(), headerPresent);
                                        // transfer all individual rows to detailed relationship
                                        if (detailsFlowFiles.size() > 0) {
                                            session.transfer(detailsFlowFiles, DETAILED_RESULTS);
                                        }
                                    }
                                }
                            }
                            // clear the collections of ruleengine results
                            ruleEngine.getRuleExecutionCollection().clear();
                        }
                        // if an error occurred we create a flow file from the current row of data and send it to the failure relationship
                        else {
                            FlowFile failureFlowFile = generateFailureFlowFile(context, session, row,
                                    header.get(), headerPresent);
                            session.transfer(failureFlowFile, FAILURE);
                        }
                    }
                }

                LineIterator.closeQuietly(iterator);
            } catch (Exception ex) {
                ex.printStackTrace();
                logger.error("error running the business ruleengine", ex);
            }
        }
    });

    // generate flow files from the individual rows
    List<FlowFile> splitFlowFiles = generateFlowFileSplits(context, session, flowFileRows, header.get(),
            headerPresent);

    // generate flow files for the detailed ruleengine results
    List<FlowFile> detailsFlowFiles = generateFlowFilesRuleEngineDetails(context, session, flowFileDetails,
            header.get(), headerPresent);

    // transfer the original flow file
    session.transfer(flowFile, ORIGINAL);

    // transfer all individual rows to success relationship
    if (splitFlowFiles.size() > 0) {
        session.transfer(splitFlowFiles, SUCCESS);
    }

    // transfer all detailed results to the detailed results relationship
    if (detailsFlowFiles.size() > 0) {
        session.transfer(detailsFlowFiles, DETAILED_RESULTS);
    }
}

From source file:com.edgenius.wiki.service.impl.BackupServiceImpl.java

/**
 * @param binderFile
 */
private int versionCheck(File binderFile) {
    //it is not worth using a full XML parser just to get the version number
    String verStr = null;
    LineIterator iter = null;
    try {
        for (iter = FileUtils.lineIterator(binderFile); iter.hasNext();) {
            String line = iter.nextLine();
            if (verStr != null) {
                int eIdx = line.indexOf("</version>");
                if (eIdx != -1) {
                    verStr += line.substring(0, eIdx);
                    break;
                } else {
                    AuditLogger.error("Version close tag in binder was not found on the next line; giving up");
                }
                //don't expect the version tag to span more than 2 lines!
                break;
            }

            int sIdx = line.indexOf("<version>");
            if (sIdx != -1) {
                int eIdx = line.indexOf("</version>");
                if (eIdx != -1) {
                    verStr = line.substring(sIdx + "<version>".length(), eIdx);
                    break;
                } else {
                    verStr = line.substring(sIdx + "<version>".length());
                    AuditLogger.error("Version open and close tags in binder are not even on the same line!?");
                }
            }

        }

    } catch (IOException e) {
        log.error("Unable to read binder file to get version ", e);
    } finally {
        if (iter != null)
            iter.close();
    }

    if (verStr == null || NumberUtils.toFloat(verStr.trim(), -1f) == -1f) {
        log.error("version parse failed.");
        return 0;
    }

    //upgrade binder file
    try {
        upgradeService.doBackupPackageUpgardeForBinder(verStr.trim(), binderFile);
    } catch (Exception e) {
        log.error("Unexpected error while upgrading backup binder file from " + verStr.trim() + " to "
                + Version.VERSION, e);
    }

    return (int) (NumberUtils.toFloat(verStr.trim(), 0) * 1000);
}

From source file:mitm.common.security.smime.SMIMEBuilderImplTest.java

@Test
public void testEncryptBase64EncodeBug() throws Exception {
    MimeMessage message = new MimeMessage(MailSession.getDefaultSession());

    message.setSubject("test");
    message.setContent("test", "text/plain");

    SMIMEBuilder builder = new SMIMEBuilderImpl(message, "to", "subject", "from");

    X509Certificate certificate = TestUtils
            .loadCertificate("test/resources/testdata/certificates/certificate-base64-encode-bug.cer");

    builder.addRecipient(certificate, SMIMERecipientMode.ISSUER_SERIAL);

    builder.encrypt(SMIMEEncryptionAlgorithm.DES_EDE3_CBC);

    MimeMessage newMessage = builder.buildMessage();

    newMessage.saveChanges();

    File file = new File(tempDir, "testEncryptBase64EncodeBug.eml");

    FileOutputStream output = new FileOutputStream(file);

    MailUtils.writeMessage(newMessage, output);

    newMessage = MailUtils.loadMessage(file);

    ByteArrayOutputStream bos = new ByteArrayOutputStream();

    newMessage.writeTo(new SkipHeadersOutputStream(bos));

    String blob = new String(bos.toByteArray(), "us-ascii");

    // check that no line is longer than 76 characters
    LineIterator it = IOUtils.lineIterator(new StringReader(blob));

    while (it.hasNext()) {
        String next = it.nextLine();

        if (next.length() > 76) {
            fail("Line length exceeds 76: " + next);
        }
    }
}