Example usage for org.apache.commons.io LineIterator nextLine

List of usage examples for org.apache.commons.io LineIterator nextLine

Introduction

In this page you can find the example usage for org.apache.commons.io LineIterator nextLine.

Prototype

public String nextLine() 

Source Link

Document

Returns the next line in the wrapped Reader.

Usage

From source file:org.limy.eclipse.qalab.task.Java2HtmlTask.java

/**
 * javahtml?B/* w  w  w .j  av a 2s .co  m*/
 * @param cmd java->htmlR}h
 * @param lines Java
 * @param fileName t@C
 * @return HTML
 */
private String convertHtml(JavaToHtml cmd, String lines, String fileName) {
    LineIterator iterator = new LineIterator(new StringReader(lines));

    StringBuilder buff = new StringBuilder();
    appendLine(buff, "<head>");
    appendLine(buff, "<meta http-equiv=\"Content-Type\" content=\"text/html;" + " charset=UTF-8\"/>");
    buff.append("<title>").append(fileName).append("</title>");
    appendLine(buff, "<style type=\"text/css\">");
    appendLine(buff, "<!-- pre.src { margin-top: 0px; margin-bottom: 0px; } -->");
    appendLine(buff, "<!-- td.numLine { background: #f0f0f0; border-right: #dcdcdc 1px solid;"
            + " padding-right: 3px; text-align: right; } -->");
    appendLine(buff, "<!-- table.src {  border: #dcdcdc 1px solid; font-size: 16px; } -->");
    appendLine(buff, "<!-- td.src { width: 100%; } -->");
    appendLine(buff, "<!-- span.comment { color: #b22222; font-style: italic; } -->");
    appendLine(buff, "<!-- span.keyword { color: #2020bf; font-weight: bold; } -->");
    appendLine(buff, "<!-- span.string { color: #2a00ff; } -->");
    appendLine(buff, "<!-- span.text_italic { font-size: 12px; font-style: italic; } -->");
    appendLine(buff, "<!-- body { font-family: verdana, arial, helvetica; } -->");
    appendLine(buff, "<!-- div.separator { height: 10px; } -->");
    appendLine(buff, "<!-- table tr td, table tr th { font-size: 75%; } -->");
    appendLine(buff, "<!-- h1, h2, h3, h4, h5, h6 { margin-bottom: 0.5em; } -->");
    appendLine(buff, "<!-- h5 { margin-top: 0.5em; } -->");
    appendLine(buff, "</style>");
    //        appendLine(buff, "<link title=\"Style\" type=\"text/css\" rel=\"stylesheet\" href=\"css/main.css\"/>");
    appendLine(buff, "</head>");

    if (enableLineAnchor) {
        appendLine(buff, "<script>");
        appendLine(buff,
                "function funcInit() { " + "var pos = window.location.href.lastIndexOf(\"#\"); "
                        + "if (pos >= 0) { var number = window.location.href.substring(pos + 1);"
                        + " document.getElementById(number).style.backgroundColor = \"yellow\"; }}");
        appendLine(buff, "</script>");
        appendLine(buff, "<body onload=\"funcInit()\">");
    } else {
        appendLine(buff, "<body>");
    }
    buff.append("<h5>").append(fileName).append("</h5>");
    appendLine(buff, "<div class=\"separator\">&nbsp;</div>");
    appendLine(buff, "<table cellspacing=\"0\" cellpadding=\"0\" class=\"src\">");

    int number = 1;
    while (iterator.hasNext()) {
        String line = cmd.process(iterator.nextLine());
        buff.append("<tr id=\"").append(number).append("\">");
        buff.append("<td class=\"numLine\">&nbsp;").append(number).append("</td>");
        buff.append("<td class=\"src\"><pre class=\"src\">&nbsp;").append(line);
        buff.append("</pre></td>");
        appendLine(buff, "</tr>");

        ++number;
    }

    appendLine(buff, "</table>");
    appendLine(buff, "</body>");

    return buff.toString();
}

From source file:org.limy.eclipse.qalab.task.TodoReport.java

/**
 * Scans Java source contents for TODO markers and records them as a TodoBean.
 *
 * @param fileName name of the source file being scanned
 * @param contents full text of the Java source
 */
public void parseJavaSource(String fileName, String contents) {
    TodoBean bean = new TodoBean();
    bean.setName(fileName);
    LineIterator iterator = new LineIterator(new StringReader(contents));
    try {
        int lineNumber = 1;
        while (iterator.hasNext()) {
            String line = iterator.nextLine();
            Matcher matcher = PATTERN_TODO.matcher(line);
            if (matcher.matches()) {
                // group(3) presumably captures the TODO message text — depends on
                // the groups defined by PATTERN_TODO (declared elsewhere).
                String message = matcher.group(3);
                bean.addError(new TodoError(lineNumber, message));
            }
            ++lineNumber;
        }
    } finally {
        LineIterator.closeQuietly(iterator);
    }
    beans.add(bean);
}

From source file:org.m1theo.apt.repo.signing.PGPSigner.java

/**
 * Creates a clear sign signature over the input data. (Not detached)
 *
 * @param input      the content to be signed
 * @param output     the output destination of the signature
 * @throws IOException if reading the input or writing the output fails
 * @throws PGPException if signature generation fails
 * @throws GeneralSecurityException on other cryptographic failures
 */
public void clearSign(InputStream input, OutputStream output)
        throws IOException, PGPException, GeneralSecurityException {

    PGPSignatureGenerator signatureGenerator = new PGPSignatureGenerator(
            new BcPGPContentSignerBuilder(privateKey.getPublicKeyPacket().getAlgorithm(), digest));
    signatureGenerator.init(PGPSignature.CANONICAL_TEXT_DOCUMENT, privateKey);

    ArmoredOutputStream armoredOutput = new ArmoredOutputStream(output);
    armoredOutput.beginClearText(digest);

    // Decode explicitly as UTF-8: the signed bytes below are produced with
    // getBytes("UTF-8"), so reading with the platform-default charset would
    // corrupt the signature on non-UTF-8 systems.
    LineIterator iterator = new LineIterator(new InputStreamReader(input, "UTF-8"));

    while (iterator.hasNext()) {
        String line = iterator.nextLine();

        // trailing spaces must be removed for signature calculation (see http://tools.ietf.org/html/rfc4880#section-7.1)
        byte[] data = trim(line).getBytes("UTF-8");

        armoredOutput.write(data);
        armoredOutput.write(EOL);

        signatureGenerator.update(data);
        // RFC 4880: the final line's trailing newline is not part of the signed text.
        if (iterator.hasNext()) {
            signatureGenerator.update(EOL);
        }
    }

    armoredOutput.endClearText();

    PGPSignature signature = signatureGenerator.generate();
    signature.encode(new BCPGOutputStream(armoredOutput));

    armoredOutput.close();
}

From source file:org.m1theo.apt.repo.signing.PGPSigner.java

/**
 * Creates a detached clear sign signature over the input data.
 *
 * @param input      the content to be signed
 * @param output     the output destination of the signature
 * @throws IOException if reading the input or writing the output fails
 * @throws PGPException if signature generation fails
 * @throws GeneralSecurityException on other cryptographic failures
 */
public void clearSignDetached(InputStream input, OutputStream output)
        throws IOException, PGPException, GeneralSecurityException {

    PGPSignatureGenerator signatureGenerator = new PGPSignatureGenerator(
            new BcPGPContentSignerBuilder(privateKey.getPublicKeyPacket().getAlgorithm(), digest));
    signatureGenerator.init(PGPSignature.CANONICAL_TEXT_DOCUMENT, privateKey);

    ArmoredOutputStream armoredOutput = new ArmoredOutputStream(output);

    // Decode explicitly as UTF-8: the signed bytes below are produced with
    // getBytes("UTF-8"), so reading with the platform-default charset would
    // corrupt the signature on non-UTF-8 systems.
    LineIterator iterator = new LineIterator(new InputStreamReader(input, "UTF-8"));

    while (iterator.hasNext()) {
        String line = iterator.nextLine();

        // trailing spaces must be removed for signature calculation (see http://tools.ietf.org/html/rfc4880#section-7.1)
        byte[] data = trim(line).getBytes("UTF-8");

        signatureGenerator.update(data);
        // RFC 4880: the final line's trailing newline is not part of the signed text.
        if (iterator.hasNext()) {
            signatureGenerator.update(EOL);
        }
    }

    PGPSignature signature = signatureGenerator.generate();
    signature.encode(new BCPGOutputStream(armoredOutput));

    armoredOutput.close();
}

From source file:org.mskcc.cbio.importer.io.internal.FileUtilsImpl.java

/**
 * Reads the precomputed md5 digest out of a .md5 file (firehose).
 * Assumes the file only contains one line with the checksum.
 *
 * @param file File the .md5 file to read
 * @return String the uppercase digest, or "" if no matching line was found
 * @throws Exception on I/O failure
 */
@Override
public String getPrecomputedMD5Digest(File file) throws Exception {

    if (LOG.isInfoEnabled()) {
        LOG.info("getPrecomputedMD5Digest(): " + file.getCanonicalPath());
    }

    String toReturn = "";
    LineIterator it = org.apache.commons.io.FileUtils.lineIterator(file);
    try {
        while (it.hasNext()) {
            String content = it.nextLine();
            // Expects "digest filename" separated by a SINGLE space; if several
            // lines match, the last one wins. NOTE(review): standard md5sum output
            // uses two spaces, which would give 3 tokens here — confirm the format.
            if (content.split(" ").length == 2) {
                toReturn = content.split(" ")[0].toUpperCase();
            }
        }
    } finally {
        LineIterator.closeQuietly(it);
    }

    // outta here
    return toReturn;
}

From source file:org.mskcc.cbio.importer.io.internal.FileUtilsImpl.java

/**
 * Get the case list from the staging file.
 *
 * @param caseIDs CaseIDs filter/converter used to recognize tumor case IDs
 * @param portalMetadata PortalMetadata
 * @param cancerStudyMetadata CancerStudyMetadata
 * @param stagingFilename String
 * @return List<String> distinct converted case IDs (empty if the file is missing)
 * @throws Exception
 */
@Override
public List<String> getCaseListFromStagingFile(CaseIDs caseIDs, PortalMetadata portalMetadata,
        CancerStudyMetadata cancerStudyMetadata, String stagingFilename) throws Exception {

    if (LOG.isInfoEnabled()) {
        LOG.info("getCaseListFromStagingFile(): " + stagingFilename);
    }

    // we use set here to de-duplicate case IDs
    HashSet<String> caseSet = new HashSet<String>();

    // staging file
    File stagingFile = org.apache.commons.io.FileUtils.getFile(portalMetadata.getStagingDirectory(),
            cancerStudyMetadata.getStudyPath(), stagingFilename);
    // sanity check
    if (!stagingFile.exists()) {
        return new ArrayList<String>();
    }

    // iterate over all rows in file
    org.apache.commons.io.LineIterator it = org.apache.commons.io.FileUtils.lineIterator(stagingFile);
    try {
        int mafCaseIDColumnIndex = 0;
        boolean processHeader = true;
        while (it.hasNext()) {
            // create a string list from row in file
            List<String> thisRow = Arrays.asList(it.nextLine().split(Converter.VALUE_DELIMITER));
            // is this the header file?
            if (processHeader) {
                // look for MAF file case id column header
                mafCaseIDColumnIndex = thisRow.indexOf(Converter.MUTATION_CASE_ID_COLUMN_HEADER);
                // this is not a MAF file, header contains the case ids, return here
                // (non-MAF staging files carry case IDs directly in the header row)
                if (mafCaseIDColumnIndex == -1) {
                    for (String potentialCaseID : thisRow) {
                        if (caseIDs.isTumorCaseID(potentialCaseID)) {
                            caseSet.add(caseIDs.convertCaseID(potentialCaseID));
                        }
                    }
                    break;
                }
                processHeader = false;
                continue;
            }
            // we want to add the value at mafCaseIDColumnIndex into return set - this is a case ID
            String potentialCaseID = thisRow.get(mafCaseIDColumnIndex);
            if (caseIDs.isTumorCaseID(potentialCaseID)) {
                caseSet.add(caseIDs.convertCaseID(potentialCaseID));
            }
        }
    } finally {
        it.close();
    }

    // outta here
    return new ArrayList<String>(caseSet);
}

From source file:org.mskcc.cbio.importer.io.internal.FileUtilsImpl.java

/**
 * Runs a MAF file through the Oncotator and OMA tools.
 *
 * @param inputMAFURL String URL of the MAF file to process
 * @param outputMAFURL String URL where the annotated MAF is written
 * @throws Exception on invalid arguments or tool/IO failures
 */
@Override
public void oncotateMAF(String inputMAFURL, String outputMAFURL) throws Exception {

    // sanity check
    if (inputMAFURL == null || inputMAFURL.length() == 0 || outputMAFURL == null
            || outputMAFURL.length() == 0) {
        throw new IllegalArgumentException(
                "oncotateMAFdownloadFile(): url or urlDestination argument is null...");
    }

    URL inputMAF = new URL(inputMAFURL);
    URL outputMAF = new URL(outputMAFURL);

    // determine if we have to call liftover by peeking at the first data row
    boolean cleanOncotatorInputFile = false;
    File oncotatorInputFile = new File(inputMAF.getFile());
    org.apache.commons.io.LineIterator it = org.apache.commons.io.FileUtils.lineIterator(oncotatorInputFile);
    String[] parts;
    try {
        it.nextLine(); // skip header
        parts = it.nextLine().split("\t");
    } finally {
        // FIX: the original closed the iterator only on the hg18 branch,
        // leaking the file handle on the common (hg19) path.
        it.close();
    }
    // column 3 carries the genome build; "36"/"hg18" means we must lift over to hg19
    if (parts[3].contains("36") || parts[3].equals("hg18")) {
        File liftoverInputFile = org.apache.commons.io.FileUtils
                .getFile(org.apache.commons.io.FileUtils.getTempDirectory(), "liftoverInputFile");
        org.apache.commons.io.FileUtils.copyFile(oncotatorInputFile, liftoverInputFile);
        oncotatorInputFile = new File(inputMAF.getFile());
        // call lift over: reads the copied original, writes hg19 back over the input path
        if (LOG.isInfoEnabled()) {
            LOG.info("oncotateMAF(), calling Hg18ToHg19...");
        }
        Hg18ToHg19.driver(liftoverInputFile.getCanonicalPath(), oncotatorInputFile.getCanonicalPath(),
                getLiftOverBinary(), getLiftOverChain());
        org.apache.commons.io.FileUtils.forceDelete(liftoverInputFile);
        cleanOncotatorInputFile = true;
    }

    // create a temp output file from the oncotator
    File oncotatorOutputFile = org.apache.commons.io.FileUtils
            .getFile(org.apache.commons.io.FileUtils.getTempDirectory(), "oncotatorOutputFile");
    // call oncotator
    if (LOG.isInfoEnabled()) {
        LOG.info("oncotateMAF(), calling OncotateTool...");
    }
    OncotateTool.driver(oncotatorInputFile.getCanonicalPath(), oncotatorOutputFile.getCanonicalPath(), true,
            true, true);
    // we call OMA here -
    // we use output from oncotator as input file
    if (LOG.isInfoEnabled()) {
        LOG.info("oncotateMAF(), calling MutationAssessorTool...");
    }
    File outputMAFFile = new File(outputMAF.getFile());
    outputMAFFile.createNewFile();
    MutationAssessorTool.driver(oncotatorOutputFile.getCanonicalPath(), outputMAFFile.getCanonicalPath(), false,
            true, true);

    // clean up
    org.apache.commons.io.FileUtils.forceDelete(oncotatorOutputFile);
    if (cleanOncotatorInputFile)
        org.apache.commons.io.FileUtils.forceDelete(oncotatorInputFile);
}

From source file:org.mskcc.cbio.importer.io.internal.FileUtilsImpl.java

/**
 * Helper function to create DataMatrix.
 *
 * @param data InputStream delimited tabular data; first row is the header
 * @return DataMatrix built from the stream, or null if the data is incomplete
 * @throws Exception on read failure
 */
private DataMatrix getDataMatrix(InputStream data) throws Exception {

    // iterate over all lines in the stream
    List<String> columnNames = null;
    List<LinkedList<String>> rowData = null;
    LineIterator it = IOUtils.lineIterator(data, null);
    try {
        int count = -1;
        while (it.hasNext()) {
            // first row is our column heading, create column vector
            if (++count == 0) {
                // FIX: use parameterized LinkedList (original used raw types)
                columnNames = new LinkedList<String>(
                        Arrays.asList(it.nextLine().split(Converter.VALUE_DELIMITER, -1)));
            }
            // all other rows are rows in the table
            else {
                rowData = (rowData == null) ? new LinkedList<LinkedList<String>>() : rowData;
                rowData.add(new LinkedList<String>(
                        Arrays.asList(it.nextLine().split(Converter.VALUE_DELIMITER, -1))));
            }
        }
    } finally {
        LineIterator.closeQuietly(it);
    }

    // problem reading from data? need both a header and at least one data row
    if (columnNames == null || rowData == null) {
        if (LOG.isInfoEnabled()) {
            LOG.info(
                    "getDataMatrix(), problem creating DataMatrix from file, data file probably missing data, returning null");
        }
        return null;
    }

    // made it here, we can create DataMatrix
    if (LOG.isInfoEnabled()) {
        LOG.info("creating new DataMatrix(), from file data");
    }

    // outta here
    return new DataMatrix(rowData, columnNames);
}

From source file:org.mskcc.cbio.portal.util.IGVLinking.java

/**
 * Reads a file's entire contents into a single string, decoding as UTF-8.
 * Line separators are not preserved — lines are concatenated directly.
 *
 * @param file the file to read
 * @return the concatenated lines of the file
 * @throws Exception if the file cannot be read
 */
private static String getFileContents(File file) throws Exception {
    StringBuilder contents = new StringBuilder();
    LineIterator lines = null;

    try {
        lines = FileUtils.lineIterator(file, "UTF-8");
        while (lines.hasNext()) {
            contents.append(lines.nextLine());
        }
    } finally {
        if (lines != null) {
            lines.close();
        }
    }

    return contents.toString();
}

From source file:org.nd4j.linalg.factory.Nd4j.java

/**
 * Read line via input streams
 *
 * @param ndarray the input stream ndarray
 * @param sep separator character, defaults to ","
 * @return NDArray parsed from the stream, or null if the stream is not
 *         dl4j-formatted ndarray text
 */
public static INDArray readTxtString(InputStream ndarray, String sep) {
    /*
     We could dump an ndarray to a file with the tostring (since that is valid json) and use put/get to parse it as json
            
     But here we leverage our information of the tostring method to be more efficient
     With our current toString format we use tads along dimension (rank-1,rank-2) to write to the array in two dimensional chunks at a time.
     This is more efficient than setting each value at a time with putScalar.
     This also means we can read the file one line at a time instead of loading the whole thing into memory
            
     Future work involves enhancing the write json method to provide more features to make the load more efficient
    */
    // Parser state: the format is positional — specific line numbers carry
    // specific metadata (2: source tag, 3: ordering, 4: shape, >5: data rows).
    int lineNum = 0;
    int rowNum = 0;      // row index within the current 2-D tensor chunk
    int tensorNum = 0;   // index of the tensor-along-dimension being filled
    char theOrder = 'c';
    int[] theShape = { 1, 1 };
    int rank = 0;
    double[][] subsetArr = { { 0.0, 0.0 }, { 0.0, 0.0 } };
    INDArray newArr = Nd4j.zeros(2, 2);
    BufferedReader reader = new BufferedReader(new InputStreamReader(ndarray));
    LineIterator it = IOUtils.lineIterator(reader);
    // US-locale decimal parsing so "." is always the decimal point.
    DecimalFormat format = (DecimalFormat) NumberFormat.getInstance(Locale.US);
    format.setParseBigDecimal(true);
    try {
        while (it.hasNext()) {
            String line = it.nextLine();
            lineNum++;
            line = line.replaceAll("\\s", "");
            if (line.equals("") || line.equals("}"))
                continue;
            // is it from dl4j? line 2 must carry a "dl4j" source tag, else bail out
            if (lineNum == 2) {
                String[] lineArr = line.split(":");
                String fileSource = lineArr[1].replaceAll("\\W", "");
                if (!fileSource.equals("dl4j"))
                    return null;
            }
            // parse ordering ('c' or 'f') from line 3
            if (lineNum == 3) {
                String[] lineArr = line.split(":");
                theOrder = lineArr[1].replaceAll("\\W", "").charAt(0);
                continue;
            }
            // parse shape from line 4, e.g. "shape:[2,3]"
            if (lineNum == 4) {
                String[] lineArr = line.split(":");
                String dropJsonComma = lineArr[1].split("]")[0];
                String[] shapeString = dropJsonComma.replace("[", "").split(",");
                rank = shapeString.length;
                theShape = new int[rank];
                for (int i = 0; i < rank; i++) {
                    try {
                        theShape[i] = Integer.parseInt(shapeString[i]);
                    } catch (NumberFormatException nfe) {
                    }
                    ;
                }
                // buffer for one 2-D chunk: last two dimensions of the shape
                subsetArr = new double[theShape[rank - 2]][theShape[rank - 1]];
                newArr = Nd4j.zeros(theShape, theOrder);
                continue;
            }
            //parse data rows; each line is one row of the current 2-D chunk
            if (lineNum > 5) {
                String[] entries = line.replace("\\],", "").replaceAll("\\[", "").replaceAll("\\],", "")
                        .replaceAll("\\]", "").split(sep);
                for (int i = 0; i < theShape[rank - 1]; i++) {
                    try {
                        BigDecimal number = (BigDecimal) format.parse(entries[i]);
                        subsetArr[rowNum][i] = number.doubleValue();
                    } catch (ParseException e) {
                        e.printStackTrace();
                    }
                }
                rowNum++;
                // chunk complete: add it into the corresponding tensor-along-dimension
                // (newArr starts zeroed, so addi writes the values in place)
                if (rowNum == theShape[rank - 2]) {
                    INDArray subTensor = Nd4j.create(subsetArr);
                    newArr.tensorAlongDimension(tensorNum, rank - 1, rank - 2).addi(subTensor);
                    rowNum = 0;
                    tensorNum++;
                }
            }
        }
    } finally {
        LineIterator.closeQuietly(it);
    }
    return newArr;
}