Example usage for java.io StreamTokenizer TT_EOL

Introduction

On this page you can find usage examples for java.io StreamTokenizer TT_EOL.

Prototype

public static final int TT_EOL

To view the source code for java.io StreamTokenizer TT_EOL, click the Source Link.

Document

A constant indicating that the end of the line has been read.
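
TT_EOL is only ever returned by nextToken() after eolIsSignificant(true) has been called; by default line terminators are treated as ordinary whitespace. A minimal sketch (the class name and input string are illustrative):

import java.io.IOException;
import java.io.StreamTokenizer;
import java.io.StringReader;

public class TTEOLDemo {
    public static void main(String[] args) throws IOException {
        StreamTokenizer st = new StreamTokenizer(new StringReader("alpha beta\ngamma\n"));
        // Opt in to end-of-line tokens; otherwise '\n' is skipped as whitespace.
        st.eolIsSignificant(true);
        int ttype;
        while ((ttype = st.nextToken()) != StreamTokenizer.TT_EOF) {
            if (ttype == StreamTokenizer.TT_EOL) {
                System.out.println("<EOL>");
            } else if (ttype == StreamTokenizer.TT_WORD) {
                System.out.println("word: " + st.sval);
            }
        }
    }
}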

Usage

From source file:com.fluffypeople.managesieve.ManageSieveClient.java

private ManageSieveResponse parseCapabilities() throws IOException, ParseException {
    cap = new ServerCapabilities();

    while (true) {
        int token = in.nextToken();
        switch (token) {
        case StreamTokenizer.TT_WORD:
            // Unquoted word - end of capabilities
            in.pushBack();
            return parseResponse();
        case DQUOTE:
        case LEFT_CURRLY_BRACE:
            // Capabilities can be either literal or quoted
            in.pushBack();
            String word = parseString();
            if (word.equalsIgnoreCase("IMPLEMENTATION")) {
                cap.setImplementationName(parseString());
            } else if (word.equalsIgnoreCase("SASL")) {
                cap.setSASLMethods(parseString());
            } else if (word.equalsIgnoreCase("SIEVE")) {
                cap.setSieveExtensions(parseString());
            } else if (word.equalsIgnoreCase("MAXREDIRECTS")) {
                token = in.nextToken();
                if (token == StreamTokenizer.TT_NUMBER) {
                    cap.setMaxRedirects((int) in.nval);
                } else {
                    throw new ParseException(
                            "Expecting NUMBER got " + tokenToString(token) + " at " + in.lineno());
                }
            } else if (word.equalsIgnoreCase("NOTIFY")) {
                cap.setNotify(parseString());
            } else if (word.equalsIgnoreCase("STARTTLS")) {
                cap.setHasTLS(true);
            } else if (word.equalsIgnoreCase("LANGUAGE")) {
                cap.setLanguage(parseString());
            } else if (word.equalsIgnoreCase("VERSION")) {
                cap.setVersion(parseString());
            } else if (word.equalsIgnoreCase("OWNER")) {
                cap.setOwner(parseString());
            } else {
                // Unknown capability, read until EOL
                while (token != StreamTokenizer.TT_EOL) {
                    token = in.nextToken();
                }
                in.pushBack();
            }
            token = in.nextToken();
            if (token != StreamTokenizer.TT_EOL) {
                throw new ParseException("Expecing EOL got " + tokenToString(token) + " at " + in.lineno());
            }
            break;

        default:
            throw new ParseException("Unexpected token " + token + " at " + in.lineno());
        }
    }
}
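
Note the pattern for skipping unknown capabilities: the inner loop consumes tokens until it reaches TT_EOL, then pushes the EOL back so the shared end-of-line check after the switch arm still sees it.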

From source file:uk.ac.leeds.ccg.andyt.projects.moses.process.RegressionReport_UK1.java

public void writeAggregateStatisticsForOptimisationConstraints_ISARHP_ISARCEP(String a_OutputDir_String)
        throws Exception {
    HashMap a_ID_RecordID_HashMap = _ISARDataHandler.get_ID_RecordID_HashMap();
    File optimisationConstraints_SARs = new File(a_OutputDir_String, "OptimisationConstraints_SARs.csv");
    FileOutputStream a_FileOutputStream = new FileOutputStream(optimisationConstraints_SARs);
    OutputDataHandler_OptimisationConstraints.writeHSARHP_ISARCEPHeader(a_FileOutputStream);
    a_FileOutputStream.flush();
    Object[] fitnessCounts;
    HashMap<String, Integer> a_SARCounts = null;
    TreeSet<String> a_LADCodes_TreeSet = _CASDataHandler.getLADCodes_TreeSet();
    String s2;
    String s1;
    Iterator<String> a_Iterator_String = a_LADCodes_TreeSet.iterator();
    while (a_Iterator_String.hasNext()) {
        // Reorder the data for each LAD, as OAs are not necessarily returned
        // in any particular order and an ordered result is wanted
        TreeMap<String, HashMap<String, Integer>> resultsForLAD = new TreeMap<String, HashMap<String, Integer>>();
        boolean setPrevious_OA_String = true;
        s1 = a_Iterator_String.next();
        s2 = s1.substring(0, 3);
        File resultsFile = new File(a_OutputDir_String + s2 + "/" + s1 + "/population.csv");
        // A few results are missing
        if (resultsFile.exists()) {
            System.out.println(resultsFile.toString() + " exists");
            String previous_OA_String = "";
            BufferedReader aBufferedReader = new BufferedReader(
                    new InputStreamReader(new FileInputStream(resultsFile)));
            StreamTokenizer aStreamTokenizer = new StreamTokenizer(aBufferedReader);
            Generic_StaticIO.setStreamTokenizerSyntax1(aStreamTokenizer);
            String line = "";
            int tokenType = aStreamTokenizer.nextToken();
            while (tokenType != StreamTokenizer.TT_EOF) {
                switch (tokenType) {
                case StreamTokenizer.TT_EOL:
                    //System.out.println(line);
                    String[] lineFields = line.split(",");
                    String a_OA_String = lineFields[0];
                    if (previous_OA_String.equalsIgnoreCase(a_OA_String)) {
                        if (lineFields[1].equalsIgnoreCase("HP")) {
                            //System.out.println("HP");
                            long a_ISARRecordID = (Long) a_ID_RecordID_HashMap.get(new Long(lineFields[2]));
                            ISARDataRecord a_ISARDataRecord = _ISARDataHandler
                                    .getISARDataRecord(a_ISARRecordID);
                            GeneticAlgorithm_ISARHP_ISARCEP.addToCountsHP(a_ISARDataRecord, a_SARCounts,
                                    _Random);
                            //System.out.println(a_HSARDataRecord.toString());
                        } else {
                            //System.out.println("CEP");
                            // From the id of the ISARDataRecord get the
                            // ISARRecordID.
                            long a_ISARRecordID = (Long) a_ID_RecordID_HashMap.get(new Long(lineFields[2]));
                            ISARDataRecord a_ISARDataRecord = _ISARDataHandler
                                    .getISARDataRecord(a_ISARRecordID);
                            GeneticAlgorithm_ISARHP_ISARCEP.addToCountsCEP(a_ISARDataRecord, a_SARCounts,
                                    _Random);
                        }
                    } else {
                        // Store result
                        if (setPrevious_OA_String) {
                            previous_OA_String = a_OA_String;
                            setPrevious_OA_String = false;
                        } else {
                            // Store
                            resultsForLAD.put(previous_OA_String, a_SARCounts);
                        }
                        // Initialise/Re-initialise
                        CASDataRecord a_CASDataRecord = (CASDataRecord) _CASDataHandler
                                .getDataRecord(a_OA_String);
                        fitnessCounts = GeneticAlgorithm_ISARHP_ISARCEP.getFitnessCounts(a_CASDataRecord);
                        a_SARCounts = (HashMap<String, Integer>) fitnessCounts[1];
                        // Start a new aggregation
                        if (lineFields[1].equalsIgnoreCase("HP")) {
                            //System.out.println("HP");
                            long a_ISARRecordID = (Long) a_ID_RecordID_HashMap.get(new Long(lineFields[2]));
                            ISARDataRecord a_ISARDataRecord = _ISARDataHandler
                                    .getISARDataRecord(a_ISARRecordID);
                            GeneticAlgorithm_ISARHP_ISARCEP.addToCountsHP(a_ISARDataRecord, a_SARCounts,
                                    _Random);
                            //System.out.println(a_HSARDataRecord.toString());
                        } else {
                            //System.out.println("CEP");
                            // From the id of the ISARDataRecord get the
                            // ISARRecordID.
                            long a_ISARRecordID = (Long) a_ID_RecordID_HashMap.get(new Long(lineFields[2]));
                            ISARDataRecord a_ISARDataRecord = _ISARDataHandler
                                    .getISARDataRecord(a_ISARRecordID);
                            GeneticAlgorithm_ISARHP_ISARCEP.addToCountsCEP(a_ISARDataRecord, a_SARCounts,
                                    _Random);
                            //System.out.println(a_ISARDataRecord.toString());
                        }
                        //a_OA_String = lineFields[0];
                    }
                    previous_OA_String = a_OA_String;
                    break;
                case StreamTokenizer.TT_WORD:
                    line = aStreamTokenizer.sval;
                    break;
                }
                tokenType = aStreamTokenizer.nextToken();
            }
        } else {
            System.out.println(resultsFile.toString() + " !exists");
        }
        Iterator<String> string_Iterator = resultsForLAD.keySet().iterator();
        while (string_Iterator.hasNext()) {
            String oa_Code = string_Iterator.next();
            a_SARCounts = resultsForLAD.get(oa_Code);
            //GeneticAlgorithm_ISARHP_ISARCEP.addToCountsCEP(null, a_ID_RecordID_HashMap, _Random)
            OutputDataHandler_OptimisationConstraints.writeISARHP_ISARCEP(a_SARCounts, oa_Code,
                    a_FileOutputStream);
        }
    }
    a_FileOutputStream.close();
}
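
The tokenizer syntax used here evidently makes end-of-lines significant, so each TT_EOL marks a complete CSV record: the line text captured by the preceding TT_WORD case is split on commas and aggregated.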

From source file:com.fluffypeople.managesieve.ManageSieveClient.java

private ManageSieveResponse parseResponse() throws IOException, ParseException {
    ManageSieveResponse resp = new ManageSieveResponse();
    int token = in.nextToken();
    if (token == StreamTokenizer.TT_WORD) {
        // Get the type (OK, NO, or BYE)
        resp.setType(in.sval);
        token = in.nextToken();
        // Check for reason code
        if (token == LEFT_BRACKET) {
            token = in.nextToken();
            if (token == StreamTokenizer.TT_WORD) {
                resp.setCode(in.sval);
            } else {
                throw new ParseException(
                        "Expecting LEFT_BRACKET got " + tokenToString(token) + " at line " + in.lineno());
            }
            if (resp.getCode().hasParam()) {
                resp.setParam(parseString());
            }
            token = in.nextToken();
            if (token != RIGHT_BRACKET) {
                throw new ParseException(
                        "Expecting RIGHT_BRACKET got " + tokenToString(token) + " at line " + in.lineno());
            }
        } else {
            in.pushBack();
        }
        // Check for human readable message
        token = in.nextToken();
        if (token != StreamTokenizer.TT_EOL) {
            in.pushBack();
            resp.setMessage(parseString());
            token = in.nextToken();
        }

        // Done, end of line
        if (token != StreamTokenizer.TT_EOL) {
            throw new ParseException("Expecting EOL got " + tokenToString(token) + " at line " + in.lineno());
        }

    } else {
        throw new ParseException("Expecting WORD got " + tokenToString(token) + " at line " + in.lineno());
    }
    return resp;
}

From source file:com.fluffypeople.managesieve.ManageSieveClient.java

private String parseString() throws IOException, ParseException {
    int token = in.nextToken();
    if (token == DQUOTE) {
        return in.sval;
    } else if (token == '{') {
        // "Literal" String - {<length>}CRLF<length bytes of string>
        token = in.nextToken();
        if (token != StreamTokenizer.TT_NUMBER) {
            throw new ParseException(
                    "Expecting NUMBER got " + tokenToString(token) + " at line " + in.lineno());
        }
        // Irritatingly, the tokenizer will parse a double here, even
        // if we only want an int. Sigh.
        int length = (int) in.nval;
        token = in.nextToken();
        if (token != '}') {
            throw new ParseException("Expecing } got " + tokenToString(token) + " at line " + in.lineno());
        }
        token = in.nextToken();
        if (token != StreamTokenizer.TT_EOL) {
            throw new ParseException("Expecting EOL got " + tokenToString(token) + " at line " + in.lineno());
        }
        // Drop out of the tokenizer to read the raw bytes...

        StringBuilder rawString = new StringBuilder();
        log.debug("Raw string: reading {} bytes", length);

        in.resetSyntax();
        int count = 0;
        while (count < length) {
            token = in.nextToken();
            if (token == StreamTokenizer.TT_WORD) {
                // Tokenizer calls unicode "WORD" even in raw(ish) mode
                rawString.append(in.sval);
                count += in.sval.getBytes(UTF8).length;
            } else {
                // Probably only ever single-byte chars; however, let's be
                // careful out there.
                char[] chars = Character.toChars(token);
                rawString.append(chars);
                count += chars.length;
            }
        }

        // Remember to reset the tokenizer now we're done
        setupTokenizer();

        return rawString.toString();
    } else {
        throw new ParseException(
                "Expecing DQUOTE or {, got " + tokenToString(token) + " at line " + in.lineno());
    }
}

From source file:com.fluffypeople.managesieve.ManageSieveClient.java

private String tokenToString(final int c) {
    if (c > 0) {

        return new String(Character.toChars(c));
    } else {
        switch (c) {
        case StreamTokenizer.TT_EOF:
            return "EOF";
        case StreamTokenizer.TT_NUMBER:
            return "NUMBER";
        case StreamTokenizer.TT_EOL:
            return "EOL";
        case StreamTokenizer.TT_WORD:
            return ("WORD [" + in.sval + "]");
        default:
            return "UNKNOWN";
        }
    }
}

From source file:uk.ac.leeds.ccg.andyt.projects.moses.process.RegressionReport_UK1.java

public void writeAggregateStatisticsForOptimisationConstraints_HSARHP_ISARCEP(String a_OutputDir_String)
        throws Exception {
    HashMap a_HID_HSARDataRecordVector_HashMap = _HSARDataHandler.get_HID_HSARDataRecordVector_HashMap();
    HashMap a_ID_RecordID_HashMap = _ISARDataHandler.get_ID_RecordID_HashMap();
    File optimisationConstraints_SARs = new File(a_OutputDir_String, "OptimisationConstraints_SARs.csv");
    FileOutputStream a_FileOutputStream = new FileOutputStream(optimisationConstraints_SARs);
    OutputDataHandler_OptimisationConstraints.writeHSARHP_ISARCEPHeader(a_FileOutputStream);
    a_FileOutputStream.flush();
    HashMap<String, Integer> a_SARCounts = null;
    CASDataRecord a_CASDataRecord;
    TreeSet<String> a_LADCodes_TreeSet = _CASDataHandler.getLADCodes_TreeSet();
    String s2;
    String s1;
    Iterator<String> a_Iterator_String = a_LADCodes_TreeSet.iterator();
    while (a_Iterator_String.hasNext()) {
        // Reorder the data for each LAD, as OAs are not necessarily returned
        // in any particular order and an ordered result is wanted
        TreeMap<String, HashMap<String, Integer>> resultsForLAD = new TreeMap<String, HashMap<String, Integer>>();
        boolean setPrevious_OA_String = true;
        s1 = a_Iterator_String.next();
        s2 = s1.substring(0, 3);
        File resultsFile = new File(a_OutputDir_String + s2 + "/" + s1 + "/population.csv");
        // A few results are missing
        if (resultsFile.exists()) {
            System.out.println(resultsFile.toString() + " exists");
            String previous_OA_String = "";
            BufferedReader aBufferedReader = new BufferedReader(
                    new InputStreamReader(new FileInputStream(resultsFile)));
            StreamTokenizer aStreamTokenizer = new StreamTokenizer(aBufferedReader);
            Generic_StaticIO.setStreamTokenizerSyntax1(aStreamTokenizer);
            String line = "";
            int tokenType = aStreamTokenizer.nextToken();
            while (tokenType != StreamTokenizer.TT_EOF) {
                switch (tokenType) {
                case StreamTokenizer.TT_EOL:
                    //System.out.println(line);
                    String[] lineFields = line.split(",");
                    String a_OA_String = lineFields[0];
                    if (previous_OA_String.equalsIgnoreCase(a_OA_String)) {
                        if (lineFields[1].equalsIgnoreCase("HP")) {
                            //System.out.println("HP");
                            // From the id of a household get a Vector 
                            // of HSARDataRecords
                            Vector household = (Vector) a_HID_HSARDataRecordVector_HashMap
                                    .get(new Integer(lineFields[2]));
                            HSARDataRecord a_HSARDataRecord;
                            for (int i = 0; i < household.size(); i++) {
                                a_HSARDataRecord = (HSARDataRecord) household.elementAt(i);
                                GeneticAlgorithm_HSARHP_ISARCEP.addToCounts(a_HSARDataRecord, a_SARCounts,
                                        _Random);
                            }
                            //System.out.println(a_HSARDataRecord.toString());
                        } else {
                            //System.out.println("CEP");
                            // From the id of the ISARDataRecord get the
                            // ISARRecordID.
                            long a_ISARRecordID = (Long) a_ID_RecordID_HashMap.get(new Long(lineFields[2]));
                            ISARDataRecord a_ISARDataRecord = _ISARDataHandler
                                    .getISARDataRecord(a_ISARRecordID);
                            GeneticAlgorithm_HSARHP_ISARCEP.addToCountsCEP(a_ISARDataRecord, a_SARCounts,
                                    _Random);
                        }
                    } else {
                        // Store result
                        if (setPrevious_OA_String) {
                            previous_OA_String = a_OA_String;
                            setPrevious_OA_String = false;
                        } else {
                            // Store
                            resultsForLAD.put(previous_OA_String, a_SARCounts);
                        }
                        // Initialise/Re-initialise
                        a_CASDataRecord = (CASDataRecord) _CASDataHandler.getDataRecord(a_OA_String);
                        Object[] fitnessCounts = GeneticAlgorithm_HSARHP_ISARCEP
                                .getFitnessCounts(a_CASDataRecord);
                        a_SARCounts = (HashMap<String, Integer>) fitnessCounts[1];
                        // Start a new aggregation
                        if (lineFields[1].equalsIgnoreCase("HP")) {
                            //System.out.println("HP");
                            // From the id of a household get a Vector
                            // of HSARDataRecords
                            Vector household = (Vector) a_HID_HSARDataRecordVector_HashMap
                                    .get(new Integer(lineFields[2]));
                            HSARDataRecord a_HSARDataRecord;
                            for (int i = 0; i < household.size(); i++) {
                                a_HSARDataRecord = (HSARDataRecord) household.elementAt(i);
                                GeneticAlgorithm_HSARHP_ISARCEP.addToCounts(a_HSARDataRecord, a_SARCounts,
                                        _Random);
                            }
                            //System.out.println(a_HSARDataRecord.toString());
                        } else {
                            //System.out.println("CEP");
                            // From the id of the ISARDataRecord get the
                            // ISARRecordID.
                            long a_ISARRecordID = (Long) a_ID_RecordID_HashMap.get(new Long(lineFields[2]));
                            ISARDataRecord a_ISARDataRecord = _ISARDataHandler
                                    .getISARDataRecord(a_ISARRecordID);
                            GeneticAlgorithm_HSARHP_ISARCEP.addToCountsCEP(a_ISARDataRecord, a_SARCounts,
                                    _Random);
                            //System.out.println(a_ISARDataRecord.toString());
                        }
                        //a_OA_String = lineFields[0];
                    }
                    previous_OA_String = a_OA_String;
                    break;
                case StreamTokenizer.TT_WORD:
                    line = aStreamTokenizer.sval;
                    break;
                }
                tokenType = aStreamTokenizer.nextToken();
            }
        } else {
            System.out.println(resultsFile.toString() + " !exists");
        }
        Iterator<String> string_Iterator = resultsForLAD.keySet().iterator();
        while (string_Iterator.hasNext()) {
            String oa_Code = string_Iterator.next();
            OutputDataHandler_OptimisationConstraints.writeHSARHP_ISARCEP(resultsForLAD.get(oa_Code), oa_Code,
                    a_FileOutputStream);
        }
    }
    a_FileOutputStream.close();
}

From source file:Matrix.java

/**
 * Read a matrix from a stream. The format is the same as that produced by
 * the print method, so printed matrices can be read back in (provided they
 * were printed using US Locale). Elements are separated by whitespace, all
 * the elements for each row appear on a single line, and the last row is
 * followed by a blank line.
 *
 * @param input
 *            the input stream.
 */

public static Matrix read(BufferedReader input) throws java.io.IOException {
    StreamTokenizer tokenizer = new StreamTokenizer(input);

    // Although StreamTokenizer will parse numbers, it doesn't recognize
    // scientific notation (E or D); however, Double.valueOf does.
    // The strategy here is to disable StreamTokenizer's number parsing.
    // We'll only get whitespace delimited words, EOL's and EOF's.
    // These words should all be numbers, for Double.valueOf to parse.

    tokenizer.resetSyntax();
    tokenizer.wordChars(0, 255);
    tokenizer.whitespaceChars(0, ' ');
    tokenizer.eolIsSignificant(true);
    java.util.Vector v = new java.util.Vector();

    // Ignore initial empty lines
    while (tokenizer.nextToken() == StreamTokenizer.TT_EOL)
        ;
    if (tokenizer.ttype == StreamTokenizer.TT_EOF)
        throw new java.io.IOException("Unexpected EOF on matrix read.");
    do {
        v.addElement(Double.valueOf(tokenizer.sval)); // Read & store 1st row.
    } while (tokenizer.nextToken() == StreamTokenizer.TT_WORD);

    int n = v.size(); // Now we've got the number of columns!
    double row[] = new double[n];
    for (int j = 0; j < n; j++)
        // extract the elements of the 1st row.
        row[j] = ((Double) v.elementAt(j)).doubleValue();
    v.removeAllElements();
    v.addElement(row); // Start storing rows instead of columns.
    while (tokenizer.nextToken() == StreamTokenizer.TT_WORD) {
        // While non-empty lines
        v.addElement(row = new double[n]);
        int j = 0;
        do {
            if (j >= n)
                throw new java.io.IOException("Row " + v.size() + " is too long.");
            row[j++] = Double.valueOf(tokenizer.sval).doubleValue();
        } while (tokenizer.nextToken() == StreamTokenizer.TT_WORD);
        if (j < n)
            throw new java.io.IOException("Row " + v.size() + " is too short.");
    }
    int m = v.size(); // Now we've got the number of rows.
    double[][] A = new double[m][];
    v.copyInto(A); // copy the rows out of the vector
    return new Matrix(A);
}
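
Because eolIsSignificant(true) is set, TT_EOL terminates each row's inner read loop, and the matrix ends when the token following a row is not a word: a second consecutive TT_EOL (a blank line) or TT_EOF.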

From source file:uk.ac.leeds.ccg.andyt.projects.moses.process.RegressionReport_UK1.java

public void writeOutResidualsISARHP_ISARCEP(File observed_File, File expected_File) throws Exception {
    File outputFile = new File(observed_File.getParentFile(), "residuals.csv");
    FileOutputStream a_FileOutputStream = new FileOutputStream(outputFile);
    TreeMap<String, double[]> a_SAROptimistaionConstraints = loadCASOptimistaionConstraints(observed_File);
    TreeMap<String, double[]> a_CASOptimistaionConstraints = loadCASOptimistaionConstraints(expected_File);
    String line = OutputDataHandler_OptimisationConstraints.getISARHP_ISARCEPHeader();
    String[] variableNames = line.split(",");
    a_FileOutputStream.write(line.getBytes());
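    // StreamTokenizer.TT_EOL is '\n' (decimal 10), so this write emits a single newline byte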
    a_FileOutputStream.write(StreamTokenizer.TT_EOL);
    a_FileOutputStream.flush();
    String oa;
    double[] a_SARExpectedRow;
    double[] a_CASObservedRow;
    double[] a_Residual;
    Iterator<String> iterator_String = a_SAROptimistaionConstraints.keySet().iterator();
    while (iterator_String.hasNext()) {
        oa = iterator_String.next();
        line = oa + ",";
        a_SARExpectedRow = a_SAROptimistaionConstraints.get(oa);
        a_CASObservedRow = a_CASOptimistaionConstraints.get(oa);
        a_Residual = new double[a_SARExpectedRow.length];
        for (int i = 0; i < a_SARExpectedRow.length; i++) {
            a_Residual[i] = a_SARExpectedRow[i] - a_CASObservedRow[i];
            if (i == a_SARExpectedRow.length - 1) {
                line += a_Residual[i];
            } else {
                line += a_Residual[i] + ",";
            }
        }
        a_FileOutputStream.write(line.getBytes());
        a_FileOutputStream.write(StreamTokenizer.TT_EOL);
        a_FileOutputStream.flush();
    }
}

From source file:com.zimbra.common.calendar.ZoneInfo2iCalendar.java

private static void readExtraData(Reader reader) throws IOException, ParseException {
    char dquote = '"';
    StreamTokenizer tokenizer = new StreamTokenizer(reader);
    tokenizer.resetSyntax();
    tokenizer.wordChars(32, 126);
    tokenizer.whitespaceChars(' ', ' ');
    tokenizer.whitespaceChars('\t', '\t');
    tokenizer.whitespaceChars(0, 20);
    tokenizer.commentChar('#');
    tokenizer.quoteChar(dquote);
    tokenizer.eolIsSignificant(true);

    List<String> tokenList = new ArrayList<String>();
    LineType lineType = LineType.UNKNOWN;
    boolean atLineStart = true;

    int ttype;
    int prevTtype = StreamTokenizer.TT_EOL; // used for empty line detection
    while ((ttype = tokenizer.nextToken()) != StreamTokenizer.TT_EOF) {
        int lineNum = tokenizer.lineno();
        if (ttype == StreamTokenizer.TT_WORD || ttype == dquote) {
            String token = tokenizer.sval;
            if (atLineStart) {
                lineType = LineType.lookUp(token);
                if (LineType.UNKNOWN.equals(lineType))
                    throw new ParseException("Invalid line type", lineNum);
            } else {
                tokenList.add(token);
            }
            atLineStart = false;
        } else if (ttype == StreamTokenizer.TT_EOL) {
            if (prevTtype == StreamTokenizer.TT_EOL) {
                prevTtype = ttype;
                continue;
            }
            atLineStart = true;
            switch (lineType) {
            case PRIMARYZONE:
                if (tokenList.size() < 1)
                    throw new ParseException("Not enough fields in a PrimaryZone line", lineNum);
                String primaryTZID = tokenList.get(0);
                sPrimaryTZIDs.add(primaryTZID);
                break;
            case ZONEMATCHSCORE:
                if (tokenList.size() < 2)
                    throw new ParseException("Not enough fields in a ZoneMatchScore line", lineNum);
                String zoneName = tokenList.get(0);
                String zoneMatchScoreStr = tokenList.get(1);
                int zoneMatchScore = 0;
                try {
                    zoneMatchScore = Integer.parseInt(zoneMatchScoreStr);
                } catch (NumberFormatException e) {
                    throw new ParseException("Zone match score must be an integer: " + zoneMatchScoreStr,
                            lineNum);
                }
                sMatchScores.put(zoneName, zoneMatchScore);
                break;
            }
            if (atLineStart) {
                tokenList.clear();
                lineType = LineType.UNKNOWN;
            }
        } else if (ttype == StreamTokenizer.TT_NUMBER) {
            // shouldn't happen
            throw new ParseException("Invalid parser state: TT_NUMBER found", lineNum);
        }
        prevTtype = ttype;
    }
}
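
Tracking prevTtype lets the parser skip empty lines (two consecutive TT_EOL tokens); every other TT_EOL marks the end of a logical record, which is then dispatched on its line type.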

From source file:uk.ac.leeds.ccg.andyt.projects.moses.process.RegressionReport_UK1.java

public void writeOutResidualsHSARHP_ISARCEP(File observed_File, File expected_File) throws Exception {
    File outputFile = new File(observed_File.getParentFile(), "residuals.csv");
    FileOutputStream a_FileOutputStream = new FileOutputStream(outputFile);
    TreeMap<String, double[]> a_SAROptimistaionConstraints = loadCASOptimistaionConstraints(observed_File);
    TreeMap<String, double[]> a_CASOptimistaionConstraints = loadCASOptimistaionConstraints(expected_File);
    String line = OutputDataHandler_OptimisationConstraints.getHSARHP_ISARCEPHeader();
    String[] variableNames = line.split(",");
    a_FileOutputStream.write(line.getBytes());
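    // As above, TT_EOL ('\n') doubles as the newline byte written between records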
    a_FileOutputStream.write(StreamTokenizer.TT_EOL);
    a_FileOutputStream.flush();
    String oa;
    double[] a_SARExpectedRow;
    double[] a_CASObservedRow;
    double[] a_Residual;
    Iterator<String> iterator_String = a_SAROptimistaionConstraints.keySet().iterator();
    while (iterator_String.hasNext()) {
        oa = iterator_String.next();
        line = oa + ",";
        a_SARExpectedRow = a_SAROptimistaionConstraints.get(oa);
        a_CASObservedRow = a_CASOptimistaionConstraints.get(oa);
        a_Residual = new double[a_SARExpectedRow.length];
        for (int i = 0; i < a_SARExpectedRow.length; i++) {
            a_Residual[i] = a_SARExpectedRow[i] - a_CASObservedRow[i];
            if (i == a_SARExpectedRow.length - 1) {
                line += a_Residual[i];
            } else {
                line += a_Residual[i] + ",";
            }
        }
        a_FileOutputStream.write(line.getBytes());
        a_FileOutputStream.write(StreamTokenizer.TT_EOL);
        a_FileOutputStream.flush();
    }
}