Example usage for java.io StreamTokenizer nextToken

List of usage examples for java.io StreamTokenizer nextToken

Introduction

On this page you can find example usage for java.io.StreamTokenizer.nextToken.

Prototype

public int nextToken() throws IOException 

Document

Parses the next token from the input stream of this tokenizer.
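
For orientation, here is a minimal, self-contained sketch of the typical nextToken() loop (the input string and class name are illustrative, not taken from any of the sources below): call nextToken() until it returns StreamTokenizer.TT_EOF and branch on the returned token type.

import java.io.IOException;
import java.io.StreamTokenizer;
import java.io.StringReader;

public class NextTokenDemo {
    public static void main(String[] args) throws IOException {
        StreamTokenizer st = new StreamTokenizer(new StringReader("width 42 height 7"));
        int ttype;
        while ((ttype = st.nextToken()) != StreamTokenizer.TT_EOF) {
            if (ttype == StreamTokenizer.TT_WORD) {
                System.out.println("word:   " + st.sval); // word text is in sval
            } else if (ttype == StreamTokenizer.TT_NUMBER) {
                System.out.println("number: " + st.nval); // numeric value is in nval
            }
        }
    }
}

With the default syntax table this prints the two words and two numbers in order; the examples below show how real projects customize the syntax table before entering this loop.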

Usage

From source file:com.enonic.cms.core.portal.datasource.el.ExpressionFunctions.java

/**
 * This method will take a freetext search string and create a valid query that can be used in the getContent* methods.  The search
 * string is split into tokens.  Using the operator, it may be specified whether the field must contain all or any of the words in the
 * search string.
 *
 * @param fieldName    The name of the field to search for the words in the search string.
 * @param searchString The words to search for.
 * @param operator     Must be either AND or OR.  Case doesn't matter.
 * @return A syntactically correct search that may be used as the query parameter in getContent* methods on the data source. With care,
 *         it may also be merged with other queries using AND or OR.
 * @throws IllegalArgumentException If any of the parameters are empty or the operator is not AND or OR.
 */
public String buildFreetextQuery(String fieldName, String searchString, String operator) {
    if (searchString == null || searchString.trim().equals("")) {
        return "";
    }
    if (fieldName == null || fieldName.trim().equals("")) {
        throw new IllegalArgumentException("fieldName can not be empty.");
    }

    String op = "";
    if (operator != null) {
        op = operator.trim().toUpperCase();
    }
    if (!(op.equals("AND") || op.equals("OR"))) {
        throw new IllegalArgumentException("Illegal operator: " + operator);
    }

    boolean first = true;
    StringBuilder queryTokens = new StringBuilder();
    Reader searchStringReader = new StringReader(searchString);
    StreamTokenizer searchStringTokens = new StreamTokenizer(searchStringReader);
    searchStringTokens.slashSlashComments(false);
    searchStringTokens.slashStarComments(false);
    searchStringTokens.eolIsSignificant(false);
    searchStringTokens.ordinaryChar('!');
    searchStringTokens.ordinaryChars('#', '}');
    searchStringTokens.wordChars('!', '!');
    searchStringTokens.wordChars('#', '}');

    try {
        while (searchStringTokens.nextToken() != StreamTokenizer.TT_EOF) {
            String token = searchStringTokens.sval;
            addQueryPart(queryTokens, first, fieldName, token, op);
            if (first) {
                first = false;
            }
        }
    } catch (IOException e) {
        throw new IllegalStateException("This should never happen, since the IO class is wrapping a string!");
    }

    return queryTokens.toString();
}
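
A hypothetical call, assuming an ExpressionFunctions instance ef (the addQueryPart helper that assembles each clause is not shown in this excerpt):

    String query = ef.buildFreetextQuery("title", "quick brown fox", "and");
    // each word becomes a condition on the "title" field; AND requires all three words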

From source file:edu.buffalo.cse.pigout.parser.PigOutMacro.java

void validate() throws IOException {
    if (rets.isEmpty()) {
        return;
    }

    HashSet<String> testSet = new HashSet<String>();

    StreamTokenizer st = new StreamTokenizer(new StringReader(body));

    st.wordChars('.', '.');
    st.wordChars('0', '9');
    st.wordChars('_', '_');
    st.wordChars('$', '$');
    st.lowerCaseMode(false);
    st.ordinaryChar('/');
    st.slashStarComments(true);

    while (st.nextToken() != StreamTokenizer.TT_EOF) {
        if (matchWord(st, "define", false) && matchDollarAlias(st, true)) {
            testSet.add(st.sval.substring(1));
        } else if (matchDollarAlias(st, false)) {
            String prevWord = st.sval;
            if (matchWord(st, "if", true) || matchWord(st, "otherwise", true)) {
                testSet.add(prevWord.substring(1));
            } else if (matchChar(st, '=', true) && !matchChar(st, '=', true)) {
                testSet.add(prevWord.substring(1));
            } else if (matchChar(st, ',', true)) {
                // possible multi-alias inlining of a macro
                ArrayList<String> mlist = new ArrayList<String>();
                mlist.add(prevWord);
                if (isMultiValueReturn(st, mlist, true)) {
                    for (String s : mlist) {
                        testSet.add(s.substring(1));
                    }
                }
            }
        } else if (matchChar(st, '-', false) && matchChar(st, '-', true)) {
            skipSingleLineComment(st);
        }
    }

    for (String s : rets) {
        if (!testSet.contains(s)) {
            throw new IOException("Macro '" + name + "' missing return alias: " + s);
        }
    }
}

From source file:com.zimbra.common.calendar.ZoneInfo2iCalendar.java

private static void readExtraData(Reader reader) throws IOException, ParseException {
    char dquote = '"';
    StreamTokenizer tokenizer = new StreamTokenizer(reader);
    tokenizer.resetSyntax();
    tokenizer.wordChars(32, 126);
    tokenizer.whitespaceChars(' ', ' ');
    tokenizer.whitespaceChars('\t', '\t');
    tokenizer.whitespaceChars(0, 20);
    tokenizer.commentChar('#');
    tokenizer.quoteChar(dquote);
    tokenizer.eolIsSignificant(true);

    List<String> tokenList = new ArrayList<String>();
    LineType lineType = LineType.UNKNOWN;
    boolean atLineStart = true;

    int ttype;
    int prevTtype = StreamTokenizer.TT_EOL; // used for empty line detection
    while ((ttype = tokenizer.nextToken()) != StreamTokenizer.TT_EOF) {
        int lineNum = tokenizer.lineno();
        if (ttype == StreamTokenizer.TT_WORD || ttype == dquote) {
            String token = tokenizer.sval;
            if (atLineStart) {
                lineType = LineType.lookUp(token);
                if (LineType.UNKNOWN.equals(lineType))
                    throw new ParseException("Invalid line type", lineNum);
            } else {
                tokenList.add(token);
            }
            atLineStart = false;
        } else if (ttype == StreamTokenizer.TT_EOL) {
            if (prevTtype == StreamTokenizer.TT_EOL) {
                prevTtype = ttype;
                continue;
            }
            atLineStart = true;
            switch (lineType) {
            case PRIMARYZONE:
                if (tokenList.size() < 1)
                    throw new ParseException("Not enough fields in a PrimaryZone line", lineNum);
                String primaryTZID = tokenList.get(0);
                sPrimaryTZIDs.add(primaryTZID);
                break;
            case ZONEMATCHSCORE:
                if (tokenList.size() < 2)
                    throw new ParseException("Not enough fields in a ZoneMatchScore line", lineNum);
                String zoneName = tokenList.get(0);
                String zoneMatchScoreStr = tokenList.get(1);
                int zoneMatchScore = 0;
                try {
                    zoneMatchScore = Integer.parseInt(zoneMatchScoreStr);
                } catch (NumberFormatException e) {
                    throw new ParseException("Zone match score must be an integer: " + zoneMatchScoreStr,
                            lineNum);
                }
                sMatchScores.put(zoneName, zoneMatchScore);
                break;
            }
            if (atLineStart) {
                tokenList.clear();
                lineType = LineType.UNKNOWN;
            }
        } else if (ttype == StreamTokenizer.TT_NUMBER) {
            // shouldn't happen
            throw new ParseException("Invalid parser state: TT_NUMBER found", lineNum);
        }
        prevTtype = ttype;
    }
}

From source file:uk.ac.leeds.ccg.andyt.projects.moses.process.Comparison.java

/**
 * Aim is to produce an aggregated data set for comparison totalling males
 * and females by MSOA to compare with CASUV003DataRecord
 */
private void run3() throws IOException {
    boolean aggregateToMSOA = true;
    // boolean aggregateToMSOA = false;
    ToyModelDataHandler tToyModelDataHandler = new ToyModelDataHandler();
    String startOfFilename = "C:/Work/Projects/MoSeS/Workspace/Leeds/ToyModel_SWR_OA_HSARHP_ISARCEP_0_5_5000_3_30_12_20";
    // String startOfFilename = new String(
    // "C:/Work/Projects/MoSeS/Workspace/Leeds/ToyModel_SWR_OA_HSARHP_ISARCEP_0_5_1000_3_30_12_20"
    // );
    // String startOfFilename = new String(
    // "C:/Work/Projects/MoSeS/Workspace/Leeds/ToyModel_SWR_OA_ISARHP_ISARCEP_0_5_200_3_30_12_20"
    // );
    File tToyModelDataRecord2CSVFile = new File(startOfFilename + ".csv");
    File tToyModelDataRecordMaleFemaleComparisonFile;
    if (aggregateToMSOA) {
        tToyModelDataRecordMaleFemaleComparisonFile = new File(
                startOfFilename + "_MSOAMaleFemaleComparison.csv");
    } else {
        tToyModelDataRecordMaleFemaleComparisonFile = new File(startOfFilename + "_OAMaleFemaleComparison.csv");
    }
    if (!tToyModelDataRecordMaleFemaleComparisonFile.exists()) {
        tToyModelDataRecordMaleFemaleComparisonFile.createNewFile();
    }
    PrintWriter tToyModelDataRecordMaleFemaleComparisonFilePrintWriter = new PrintWriter(
            tToyModelDataRecordMaleFemaleComparisonFile);
    // CASUV003DataHandler tCASUV003DataHandler = new CASUV003DataHandler(
    // new File(
    // "C:/Work/Projects/MoSeS/Workspace/Leeds/CASUV003DataRecordsMSOA.dat"
    // ) );
    CASUV003DataHandler tCASUV003DataHandler;
    CAS001DataHandler tCAS001DataHandler;
    if (aggregateToMSOA) {
        tCASUV003DataHandler = new CASUV003DataHandler(
                new File("C:/Work/Projects/MoSeS/Workspace/Leeds/CASUV003DataRecordsMSOA.dat"));
        tCAS001DataHandler = new CAS001DataHandler(
                new File("C:/Work/Projects/MoSeS/Workspace/Leeds/CAS001DataRecordsMSOA.dat"));
    } else {
        tCASUV003DataHandler = new CASUV003DataHandler(
                new File("C:/Work/Projects/MoSeS/Workspace/CASUV003DataRecords.dat"));
        tCAS001DataHandler = new CAS001DataHandler(
                new File("C:/Work/Projects/MoSeS/Workspace/CAS001DataRecords.dat"));
    }
    CASUV003DataRecord aCASUV003DataRecord;
    CAS001DataRecord aCAS001DataRecord;
    BufferedReader tBufferedReader = new BufferedReader(
            new InputStreamReader(new FileInputStream(tToyModelDataRecord2CSVFile)));
    StreamTokenizer tStreamTokenizer = new StreamTokenizer(tBufferedReader);
    Generic_StaticIO.setStreamTokenizerSyntax1(tStreamTokenizer);
    // Initialise
    int tMaleCount;
    int tFemaleCount;
    int tMaleCEPCount;
    int tMaleHPCount;
    int tFemaleCEPCount;
    int tFemaleHPCount;
    int tokenType = tStreamTokenizer.nextToken();
    ToyModelDataRecord_2 aToyModelDataRecord2;
    String aZoneCode;
    HashMap tLookUpMSOAfromOAHashMap = null;
    CASDataHandler tCASDataHandler = new CASDataHandler();
    if (aggregateToMSOA) {
        tLookUpMSOAfromOAHashMap = tCASDataHandler.get_LookUpMSOAfromOAHashMap();
    }
    Counts aCounts;
    tToyModelDataRecordMaleFemaleComparisonFilePrintWriter.println(
            "ZoneCode,CAS001HPFemales,CAS001CEPFemales,CAS001Females,CASUV003Females,ToyModelFemales,ToyModelHPFemales,ToyModelCEPFemales,CAS001HPMales,CAS001CEPMales,CAS001Males,CASUV003Males,ToyModelMales,ToyModelHPMales,ToyModelCEPMales");
    TreeMap result = new TreeMap();
    while (tokenType != StreamTokenizer.TT_EOF) {
        switch (tokenType) {
        case StreamTokenizer.TT_WORD:
            aToyModelDataRecord2 = new ToyModelDataRecord_2(tToyModelDataHandler, tStreamTokenizer.sval);
            if (aggregateToMSOA) {
                aZoneCode = (String) tLookUpMSOAfromOAHashMap
                        .get(new String(aToyModelDataRecord2.getZone_Code()));
            } else {
                aZoneCode = String.valueOf(aToyModelDataRecord2.getZone_Code());
            }
            if (aToyModelDataRecord2.SEX == 0) {
                tFemaleCount = 1;
                if (aToyModelDataRecord2.tHouseholdID != -9) {
                    tFemaleHPCount = 1;
                    tFemaleCEPCount = 0;
                } else {
                    tFemaleHPCount = 0;
                    tFemaleCEPCount = 1;
                }
                tMaleCount = 0;
                tMaleHPCount = 0;
                tMaleCEPCount = 0;
            } else {
                tMaleCount = 1;
                if (aToyModelDataRecord2.tHouseholdID != -9) {
                    tMaleHPCount = 1;
                    tMaleCEPCount = 0;
                } else {
                    tMaleHPCount = 0;
                    tMaleCEPCount = 1;
                }
                tFemaleCount = 0;
                tFemaleHPCount = 0;
                tFemaleCEPCount = 0;
            }
            if (result.containsKey(aZoneCode)) {
                aCounts = (Counts) result.get(aZoneCode);
                result.remove(aZoneCode);
                aCounts.addToCounts(tMaleCount, tMaleCEPCount, tMaleHPCount, tFemaleCount, tFemaleCEPCount,
                        tFemaleHPCount);
                result.put(aZoneCode, aCounts);
            } else {
                aCounts = new Counts();
                aCounts.addToCounts(tMaleCount, tMaleCEPCount, tMaleHPCount, tFemaleCount, tFemaleCEPCount,
                        tFemaleHPCount);
                result.put(aZoneCode, aCounts);
            }
        }
        tokenType = tStreamTokenizer.nextToken();
    }
    Iterator aIterator = result.keySet().iterator();
    Object key;
    while (aIterator.hasNext()) {
        key = aIterator.next();
        aCounts = (Counts) result.get(key);
        aZoneCode = (String) key;
        aCASUV003DataRecord = (CASUV003DataRecord) tCASUV003DataHandler.getDataRecord(aZoneCode);
        aCAS001DataRecord = (CAS001DataRecord) tCAS001DataHandler.getDataRecord(aZoneCode);
        tToyModelDataRecordMaleFemaleComparisonFilePrintWriter.println("" + aZoneCode + ", "
                + aCAS001DataRecord.getHouseholdResidentsFemales() + ", "
                + aCAS001DataRecord.getCommunalEstablishmentResidentsFemales() + ", "
                + (aCAS001DataRecord.getHouseholdResidentsFemales()
                        + aCAS001DataRecord.getCommunalEstablishmentResidentsFemales())
                + ", " + aCASUV003DataRecord.getFemales() + ", " + aCounts.tFemaleCount + ", "
                + aCounts.tFemaleHPCount + ", " + aCounts.tFemaleCEPCount + ", "
                + aCAS001DataRecord.getHouseholdResidentsMales() + ", "
                + aCAS001DataRecord.getCommunalEstablishmentResidentsMales() + ", "
                + (aCAS001DataRecord.getHouseholdResidentsMales()
                        + aCAS001DataRecord.getCommunalEstablishmentResidentsMales())
                + ", " + aCASUV003DataRecord.getMales() + ", " + aCounts.tMaleCount + ", "
                + aCounts.tMaleHPCount + ", " + aCounts.tMaleCEPCount);
    }
    tBufferedReader.close();
    tToyModelDataRecordMaleFemaleComparisonFilePrintWriter.close();
}

From source file:uk.ac.leeds.ccg.andyt.projects.moses.process.RegressionReport_UK1.java

public void writeAggregateStatisticsForOptimisationConstraints_ISARHP_ISARCEP(String a_OutputDir_String)
        throws Exception {
    HashMap a_ID_RecordID_HashMap = _ISARDataHandler.get_ID_RecordID_HashMap();
    File optimisationConstraints_SARs = new File(a_OutputDir_String, "OptimisationConstraints_SARs.csv");
    FileOutputStream a_FileOutputStream = new FileOutputStream(optimisationConstraints_SARs);
    OutputDataHandler_OptimisationConstraints.writeHSARHP_ISARCEPHeader(a_FileOutputStream);
    a_FileOutputStream.flush();
    Object[] fitnessCounts;
    HashMap<String, Integer> a_SARCounts = null;
    TreeSet<String> a_LADCodes_TreeSet = _CASDataHandler.getLADCodes_TreeSet();
    String s2;
    String s1;
    Iterator<String> a_Iterator_String = a_LADCodes_TreeSet.iterator();
    while (a_Iterator_String.hasNext()) {
        // Need to reorder data for each LAD as OAs not necessarily returned
        // in any order and an ordered result is wanted
        TreeMap<String, HashMap<String, Integer>> resultsForLAD = new TreeMap<String, HashMap<String, Integer>>();
        boolean setPrevious_OA_String = true;
        s1 = a_Iterator_String.next();
        s2 = s1.substring(0, 3);
        File resultsFile = new File(a_OutputDir_String + s2 + "/" + s1 + "/population.csv");
        // A few results are missing
        if (resultsFile.exists()) {
            System.out.println(resultsFile.toString() + " exists");
            String previous_OA_String = "";
            BufferedReader aBufferedReader = new BufferedReader(
                    new InputStreamReader(new FileInputStream(resultsFile)));
            StreamTokenizer aStreamTokenizer = new StreamTokenizer(aBufferedReader);
            Generic_StaticIO.setStreamTokenizerSyntax1(aStreamTokenizer);
            String line = "";
            int tokenType = aStreamTokenizer.nextToken();
            while (tokenType != StreamTokenizer.TT_EOF) {
                switch (tokenType) {
                case StreamTokenizer.TT_EOL:
                    //System.out.println(line);
                    String[] lineFields = line.split(",");
                    String a_OA_String = lineFields[0];
                    if (previous_OA_String.equalsIgnoreCase(a_OA_String)) {
                        if (lineFields[1].equalsIgnoreCase("HP")) {
                            //System.out.println("HP");
                            long a_ISARRecordID = (Long) a_ID_RecordID_HashMap.get(new Long(lineFields[2]));
                            ISARDataRecord a_ISARDataRecord = _ISARDataHandler
                                    .getISARDataRecord(a_ISARRecordID);
                            GeneticAlgorithm_ISARHP_ISARCEP.addToCountsHP(a_ISARDataRecord, a_SARCounts,
                                    _Random);
                            //System.out.println(a_HSARDataRecord.toString());
                        } else {
                            //System.out.println("CEP");
                            // From the id of the ISARDataRecord get the
                            // ISARRecordID.
                            long a_ISARRecordID = (Long) a_ID_RecordID_HashMap.get(new Long(lineFields[2]));
                            ISARDataRecord a_ISARDataRecord = _ISARDataHandler
                                    .getISARDataRecord(a_ISARRecordID);
                            GeneticAlgorithm_ISARHP_ISARCEP.addToCountsCEP(a_ISARDataRecord, a_SARCounts,
                                    _Random);
                        }
                    } else {
                        // Store result
                        if (setPrevious_OA_String) {
                            previous_OA_String = a_OA_String;
                            setPrevious_OA_String = false;
                        } else {
                            // Store
                            resultsForLAD.put(previous_OA_String, a_SARCounts);
                        }
                        // Initialise/Re-initialise
                        CASDataRecord a_CASDataRecord = (CASDataRecord) _CASDataHandler
                                .getDataRecord(a_OA_String);
                        fitnessCounts = GeneticAlgorithm_ISARHP_ISARCEP.getFitnessCounts(a_CASDataRecord);
                        a_SARCounts = (HashMap<String, Integer>) fitnessCounts[1];
                        // Start a new aggregation
                        if (lineFields[1].equalsIgnoreCase("HP")) {
                            //System.out.println("HP");
                            long a_ISARRecordID = (Long) a_ID_RecordID_HashMap.get(new Long(lineFields[2]));
                            ISARDataRecord a_ISARDataRecord = _ISARDataHandler
                                    .getISARDataRecord(a_ISARRecordID);
                            GeneticAlgorithm_ISARHP_ISARCEP.addToCountsHP(a_ISARDataRecord, a_SARCounts,
                                    _Random);
                            //System.out.println(a_HSARDataRecord.toString());
                        } else {
                            //System.out.println("CEP");
                            // From the id of the ISARDataRecord get the
                            // ISARRecordID.
                            long a_ISARRecordID = (Long) a_ID_RecordID_HashMap.get(new Long(lineFields[2]));
                            ISARDataRecord a_ISARDataRecord = _ISARDataHandler
                                    .getISARDataRecord(a_ISARRecordID);
                            GeneticAlgorithm_ISARHP_ISARCEP.addToCountsCEP(a_ISARDataRecord, a_SARCounts,
                                    _Random);
                            //System.out.println(a_ISARDataRecord.toString());
                        }
                        //a_OA_String = lineFields[0];
                    }
                    previous_OA_String = a_OA_String;
                    break;
                case StreamTokenizer.TT_WORD:
                    line = aStreamTokenizer.sval;
                    break;
                }
                tokenType = aStreamTokenizer.nextToken();
            }
        } else {
            System.out.println(resultsFile.toString() + " !exists");
        }
        Iterator<String> string_Iterator = resultsForLAD.keySet().iterator();
        while (string_Iterator.hasNext()) {
            String oa_Code = string_Iterator.next();
            a_SARCounts = resultsForLAD.get(oa_Code);
            //GeneticAlgorithm_ISARHP_ISARCEP.addToCountsCEP(null, a_ID_RecordID_HashMap, _Random)
            OutputDataHandler_OptimisationConstraints.writeISARHP_ISARCEP(a_SARCounts, oa_Code,
                    a_FileOutputStream);
        }
    }
    a_FileOutputStream.close();
}

From source file:uk.ac.leeds.ccg.andyt.projects.moses.process.RegressionReport_UK1.java

public void writeAggregateStatisticsForOptimisationConstraints_HSARHP_ISARCEP(String a_OutputDir_String)
        throws Exception {
    HashMap a_HID_HSARDataRecordVector_HashMap = _HSARDataHandler.get_HID_HSARDataRecordVector_HashMap();
    HashMap a_ID_RecordID_HashMap = _ISARDataHandler.get_ID_RecordID_HashMap();
    File optimisationConstraints_SARs = new File(a_OutputDir_String, "OptimisationConstraints_SARs.csv");
    FileOutputStream a_FileOutputStream = new FileOutputStream(optimisationConstraints_SARs);
    OutputDataHandler_OptimisationConstraints.writeHSARHP_ISARCEPHeader(a_FileOutputStream);
    a_FileOutputStream.flush();
    HashMap<String, Integer> a_SARCounts = null;
    CASDataRecord a_CASDataRecord;
    TreeSet<String> a_LADCodes_TreeSet = _CASDataHandler.getLADCodes_TreeSet();
    String s2;
    String s1;
    Iterator<String> a_Iterator_String = a_LADCodes_TreeSet.iterator();
    while (a_Iterator_String.hasNext()) {
        // Need to reorder data for each LAD as OAs not necessarily returned
        // in any order and an ordered result is wanted
        TreeMap<String, HashMap<String, Integer>> resultsForLAD = new TreeMap<String, HashMap<String, Integer>>();
        boolean setPrevious_OA_String = true;
        s1 = a_Iterator_String.next();
        s2 = s1.substring(0, 3);
        File resultsFile = new File(a_OutputDir_String + s2 + "/" + s1 + "/population.csv");
        // A few results are missing
        if (resultsFile.exists()) {
            System.out.println(resultsFile.toString() + " exists");
            String previous_OA_String = "";
            BufferedReader aBufferedReader = new BufferedReader(
                    new InputStreamReader(new FileInputStream(resultsFile)));
            StreamTokenizer aStreamTokenizer = new StreamTokenizer(aBufferedReader);
            Generic_StaticIO.setStreamTokenizerSyntax1(aStreamTokenizer);
            String line = "";
            int tokenType = aStreamTokenizer.nextToken();
            while (tokenType != StreamTokenizer.TT_EOF) {
                switch (tokenType) {
                case StreamTokenizer.TT_EOL:
                    //System.out.println(line);
                    String[] lineFields = line.split(",");
                    String a_OA_String = lineFields[0];
                    if (previous_OA_String.equalsIgnoreCase(a_OA_String)) {
                        if (lineFields[1].equalsIgnoreCase("HP")) {
                            //System.out.println("HP");
                            // From the id of a household get a Vector 
                            // of HSARDataRecords
                            Vector household = (Vector) a_HID_HSARDataRecordVector_HashMap
                                    .get(new Integer(lineFields[2]));
                            HSARDataRecord a_HSARDataRecord;
                            for (int i = 0; i < household.size(); i++) {
                                a_HSARDataRecord = (HSARDataRecord) household.elementAt(i);
                                GeneticAlgorithm_HSARHP_ISARCEP.addToCounts(a_HSARDataRecord, a_SARCounts,
                                        _Random);
                            }
                            //System.out.println(a_HSARDataRecord.toString());
                        } else {
                            //System.out.println("CEP");
                            // From the id of the ISARDataRecord get the
                            // ISARRecordID.
                            long a_ISARRecordID = (Long) a_ID_RecordID_HashMap.get(new Long(lineFields[2]));
                            ISARDataRecord a_ISARDataRecord = _ISARDataHandler
                                    .getISARDataRecord(a_ISARRecordID);
                            GeneticAlgorithm_HSARHP_ISARCEP.addToCountsCEP(a_ISARDataRecord, a_SARCounts,
                                    _Random);
                        }
                    } else {
                        // Store result
                        if (setPrevious_OA_String) {
                            previous_OA_String = a_OA_String;
                            setPrevious_OA_String = false;
                        } else {
                            // Store
                            resultsForLAD.put(previous_OA_String, a_SARCounts);
                        }
                        // Initialise/Re-initialise
                        a_CASDataRecord = (CASDataRecord) _CASDataHandler.getDataRecord(a_OA_String);
                        Object[] fitnessCounts = GeneticAlgorithm_HSARHP_ISARCEP
                                .getFitnessCounts(a_CASDataRecord);
                        a_SARCounts = (HashMap<String, Integer>) fitnessCounts[1];
                        // Start a new aggregation
                        if (lineFields[1].equalsIgnoreCase("HP")) {
                            //System.out.println("HP");
                            // From the id of a household get a Vector
                            // of HSARDataRecords
                            Vector household = (Vector) a_HID_HSARDataRecordVector_HashMap
                                    .get(new Integer(lineFields[2]));
                            HSARDataRecord a_HSARDataRecord;
                            for (int i = 0; i < household.size(); i++) {
                                a_HSARDataRecord = (HSARDataRecord) household.elementAt(i);
                                GeneticAlgorithm_HSARHP_ISARCEP.addToCounts(a_HSARDataRecord, a_SARCounts,
                                        _Random);
                            }
                            //System.out.println(a_HSARDataRecord.toString());
                        } else {
                            //System.out.println("CEP");
                            // From the id of the ISARDataRecord get the
                            // ISARRecordID.
                            long a_ISARRecordID = (Long) a_ID_RecordID_HashMap.get(new Long(lineFields[2]));
                            ISARDataRecord a_ISARDataRecord = _ISARDataHandler
                                    .getISARDataRecord(a_ISARRecordID);
                            GeneticAlgorithm_HSARHP_ISARCEP.addToCountsCEP(a_ISARDataRecord, a_SARCounts,
                                    _Random);
                            //System.out.println(a_ISARDataRecord.toString());
                        }
                        //a_OA_String = lineFields[0];
                    }
                    previous_OA_String = a_OA_String;
                    break;
                case StreamTokenizer.TT_WORD:
                    line = aStreamTokenizer.sval;
                    break;
                }
                tokenType = aStreamTokenizer.nextToken();
            }
        } else {
            System.out.println(resultsFile.toString() + " !exists");
        }
        Iterator<String> string_Iterator = resultsForLAD.keySet().iterator();
        while (string_Iterator.hasNext()) {
            String oa_Code = string_Iterator.next();
            OutputDataHandler_OptimisationConstraints.writeHSARHP_ISARCEP(resultsForLAD.get(oa_Code), oa_Code,
                    a_FileOutputStream);
        }
    }
    a_FileOutputStream.close();
}

From source file:com.feilong.core.bean.ConvertUtilTest.java

/**
 * TestConvertUtilTest.
 * 
 * @throws IOException
 */
@Test
public void testConvertUtilTest5() throws IOException {
    StreamTokenizer streamTokenizer = new StreamTokenizer(new StringReader("abaBc^babac^cb//ab/*test*/"));
    streamTokenizer.whitespaceChars('^', '^'); // Set the delimiters
    streamTokenizer.lowerCaseMode(true);

    streamTokenizer.slashSlashComments(false);
    streamTokenizer.slashStarComments(false);
    // Split '^'-delimited tokens into a List
    List<String> list = new ArrayList<String>();
    while (true) {
        int ttype = streamTokenizer.nextToken();
        if ((ttype == StreamTokenizer.TT_WORD) || (ttype > 0)) {
            if (streamTokenizer.sval != null) {
                list.add(streamTokenizer.sval);
            }
        } else if (ttype == StreamTokenizer.TT_EOF) {
            break;
        }
    }

    LOGGER.debug(JsonUtil.format(list));
}
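
Note that '/' remains the default comment character here: slashSlashComments(false) and slashStarComments(false) only disable the C-style comment modes and leave the constructor's single-character comment syntax in place. The trailing "//ab/*test*/" is therefore discarded as a comment, and with lowerCaseMode(true) the logged list is ["ababc", "babac", "cb"].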

From source file:Matrix.java

/**
 * Read a matrix from a stream. The format is the same the print method, so
 * printed matrices can be read back in (provided they were printed using US
 * Locale). Elements are separated by whitespace, all the elements for each
 * row appear on a single line, the last row is followed by a blank line.
 *
 * @param input
 *            the input stream.
 */

public static Matrix read(BufferedReader input) throws java.io.IOException {
    StreamTokenizer tokenizer = new StreamTokenizer(input);

    // Although StreamTokenizer will parse numbers, it doesn't recognize
    // scientific notation (E or D); however, Double.valueOf does.
    // The strategy here is to disable StreamTokenizer's number parsing.
    // We'll only get whitespace delimited words, EOL's and EOF's.
    // These words should all be numbers, for Double.valueOf to parse.

    tokenizer.resetSyntax();
    tokenizer.wordChars(0, 255);
    tokenizer.whitespaceChars(0, ' ');
    tokenizer.eolIsSignificant(true);
    java.util.Vector v = new java.util.Vector();

    // Ignore initial empty lines
    while (tokenizer.nextToken() == StreamTokenizer.TT_EOL)
        ;
    if (tokenizer.ttype == StreamTokenizer.TT_EOF)
        throw new java.io.IOException("Unexpected EOF on matrix read.");
    do {
        v.addElement(Double.valueOf(tokenizer.sval)); // Read & store 1st row.
    } while (tokenizer.nextToken() == StreamTokenizer.TT_WORD);

    int n = v.size(); // Now we've got the number of columns!
    double row[] = new double[n];
    for (int j = 0; j < n; j++)
        // extract the elements of the 1st row.
        row[j] = ((Double) v.elementAt(j)).doubleValue();
    v.removeAllElements();
    v.addElement(row); // Start storing rows instead of columns.
    while (tokenizer.nextToken() == StreamTokenizer.TT_WORD) {
        // While non-empty lines
        v.addElement(row = new double[n]);
        int j = 0;
        do {
            if (j >= n)
                throw new java.io.IOException("Row " + v.size() + " is too long.");
            row[j++] = Double.valueOf(tokenizer.sval).doubleValue();
        } while (tokenizer.nextToken() == StreamTokenizer.TT_WORD);
        if (j < n)
            throw new java.io.IOException("Row " + v.size() + " is too short.");
    }
    int m = v.size(); // Now we've got the number of rows.
    double[][] A = new double[m][];
    v.copyInto(A); // copy the rows out of the vector
    return new Matrix(A);
}
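
A minimal usage sketch, assuming a whitespace-separated matrix file in the format described above (the file name is illustrative):

    BufferedReader input = new BufferedReader(new FileReader("matrix.txt"));
    Matrix A = Matrix.read(input); // one row per line; the last row is followed by a blank line
    input.close();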

From source file:com.ecyrd.jspwiki.plugin.PluginManager.java

/**
 *  Parses plugin arguments.  Handles quotes and all other kewl stuff.
 *
 *  <h3>Special parameters</h3>
 *  The plugin body is put into a special parameter defined by {@link #PARAM_BODY};
 *  the plugin's command line into a parameter defined by {@link #PARAM_CMDLINE};
 *  and the bounds of the plugin within the wiki page text by a parameter defined
 *  by {@link #PARAM_BOUNDS}, whose value is stored as a two-element int[] array,
 *  i.e., <tt>[start,end]</tt>.
 *
 * @param argstring The argument string to the plugin.  This is
 *  typically a list of key-value pairs, using "'" to escape
 *  spaces in strings, followed by an empty line and then the
 *  plugin body.  In case the parameter is null, will return an
 *  empty parameter list.
 *
 * @return A parsed list of parameters.
 *
 * @throws IOException If the parsing fails.
 */
public Map parseArgs(String argstring) throws IOException {
    HashMap<String, Object> arglist = new HashMap<String, Object>();

    //
    //  Protection against funny users.
    //
    if (argstring == null)
        return arglist;

    arglist.put(PARAM_CMDLINE, argstring);

    StringReader in = new StringReader(argstring);
    StreamTokenizer tok = new StreamTokenizer(in);
    int type;

    String param = null;
    String value = null;

    tok.eolIsSignificant(true);

    boolean potentialEmptyLine = false;
    boolean quit = false;

    while (!quit) {
        String s;

        type = tok.nextToken();

        switch (type) {
        case StreamTokenizer.TT_EOF:
            quit = true;
            s = null;
            break;

        case StreamTokenizer.TT_WORD:
            s = tok.sval;
            potentialEmptyLine = false;
            break;

        case StreamTokenizer.TT_EOL:
            quit = potentialEmptyLine;
            potentialEmptyLine = true;
            s = null;
            break;

        case StreamTokenizer.TT_NUMBER:
            s = Integer.toString((int) tok.nval);
            potentialEmptyLine = false;
            break;

        case '\'':
            s = tok.sval;
            break;

        default:
            s = null;
        }

        //
        //  Assume that alternate words on the line are
        //  parameter and value, respectively.
        //
        if (s != null) {
            if (param == null) {
                param = s;
            } else {
                value = s;

                arglist.put(param, value);

                // log.debug("ARG: "+param+"="+value);
                param = null;
            }
        }
    }

    //
    //  Now, we'll check the body.
    //

    if (potentialEmptyLine) {
        StringWriter out = new StringWriter();
        FileUtil.copyContents(in, out);

        String bodyContent = out.toString();

        if (bodyContent != null) {
            arglist.put(PARAM_BODY, bodyContent);
        }
    }

    return arglist;
}
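
A hypothetical invocation, assuming a PluginManager instance pm; the plugin body follows an empty line, as the javadoc above describes:

    Map args = pm.parseArgs("page='Main Page' max=5\n\nplugin body text");
    // args maps "page" -> "Main Page" and "max" -> "5", plus the
    // PARAM_CMDLINE and PARAM_BODY entries for the raw command line and body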

From source file:com.rapidminer.tools.Tools.java

/** Delivers the next token and checks for an unexpected end of line or file. */
public static void getNextToken(StreamTokenizer tokenizer) throws IOException {
    if (tokenizer.nextToken() == StreamTokenizer.TT_EOL) {
        throw new IOException("unexpected end of line " + tokenizer.lineno());
    }

    if (tokenizer.ttype == StreamTokenizer.TT_EOF) {
        throw new IOException("unexpected end of file in line " + tokenizer.lineno());
    } else if (tokenizer.ttype == '\'' || tokenizer.ttype == '"') {
        tokenizer.ttype = StreamTokenizer.TT_WORD;
    } else if (tokenizer.ttype == StreamTokenizer.TT_WORD && tokenizer.sval.equals("?")) {
        tokenizer.ttype = '?';
    }
}