Example usage for java.io StreamTokenizer TT_EOF

List of usage examples for java.io StreamTokenizer TT_EOF

Introduction

In this page you can find the example usage for java.io StreamTokenizer TT_EOF.

Prototype

public static final int TT_EOF

To view the source code for java.io StreamTokenizer TT_EOF, click the Source Link below.

Document

A constant indicating that the end of the stream has been read.

Usage

From source file:Counter.java

/**
 * Reads every token from the tokenizer field {@code st} until end of stream
 * and tallies, in the {@code counts} map, how many times each distinct token
 * text occurs. EOL tokens are counted under the literal key "EOL", numbers
 * under their decimal string form, and single characters under themselves.
 *
 * @throws RuntimeException wrapping any IOException from the tokenizer
 */
public void countWords() {
    try {
        while (st.nextToken() != StreamTokenizer.TT_EOF) {
            String s;
            switch (st.ttype) {
            case StreamTokenizer.TT_EOL:
                s = "EOL"; // no need for new String(...) — the literal suffices
                break;
            case StreamTokenizer.TT_NUMBER:
                s = Double.toString(st.nval);
                break;
            case StreamTokenizer.TT_WORD:
                s = st.sval; // already a String
                break;
            default: // single character stored directly in ttype
                s = String.valueOf((char) st.ttype);
            }
            // Single lookup instead of containsKey + get.
            Counter c = (Counter) counts.get(s);
            if (c != null)
                c.increment();
            else
                counts.put(s, new Counter());
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}

From source file:com.denimgroup.threadfix.framework.filefilter.ClassAnnotationBasedFileFilter.java

/**
 * Accepts a .java file if it contains one of the class annotations returned
 * by getClassAnnotations() before the first occurrence of the word "class".
 * An annotation is recognized as a '@' token immediately followed by a word
 * token whose text is in the annotation set.
 *
 * @param file candidate file; may be null
 * @return true if the file carries one of the sought class annotations
 */
@Override
public boolean accept(@Nullable File file) {
    // Reject anything that is not an existing, regular .java file.
    if (file == null || !file.exists() || !file.isFile() || !file.getName().endsWith(".java")) {
        return false;
    }

    boolean accepted = false;
    boolean previousWasAt = false;
    Reader reader = null;

    try {
        reader = new InputStreamReader(new FileInputStream(file), "UTF-8");

        StreamTokenizer tokenizer = new StreamTokenizer(reader);
        tokenizer.slashSlashComments(true);
        tokenizer.slashStarComments(true);

        while (tokenizer.nextToken() != StreamTokenizer.TT_EOF) {
            String word = tokenizer.sval;

            if (previousWasAt && word != null && getClassAnnotations().contains(word)) {
                accepted = true;
                break;
            }
            if (word != null && word.equals("class")) {
                // Class-level annotations precede the class keyword; stop looking.
                break;
            }

            previousWasAt = tokenizer.ttype == '@';
        }
    } catch (IOException e) {
        log.warn("Encountered IOException while tokenizing file.", e);
    } finally {
        if (reader != null) {
            try {
                reader.close();
            } catch (IOException e) {
                log.error("Encountered IOException while attempting to close file.", e);
            }
        }
    }

    return accepted;
}

From source file:ScanStreamTok.java

/**
 * Reads tokens from the tokenizer field {@code tf} until end of stream,
 * printing a one-line description of each token to System.out.
 *
 * @throws IOException if the underlying reader fails
 */
protected void process() throws IOException {
    int tokenType;

    // The original switch contained a TT_EOF case, but it was unreachable:
    // the loop condition consumes TT_EOF before the switch is entered.
    while ((tokenType = tf.nextToken()) != StreamTokenizer.TT_EOF) {
        switch (tokenType) {
        case StreamTokenizer.TT_EOL:
            System.out.println("End of line");
            break;
        case StreamTokenizer.TT_NUMBER:
            System.out.println("Number " + tf.nval);
            break;
        case StreamTokenizer.TT_WORD:
            System.out.println("Word, length " + tf.sval.length() + "->" + tf.sval);
            break;
        default:
            System.out.println("What is it? i = " + tokenType);
        }
    }
}

From source file:SimpleCalcStreamTok.java

/**
 * Runs the RPN-style calculator loop: reads tokens from {@code tf} until end
 * of stream, pushing numbers onto the operand stack, applying the operators
 * + - * / to the top of the stack, and printing the current top on '='.
 *
 * @throws IOException if the tokenizer's underlying reader fails
 */
protected void doCalc() throws IOException {
    int tokenType;

    while ((tokenType = tf.nextToken()) != StreamTokenizer.TT_EOF) {
        if (tokenType == StreamTokenizer.TT_NUMBER) {
            // A numeric literal: push its value onto the operand stack.
            push(tf.nval);
        } else if (tokenType == StreamTokenizer.TT_WORD) {
            // A variable name: remember it. Not otherwise used here.
            variable = tf.sval;
        } else if (tokenType == '+') {
            // Addition is commutative, so pop order does not matter.
            push(pop() + pop());
        } else if (tokenType == '-') {
            // Subtraction: order matters, so capture the subtrahend first.
            double subtrahend = pop();
            push(pop() - subtrahend);
        } else if (tokenType == '*') {
            // Multiplication is commutative.
            push(pop() * pop());
        } else if (tokenType == '/') {
            // Division: order matters, so capture the divisor first.
            double divisor = pop();
            push(pop() / divisor);
        } else if (tokenType == '=') {
            out.println(peek());
        } else {
            out.println("What's this? iType = " + tokenType);
        }
    }
}

From source file:com.feilong.core.bean.ConvertUtilTest.java

/**
 * Demonstrates StreamTokenizer with '^' registered as a whitespace
 * (delimiter) character and lower-case mode enabled: word tokens from the
 * input string are collected into a list and logged.
 *
 * @throws IOException if tokenizing fails
 */
@Test
public void testConvertUtilTest5() throws IOException {
    StreamTokenizer streamTokenizer = new StreamTokenizer(new StringReader("abaBc^babac^cb//ab/*test*/"));
    streamTokenizer.whitespaceChars('^', '^'); // treat '^' as a delimiter
    streamTokenizer.lowerCaseMode(true); // word tokens are folded to lower case

    // Keep // and /* */ sequences as ordinary text, not comments.
    streamTokenizer.slashSlashComments(false);
    streamTokenizer.slashStarComments(false);
    // Split the caret-delimited tokens into a List
    List<String> list = new ArrayList<String>();
    while (true) {
        int ttype = streamTokenizer.nextToken();
        // TT_WORD covers word tokens; ttype > 0 covers ordinary/quote chars.
        if ((ttype == StreamTokenizer.TT_WORD) || (ttype > 0)) {
            if (streamTokenizer.sval != null) {
                list.add(streamTokenizer.sval);
            }
        } else if (ttype == StreamTokenizer.TT_EOF) {
            break;
        }
    }

    LOGGER.debug(JsonUtil.format(list));
}

From source file:net.duckling.ddl.service.render.dml.ParseHtmlImg.java

/**
 * Parses a plugin argument string into a key/value map. Alternate tokens are
 * treated as parameter name and value, respectively. An empty line (two
 * consecutive EOLs) ends the argument section; any remaining text is stored
 * under PARAM_BODY.
 *
 * @param argstring the raw argument string; may be null
 * @return a map of parameter names to values, never null
 * @throws IOException if tokenizing or copying the body fails
 */
public Map parseArgs(String argstring) throws IOException {
    HashMap<String, String> arglist = new HashMap<String, String>();

    //
    //  Protection against funny users.
    //
    if (argstring == null) {
        return arglist;
    }

    StringReader in = new StringReader(argstring);
    StreamTokenizer tok = new StreamTokenizer(in);
    int type;

    String param = null;
    String value = null;

    tok.eolIsSignificant(true); // needed to detect the empty-line separator

    boolean potentialEmptyLine = false;
    boolean quit = false;

    while (!quit) {
        String s;

        type = tok.nextToken();

        switch (type) {
        case StreamTokenizer.TT_EOF:
            quit = true;
            s = null;
            break;

        case StreamTokenizer.TT_WORD:
            s = tok.sval;
            potentialEmptyLine = false;
            break;

        case StreamTokenizer.TT_EOL:
            // Two EOLs in a row mean an empty line: stop parsing arguments.
            quit = potentialEmptyLine;
            potentialEmptyLine = true;
            s = null;
            break;

        case StreamTokenizer.TT_NUMBER:
            // Same truncation as the deprecated new Double(...).intValue(),
            // without the boxing.
            s = Integer.toString((int) tok.nval);
            potentialEmptyLine = false;
            break;

        case '\'':
            s = tok.sval; // single-quoted string token
            break;

        default:
            s = null;
        }

        //
        //  Assume that alternate words on the line are
        //  parameter and value, respectively.
        //
        if (s != null) {
            if (param == null) {
                param = s;
            } else {
                value = s;
                arglist.put(param, value);
                param = null;
            }
        }
    }

    //
    //  Now, we'll check the body.
    //
    if (potentialEmptyLine) {
        StringWriter out = new StringWriter();
        FileUtil.copyContents(in, out);

        // StringWriter.toString() never returns null, so the original
        // null check was dead code.
        arglist.put(PARAM_BODY, out.toString());
    }

    return arglist;
}

From source file:gda.device.detector.mythen.data.MythenDataFileUtils.java

/**
 * Parses whitespace-separated numeric rows from {@code r} into a 2-D array.
 * Each row has 2 columns (angle, count) by default, 3 for PROCESSED
 * (angle, count, error) and 4 for PROCESSED_WITH_CHANNELS
 * (angle, count, error, channel). The reader is always closed.
 *
 * @param r    source of whitespace-separated numbers; closed on return
 * @param type determines how many columns each row contains
 * @return one double[] per row, in file order
 * @throws IOException if reading fails
 */
protected static double[][] getDataFromReaderUsingStreamTokenizer(Reader r, FileType type) throws IOException {
    try {
        List<double[]> data = new Vector<double[]>();
        StreamTokenizer st = new StreamTokenizer(r);
        while (st.nextToken() != StreamTokenizer.TT_EOF) {
            double angle = st.nval;
            st.nextToken();
            double count = st.nval;
            if (type == FileType.PROCESSED) {
                st.nextToken();
                double error = st.nval;
                data.add(new double[] { angle, count, error });
            } else if (type == FileType.PROCESSED_WITH_CHANNELS) {
                st.nextToken();
                double error = st.nval;
                // BUG FIX: the original read the channel from the same st.nval
                // as the error (no intervening nextToken), so channel==error
                // and every following row was shifted by one token.
                // Assumes the channel is the 4th column — TODO confirm format.
                st.nextToken();
                double channel = st.nval;
                data.add(new double[] { angle, count, error, channel });
            } else {
                data.add(new double[] { angle, count });
            }
        }
        return data.toArray(new double[data.size()][]);
    } finally {
        try {
            r.close();
        } catch (IOException e) {
            // ignore — best-effort close
        }
    }
}

From source file:com.github.lindenb.jvarkit.tools.biostar.Biostar103303.java

/**
 * Loads exon records from the GTF file at {@code uri} into {@code exonMap},
 * keyed by genomic interval. Only "exon" lines whose chromosome exists in
 * {@code dict} are kept; exons are grouped per (chromosome, transcript_id),
 * sorted by start, and indexed. Overlapping exons within one transcript
 * raise an IOException.
 *
 * @param uri  location of the GTF file (opened via IOUtils)
 * @param dict reference dictionary used to validate chromosome names
 * @throws IOException on read failure or overlapping exons
 */
private void readGTF(String uri, SAMSequenceDictionary dict) throws IOException {
    int count_exons = 0;
    final Set<String> unknown = new HashSet<String>(); // chromosomes already warned about
    LOG.info("Reading " + uri);
    final Pattern tab = Pattern.compile("[\t]");
    final Map<String, GTFGene> transcript2gene = new HashMap<String, GTFGene>();
    LineIterator iter = IOUtils.openURIForLineIterator(uri);
    while (iter.hasNext()) {
        String line = iter.next();
        // Skip header/comment lines, short lines, and non-exon features.
        if (line.startsWith("#"))
            continue;
        String tokens[] = tab.split(line);
        if (tokens.length < 9)
            continue;
        if (!tokens[2].equals("exon"))
            continue;
        if (dict.getSequence(tokens[0]) == null) {
            // Warn only once per unknown chromosome.
            if (!unknown.contains(tokens[0])) {
                LOG.warn("chromosome in " + line + " not in SAMSequenceDictionary ");
                unknown.add(tokens[0]);
            }
            continue;
        }
        String transcript_id = null, gene_id = null, gene_name = null, exon_id = null;
        // Tokenize the GTF attributes column (9th field) as key/value pairs;
        // keep underscores inside attribute names like transcript_id.
        StreamTokenizer st = new StreamTokenizer(new StringReader(tokens[8]));
        st.wordChars('_', '_');
        String key = null;
        while (st.nextToken() != StreamTokenizer.TT_EOF) {
            String s = null;
            switch (st.ttype) {
            case StreamTokenizer.TT_NUMBER:
                s = String.valueOf(st.nval);
                break;
            case '"':
            case '\'':
            case StreamTokenizer.TT_WORD:
                s = st.sval;
                break;
            case ';':
                // attribute separator — ignored
                break;
            default:
                break;
            }
            if (s == null)
                continue;
            // Alternate tokens are an attribute key followed by its value.
            if (key == null) {
                key = s;
            } else {
                if (key.equals("transcript_id")) {
                    transcript_id = s;
                } else if (key.equals("gene_id")) {
                    gene_id = s;
                } else if (key.equals("gene_name")) {
                    gene_name = s;
                } else if (key.equals("exon_id")) {
                    exon_id = s;
                }
                key = null;
            }
        }
        if (transcript_id == null || transcript_id.isEmpty())
            continue;
        // Group exons under a (chromosome + transcript_id) composite key.
        GTFGene gene = transcript2gene.get(tokens[0] + " " + transcript_id);
        if (gene == null) {
            gene = new GTFGene();
            gene.transcript_id = transcript_id;
            gene.gene_id = gene_id;
            gene.gene_name = gene_name;
            gene.chrom = tokens[0];
            transcript2gene.put(tokens[0] + " " + transcript_id, gene);
        }
        GTFGene.Exon exon = gene.createExon(Integer.parseInt(tokens[3]), Integer.parseInt(tokens[4]));
        exon.exon_id = exon_id;
    }
    CloserUtil.close(iter);

    // Sort each transcript's exons by start position, index them, and
    // register each one in exonMap under its genomic interval.
    for (GTFGene g : transcript2gene.values()) {
        Collections.sort(g.exons, new Comparator<GTFGene.Exon>() {
            @Override
            public int compare(GTFGene.Exon o1, GTFGene.Exon o2) {
                return o1.start - o2.start;
            }
        });
        for (int i = 0; i < g.exons.size(); ++i) {

            GTFGene.Exon exon = g.exons.get(i);
            exon.index = i;

            // Exons of a single transcript must not overlap.
            if (i > 0) {
                GTFGene.Exon prev = g.exons.get(i - 1);
                if (prev.end >= exon.start) {
                    throw new IOException("exons " + (i) + " and " + (i + 1) + " overlap in " + g);
                }
            }

            Interval interval = new Interval(g.chrom, exon.start, exon.end);
            List<GTFGene.Exon> L = exonMap.get(interval);
            if (L == null) {
                L = new ArrayList<GTFGene.Exon>(1);
                exonMap.put(interval, L);
            }
            L.add(exon);
            ++count_exons;
        }
    }
    LOG.info("End Reading " + uri + " N=" + count_exons);
}

From source file:com.xpn.xwiki.util.Util.java

/**
 * Create a Map from a string holding a space separated list of key=value pairs. If keys or values must contain
 * spaces, they can be placed inside quotes, like <code>"this key"="a larger value"</code>. To use a quote as part
 * of a key/value, use <code>%_Q_%</code>.
 * /*from   ww  w . j  av a2 s.  c  o m*/
 * @param mapString The string that must be parsed.
 * @return A Map containing the keys and values. If a key is defined more than once, the last value is used.
 */
public static Hashtable<String, String> keyValueToHashtable(String mapString) throws IOException {
    Hashtable<String, String> result = new Hashtable<String, String>();
    StreamTokenizer st = new StreamTokenizer(new BufferedReader(new StringReader(mapString)));
    st.resetSyntax();
    st.quoteChar('"');
    st.wordChars('a', 'z');
    st.wordChars('A', 'Z');
    st.whitespaceChars(' ', ' ');
    st.whitespaceChars('=', '=');
    while (st.nextToken() != StreamTokenizer.TT_EOF) {
        String key = st.sval;
        st.nextToken();
        String value = (st.sval != null) ? st.sval : "";
        result.put(key, restoreValue(value));
    }
    return result;
}

From source file:keel.Algorithms.Decision_Trees.C45.C45.java

/** Function to read the options from the execution file and assign the values to the parameters.
 *
 * @param options       The StreamTokenizer that reads the parameters file.
 *
 * @throws Exception   If the format of the file is not correct.
 */
protected void setOptions(StreamTokenizer options) throws Exception {
    options.nextToken();

    /* Checks that the file starts with the token algorithm */
    if (options.sval.equalsIgnoreCase("algorithm")) {
        options.nextToken();
        options.nextToken();

        //if (!options.sval.equalsIgnoreCase( "C4.5" ) )
        //   throw new Exception( "The name of the algorithm is not correct." );

        options.nextToken();
        options.nextToken();
        options.nextToken();
        options.nextToken();

        /* Reads the names of the input files */
        if (options.sval.equalsIgnoreCase("inputData")) {
            options.nextToken();
            options.nextToken();
            modelFileName = options.sval;

            if (options.nextToken() != StreamTokenizer.TT_EOL) {
                trainFileName = options.sval;
                options.nextToken();
                testFileName = options.sval;
                if (options.nextToken() != StreamTokenizer.TT_EOL) {
                    trainFileName = modelFileName;
                    options.nextToken();
                }
            }

        } else {
            throw new Exception("No file test provided.");
        }

        /* Skips tokens until the outputData section is found */
        while (true) {
            if (options.nextToken() == StreamTokenizer.TT_EOF) {
                throw new Exception("No output file provided.");
            }

            if (options.sval == null) {
                continue;
            } else if (options.sval.equalsIgnoreCase("outputData")) {
                break;
            }
        }

        options.nextToken();
        options.nextToken();
        trainOutputFileName = options.sval;
        options.nextToken();
        testOutputFileName = options.sval;
        options.nextToken();
        resultFileName = options.sval;

        if (!getNextToken(options)) {
            return;
        }

        while (options.ttype != StreamTokenizer.TT_EOF) {
            /* Reads the prune parameter */
            if (options.sval.equalsIgnoreCase("pruned")) {
                options.nextToken();
                options.nextToken();

                // NOTE(review): both branches of the original set prune = true
                // (the "false" assignment is commented out), so pruning is
                // always enabled regardless of the parameter value.
                if (options.sval.equalsIgnoreCase("TRUE")) {
                    prune = true;
                } else {
                    //prune = false;
                    prune = true;
                }
            }

            /* Reads the confidence parameter */
            if (options.sval.equalsIgnoreCase("confidence")) {
                if (!prune) {
                    throw new Exception("Doesn't make sense to change confidence for prune " + "tree!");
                }

                options.nextToken();
                options.nextToken();

                /* Checks that the confidence threshold is between 0 and 1. */
                float cf = Float.parseFloat(options.sval);

                // BUG FIX: the original condition used ||, which is true for
                // every float, so out-of-range confidences were accepted.
                // The threshold must satisfy BOTH bounds.
                if (cf <= 1 && cf >= 0) {
                    confidence = cf;
                }
            }

            /* Reads the itemsets per leaf parameter */
            if (options.sval.equalsIgnoreCase("itemsetsPerLeaf")) {
                options.nextToken();
                options.nextToken();

                if (Integer.parseInt(options.sval) > 0) {
                    minItemsets = Integer.parseInt(options.sval);
                }
            }

            getNextToken(options);
        }
    }
}