Example usage for org.apache.hadoop.mapred Reporter

Introduction

On this page you can find example usages of org.apache.hadoop.mapred.Reporter, in each case built as an anonymous implementation of the Reporter interface.

Prototype

Reporter
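
Note that Reporter is an interface (it extends org.apache.hadoop.util.Progressable), so each example below instantiates it as an anonymous inner class. Hadoop's mapred API also ships a built-in no-op implementation, Reporter.NULL, which can often replace the hand-rolled stubs collected here. A minimal sketch, assuming a Hadoop mapred client library on the classpath:

import org.apache.hadoop.mapred.Reporter;

public class NullReporterDemo {
    public static void main(String[] args) {
        // Reporter.NULL silently ignores counters, status and progress updates.
        Reporter reporter = Reporter.NULL;
        reporter.setStatus("ignored");                 // no-op
        reporter.progress();                           // no-op
        reporter.incrCounter("group", "counter", 1L);  // no-op
    }
}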

Usage

From source file:edu.uci.ics.asterix.external.indexing.input.AbstractHDFSReader.java

License:Apache License

protected Reporter getReporter() {
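    // Build a do-nothing Reporter: counters, status and progress are all ignored.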
    Reporter reporter = new Reporter() {

        @Override
        public Counter getCounter(Enum<?> arg0) {
            return null;
        }

        @Override
        public Counter getCounter(String arg0, String arg1) {
            return null;
        }

        @Override
        public InputSplit getInputSplit() throws UnsupportedOperationException {
            return null;
        }

        @Override
        public void incrCounter(Enum<?> arg0, long arg1) {
        }

        @Override
        public void incrCounter(String arg0, String arg1, long arg2) {
        }

        @Override
        public void setStatus(String arg0) {
        }

        @Override
        public void progress() {
        }

        @Override
        public float getProgress() {
            return 0.0f;
        }
    };

    return reporter;
}
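
These do-nothing reporters exist because many old-API (mapred) calls require a non-null Reporter argument. A hypothetical usage sketch, assuming inputFormat, split and jobConf have been created elsewhere and the key/value types match TextInputFormat:

RecordReader<LongWritable, Text> reader =
        inputFormat.getRecordReader(split, jobConf, getReporter());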

From source file:edu.uci.ics.asterix.external.indexing.input.TextualFullScanDataReader.java

License:Apache License

private Reporter getReporter() {
    Reporter reporter = new Reporter() {

        @Override
        public Counter getCounter(Enum<?> arg0) {
            return null;
        }

        @Override
        public Counter getCounter(String arg0, String arg1) {
            return null;
        }

        @Override
        public InputSplit getInputSplit() throws UnsupportedOperationException {
            return null;
        }

        @Override
        public void incrCounter(Enum<?> arg0, long arg1) {
        }

        @Override
        public void incrCounter(String arg0, String arg1, long arg2) {
        }

        @Override
        public void setStatus(String arg0) {
        }

        @Override
        public void progress() {
        }

        @Override
        public float getProgress() {
            return 0.0f;
        }
    };

    return reporter;
}

From source file:edu.uci.ics.hyracks.dataflow.hadoop.AbstractHadoopOperatorDescriptor.java

License:Apache License

protected Reporter createReporter() {
    return new Reporter() {
        @Override
        public Counter getCounter(Enum<?> name) {
            return null;
        }

        @Override
        public Counter getCounter(String group, String name) {
            return null;
        }

        @Override
        public InputSplit getInputSplit() throws UnsupportedOperationException {
            return null;
        }

        @Override
        public void incrCounter(Enum<?> key, long amount) {

        }

        @Override
        public void incrCounter(String group, String counter, long amount) {

        }

        @Override
        public void progress() {

        }

        @Override
        public void setStatus(String status) {

        }

        @Override
        public float getProgress() {
            return 0.0f;
        }
    };
}

From source file:edu.uci.ics.hyracks.dataflow.hadoop.HadoopReadOperatorDescriptor.java

License:Apache License

protected Reporter createReporter() {
    return new Reporter() {
        @Override
        public Counter getCounter(Enum<?> name) {
            return null;
        }

        @Override
        public Counter getCounter(String group, String name) {
            return null;
        }

        @Override
        public InputSplit getInputSplit() throws UnsupportedOperationException {
            return null;
        }

        @Override
        public void incrCounter(Enum<?> key, long amount) {

        }

        @Override
        public void incrCounter(String group, String counter, long amount) {

        }

        @Override
        public void progress() {

        }

        @Override
        public void setStatus(String status) {

        }

        @Override
        public float getProgress() {
            // Progress is not tracked by this stub reporter.
            return 0;
        }
    };
}

From source file:edu.uci.ics.hyracks.hadoop.compat.util.Utilities.java

License:Apache License

public static Reporter createReporter() {
    Reporter reporter = new Reporter() {

        @Override
        public void progress() {

        }

        @Override
        public void setStatus(String arg0) {

        }

        @Override
        public void incrCounter(String arg0, String arg1, long arg2) {

        }

        @Override
        public void incrCounter(Enum<?> arg0, long arg1) {

        }

        @Override
        public InputSplit getInputSplit() throws UnsupportedOperationException {
            return null;
        }

        @Override
        public Counter getCounter(String arg0, String arg1) {
            return null;
        }

        @Override
        public Counter getCounter(Enum<?> arg0) {
            return null;
        }

        @Override
        public float getProgress() {
            // Progress is not tracked by this stub reporter.
            return 0f;
        }
    };
    return reporter;
}

From source file:org.terrier.structures.indexing.singlepass.hadoop.BitPostingIndexInputFormat.java

License:Mozilla Public License

/** Test method: with --splits, computes splits for the inverted/lexicon structures of the command-line specified index; otherwise reads postings from a single specified split */
public static void main(String[] args) throws Exception {
    Index.setIndexLoadingProfileAsRetrieval(false);
    IndexOnDisk index = Index.createIndex(args[1], args[2]);
    if (args[0].equals("--splits")) {
        JobConf job = HadoopPlugin.getJobFactory(BitPostingIndexInputFormat.class.getSimpleName()).newJob();
        HadoopUtility.toHConfiguration(index, job);
        setStructures(job, "inverted", "lexicon");
        index.close();
        new BitPostingIndexInputFormat().getSplits(job, 100);
    } else {
        JobConf job = HadoopPlugin.getJobFactory(BitPostingIndexInputFormat.class.getSimpleName()).newJob();
        setStructures(job, "linksin", "linksin-lookup");
        HadoopUtility.toHConfiguration(index, job);
        index.close();
        InputSplit s = new BitPostingIndexInputSplit(new Path(args[3]), Long.parseLong(args[4]),
                Long.parseLong(args[5]), new String[0], Integer.parseInt(args[6]), Integer.parseInt(args[7]));
        RecordReader<IntWritable, IntObjectWrapper<IterablePosting>> rr = new BitPostingIndexInputFormat()
                .getRecordReader(s, job, new Reporter() {
                    public InputSplit getInputSplit() throws UnsupportedOperationException {
                        return null;
                    }

                    @SuppressWarnings({ "rawtypes" })
                    public void incrCounter(Enum arg0, long arg1) {
                    }

                    public void incrCounter(String arg0, String arg1, long arg2) {
                    }

                    @SuppressWarnings({ "rawtypes" })
                    public org.apache.hadoop.mapred.Counters.Counter getCounter(Enum arg0) {
                        return null;
                    }

                    public org.apache.hadoop.mapred.Counters.Counter getCounter(String arg0, String arg1) {
                        return null;
                    }

                    public void setStatus(String arg0) {
                    }

                    public void progress() {
                    }
                });
        IntWritable key = rr.createKey();
        IntObjectWrapper<IterablePosting> value = rr.createValue();
        long pointers = 0;
        int lastId = 0;
        int nonZeroEntryCount = 0;
        float maxProgress = 0;
        while (rr.next(key, value)) {
            IterablePosting ip = value.getObject();
            lastId = key.get();
            while (ip.next() != IterablePosting.EOL) {
                pointers++;
            }
            nonZeroEntryCount++;
            if (rr.getProgress() > maxProgress)
                maxProgress = rr.getProgress();
        }
        rr.close();
        System.out.println("maxProgress=" + maxProgress + " Lastid=" + lastId + " nonZeroEntryCount="
                + nonZeroEntryCount + " postings=" + pointers);
    }
}

From source file:org.wikimedia.wikihadoop.TestStreamWikiDumpInputFormat.java

License:Apache License

private static Reporter getStderrReporter() {
    return new Reporter() {
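        // Forwards status messages to stderr; all other callbacks are no-ops.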
        @Override
        public void setStatus(String s) {
            System.err.println(s);
        }

        @Override
        public void progress() {
        }

        @Override
        public float getProgress() {
            return 0;
        }

        @Override
        public Counters.Counter getCounter(Enum<?> name) {
            return null;
        }

        @Override
        public Counters.Counter getCounter(String group, String name) {
            return null;
        }

        @Override
        public void incrCounter(Enum<?> key, long amount) {
            //System.err.println(key.toString() + " is incremented by " + amount);
        }

        @Override
        public void incrCounter(String group, String counter, long amount) {
            //System.err.println(group.toString() + " " + counter + " is incremented by " + amount);
        }

        @Override
        public InputSplit getInputSplit() throws UnsupportedOperationException {
            throw new UnsupportedOperationException();
        }
    };
}
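
Unlike the purely silent reporters above, this one forwards setStatus messages to standard error. That makes record-reader status visible when the input format is exercised from a unit test, where no running MapReduce task is present to supply a real Reporter.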