Example usage for org.apache.hadoop.io.LongWritable.get()

Introduction

On this page you can find example usage for org.apache.hadoop.io.LongWritable.get().

Prototype

public long get() 

Document

Return the value of this LongWritable.
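
A minimal standalone sketch of the set()/get() round trip (the class and variable names here are illustrative, not taken from any of the sources below):

import org.apache.hadoop.io.LongWritable;

public class LongWritableGetExample {
    public static void main(String[] args) {
        // Wrap a primitive long in a reusable Writable.
        LongWritable counter = new LongWritable(41L);

        // get() returns the stored primitive value; set() replaces it.
        long raw = counter.get();
        counter.set(raw + 1);

        System.out.println(counter.get()); // prints 42
    }
}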

Usage

From source file: com.facebook.LinkBench.LinkBenchDriverMR.java

License: Apache License

/**
 * read output from the map reduce job
 * @param fs the DFS FileSystem
 * @param jobconf configuration of the map reduce job
 */
public static long readOutput(FileSystem fs, JobConf jobconf) throws IOException, InterruptedException {
    //read outputs
    final Path outdir = new Path(TMP_DIR, "out");
    Path infile = new Path(outdir, "reduce-out");
    IntWritable nworkers = new IntWritable();
    LongWritable result = new LongWritable();
    long output = 0;
    SequenceFile.Reader reader = new SequenceFile.Reader(fs, infile, jobconf);
    try {
        reader.next(nworkers, result);
        output = result.get();
    } finally {
        reader.close();
    }
    return output;
}

From source file: com.facebook.presto.hive.DwrfHiveRecordCursor.java

License: Apache License

private void parseLongColumn(int column) {
    // don't include column number in message because it causes boxing which is expensive here
    checkArgument(!isPartitionColumn[column], "Column is a partition key");

    loaded[column] = true;
    Object object = getMaterializedValue(column);
    if (object == null) {
        nulls[column] = true;
    } else {
        nulls[column] = false;

        HiveType type = hiveTypes[column];
        if (hiveTypes[column].equals(HIVE_SHORT)) {
            ShortWritable shortWritable = checkWritable(object, ShortWritable.class);
            longs[column] = shortWritable.get();
        } else if (hiveTypes[column].equals(HIVE_TIMESTAMP)) {
            TimestampWritable timestampWritable = (TimestampWritable) object;
            long seconds = timestampWritable.getSeconds();
            int nanos = timestampWritable.getNanos();
            longs[column] = (seconds * 1000) + (nanos / 1_000_000) + timeZoneCorrection;
        } else if (hiveTypes[column].equals(HIVE_BYTE)) {
            ByteWritable byteWritable = checkWritable(object, ByteWritable.class);
            longs[column] = byteWritable.get();
        } else if (hiveTypes[column].equals(HIVE_INT)) {
            IntWritable intWritable = checkWritable(object, IntWritable.class);
            longs[column] = intWritable.get();
        } else if (hiveTypes[column].equals(HIVE_LONG)) {
            LongWritable longWritable = checkWritable(object, LongWritable.class);
            longs[column] = longWritable.get();
        } else {
            throw new RuntimeException(String.format("%s is not a valid LONG type", type));
        }
    }
}

From source file: com.facebook.presto.hive.orc.OrcHiveRecordCursor.java

License: Apache License

private void parseLongColumn(int column) {
    // don't include column number in message because it causes boxing which is expensive here
    checkArgument(!isPartitionColumn[column], "Column is a partition key");

    loaded[column] = true;
    Object object = getFieldValue(row, hiveColumnIndexes[column]);
    if (object == null) {
        nulls[column] = true;
    } else {
        nulls[column] = false;

        HiveType type = hiveTypes[column];
        if (hiveTypes[column].equals(HIVE_SHORT)) {
            ShortWritable shortWritable = (ShortWritable) object;
            longs[column] = shortWritable.get();
        } else if (hiveTypes[column].equals(HIVE_DATE)) {
            longs[column] = ((DateWritable) object).getDays();
        } else if (hiveTypes[column].equals(HIVE_TIMESTAMP)) {
            TimestampWritable timestampWritable = (TimestampWritable) object;
            long seconds = timestampWritable.getSeconds();
            int nanos = timestampWritable.getNanos();
            longs[column] = (seconds * 1000) + (nanos / 1_000_000) + timeZoneCorrection;
        } else if (hiveTypes[column].equals(HIVE_BYTE)) {
            ByteWritable byteWritable = (ByteWritable) object;
            longs[column] = byteWritable.get();
        } else if (hiveTypes[column].equals(HIVE_INT)) {
            IntWritable intWritable = (IntWritable) object;
            longs[column] = intWritable.get();
        } else if (hiveTypes[column].equals(HIVE_LONG)) {
            LongWritable longWritable = (LongWritable) object;
            longs[column] = longWritable.get();
        } else {
            throw new RuntimeException(String.format("%s is not a valid LONG type", type));
        }
    }
}

From source file: com.facebook.presto.hive.OrcHiveRecordCursor.java

License: Apache License

private void parseLongColumn(int column) {
    // don't include column number in message because it causes boxing which is expensive here
    checkArgument(!isPartitionColumn[column], "Column is a partition key");

    loaded[column] = true;
    Object object = getFieldValue(row, hiveColumnIndexes[column]);
    if (object == null) {
        nulls[column] = true;
    } else {
        nulls[column] = false;

        HiveType type = hiveTypes[column];
        if (hiveTypes[column].equals(HIVE_SHORT)) {
            ShortWritable shortWritable = (ShortWritable) object;
            longs[column] = shortWritable.get();
        } else if (hiveTypes[column].equals(HIVE_DATE)) {
            longs[column] = ((DateWritable) object).getDays() * MILLIS_IN_DAY;
        } else if (hiveTypes[column].equals(HIVE_TIMESTAMP)) {
            TimestampWritable timestampWritable = (TimestampWritable) object;
            long seconds = timestampWritable.getSeconds();
            int nanos = timestampWritable.getNanos();
            longs[column] = (seconds * 1000) + (nanos / 1_000_000) + timeZoneCorrection;
        } else if (hiveTypes[column].equals(HIVE_BYTE)) {
            ByteWritable byteWritable = (ByteWritable) object;
            longs[column] = byteWritable.get();
        } else if (hiveTypes[column].equals(HIVE_INT)) {
            IntWritable intWritable = (IntWritable) object;
            longs[column] = intWritable.get();
        } else if (hiveTypes[column].equals(HIVE_LONG)) {
            LongWritable longWritable = (LongWritable) object;
            longs[column] = longWritable.get();
        } else {
            throw new RuntimeException(String.format("%s is not a valid LONG type", type));
        }
    }
}

From source file: com.foursquare.twofishes.io.MapFileConcurrentReader.java

License: Apache License

private void readIndex() throws IOException {
    // read the index entirely into memory
    if (this.keys != null)
        return;
    this.count = 0;

    this.positions = new long[1024];

    try {
        int skip = INDEX_SKIP;
        LongWritable position = new LongWritable();
        WritableComparable lastKey = null;
        long lastIndex = -1;
        ArrayList<WritableComparable> keyBuilder = new ArrayList<WritableComparable>(1024);
        while (true) {
            WritableComparable k = comparator.newKey();

            if (!index.next(k, position))
                break;

            // check order to make sure comparator is compatible
            if (lastKey != null && comparator.compare(lastKey, k) > 0)
                throw new IOException("key out of order: " + k + " after " + lastKey);
            lastKey = k;
            if (skip > 0) {
                skip--;
                continue; // skip this entry
            } else {
                skip = INDEX_SKIP; // reset skip
            }

            // don't read an index that is the same as the previous one. Block
            // compressed map files used to do this (multiple entries would point
            // at the same block)
            if (position.get() == lastIndex)
                continue;

            if (count == positions.length) {
                positions = Arrays.copyOf(positions, positions.length * 2);
            }

            keyBuilder.add(k);
            positions[count] = position.get();
            count++;
        }

        this.keys = keyBuilder.toArray(new WritableComparable[count]);
        positions = Arrays.copyOf(positions, count);
    } catch (EOFException e) {
        LOG.warn("Unexpected EOF reading " + index + " at entry #" + count + ".  Ignoring.");
    } finally {
        indexClosed = true;
        index.close();
    }
}

From source file: com.github.gaoyangthu.demo.mapred.PiEstimator.java

License: Apache License

/**
 * Run a map/reduce job for estimating Pi.
 *
 * @return the estimated value of Pi
 */
public static BigDecimal estimate(int numMaps, long numPoints, JobConf jobConf) throws IOException {
    //setup job conf
    jobConf.setJobName(PiEstimator.class.getSimpleName());

    jobConf.setInputFormat(SequenceFileInputFormat.class);

    jobConf.setOutputKeyClass(BooleanWritable.class);
    jobConf.setOutputValueClass(LongWritable.class);
    jobConf.setOutputFormat(SequenceFileOutputFormat.class);

    jobConf.setMapperClass(PiMapper.class);
    jobConf.setNumMapTasks(numMaps);

    jobConf.setReducerClass(PiReducer.class);
    jobConf.setNumReduceTasks(1);

    // turn off speculative execution, because DFS doesn't handle
    // multiple writers to the same file.
    jobConf.setSpeculativeExecution(false);

    //setup input/output directories
    final Path inDir = new Path(TMP_DIR, "in");
    final Path outDir = new Path(TMP_DIR, "out");
    FileInputFormat.setInputPaths(jobConf, inDir);
    FileOutputFormat.setOutputPath(jobConf, outDir);

    final FileSystem fs = FileSystem.get(jobConf);
    if (fs.exists(TMP_DIR)) {
        throw new IOException(
                "Tmp directory " + fs.makeQualified(TMP_DIR) + " already exists.  Please remove it first.");
    }
    if (!fs.mkdirs(inDir)) {
        throw new IOException("Cannot create input directory " + inDir);
    }

    try {
        //generate an input file for each map task
        for (int i = 0; i < numMaps; ++i) {
            final Path file = new Path(inDir, "part" + i);
            final LongWritable offset = new LongWritable(i * numPoints);
            final LongWritable size = new LongWritable(numPoints);
            final SequenceFile.Writer writer = SequenceFile.createWriter(fs, jobConf, file, LongWritable.class,
                    LongWritable.class, CompressionType.NONE);
            try {
                writer.append(offset, size);
            } finally {
                writer.close();
            }
            System.out.println("Wrote input for Map #" + i);
        }

        //start a map/reduce job
        System.out.println("Starting Job");
        final long startTime = System.currentTimeMillis();
        JobClient.runJob(jobConf);
        final double duration = (System.currentTimeMillis() - startTime) / 1000.0;
        System.out.println("Job Finished in " + duration + " seconds");

        //read outputs
        Path inFile = new Path(outDir, "reduce-out");
        LongWritable numInside = new LongWritable();
        LongWritable numOutside = new LongWritable();
        SequenceFile.Reader reader = new SequenceFile.Reader(fs, inFile, jobConf);
        try {
            reader.next(numInside, numOutside);
        } finally {
            reader.close();
        }

        //compute estimated value
        return BigDecimal.valueOf(4).setScale(20).multiply(BigDecimal.valueOf(numInside.get()))
                .divide(BigDecimal.valueOf(numMaps)).divide(BigDecimal.valueOf(numPoints));
    } finally {
        fs.delete(TMP_DIR, true);
    }
}
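
As a quick sanity check of the final expression, which evaluates 4 * numInside / (numMaps * numPoints): with purely illustrative numbers numMaps = 10, numPoints = 1,000,000 and numInside.get() = 7,853,900, the result is 4 * 7,853,900 / 10,000,000 = 3.14156, i.e. the fraction of sample points that fell inside the quarter circle, scaled by 4.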

From source file: com.gotometrics.orderly.BigDecimalRowKey.java

License: Apache License

@Override
public Object deserialize(ImmutableBytesWritable w) throws IOException {
    byte[] b = w.get();
    int offset = w.getOffset();

    if (w.getLength() <= 0)
        return null;

    byte h = deserializeHeader(b[offset]);
    LongWritable o = (LongWritable) expKey.deserialize(w);
    if (o == null)
        return h == HEADER_NULL ? null : BigDecimal.ZERO;

    long exp = o.get();
    String s = deserializeBCD(w);

    int precision = s.length(), scale = (int) (exp - precision + 1L);

    BigInteger i = new BigInteger(h == HEADER_POSITIVE ? s : '-' + s);
    return new BigDecimal(i, -scale);
}

From source file: com.gotometrics.orderly.FixedLongRowKey.java

License: Apache License

@Override
public Object deserialize(ImmutableBytesWritable w) throws IOException {
    LongWritable lw = (LongWritable) super.deserialize(w);
    if (lw == null)
        return lw;

    return Long.valueOf(lw.get());
}

From source file: com.gotometrics.orderly.FixedUnsignedLongWritableRowKey.java

License: Apache License

protected LongWritable invertSign(LongWritable lw) {
    lw.set(lw.get() ^ Long.MIN_VALUE);
    return lw;
}
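
A standalone illustration (not part of the orderly source) of why the value is XOR-ed with Long.MIN_VALUE: flipping the sign bit maps unsigned ordering onto signed ordering, so unsigned row keys compare correctly as signed longs:

public class InvertSignDemo {
    // Flip the sign bit, as invertSign() above does on the wrapped value.
    static long invertSign(long v) {
        return v ^ Long.MIN_VALUE;
    }

    public static void main(String[] args) {
        long small = 1L;                  // a small unsigned value
        long large = 0xFFFFFFFFFFFFFFFFL; // the largest unsigned value (-1 as a signed long)

        // As signed longs, "large" compares below "small"...
        System.out.println(Long.compare(large, small) < 0);                         // true
        // ...but after flipping the sign bit, signed order matches unsigned order.
        System.out.println(Long.compare(invertSign(large), invertSign(small)) > 0); // true
    }
}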

From source file: com.gsvic.csmr.io.InputData.java

License: Apache License

/**
 * Reads the Document-Frequency file.
 * @param conf the Hadoop configuration
 * @param dfFile the path of the Document-Frequency SequenceFile
 * @return the Document-Frequency data in a HashMap
 * @throws IOException 
 */
public static HashMap<IntWritable, LongWritable> readDf(Configuration conf, Path dfFile) throws IOException {

    FileSystem filesystem = FileSystem.get(conf);
    SequenceFile.Reader reader = new SequenceFile.Reader(filesystem, dfFile, conf);

    HashMap<IntWritable, LongWritable> dcf = new HashMap<>();
    IntWritable key = new IntWritable();
    LongWritable value = new LongWritable();

    try {
        // Copy each (key, document-frequency) pair into fresh Writables,
        // because the reader reuses the key/value instances on every call.
        while (reader.next(key, value)) {
            dcf.put(new IntWritable(key.get()), new LongWritable(value.get()));
        }
    } finally {
        reader.close();
    }

    return dcf;
}