Example usage for org.apache.hadoop.io LongWritable get

Introduction

On this page you can find usage examples for org.apache.hadoop.io.LongWritable.get(), collected from open-source projects.

Prototype

public long get() 

Document

Return the value of this LongWritable.
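
Before the collected examples, here is a minimal, self-contained sketch of the set()/get() round trip (plain Java, no cluster required; not taken from any of the projects below):

import org.apache.hadoop.io.LongWritable;

public class LongWritableDemo {
    public static void main(String[] args) {
        LongWritable w = new LongWritable();
        w.set(42L);                 // store a primitive long in the Writable wrapper
        long value = w.get();       // read it back as a primitive
        System.out.println(value);  // prints 42
    }
}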

Usage

From source file:crunch.MaxTemperature.java

License:Apache License

private void checkRecord(int record, RecordReader<LongWritable, Text> recordReader, long expectedKey,
            String expectedValue) throws IOException {
        LongWritable key = new LongWritable();
        Text value = new Text();
        assertThat(recordReader.next(key, value), is(true));
        assertThat("Record " + record, value.toString(), is(expectedValue));
        assertThat("Record " + record, key.get(), is(expectedKey));
    }
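
A hypothetical invocation of this helper, assuming the reader comes from a TextInputFormat split, where keys are byte offsets into the file (so the first record's expected key is 0). The record number and line content are placeholders:

checkRecord(1, recordReader, 0L, "first line of the split");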

From source file:cs698.giraph.kmode.KMeansVertex.java

License:Apache License

@Override
public void compute(Vertex<LongWritable, NodeState, NullWritable> vertex, Iterable<LongWritable> messages)
        throws IOException {
    // In the first superstep, we compute the ranges of the dimensions 
    if (getSuperstep() == 0) {
        aggregate(Constants.MAX, vertex.getValue().getPoint());
        aggregate(Constants.MIN, vertex.getValue().getPoint());
        return;
    } else {

        // If there were no cluster reassignments in the previous superstep, we're done.
        // (Other stopping criteria, not implemented here, could include a fixed number of
        // iterations, cluster centres that are not moving, or a Residual Sum of Squares
        // (RSS) below a certain threshold.)
        if (getSuperstep() > 1) {
            LongWritable updates = getAggregatedValue(Constants.UPDATES);
            if (updates.get() == 0) {
                vertex.voteToHalt();
                return;
            }
        }

        // If we're not stopping, we need to compute the closest cluster to this node
        int k = (int) K.get(getConf());
        PointWritable[] means = new PointWritable[k];
        int closest = -1;
        int closestDistance = Integer.MAX_VALUE;
        for (int i = 0; i < k; i++) {
            means[i] = getAggregatedValue(Constants.POINT_PREFIX + i);
            int d = distance(vertex.getValue().getPoint().getData(), means[i].getData());
            if (d < closestDistance) {
                closestDistance = d;
                closest = i;
            }
        }

        // If the choice of cluster has changed, aggregate an update so that we recompute
        // on the next iteration.
        if (closest != vertex.getValue().getCluster()) {
            aggregate(Constants.UPDATES, one);
        }

        // Ensure that the closest cluster position is updated, irrespective of whether or
        // not the choice of cluster has changed.
        NodeState state = vertex.getValue();
        state.setCluster(closest);
        state.setClusterCentre(means[closest]);
        vertex.setValue(state);

        // Prepare the next iteration by aggregating this point into the closest cluster.
        aggregate(Constants.POINT_PREFIX + closest, vertex.getValue().getPoint());
    }

}
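
The compute() method above relies on a distance helper that is not shown. A minimal sketch, assuming getData() returns an int[] and that squared Euclidean distance (which preserves the ordering of distances without taking a square root) is the intended metric:

// Assumed helper, not part of the quoted source: squared Euclidean
// distance between two points of equal dimensionality.
private int distance(int[] a, int[] b) {
    int sum = 0;
    for (int i = 0; i < a.length; i++) {
        int d = a[i] - b[i];
        sum += d * d;
    }
    return sum;
}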

From source file:de.tudarmstadt.ukp.dkpro.c4corpus.hadoop.deduplication.DocumentInfo.java

License:Apache License

public void setDocSimHash(LongWritable docSimHash) {
    this.docSimHash = new LongWritable(docSimHash.get());
}
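
The defensive copy matters because Hadoop commonly reuses Writable instances, rewriting one object's contents on each iteration; storing the argument itself could alias an object that is mutated later. A sketch of the pitfall inside a hypothetical reduce loop:

// 'values' is a reduce-side Iterable<LongWritable>; Hadoop may back it
// with a SINGLE LongWritable that is overwritten on each step.
List<LongWritable> wrong = new ArrayList<>();
List<Long> right = new ArrayList<>();
for (LongWritable v : values) {
    wrong.add(v);        // every element may end up pointing at the same object
    right.add(v.get());  // copying the primitive, as setDocSimHash does, is safe
}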

From source file:de.tudarmstadt.ukp.dkpro.c4corpus.hadoop.statistics.helper.DistributionReducer.java

License:Apache License

@Override
protected void reduce(Text key, Iterable<LongWritable> values, Context context)
        throws IOException, InterruptedException {
    Map<Long, Long> counts = new TreeMap<>();

    for (LongWritable size : values) {
        // bin = 100, 200, 300, etc. kB
        long bin = ((size.get() / 100000) + 1) * 100;

        if (!counts.containsKey(bin)) {
            counts.put(bin, 1L);
        } else {
            counts.put(bin, counts.get(bin) + 1);
        }
    }

    for (Map.Entry<Long, Long> entry : counts.entrySet()) {
        context.write(key, new Text(entry.getKey() + "\t" + entry.getValue()));
    }
}
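
To make the binning concrete, here is the formula applied to a few sizes in a standalone sketch (not part of the source): integer division maps a byte count to a 100 kB bucket labelled by its upper bound.

// Standalone check of the bin formula used above.
public class BinDemo {
    public static void main(String[] args) {
        for (long bytes : new long[] { 50_000L, 250_000L, 999_999L }) {
            long bin = ((bytes / 100000) + 1) * 100;
            // 50000 -> 100 kB, 250000 -> 300 kB, 999999 -> 1000 kB
            System.out.println(bytes + " bytes -> " + bin + " kB bin");
        }
    }
}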

From source file:de.tudarmstadt.ukp.dkpro.c4corpus.hadoop.statistics.helper.TextLongCountingReducer.java

License:Apache License

@Override
protected void reduce(Text key, Iterable<LongWritable> values, Context context)
        throws IOException, InterruptedException {
    long sum = 0;
    for (LongWritable value : values) {
        sum += value.get();
    }

    context.write(key, new LongWritable(sum));
}
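
A hypothetical mapper that would pair with this reducer, emitting a count of one per token so the reducer sums to a total (the class name and tokenization are placeholders, not from the source):

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class TextLongCountingMapper extends Mapper<LongWritable, Text, Text, LongWritable> {
    private static final LongWritable ONE = new LongWritable(1);
    private final Text word = new Text();

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // Emit (token, 1) for every whitespace-separated token in the line.
        for (String token : value.toString().split("\\s+")) {
            word.set(token);
            context.write(word, ONE);
        }
    }
}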

From source file:de.unileipzig.dbs.giraph.algorithms.adaptiverepartitioning.ARPComputation.java

License:Open Source License

/**
 * Calculates the partition frequencies among neighbour vertices.
 * Returns an array where element i represents the number of neighbours in
 * partition i.
 *
 * @param messages messages sent to the vertex
 * @return partition frequency
 */
private long[] getPartitionFrequencies(final Iterable<LongWritable> messages) {
    long[] result = new long[k];
    for (LongWritable message : messages) {
        result[(int) message.get()]++;
    }
    return result;
}
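
A plausible next step for the caller (an assumption, since the surrounding code is not shown) is an argmax over the returned array to pick the partition most of the vertex's neighbours are in:

// Sketch: choose the most frequent neighbouring partition.
long[] frequencies = getPartitionFrequencies(messages);
int desired = 0;
for (int i = 1; i < frequencies.length; i++) {
    if (frequencies[i] > frequencies[desired]) {
        desired = i;
    }
}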

From source file:de.unileipzig.dbs.giraph.algorithms.adaptiverepartitioning.ARPComputation.java

License:Open Source License

/**
 * Returns the demand for the given partition.
 *
 * @param partition partition id
 * @return demand for partition
 */
private long getPartitionDemand(long partition) {
    LongWritable demandWritable = getAggregatedValue(DEMAND_AGGREGATOR_PREFIX + partition);
    return demandWritable.get();
}

From source file:de.unileipzig.dbs.giraph.algorithms.adaptiverepartitioning.ARPComputation.java

License:Open Source License

/**
 * Returns the current load of the given partition.
 *
 * @param partition partition id
 * @return load of partition
 */
private long getPartitionLoad(long partition) {
    LongWritable loadWritable = getAggregatedValue(CAPACITY_AGGREGATOR_PREFIX + partition);
    return loadWritable.get();
}
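
Taken together, the two getters support capacity-aware migration decisions. A hedged sketch of one such rule, common in adaptive repartitioning schemes; the per-partition 'capacity' limit and the probabilistic admission are assumptions, not code from this file:

long demand = getPartitionDemand(partition);
long load = getPartitionLoad(partition);
if (demand > 0) {
    // Admit only the fraction of demanders that fits in the remaining capacity.
    double admitProbability = Math.min(1.0, (double) (capacity - load) / demand);
    // A vertex would then migrate only if a uniform random draw falls
    // below admitProbability.
}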

From source file:de.unileipzig.dbs.giraph.algorithms.adaptiverepartitioning.ARPVertexValue.java

License:Open Source License

/**
 * Method to set the current partition
 *
 * @param currentPartition current partition
 */
public void setCurrentPartition(LongWritable currentPartition) {
    this.currentPartition = currentPartition.get();
}

From source file:de.unileipzig.dbs.giraph.algorithms.adaptiverepartitioning.ARPVertexValue.java

License:Open Source License

/**
 * Method to set the desired partition of the vertex
 *
 * @param desiredPartition the desired partition
 */
public void setDesiredPartition(LongWritable desiredPartition) {
    this.desiredPartition = desiredPartition.get();
}
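
Both setters unbox the Writable into a primitive field, which decouples the stored value from a possibly reused argument object. A hypothetical round trip, assuming a no-arg constructor and getters that mirror these setters:

ARPVertexValue value = new ARPVertexValue();
value.setCurrentPartition(new LongWritable(2));
value.setDesiredPartition(new LongWritable(5));
long current = value.getCurrentPartition().get();  // 2
long desired = value.getDesiredPartition().get();  // 5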