Example usage for org.apache.hadoop.io LongWritable get

Introduction

On this page you can find example usages of org.apache.hadoop.io.LongWritable.get(), drawn from the source files listed below.

Prototype

public long get() 

Document

Return the value of this LongWritable.
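
Before the project examples, here is a minimal, self-contained sketch of get() together with its counterpart set(); the class name and values are illustrative only.

import org.apache.hadoop.io.LongWritable;

public class LongWritableGetDemo {
    public static void main(String[] args) {
        // set() stores a primitive long; get() reads it back.
        LongWritable w = new LongWritable();
        w.set(42L);
        long v = w.get();
        System.out.println(v); // prints 42

        // The one-argument constructor sets the value directly.
        System.out.println(new LongWritable(7L).get()); // prints 7
    }
}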

Usage

From source file:smile.wide.algorithms.SMILEBSMapper.java

License:Apache License

/**Mapper: extracts random seed from input, initializes SMILE BayesianSearch algorithm with
 * seed, runs BayesianSearch, stores network and Bayesian score to be sent to reducer.
 */
@Override
protected void map(LongWritable key, Void value, Context context) throws IOException, InterruptedException {
    System.err.println("Running map code");
    //get the seed: the input key carries a 64-bit value; keep the low 32 bits
    long k = key.get();
    randSeed = (int) (k & 0xffffffffL);
    double score = Double.NEGATIVE_INFINITY;

    BayesianSearch bs = new BayesianSearch();
    //Initialize the BS parameters
    bs.setRandSeed(randSeed);
    bs.setIterationCount(iterationCount);
    bs.setPriorSampleSize(priorSampleSize);
    bs.setLinkProbability(linkProbability);
    bs.setPriorLinkProbability(priorLinkProbability);
    bs.setMaxSearchTime(maxSearchTime);
    bs.setMaxParents(maxParents);

    //Do Bayesian search
    Network bnet = bs.learn(ds);
    //Save loglikelihood and the network
    score = bs.getLastScore();
    result.setLogLike(score);
    result.setNW(bnet.writeString()); // double-check the string command

    context.write(mkey, result);
    //cleanup
    bnet.dispose();
    bs.dispose();
}
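
A note on the mapper above: the (int) cast on the key is paired with a 0xffffffffL mask. The cast alone already keeps only the low 32 bits, so the mask mainly documents that truncating the 64-bit key to an int seed is intentional. A minimal sketch, independent of the SMILE classes:

public class SeedMaskDemo {
    public static void main(String[] args) {
        // (int) (k & 0xffffffffL) and (int) k produce the same result;
        // the explicit mask signals that the truncation is deliberate.
        long k = 0x123456789ABCDEF0L;
        int seed = (int) (k & 0xffffffffL);
        System.out.printf("%08x%n", seed); // prints 9abcdef0
    }
}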

From source file:smile.wide.obsolete.InferenceMapper.java

License:Apache License

@Override
public void map(LongWritable offset, Text value, Context context) throws IOException, InterruptedException {

    s_logger.debug(String.format("map(offset=%d    value=%s)", offset.get(), value.toString()));

    String line = value.toString();

    // =====================================================================
    // the part where we read in a line of text
    // parse the line string, check length
    String[] tokens = line.split("\t");

    if (tokens[0].equals("ID")) {
        // this is the header line. Should be compared with
        // the metadata in the real version.
        return;
    }

    // figure out the instance id
    int id = Integer.parseInt(tokens[0]);

    Integer[] attributes = new Integer[tokens.length - attributesStart];
    for (int i = attributesStart; i < tokens.length; ++i) {
        // i indexes tokens
        // attrIdx indexes attributes (nodes in the network)
        // j indexes outcome values

        int attrIdx = i - attributesStart;

        // pull out the outcome IDs if we don't have them
        if (outcomeIds[attrIdx] == null) {
            outcomeIds[attrIdx] = theNet.getOutcomeIds(nodeHandles[i]);
        }

        // a token of "*" denotes a missing value; check it before the outcome scan
        if (tokens[i].equals("*")) {
            attributes[attrIdx] = null;
        } else {
            // search linearly for the outcome named like the token
            attributes[attrIdx] = Integer.MIN_VALUE; // initialize as invalid
            for (int j = 0; j < outcomeIds[attrIdx].length; ++j) {
                if (tokens[i].equals(outcomeIds[attrIdx][j])) {
                    attributes[attrIdx] = j;
                    break;
                }
            }
        }
        if (attributes[attrIdx] != null && attributes[attrIdx].equals(Integer.MIN_VALUE)) {
            System.err.printf("Aaaargh! Unknown attribute value '%s'\n", tokens[i]);
            System.err.printf("Valid attribute values are:\n");
            for (int j = 0; j < outcomeIds[attrIdx].length; ++j) {
                System.err.printf("\t %s\n", outcomeIds[attrIdx][j]);
            }
        }
    }

    s_logger.debug("map() - line parsed");

    // =========================================================================
    // the part where we run the inference

    for (int k = attributesStart; k < tokens.length; ++k) {
        int attrIdx = k - attributesStart;
        if (attributes[attrIdx] != null) {
            theNet.setEvidence(theNet.getNode(columnNames[k]), attributes[attrIdx]);
        }
    }

    s_logger.debug("map() - evidence set");

    theNet.updateBeliefs();

    s_logger.debug("map() - inference done");

    double[] posterior = theNet.getNodeValue(theNet.getNode("Class"));

    s_logger.debug("map() - posteriors pulled");

    // =========================================================================      
    // write to the context
    DoubleWritable[] fw = new DoubleWritable[posterior.length];
    for (int z = 0; z < posterior.length; ++z) {
        fw[z] = new DoubleWritable(posterior[z]);
    }
    DoubleArrayWritable post = new DoubleArrayWritable(fw);
    context.write(new IntWritable(id), post);

    s_logger.debug("map() - all done");
}
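
The DoubleArrayWritable used above is not a stock Hadoop type; the project presumably defines it elsewhere. A common idiom, shown here as an assumption rather than the project's actual code, is a thin ArrayWritable subclass that fixes the element class:

import org.apache.hadoop.io.ArrayWritable;
import org.apache.hadoop.io.DoubleWritable;

// Hypothetical definition; the real class in smile.wide may differ.
public class DoubleArrayWritable extends ArrayWritable {
    public DoubleArrayWritable() {
        super(DoubleWritable.class); // no-arg constructor needed for deserialization
    }

    public DoubleArrayWritable(DoubleWritable[] values) {
        super(DoubleWritable.class, values);
    }
}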

From source file:statics.UDAFPercentile.java

License:Apache License

/**
 * Increment the State object with o as the key, and i as the count.
 */
private static void increment(State s, LongWritable o, long i) {
    counter++;
    if (s.counts == null) {
        s.counts = new HashMap<LongWritable, LongWritable>();
    }
    LongWritable count = s.counts.get(o);
    if (count == null) {
        // We have to create a new object, because the object o belongs
        // to the code that creates it and may get its value changed.
        LongWritable key = new LongWritable();
        key.set(o.get());
        s.counts.put(key, new LongWritable(i));
    } else {
        count.set(count.get() + i);
    }
}
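
The defensive copy in increment() matters because Hadoop reuses Writable instances: the caller may mutate o after the call, and a mutated HashMap key is effectively lost. A small plain-Java demonstration of the pitfall (no MapReduce job required):

import java.util.HashMap;
import org.apache.hadoop.io.LongWritable;

public class MutableKeyPitfall {
    public static void main(String[] args) {
        HashMap<LongWritable, LongWritable> counts = new HashMap<LongWritable, LongWritable>();
        LongWritable reused = new LongWritable(1L);
        counts.put(reused, new LongWritable(10L)); // key stored by reference

        reused.set(2L); // the framework would do this on the next record

        // The entry was hashed under value 1 but the stored key now equals 2,
        // so neither lookup finds it: both print null.
        System.out.println(counts.get(new LongWritable(1L)));
        System.out.println(counts.get(new LongWritable(2L)));
    }
}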

From source file:tachyon.hadoop.fs.IOMapperBase.java

License:Apache License

/**
 * Map file name and offset into statistical data.
 * <p>
 * The map task is to get the <tt>key</tt>, which contains the file name, and the <tt>value</tt>,
 * which is the offset within the file.
 *
 * The parameters are passed to the abstract method
 * {@link #doIO(Reporter, String, long)}, which performs the I/O operation,
 * usually read or write data, and then
 * {@link #collectStats(OutputCollector, String, long, Object)} is called
 * to prepare stat data for a subsequent reducer.
 */
public void map(Text key, LongWritable value, OutputCollector<Text, Text> output, Reporter reporter)
        throws IOException {
    String name = key.toString();
    long longValue = value.get();

    reporter.setStatus("starting " + name + " ::host = " + mHostname);

    this.mStream = getIOStream(name);
    T statValue = null;
    long tStart = System.currentTimeMillis();
    try {
        statValue = doIO(reporter, name, longValue);
    } finally {
        if (mStream != null) {
            mStream.close();
        }
    }
    long tEnd = System.currentTimeMillis();
    long execTime = tEnd - tStart;
    collectStats(output, name, execTime, statValue);

    reporter.setStatus("finished " + name + " ::host = " + mHostname);
}
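
To make the doIO/collectStats contract concrete, here is a minimal hypothetical subclass. The class name and emitted stat keys are invented for illustration, the generic parameter is assumed to follow the signatures quoted in the javadoc above, and getIOStream is assumed to keep a default implementation:

import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

// Hypothetical subclass for illustration; the project's real benchmark
// mappers do actual reads and writes.
public class NoopIOMapper extends IOMapperBase<Long> {
    @Override
    Long doIO(Reporter reporter, String name, long value) throws IOException {
        return value; // pretend we transferred 'value' bytes
    }

    @Override
    void collectStats(OutputCollector<Text, Text> output, String name,
            long execTime, Long bytes) throws IOException {
        output.collect(new Text("exec_time_" + name), new Text(String.valueOf(execTime)));
        output.collect(new Text("bytes_" + name), new Text(String.valueOf(bytes)));
    }
}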

From source file:top10flight.SortKeyComparator.java

/**
 * Need to implement our sorting mechanism.
 */
@Override
public int compare(WritableComparable a, WritableComparable b) {
    LongWritable key1 = (LongWritable) a;
    LongWritable key2 = (LongWritable) b;

    // Implement sorting in descending order by inverting the natural comparison.
    return Long.compare(key2.get(), key1.get());
}
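
A comparator like this only takes effect once it is registered on the job. A sketch of the wiring, assuming SortKeyComparator extends WritableComparator (as its compare(WritableComparable, WritableComparable) override suggests) and that the rest of the driver lives elsewhere in top10flight:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class Top10FlightDriver {
    public static Job configureJob(Configuration conf) throws IOException {
        Job job = Job.getInstance(conf, "top10flight");
        // Replace the default ascending key sort with the descending comparator.
        job.setSortComparatorClass(SortKeyComparator.class);
        // ... mapper, reducer, and I/O paths would be configured here ...
        return job;
    }
}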