Example usage for org.apache.hadoop.io IntWritable toString

List of usage examples for org.apache.hadoop.io IntWritable toString

Introduction

On this page you can find example usages of org.apache.hadoop.io IntWritable toString.

Prototype

@Override
public String toString()

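IntWritable.toString() returns the decimal string form of the wrapped int, equivalent to Integer.toString(get()). A minimal standalone sketch (not taken from the source files below):

import org.apache.hadoop.io.IntWritable;

public class IntWritableToStringExample {
    public static void main(String[] args) {
        IntWritable writable = new IntWritable(42);
        // Prints "42" - the decimal form of the wrapped int value.
        System.out.println(writable.toString());
    }
}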

Usage

From source file: org.plista.kornakapi.core.training.SemanticModel.java

License: Apache License

/**
 * Method to load the model from a sequence file.
 * @throws IOException
 */
public void read() throws IOException {
    Path indexPath = path.suffix("/indexItem.model");
    if (fs.exists(indexPath)) {
        indexItem = new HashMap<Integer, String>();
        Reader reader = new SequenceFile.Reader(fs, indexPath, lconf);
        IntWritable key = new IntWritable();
        Text val = new Text();
        while (reader.next(key, val)) {
            indexItem.put(key.get(), val.toString());
        }
        Closeables.close(reader, false);
    }

    Path itemIndexPath = path.suffix("/itemIndex.model");
    if (fs.exists(itemIndexPath)) {
        itemIndex = new HashMap<String, Integer>();
        Reader reader = new SequenceFile.Reader(fs, itemIndexPath, lconf);
        IntWritable val = new IntWritable();
        Text key = new Text();
        while (reader.next(key, val)) {
            itemIndex.put(key.toString(), val.get());
        }
        Closeables.close(reader, false);
    }

    Path featurePath = path.suffix("/itemFeature.model");
    if (fs.exists(featurePath)) {
        Reader reader = new SequenceFile.Reader(fs, featurePath, lconf);
        itemFeatures = new HashMap<String, Vector>();
        Text key = new Text();
        VectorWritable val = new VectorWritable();
        while (reader.next(key, val)) {
            itemFeatures.put(key.toString(), val.get());
        }
        Closeables.close(reader, false);
    }
    if (log.isInfoEnabled()) {
        log.info("LDA Model Read");
    }

}
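
For comparison, a hypothetical write-side counterpart that would produce the indexItem.model file read above (not part of the original source; it assumes the same fs, lconf, path and indexItem fields and the same older SequenceFile API used in read()):

public void writeIndexItem() throws IOException {
    Path indexPath = path.suffix("/indexItem.model");
    // IntWritable key / Text value matches what read() expects for this file.
    SequenceFile.Writer writer = SequenceFile.createWriter(fs, lconf, indexPath, IntWritable.class, Text.class);
    try {
        for (Map.Entry<Integer, String> entry : indexItem.entrySet()) {
            writer.append(new IntWritable(entry.getKey()), new Text(entry.getValue()));
        }
    } finally {
        writer.close();
    }
}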

From source file: org.rad.pnf.reduce.PrimeNumberReducer.java

License: Open Source License

@Override
public void reduce(Text key, Iterable<IntWritable> values, Context context)
        throws IOException, InterruptedException {
    List<BigInteger> primeNumbers = new ArrayList<BigInteger>();

    for (IntWritable value : values) {
        primeNumbers.add(new BigInteger(value.toString()));
    }

    context.write(key, primeNumbers);
}
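
In the loop above, new BigInteger(value.toString()) parses the decimal string produced by IntWritable.toString(). If only the numeric value matters, an equivalent variant (a sketch, not from the original source) can skip the string round-trip:

for (IntWritable value : values) {
    // BigInteger.valueOf(long) wraps the primitive directly, avoiding the
    // decimal-string parse done by new BigInteger(value.toString()).
    primeNumbers.add(BigInteger.valueOf(value.get()));
}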

From source file: sigis.kmeansmultiplek.AnotherKmeans.java

private void readAndPrintOutputValues(final Configuration configuration) throws IOException {
    final Path input = new Path(OUTPUT_PATH + "/" + Cluster.CLUSTERED_POINTS_DIR + "/part-m-00000");
    //final Path input = new Path(OUTPUT_PATH + "/" + Cluster.FINAL_ITERATION_SUFFIX + "/part-r-00000");
    System.out.println(Cluster.FINAL_ITERATION_SUFFIX);
    System.out.println(Cluster.CLUSTERED_POINTS_DIR);

    final SequenceFile.Reader reader = new SequenceFile.Reader(configuration, SequenceFile.Reader.file(input));

    final IntWritable key = new IntWritable();
    final WeightedPropertyVectorWritable value = new WeightedPropertyVectorWritable();

    while (reader.next(key, value)) {
        LOG.info("{} belongs to cluster {}", value.toString(), key.toString());
        System.out.println(value.toString().substring(18, 28));
        LOG.info("belongs to cluster {}", key.toString());

    }
    reader.close();
}

From source file: uk.bl.wa.hadoop.indexer.WARCIndexerReducer.java

License: Open Source License

@Override
public void reduce(IntWritable key, Iterator<WritableSolrRecord> values, OutputCollector<Text, Text> output,
        Reporter reporter) throws IOException {
    WctEnricher wct;
    WritableSolrRecord wsr;
    SolrRecord solr;

    // Get the slice number, but counting from 1 instead of 0:
    int slice = key.get() + 1;

    // Go through the documents for this shard:
    long noValues = 0;
    while (values.hasNext()) {
        wsr = values.next();
        solr = wsr.getSolrRecord();
        noValues++;

        // Add additional metadata for WCT Instances.
        if (solr.containsKey(WctFields.WCT_INSTANCE_ID)) {
            wct = new WctEnricher(key.toString());
            wct.addWctMetadata(solr);
        }
        if (!dummyRun) {
            docs.add(solr.getSolrDocument());
            // Have we exceeded the batchSize?
            checkSubmission(docs, batchSize, reporter);
        } else {
            log.info("DUMMY_RUN: Skipping addition of doc: " + solr.getField("id").getFirstValue());
        }

        // Occasionally update application-level status:
        if ((noValues % 1000) == 0) {
            reporter.setStatus(this.shardPrefix + slice + ": processed " + noValues + ", dropped "
                    + reporter.getCounter(MyCounters.NUM_DROPPED_RECORDS).getValue());
        }
        if (this.exportXml && solr.getSolrDocument().getFieldValue(SolrFields.SOLR_URL_TYPE) != null
                && solr.getSolrDocument().getFieldValue(SolrFields.SOLR_URL_TYPE)
                        .equals(SolrFields.SOLR_URL_TYPE_SLASHPAGE)) {
            output.collect(new Text(""),
                    new Text(MetadataBuilder.SolrDocumentToElement(solr.getSolrDocument())));
        }
    }

    try {
        /**
         * If we have at least one document unsubmitted, make sure we submit
         * it.
         */
        checkSubmission(docs, 1, reporter);

        // If we are indexing to HDFS, shut the shard down:
        if (useEmbeddedServer) {
            // Commit, and block until the changes have been flushed.
            solrServer.commit(true, false);
            // And shut it down.
            solrServer.shutdown();
        }

    } catch (Exception e) {
        log.error("ERROR on commit: " + e);
        e.printStackTrace();
    }

}