Example usage for org.apache.hadoop.io FloatWritable FloatWritable

List of usage examples for org.apache.hadoop.io FloatWritable FloatWritable

Introduction

On this page you can find example usage for the org.apache.hadoop.io.FloatWritable constructor FloatWritable(float).

Prototype

public FloatWritable(float value) 
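
The one-argument constructor wraps a primitive float so the value can travel through Hadoop's Writable serialization. Below is a minimal round-trip sketch; the class name FloatWritableRoundTrip and the use of Hadoop's in-memory DataOutputBuffer/DataInputBuffer are illustrative and not taken from the sources listed further down.

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.FloatWritable;

public class FloatWritableRoundTrip {
    public static void main(String[] args) throws Exception {
        FloatWritable original = new FloatWritable(3.14f);

        // Serialize the wrapped float into an in-memory buffer
        DataOutputBuffer out = new DataOutputBuffer();
        original.write(out);

        // Deserialize into a fresh instance and read the value back
        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());
        FloatWritable copy = new FloatWritable();
        copy.readFields(in);

        System.out.println(copy.get()); // prints 3.14
    }
}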

Usage

From source file: org.apache.nutch.hostdb.UpdateHostDbMapper.java

License: Apache License

/**
  * Mapper ingesting records from the HostDB, CrawlDB and plaintext host
  * scores file. Statistics and scores are passed on.
  *
  * @param key
  * @param value
  * @param context
  */
@Override
public void map(Text key, Writable value, Context context) throws IOException, InterruptedException {

    // Get the key!
    String keyStr = key.toString();

    // Check if we process records from the CrawlDB
    if (key instanceof Text && value instanceof CrawlDatum) {
        // Get the normalized and filtered host of this URL
        buffer = filterNormalize(URLUtil.getHost(keyStr));

        // Filtered out?
        if (buffer == null) {
            context.getCounter("UpdateHostDb", "filtered_records").increment(1);
            LOG.info("UpdateHostDb: " + URLUtil.getHost(keyStr) + " crawldatum has been filtered");
            return;
        }

        // Set the host of this URL
        host.set(buffer);
        crawlDatum = (CrawlDatum) value;
        hostDatum = new HostDatum();

        /**
          * TODO: fix multi redirects: host_a => host_b/page => host_c/page/whatever
          * http://www.ferienwohnung-armbruster.de/
          * http://www.ferienwohnung-armbruster.de/website/
          * http://www.ferienwohnung-armbruster.de/website/willkommen.php
          *
          * We cannot re-resolve redirects for host objects as CrawlDatum metadata is
          * not available. We also cannot reliably use the reducer in all cases
          * since redirects may be across hosts or even domains. The example
          * above has redirects that will end up in the same reducer. During that
          * phase, however, we do not know which URL redirects to the next URL.
          */
        // Do not resolve homepages when the root URL is unfetched
        if (crawlDatum.getStatus() != CrawlDatum.STATUS_DB_UNFETCHED) {
            // Get the protocol
            String protocol = URLUtil.getProtocol(keyStr);

            // Get the proposed homepage URL
            String homepage = protocol + "://" + buffer + "/";

            // Check if the current key equals the homepage URL
            if (keyStr.equals(homepage)) {
                // Check if this is a redirect to the real home page
                if (crawlDatum.getStatus() == CrawlDatum.STATUS_DB_REDIR_PERM
                        || crawlDatum.getStatus() == CrawlDatum.STATUS_DB_REDIR_TEMP) {

                    // Obtain the repr url for this redirect via protocolstatus from the metadata
                    ProtocolStatus z = (ProtocolStatus) crawlDatum.getMetaData()
                            .get(Nutch.WRITABLE_PROTO_STATUS_KEY);

                    // Get the protocol status' arguments
                    args = z.getArgs();

                    // ..and the possible redirect URL
                    reprUrl = args[0];

                    // Did we get a redirect URL?
                    if (reprUrl != null) {
                        LOG.info("UpdateHostDb: homepage: " + keyStr + " redirects to: " + args[0]);
                        context.write(host, new NutchWritable(hostDatum));
                        hostDatum.setHomepageUrl(reprUrl);
                    } else {
                        LOG.info("UpdateHostDb: homepage: " + keyStr + " redirects to: " + args[0]
                                + " but has been filtered out");
                    }
                } else {
                    hostDatum.setHomepageUrl(homepage);
                    context.write(host, new NutchWritable(hostDatum));
                    LOG.info("UpdateHostDb: homepage: " + homepage);
                }
            }
        }

        // Always emit crawl datum
        context.write(host, new NutchWritable(crawlDatum));
    }

    // Check if we got a record from the hostdb
    if (key instanceof Text && value instanceof HostDatum) {
        buffer = filterNormalize(keyStr);

        // Filtered out?
        if (buffer == null) {
            context.getCounter("UpdateHostDb", "filtered_records").increment(1);
            LOG.info("UpdateHostDb: " + key.toString() + " hostdatum has been filtered");
            return;
        }

        // Get a HostDatum
        hostDatum = (HostDatum) value;
        key.set(buffer);

        // If we're also reading CrawlDb entries, reset db_* statistics because
        // we're aggregating them from CrawlDB anyway
        if (readingCrawlDb) {
            hostDatum.resetStatistics();
        }

        context.write(key, new NutchWritable(hostDatum));
    }

    // Check if we got a record with host scores
    if (key instanceof Text && value instanceof Text) {
        buffer = filterNormalize(keyStr);

        // Filtered out?
        if (buffer == null) {
            context.getCounter("UpdateHostDb", "filtered_records").increment(1);
            LOG.info("UpdateHostDb: " + key.toString() + " score has been filtered");
            return;
        }

        key.set(buffer);

        context.write(key, new NutchWritable(new FloatWritable(Float.parseFloat(value.toString()))));
    }
}
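
Note that every value this mapper emits, including the parsed float score, is wrapped in a NutchWritable. That gives the job a single concrete value class while still letting the reducer below distinguish CrawlDatum, HostDatum and FloatWritable payloads once unwrapped.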

From source file: org.apache.nutch.hostdb.UpdateHostDbReducer.java

License: Apache License

@Override
public void reduce(Text key, Iterable<NutchWritable> values, Context context)
        throws IOException, InterruptedException {

    Map<String, Map<String, Long>> stringCounts = new HashMap<>();
    Map<String, Float> maximums = new HashMap<>();
    Map<String, Float> sums = new HashMap<>(); // used to calc averages
    Map<String, Long> counts = new HashMap<>(); // used to calc averages
    Map<String, Float> minimums = new HashMap<>();
    Map<String, TDigest> tdigests = new HashMap<>();

    HostDatum hostDatum = new HostDatum();
    float score = 0;

    if (stringFields != null) {
        for (int i = 0; i < stringFields.length; i++) {
            stringCounts.put(stringFields[i], new HashMap<>());
        }
    }

    // Loop through all values until we find a non-empty HostDatum, or use
    // an empty one if this is a new host for the HostDB
    for (NutchWritable val : values) {
        final Writable value = val.get(); // unwrap

        // Count crawl datum statuses and collect metadata from fields
        if (value instanceof CrawlDatum) {
            CrawlDatum buffer = (CrawlDatum) value;

            // Set the correct status field
            switch (buffer.getStatus()) {
            case CrawlDatum.STATUS_DB_UNFETCHED:
                hostDatum.setUnfetched(hostDatum.getUnfetched() + 1L);
                break;

            case CrawlDatum.STATUS_DB_FETCHED:
                hostDatum.setFetched(hostDatum.getFetched() + 1L);
                break;

            case CrawlDatum.STATUS_DB_GONE:
                hostDatum.setGone(hostDatum.getGone() + 1L);
                break;

            case CrawlDatum.STATUS_DB_REDIR_TEMP:
                hostDatum.setRedirTemp(hostDatum.getRedirTemp() + 1L);
                break;

            case CrawlDatum.STATUS_DB_REDIR_PERM:
                hostDatum.setRedirPerm(hostDatum.getRedirPerm() + 1L);
                break;

            case CrawlDatum.STATUS_DB_NOTMODIFIED:
                hostDatum.setNotModified(hostDatum.getNotModified() + 1L);
                break;
            }

            // Record connection failures
            if (buffer.getRetriesSinceFetch() != 0) {
                hostDatum.incConnectionFailures();
            }

            // Only gather metadata statistics for proper fetched pages
            if (buffer.getStatus() == CrawlDatum.STATUS_DB_FETCHED
                    || buffer.getStatus() == CrawlDatum.STATUS_DB_NOTMODIFIED) {
                // Deal with the string fields
                if (stringFields != null) {
                    for (int i = 0; i < stringFields.length; i++) {
                        // Does this field exist?
                        if (buffer.getMetaData().get(stringFieldWritables[i]) != null) {
                            // Get it!
                            String metadataValue = null;
                            try {
                                metadataValue = buffer.getMetaData().get(stringFieldWritables[i]).toString();
                            } catch (Exception e) {
                                LOG.error("Metadata field " + stringFields[i]
                                        + " is probably not a numeric value");
                            }

                            // Does the value exist?
                            if (stringCounts.get(stringFields[i]).containsKey(metadataValue)) {
                                // Yes, increment it
                                stringCounts.get(stringFields[i]).put(metadataValue,
                                        stringCounts.get(stringFields[i]).get(metadataValue) + 1L);
                            } else {
                                // Create it!
                                stringCounts.get(stringFields[i]).put(metadataValue, 1L);
                            }
                        }
                    }
                }

                // Deal with the numeric fields
                if (numericFields != null) {
                    for (int i = 0; i < numericFields.length; i++) {
                        // Does this field exist?
                        if (buffer.getMetaData().get(numericFieldWritables[i]) != null) {
                            try {
                                // Get it!
                                Float metadataValue = Float.parseFloat(
                                        buffer.getMetaData().get(numericFieldWritables[i]).toString());

                                // Does a t-digest for this field already exist?
                                if (tdigests.containsKey(numericFields[i])) {
                                    tdigests.get(numericFields[i]).add(metadataValue);
                                } else {
                                    // Create it!
                                    TDigest tdigest = TDigest.createDigest(100);
                                    tdigest.add((double) metadataValue);
                                    tdigests.put(numericFields[i], tdigest);
                                }

                                // Does the minimum value exist?
                                if (minimums.containsKey(numericFields[i])) {
                                    // Write if this is lower than existing value
                                    if (metadataValue < minimums.get(numericFields[i])) {
                                        minimums.put(numericFields[i], metadataValue);
                                    }
                                } else {
                                    // Create it!
                                    minimums.put(numericFields[i], metadataValue);
                                }

                                // Does the maximum value exist?
                                if (maximums.containsKey(numericFields[i])) {
                                    // Write if this is higher than existing value
                                    if (metadataValue > maximums.get(numericFields[i])) {
                                        maximums.put(numericFields[i], metadataValue);
                                    }
                                } else {
                                    // Create it!
                                    maximums.put(numericFields[i], metadataValue);
                                }

                                // Sum it up!
                                if (sums.containsKey(numericFields[i])) {
                                    // Increment
                                    sums.put(numericFields[i], sums.get(numericFields[i]) + metadataValue);
                                    counts.put(numericFields[i], counts.get(numericFields[i]) + 1L);
                                } else {
                                    // Create it!
                                    sums.put(numericFields[i], metadataValue);
                                    counts.put(numericFields[i], 1L);
                                }
                            } catch (Exception e) {
                                LOG.error(e.getMessage() + " when processing values for " + key.toString());
                            }
                        }
                    }
                }
            }
        }

        // Check if we got an existing host datum from the HostDB
        else if (value instanceof HostDatum) {
            HostDatum buffer = (HostDatum) value;

            // Check homepage URL
            if (buffer.hasHomepageUrl()) {
                hostDatum.setHomepageUrl(buffer.getHomepageUrl());
            }

            // Check lastCheck timestamp
            if (!buffer.isEmpty()) {
                hostDatum.setLastCheck(buffer.getLastCheck());
            }

            // Check and set DNS failures
            if (buffer.getDnsFailures() > 0) {
                hostDatum.setDnsFailures(buffer.getDnsFailures());
            }

            // Check and set connection failures
            if (buffer.getConnectionFailures() > 0) {
                hostDatum.setConnectionFailures(buffer.getConnectionFailures());
            }

            // Check metadata
            if (!buffer.getMetaData().isEmpty()) {
                hostDatum.setMetaData(buffer.getMetaData());
            }

            // Check and set score (score from Web Graph has precedence)
            if (buffer.getScore() > 0) {
                hostDatum.setScore(buffer.getScore());
            }
        }

        // Check for the score
        else if (value instanceof FloatWritable) {
            FloatWritable buffer = (FloatWritable) value;
            score = buffer.get();
        } else {
            LOG.error("Class {} not handled", value.getClass());
        }
    }

    // Check if score was set from Web Graph
    if (score > 0) {
        hostDatum.setScore(score);
    }

    // Set metadata
    for (Map.Entry<String, Map<String, Long>> entry : stringCounts.entrySet()) {
        for (Map.Entry<String, Long> subEntry : entry.getValue().entrySet()) {
            hostDatum.getMetaData().put(new Text(entry.getKey() + "." + subEntry.getKey()),
                    new LongWritable(subEntry.getValue()));
        }
    }
    for (Map.Entry<String, Float> entry : maximums.entrySet()) {
        hostDatum.getMetaData().put(new Text("max." + entry.getKey()), new FloatWritable(entry.getValue()));
    }
    for (Map.Entry<String, Float> entry : sums.entrySet()) {
        hostDatum.getMetaData().put(new Text("avg." + entry.getKey()),
                new FloatWritable(entry.getValue() / counts.get(entry.getKey())));
    }
    for (Map.Entry<String, TDigest> entry : tdigests.entrySet()) {
        // Emit all percentiles
        for (int i = 0; i < percentiles.length; i++) {
            hostDatum.getMetaData().put(new Text("pct" + Long.toString(percentiles[i]) + "." + entry.getKey()),
                    new FloatWritable((float) entry.getValue().quantile(0.5)));
        }
    }
    for (Map.Entry<String, Float> entry : minimums.entrySet()) {
        hostDatum.getMetaData().put(new Text("min." + entry.getKey()), new FloatWritable(entry.getValue()));
    }

    context.getCounter("UpdateHostDb", "total_hosts").increment(1);

    // See if this record is to be checked
    if (shouldCheck(hostDatum)) {
        // Make an entry
        resolverThread = new ResolverThread(key.toString(), hostDatum, context, purgeFailedHostsThreshold);

        // Add the entry to the queue (blocking)
        try {
            queue.put(resolverThread);
        } catch (InterruptedException e) {
            LOG.error("UpdateHostDb: " + StringUtils.stringifyException(e));
        }

        // Do not proceed; the datum will be written by the resolver thread
        return;
    } else {
        context.getCounter("UpdateHostDb", "skipped_not_eligible").increment(1);
        LOG.info("UpdateHostDb: " + key.toString() + ": skipped_not_eligible");
    }

    // Write the host datum if it wasn't written by the resolver thread
    context.write(key, hostDatum);
}
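
In this reducer a FloatWritable value carries the score coming in from the Web Graph; as the comments note, that score takes precedence, so when it is positive it overwrites any score copied from an existing HostDatum.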

From source file: org.apache.nutch.searcher.IndexSearcher.java

License: Apache License

private Hits translateHits(TopDocs topDocs, String dedupField, String sortField) throws IOException {

    String[] dedupValues = null;
    if (dedupField != null) {
        dedupValues = FieldCache.DEFAULT.getStrings(reader, dedupField);
    }

    ScoreDoc[] scoreDocs = topDocs.scoreDocs;
    int length = scoreDocs.length;
    Hit[] hits = new Hit[length];
    for (int i = 0; i < length; i++) {
        WritableComparable sortValue = new FloatWritable(scoreDocs[i].score);
        String dedupValue = (dedupValues == null) ? null : dedupValues[scoreDocs[i].doc];
        hits[i] = new Hit(scoreDocs[i].doc, sortValue, dedupValue);
    }
    return new Hits(topDocs.totalHits, hits);
}

From source file: org.apache.nutch.searcher.SolrBean.java

License: Apache License

public Hits search(Query query) throws IOException {
    // filter query string
    //    final BooleanQuery bQuery = filters.filter(query);

    //    final SolrQuery solrQuery = new SolrQuery(stringify(bQuery));
    final SolrQuery solrQuery = new SolrQuery();

    solrQuery.set("q", query.getQuery());
    solrQuery.setRows(query.getParams().getNumHits());

    if (query.getParams().getSortField() == null) {
        //      solrQuery.setFields(query.getParams().getDedupField(), "score", searchUID);
        query.getParams().setSortField("score");
    } else {
        //      solrQuery.setFields(query.getParams().getDedupField(), query
        //          .getParams().getSortField(), searchUID);
        //      solrQuery.setSortField(query.getParams().getSortField(), query
        //          .getParams().isReverse() ? ORDER.asc : ORDER.desc);

        solrQuery.setSort(query.getParams().getSortField(),
                query.getParams().isReverse() ? ORDER.asc : ORDER.desc);
    }

    solrQuery.set("fl", "id,url,title,tstamp,type,content,segment,score");
    solrQuery.setHighlight(true);
    solrQuery.set("hl.fl", "title,content");
    solrQuery.set("hl.simple.pre", "<span class=highlight>");
    solrQuery.set("hl.simple.post", "</span>");
    solrQuery.set("defType", "edismax");
    solrQuery.set("qf", "title^4 content");

    QueryResponse response;
    try {
        response = solr.query(solrQuery);
    } catch (final SolrServerException e) {
        throw makeIOException(e);
    }

    final SolrDocumentList docList = response.getResults();

    Map<String, Map<String, List<String>>> highlights = response.getHighlighting();

    int qtime = response.getQTime();

    final Hit[] hitArr = new Hit[docList.size()];
    for (int i = 0; i < hitArr.length; i++) {
        final SolrDocument solrDoc = docList.get(i);

        String url = (String) solrDoc.getFieldValue("url");
        String title = (String) solrDoc.getFieldValue("title");
        String content = (String) solrDoc.getFieldValue("content");

        final Object raw = solrDoc.getFirstValue(query.getParams().getSortField());
        WritableComparable sortValue;

        if (raw instanceof Integer) {
            sortValue = new IntWritable(((Integer) raw).intValue());
        } else if (raw instanceof Float) {
            sortValue = new FloatWritable(((Float) raw).floatValue());
        } else if (raw instanceof String) {
            sortValue = new Text((String) raw);
        } else if (raw instanceof Long) {
            sortValue = new LongWritable(((Long) raw).longValue());
        } else {
            throw new RuntimeException("Unknown sort value type!");
        }

        final String dedupValue = (String) solrDoc.getFirstValue(query.getParams().getDedupField());

        final String uniqueKey = (String) solrDoc.getFirstValue(searchUID);

        //    hitArr[i] = new Hit(uniqueKey, sortValue, dedupValue);
        SolrHit hit = new SolrHit(uniqueKey, sortValue, dedupValue);
        SolrHitDetails details = buildDetails(solrDoc);
        details.setHit(hit);
        hit.setHitDetails(details);

        hit.setTitleHighlighted(title);
        int len = (content.length() > 100 ? 100 : content.length());
        Summary.Fragment f = new Summary.Fragment(content.substring(0, len));
        Summary summary = new Summary();
        summary.add(f);
        hit.setSummary(summary);

        String titleHighlighted = "";
        if (highlights.containsKey(url)) {
            Map<String, List<String>> snippets = highlights.get(url);
            if (snippets.containsKey("title")) {
                titleHighlighted = snippets.get("title").get(0);
                hit.setTitleHighlighted(titleHighlighted);
            }

            if (snippets.containsKey("content")) {
                f = new Summary.Fragment(snippets.get("content").get(0));
                summary = new Summary();
                summary.add(f);
                hit.setSummary(summary);
            }
        }

        hitArr[i] = hit;
    }

    return new Hits(docList.getNumFound(), hitArr);
}

From source file: org.apache.nutch.searcher.SolrSearchBean.java

License: Apache License

public Hits search(Query query) throws IOException {
    // filter query string
    final BooleanQuery bQuery = filters.filter(query);

    final SolrQuery solrQuery = new SolrQuery(stringify(bQuery));

    solrQuery.setRows(query.getParams().getNumHits());

    if (query.getParams().getSortField() == null) {
        solrQuery.setFields(query.getParams().getDedupField(), "score", searchUID);
        query.getParams().setSortField("score");
    } else {
        solrQuery.setFields(query.getParams().getDedupField(), query.getParams().getSortField(), searchUID);
        solrQuery.setSortField(query.getParams().getSortField(),
                query.getParams().isReverse() ? ORDER.asc : ORDER.desc);
    }

    QueryResponse response;
    try {
        response = solr.query(solrQuery);
    } catch (final SolrServerException e) {
        throw SolrWriter.makeIOException(e);
    }

    final SolrDocumentList docList = response.getResults();

    final Hit[] hitArr = new Hit[docList.size()];
    for (int i = 0; i < hitArr.length; i++) {
        final SolrDocument solrDoc = docList.get(i);

        final Object raw = solrDoc.getFirstValue(query.getParams().getSortField());
        WritableComparable sortValue;

        if (raw instanceof Integer) {
            sortValue = new IntWritable(((Integer) raw).intValue());
        } else if (raw instanceof Float) {
            sortValue = new FloatWritable(((Float) raw).floatValue());
        } else if (raw instanceof String) {
            sortValue = new Text((String) raw);
        } else if (raw instanceof Long) {
            sortValue = new LongWritable(((Long) raw).longValue());
        } else {
            throw new RuntimeException("Unknown sort value type!");
        }

        final String dedupValue = (String) solrDoc.getFirstValue(query.getParams().getDedupField());

        final String uniqueKey = (String) solrDoc.getFirstValue(searchUID);

        hitArr[i] = new Hit(uniqueKey, sortValue, dedupValue);
    }

    return new Hits(docList.getNumFound(), hitArr);
}
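
In both Solr examples the raw sort-field value returned by Solr is wrapped in the matching Hadoop WritableComparable (IntWritable, FloatWritable, Text or LongWritable), so hits can later be compared and deduplicated through a uniform type.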

From source file: org.apache.orc.mapred.TestOrcOutputFormat.java

License: Apache License

@Test
public void testAllTypes() throws Exception {
    conf.set("mapreduce.task.attempt.id", "attempt_20160101_0001_m_000001_0");
    conf.setOutputCommitter(NullOutputCommitter.class);
    final String typeStr = "struct<b1:binary,b2:boolean,b3:tinyint,"
            + "c:char(10),d1:date,d2:decimal(20,5),d3:double,fff:float,int:int,"
            + "l:array<bigint>,map:map<smallint,string>,"
            + "str:struct<u:uniontype<timestamp,varchar(100)>>,ts:timestamp>";
    OrcConf.MAPRED_OUTPUT_SCHEMA.setString(conf, typeStr);
    FileOutputFormat.setOutputPath(conf, workDir);
    TypeDescription type = TypeDescription.fromString(typeStr);

    // build a row object
    OrcStruct row = (OrcStruct) OrcStruct.createValue(type);
    ((BytesWritable) row.getFieldValue(0)).set(new byte[] { 1, 2, 3, 4 }, 0, 4);
    ((BooleanWritable) row.getFieldValue(1)).set(true);
    ((ByteWritable) row.getFieldValue(2)).set((byte) 23);
    ((Text) row.getFieldValue(3)).set("aaabbbcccddd");
    SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd");
    ((DateWritable) row.getFieldValue(4)).set(DateWritable.millisToDays(format.parse("2016-04-01").getTime()));
    ((HiveDecimalWritable) row.getFieldValue(5)).set(new HiveDecimalWritable("1.23"));
    ((DoubleWritable) row.getFieldValue(6)).set(1.5);
    ((FloatWritable) row.getFieldValue(7)).set(4.5f);
    ((IntWritable) row.getFieldValue(8)).set(31415);
    OrcList<LongWritable> longList = (OrcList<LongWritable>) row.getFieldValue(9);
    longList.add(new LongWritable(123));
    longList.add(new LongWritable(456));
    OrcMap<ShortWritable, Text> map = (OrcMap<ShortWritable, Text>) row.getFieldValue(10);
    map.put(new ShortWritable((short) 1000), new Text("aaaa"));
    map.put(new ShortWritable((short) 123), new Text("bbbb"));
    OrcStruct struct = (OrcStruct) row.getFieldValue(11);
    OrcUnion union = (OrcUnion) struct.getFieldValue(0);
    union.set((byte) 1, new Text("abcde"));
    ((OrcTimestamp) row.getFieldValue(12)).set("1996-12-11 15:00:00");
    NullWritable nada = NullWritable.get();
    RecordWriter<NullWritable, OrcStruct> writer = new OrcOutputFormat<OrcStruct>().getRecordWriter(fs, conf,
            "all.orc", Reporter.NULL);
    for (int r = 0; r < 10; ++r) {
        row.setFieldValue(8, new IntWritable(r * 10));
        writer.write(nada, row);
    }
    union.set((byte) 0, new OrcTimestamp("2011-12-25 12:34:56"));
    for (int r = 0; r < 10; ++r) {
        row.setFieldValue(8, new IntWritable(r * 10 + 100));
        writer.write(nada, row);
    }
    OrcStruct row2 = new OrcStruct(type);
    writer.write(nada, row2);
    row.setFieldValue(8, new IntWritable(210));
    writer.write(nada, row);
    writer.close(Reporter.NULL);

    FileSplit split = new FileSplit(new Path(workDir, "all.orc"), 0, 100000, new String[0]);
    RecordReader<NullWritable, OrcStruct> reader = new OrcInputFormat<OrcStruct>().getRecordReader(split, conf,
            Reporter.NULL);
    nada = reader.createKey();
    row = reader.createValue();
    for (int r = 0; r < 22; ++r) {
        assertEquals(true, reader.next(nada, row));
        if (r == 20) {
            for (int c = 0; c < 12; ++c) {
                assertEquals(null, row.getFieldValue(c));
            }
        } else {
            assertEquals(new BytesWritable(new byte[] { 1, 2, 3, 4 }), row.getFieldValue(0));
            assertEquals(new BooleanWritable(true), row.getFieldValue(1));
            assertEquals(new ByteWritable((byte) 23), row.getFieldValue(2));
            assertEquals(new Text("aaabbbcccd"), row.getFieldValue(3));
            assertEquals(new DateWritable(DateWritable.millisToDays(format.parse("2016-04-01").getTime())),
                    row.getFieldValue(4));
            assertEquals(new HiveDecimalWritable("1.23"), row.getFieldValue(5));
            assertEquals(new DoubleWritable(1.5), row.getFieldValue(6));
            assertEquals(new FloatWritable(4.5f), row.getFieldValue(7));
            assertEquals(new IntWritable(r * 10), row.getFieldValue(8));
            assertEquals(longList, row.getFieldValue(9));
            assertEquals(map, row.getFieldValue(10));
            if (r < 10) {
                union.set((byte) 1, new Text("abcde"));
            } else {
                union.set((byte) 0, new OrcTimestamp("2011-12-25 12:34:56"));
            }
            assertEquals("row " + r, struct, row.getFieldValue(11));
            assertEquals("row " + r, new OrcTimestamp("1996-12-11 15:00:00"), row.getFieldValue(12));
        }
    }
    assertEquals(false, reader.next(nada, row));
}
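
Worth noting in this test: OrcStruct.createValue pre-allocates the column Writables, so the float column is filled by mutating the existing instance via ((FloatWritable) row.getFieldValue(7)).set(4.5f), while the assertions construct a fresh new FloatWritable(4.5f) and rely on value equality between the two.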

From source file: org.apache.phoenix.hive.objectinspector.PhoenixFloatObjectInspector.java

License: Apache License

@Override
public FloatWritable getPrimitiveWritableObject(Object o) {
    return new FloatWritable(get(o));
}

From source file: org.apache.phoenix.hive.util.HiveTypeUtil.java

License: Apache License

/**
 * This method returns the most appropriate Writable associated with the incoming SQL type name.
 * @param hiveType the SQL type name
 * @param o the value to wrap
 * @return Writable
 */
// TODO: awkward logic, revisit
public static Writable SQLType2Writable(String hiveType, Object o) throws SerDeException {
    String lctype = hiveType.toLowerCase();
    if ("string".equals(lctype))
        return new Text(o.toString());
    if ("varchar".equals(lctype))
        return new HiveVarcharWritable(new HiveVarchar(o.toString(), o.toString().length()));
    if ("char".equals(lctype))
        return new HiveCharWritable(new HiveChar(o.toString(), o.toString().length()));
    if ("float".equals(lctype))
        return new FloatWritable(((Float) o).floatValue());
    if ("double".equals(lctype))
        return new DoubleWritable(((Double) o).doubleValue());
    if ("boolean".equals(lctype))
        return new BooleanWritable(((Boolean) o).booleanValue());
    if ("tinyint".equals(lctype))
        return new ShortWritable(((Integer) o).shortValue());
    if ("smallint".equals(lctype))
        return new ShortWritable(((Integer) o).shortValue());
    if ("int".equals(lctype))
        return new IntWritable(((Integer) o).intValue());
    if ("bigint".equals(lctype))
        return new LongWritable(((Long) o).longValue());
    if ("timestamp".equals(lctype))
        return new TimestampWritable((Timestamp) o);
    if ("binary".equals(lctype))
        return new Text(o.toString());
    if ("date".equals(lctype))
        return new DateWritable(new Date((long) o));
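    // "array" is matched but has no conversion; execution falls through to the exception below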
    if ("array".equals(lctype))
        ;
    throw new SerDeException("Phoenix unrecognized column type: " + hiveType);
}
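
A hedged usage sketch of the converter above, assuming the phoenix-hive classes are on the classpath; note the value passed in must already be the boxed Java type the matching branch casts to (a Float for "float"), otherwise the cast fails at runtime.

import org.apache.hadoop.hive.serde2.SerDeException;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.Writable;
import org.apache.phoenix.hive.util.HiveTypeUtil;

public class SQLType2WritableDemo {
    public static void main(String[] args) throws SerDeException {
        // "float" routes to the FloatWritable branch
        Writable w = HiveTypeUtil.SQLType2Writable("float", Float.valueOf(1.5f));
        System.out.println(((FloatWritable) w).get()); // prints 1.5
    }
}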

From source file: org.apache.pig.impl.io.NullableFloatWritable.java

License: Apache License

/**
 * @param value
 */
public NullableFloatWritable(float value) {
    mValue = new FloatWritable(value);
}

From source file: org.apache.tajo.plan.util.WritableTypeConverter.java

License: Apache License

public static Writable convertDatum2Writable(Datum value) {
    switch (value.kind()) {
    case INT1:
        return new ByteWritable(value.asByte());
    case INT2:
        return new ShortWritable(value.asInt2());
    case INT4:
        return new IntWritable(value.asInt4());
    case INT8:
        return new LongWritable(value.asInt8());

    case FLOAT4:
        return new FloatWritable(value.asFloat4());
    case FLOAT8:
        return new DoubleWritable(value.asFloat8());

    // NOTE: value should be DateDatum
    case DATE:
        return new DateWritable(value.asInt4() - DateTimeConstants.UNIX_EPOCH_JDATE);

    // NOTE: value should be TimestampDatum
    case TIMESTAMP:
        TimestampWritable result = new TimestampWritable();
        result.setTime(DateTimeUtil.julianTimeToJavaTime(value.asInt8()));
        return result;

    case CHAR: {
        String str = value.asChars();
        return new HiveCharWritable(new HiveChar(str, str.length()));
    }
    case TEXT:
        return new Text(value.asChars());
    case VARBINARY:
        return new BytesWritable(value.asByteArray());

    case NULL_TYPE:
        return null;
    }

    throw new TajoRuntimeException(new NotImplementedException(TypeStringEncoder.encode(value.type())));
}
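
A hedged sketch of driving the converter above for a FLOAT4 value; it assumes Tajo's DatumFactory.createFloat4 factory method from org.apache.tajo.datum, which is not shown in this excerpt.

import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.Writable;
import org.apache.tajo.datum.Datum;
import org.apache.tajo.datum.DatumFactory;
import org.apache.tajo.plan.util.WritableTypeConverter;

public class Datum2WritableDemo {
    public static void main(String[] args) {
        // FLOAT4 datums are converted to FloatWritable
        Datum d = DatumFactory.createFloat4(4.2f);
        Writable w = WritableTypeConverter.convertDatum2Writable(d);
        System.out.println(((FloatWritable) w).get()); // prints 4.2
    }
}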