List of usage examples for the org.apache.hadoop.io.Writable interface
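All of the classes on this page implement the same two-method contract from org.apache.hadoop.io.Writable: write(DataOutput) serializes the fields, and readFields(DataInput) restores them in the same order. As a minimal orientation sketch (the PointWritable class and its fields are hypothetical, not taken from any source file below):

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.Writable;

// Hypothetical example; every class listed below follows this same shape.
public class PointWritable implements Writable {
    private long x;
    private long y;

    @Override
    public void write(DataOutput out) throws IOException {
        // Serialize the fields in a fixed order.
        out.writeLong(x);
        out.writeLong(y);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        // Deserialize in exactly the order that write() used.
        x = in.readLong();
        y = in.readLong();
    }
}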
From source file com.tuplejump.calliope.hadoop.ColumnFamilySplit.java
public class ColumnFamilySplit extends InputSplit implements Writable, org.apache.hadoop.mapred.InputSplit {
    private String startToken;
    private String endToken;
    private long length;
    private String[] dataNodes;
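Given those fields, a split like this typically serializes each one in order, writing an explicit length prefix for the array. The sketch below is one plausible implementation, not the actual ColumnFamilySplit code; it assumes org.apache.hadoop.io.Text for string (de)serialization:

// Plausible sketch only; the real ColumnFamilySplit may differ.
@Override
public void write(DataOutput out) throws IOException {
    Text.writeString(out, startToken);
    Text.writeString(out, endToken);
    out.writeLong(length);
    out.writeInt(dataNodes.length); // length prefix for the array
    for (String node : dataNodes) {
        Text.writeString(out, node);
    }
}

@Override
public void readFields(DataInput in) throws IOException {
    startToken = Text.readString(in);
    endToken = Text.readString(in);
    length = in.readLong();
    dataNodes = new String[in.readInt()]; // read back the same count
    for (int i = 0; i < dataNodes.length; i++) {
        dataNodes[i] = Text.readString(in);
    }
}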
From source file com.twitter.distributedlog.mapreduce.LogSegmentSplit.java
/**
 * An input split that reads from a log segment.
 */
public class LogSegmentSplit extends InputSplit implements Writable {
    private LogSegmentMetadata logSegmentMetadata;
From source file com.twitter.elephanttwin.io.ListLongPair.java
/**
 * A Writable class to write a list of LongPairWritable values.
 */
public class ListLongPair implements Writable {
    private List<LongPairWritable> list;
From source file com.twitter.elephanttwin.io.ListLongWritable.java
/**
 * A Writable class to write a list of LongWritable values.
 */
public class ListLongWritable implements Writable {
    private List<LongWritable> list;
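Both of these list-holding classes suggest the standard pattern for serializing a collection of Writables: write the size first, then delegate to each element. A sketch under that assumption (not the actual ListLongWritable code; java.util.ArrayList assumed for the backing list):

// Sketch of the size-prefixed list pattern; hypothetical, not the real code.
@Override
public void write(DataOutput out) throws IOException {
    out.writeInt(list.size());
    for (LongWritable value : list) {
        value.write(out); // delegate to the element's own serialization
    }
}

@Override
public void readFields(DataInput in) throws IOException {
    int size = in.readInt();
    list = new ArrayList<LongWritable>(size);
    for (int i = 0; i < size; i++) {
        LongWritable value = new LongWritable();
        value.readFields(in);
        list.add(value);
    }
}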
From source file com.twitter.elephanttwin.lucene.indexing.HadoopSplitDocument.java
/**
* <p>A "document" used to represent a Hadoop Split, which can be
* indexed by an arbitrary number of fields. The idea is that in
* cases when some "field" in a file has extremely high cardinality --
* say, IP addresses in web logs of a busy website -- you can "index"
* the files based on values of these fields in a given Hadoop file split,
From source file com.twitter.elephanttwin.retrieval.CountTimestampSamplesWritable.java
/**
 * @author Alex Levenson
 */
public class CountTimestampSamplesWritable implements Writable {
    private CountTimestampWritable countTimestampWritable;
    private List<Long> samples;
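A Writable that contains another Writable, as this one does, usually just delegates to the nested object's write/readFields. A hypothetical sketch of that composition pattern (not the actual CountTimestampSamplesWritable code):

// Hypothetical sketch of the nested-Writable pattern.
@Override
public void write(DataOutput out) throws IOException {
    countTimestampWritable.write(out); // delegate to the nested Writable
    out.writeInt(samples.size());
    for (long sample : samples) {
        out.writeLong(sample);
    }
}

@Override
public void readFields(DataInput in) throws IOException {
    countTimestampWritable = new CountTimestampWritable();
    countTimestampWritable.readFields(in);
    int size = in.readInt();
    samples = new ArrayList<Long>(size);
    for (int i = 0; i < size; i++) {
        samples.add(in.readLong());
    }
}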
From source file com.twitter.elephanttwin.retrieval.CountTimestampWritable.java
/**
 * A writable that holds a count and a timestamp.
 *
 * @author Alex Levenson
 */
public class CountTimestampWritable implements Writable {
From source file com.twitter.hraven.etl.JobFile.java
/**
 * Class that handles Job files, whether conf files or history files.
 */
public class JobFile implements Writable {
From source file com.twitter.hraven.QualifiedJobId.java
/**
* The job ID should be relatively unique, unless two clusters start at the same
* time. However, given a jobId it is not immediately clear which cluster a job
* ran on (unless the cluster has not been restarted and the prefix is still the
* current one). This class represents the fully qualified job identifier.
 */
From source file com.vertica.hivestoragehandler.DbRecordWritable.java
public class DbRecordWritable implements Writable, DBWritable {
    private Object[] columnValues; // primitive java Object or java.util.List
    private int[] columnTypes;

    public DbRecordWritable() {
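This class has to satisfy two separate serialization contracts: Writable (DataOutput/DataInput, used when Hadoop moves the record between tasks) and DBWritable (JDBC PreparedStatement/ResultSet, used at the database boundary). Below is a minimal sketch of the DBWritable half, assuming a plain setObject/getObject mapping; the real class's columnTypes handling is more involved:

// Sketch only; assumes java.sql.* imports and a simple Object mapping.
public void write(PreparedStatement statement) throws SQLException {
    for (int i = 0; i < columnValues.length; i++) {
        statement.setObject(i + 1, columnValues[i]); // JDBC parameters are 1-indexed
    }
}

public void readFields(ResultSet resultSet) throws SQLException {
    int columns = resultSet.getMetaData().getColumnCount();
    columnValues = new Object[columns];
    for (int i = 0; i < columns; i++) {
        columnValues[i] = resultSet.getObject(i + 1);
    }
}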