Example usage for org.apache.hadoop.mapreduce TaskAttemptContext getConfiguration

List of usage examples for org.apache.hadoop.mapreduce TaskAttemptContext getConfiguration

Introduction

On this page you can find example usages of org.apache.hadoop.mapreduce TaskAttemptContext getConfiguration, collected from open-source projects.

Prototype

public Configuration getConfiguration();

Document

Return the configuration for the job.
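Before the project examples below, here is a minimal sketch of the common pattern: the TaskAttemptContext hands back the job Configuration, and task-side code reads its settings from it. The class name and the "example.output.dir" property (with its default value) are hypothetical and used only to illustrate the call; they are not taken from any project listed here.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

public class GetConfigurationSketch {

    // Reads a hypothetical output-directory property from the job Configuration
    // carried by the TaskAttemptContext and makes sure the directory exists.
    public static Path resolveOutputDir(TaskAttemptContext context) throws IOException {
        Configuration conf = context.getConfiguration();
        String dir = conf.get("example.output.dir", "/tmp/example-out");
        Path path = new Path(dir);
        FileSystem fs = path.getFileSystem(conf);
        if (!fs.exists(path)) {
            fs.mkdirs(path);
        }
        return path;
    }
}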

Usage

From source file:co.nubetech.hiho.mapreduce.lib.output.AppendSequenceFileOutputFormat.java

License:Apache License

@Override
public Path getDefaultWorkFile(TaskAttemptContext context, String extension) throws IOException {
    Path p1;
    isAppend = context.getConfiguration().get(HIHOConf.IS_APPEND, "false");
    if (isAppend.equalsIgnoreCase("false")) {
        p1 = super.getDefaultWorkFile(context, extension);
    } else {
        FileOutputCommitter committer = (FileOutputCommitter) getOutputCommitter(context);
        Path p = committer.getWorkPath();
        fileCount = p.getFileSystem(context.getConfiguration()).getContentSummary(getOutputPath(context))
                .getFileCount();
        if (fileCount > 1)
            fileCount = fileCount - 1;
        p1 = new Path(committer.getWorkPath(), getUniqueFile(context, "part", extension));
    }
    return p1;
}

From source file:co.nubetech.hiho.mapreduce.lib.output.AppendTextOutputFormat.java

License:Apache License

@Override
public Path getDefaultWorkFile(TaskAttemptContext context, String extension) throws IOException {
    Path p1;
    // fall back to "false" when the property is not set
    isAppend = context.getConfiguration().get(HIHOConf.IS_APPEND, "false");
    if (isAppend.equalsIgnoreCase("false")) {
        p1 = super.getDefaultWorkFile(context, extension);
    } else {
        FileOutputCommitter committer = (FileOutputCommitter) getOutputCommitter(context);
        Path p = committer.getWorkPath();
        fileCount = p.getFileSystem(context.getConfiguration()).getContentSummary(getOutputPath(context))
                .getFileCount();
        if (fileCount > 1) {
            fileCount = fileCount - 1;
        }
        p1 = new Path(committer.getWorkPath(), getUniqueFile(context, "part", extension));
    }
    return p1;
}

From source file:co.nubetech.hiho.mapreduce.lib.output.FTPTextOutputFormat.java

License:Apache License

@Override
public RecordWriter<K, V> getRecordWriter(TaskAttemptContext job) throws IOException, InterruptedException {

    Configuration conf = job.getConfiguration();

    String ip = conf.get(HIHOConf.FTP_ADDRESS);
    String portno = conf.get(HIHOConf.FTP_PORT);
    String usr = conf.get(HIHOConf.FTP_USER);
    String pwd = conf.get(HIHOConf.FTP_PASSWORD);
    String dir = getOutputPath(job).toString();
    System.out.println("\n\ninside ftpoutputformat" + ip + " " + portno + " " + usr + " " + pwd + " " + dir);
    String keyValueSeparator = conf.get("mapred.textoutputformat.separator", "\t");
    FTPClient f = new FTPClient();
    f.connect(ip, Integer.parseInt(portno));
    f.login(usr, pwd);
    f.changeWorkingDirectory(dir);
    f.setFileType(FTP.BINARY_FILE_TYPE);

    boolean isCompressed = getCompressOutput(job);
    CompressionCodec codec = null;
    String extension = "";
    if (isCompressed) {
        Class<? extends CompressionCodec> codecClass = getOutputCompressorClass(job, GzipCodec.class);
        codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass, conf);
        extension = codec.getDefaultExtension();
    }
    Path file = getDefaultWorkFile(job, extension);
    FileSystem fs = file.getFileSystem(conf);
    String filename = file.getName();
    if (!isCompressed) {
        // FSDataOutputStream fileOut = fs.create(file, false);
        OutputStream os = f.appendFileStream(filename);
        DataOutputStream fileOut = new DataOutputStream(os);
        return new FTPLineRecordWriter<K, V>(fileOut, new String(keyValueSeparator), f);

    } else {
        // FSDataOutputStream fileOut = fs.create(file, false);
        OutputStream os = f.appendFileStream(filename);
        DataOutputStream fileOut = new DataOutputStream(os);
        return new FTPLineRecordWriter<K, V>(new DataOutputStream(codec.createOutputStream(fileOut)),
                keyValueSeparator, f);
    }
}

From source file:co.nubetech.hiho.mapreduce.lib.output.NoKeyOnlyValueOutputFormat.java

License:Apache License

public RecordWriter<K, V> getRecordWriter(TaskAttemptContext context) throws IOException {
    boolean isCompressed = getCompressOutput(context);
    Configuration conf = context.getConfiguration();
    String ext = "";
    CompressionCodec codec = null;

    if (isCompressed) {
        // create the named codec
        Class<? extends CompressionCodec> codecClass = getOutputCompressorClass(context, GzipCodec.class);
        codec = ReflectionUtils.newInstance(codecClass, conf);

        ext = codec.getDefaultExtension();
    }

    Path file = getDefaultWorkFile(context, ext);
    FileSystem fs = file.getFileSystem(conf);
    FSDataOutputStream fileOut = fs.create(file, false);
    DataOutputStream ostream = fileOut;

    if (isCompressed) {
        ostream = new DataOutputStream(codec.createOutputStream(fileOut));
    }

    return new NoKeyRecordWriter<K, V>(ostream);
}

From source file:com.abel.hwfs.custom.output.SetSizeDBOutputFormat.java

License:Apache License

/** {@inheritDoc} */
@Override
public RecordWriter<K, V> getRecordWriter(TaskAttemptContext context) throws IOException {
    MyDBConfiguration dbConf = new MyDBConfiguration(context.getConfiguration());
    String tableName = dbConf.getOutputTableName();
    String[] fieldNames = dbConf.getOutputFieldNames();
    int maxBufferSize = dbConf.getOutputBufferLimit();

    if (fieldNames == null) {
        fieldNames = new String[dbConf.getOutputFieldCount()];
    }

    try {
        Connection connection = dbConf.getConnection();
        PreparedStatement statement = null;

        statement = connection.prepareStatement(constructQuery(tableName, fieldNames));
        return new DBRecordWriter(connection, statement, maxBufferSize);
    } catch (Exception ex) {
        throw new IOException(ex.getMessage());
    }
}

From source file:com.aerospike.hadoop.mapreduce.AerospikeOutputFormat.java

License:Apache License

@SuppressWarnings("unchecked")
@Override
public RecordWriter<KK, VV> getRecordWriter(TaskAttemptContext context) {
    Configuration conf = context.getConfiguration();
    return (RecordWriter<KK, VV>) getAerospikeRecordWriter(conf, context);
}

From source file:com.alectenharmsel.research.WholeBlockRecordReader.java

License:Apache License

public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
    fileSplit = (FileSplit) split;
    conf = context.getConfiguration();
    //blockSize = conf.getInt("dfs.block.size", 134217728);
    //128MB seems too big
    blockSize = 1024 * 1024; //1MB Blocks
    //blockSize = 1024; //Testing ONLY
    fileLength = (int) fileSplit.getLength();

    currKey = createKey();
    currValue = createValue();
}

From source file:com.aliyun.openservices.tablestore.hadoop.TableStoreOutputFormat.java

License:Apache License

@Override
public RecordWriter<Writable, BatchWriteWritable> getRecordWriter(TaskAttemptContext context)
        throws IOException, InterruptedException {
    Configuration conf = context.getConfiguration();
    String outputTable = conf.get(OUTPUT_TABLE);
    Preconditions.checkNotNull(outputTable, "Output table must be set.");
    SyncClientInterface ots = TableStore.newOtsClient(conf);
    int maxBatchSize = conf.getInt(MAX_UPDATE_BATCH_SIZE, 0);
    if (maxBatchSize == 0) {
        return new TableStoreRecordWriter(ots, outputTable);
    } else {
        return new TableStoreRecordWriter(ots, outputTable, maxBatchSize);
    }
}

From source file:com.aliyun.openservices.tablestore.hadoop.TableStoreRecordReader.java

License:Apache License

@Override
public void initialize(InputSplit split, TaskAttemptContext ctx) {
    initialize(split, ctx.getConfiguration());
}

From source file:com.ambiata.ivory.operation.hadoop.DelegatingRecordReader.java

License:Apache License

/**
 * Constructs the DelegatingRecordReader.
 *
 * @param split TaggedInputSplit object
 * @param context TaskAttemptContext object
 *
 * @throws IOException
 * @throws InterruptedException
 */
@SuppressWarnings("unchecked")
public DelegatingRecordReader(InputSplit split, TaskAttemptContext context)
        throws IOException, InterruptedException {
    // Find the InputFormat and then the RecordReader from the
    // TaggedInputSplit.
    TaggedInputSplit taggedInputSplit = (TaggedInputSplit) split;
    InputFormat<K, V> inputFormat = (InputFormat<K, V>) ReflectionUtils
            .newInstance(taggedInputSplit.getInputFormatClass(), context.getConfiguration());
    originalRR = inputFormat.createRecordReader(taggedInputSplit.getInputSplit(), context);
}