// Java tutorial
/*
 * Copyright 2014 Cask Data, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
package co.cask.cdap.internal.app.runtime.batch.dataset;

import co.cask.cdap.api.data.batch.BatchWritable;
import co.cask.cdap.common.logging.LoggingContextAccessor;
import co.cask.cdap.internal.app.runtime.batch.BasicMapReduceContext;
import co.cask.cdap.internal.app.runtime.batch.MapReduceContextProvider;
import com.google.common.base.Throwables;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;

/**
 * A {@link RecordWriter} that delegates each key/value pair to a {@link BatchWritable} dataset.
 * On {@link #close(TaskAttemptContext)} it flushes buffered dataset operations, closes the
 * MapReduce runtime context, and stops the context provider.
 *
 * <p>NOTE(review): assumed to be used from a single task thread — the constructor installs the
 * logging context on the constructing thread only; confirm with callers.
 */
final class DataSetRecordWriter<KEY, VALUE> extends RecordWriter<KEY, VALUE> {
  private static final Logger LOG = LoggerFactory.getLogger(DataSetRecordWriter.class);

  // Target dataset that receives the records.
  private final BatchWritable<KEY, VALUE> batchWritable;
  // Runtime context obtained from the provider; flushed and closed in close().
  private final BasicMapReduceContext mrContext;
  // Kept so close() can stop it after the context is closed.
  private final MapReduceContextProvider mrContextProvider;

  /**
   * Creates a writer that forwards records to the given dataset.
   *
   * @param batchWritable dataset to write records into
   * @param mrContextProvider provider used to obtain (and later stop) the MapReduce context
   */
  public DataSetRecordWriter(final BatchWritable<KEY, VALUE> batchWritable,
                             final MapReduceContextProvider mrContextProvider) {
    this.batchWritable = batchWritable;
    this.mrContextProvider = mrContextProvider;
    this.mrContext = mrContextProvider.get();
    // hack: making sure logging context is set on the thread that accesses the runtime context
    LoggingContextAccessor.setLoggingContext(mrContext.getLoggingContext());
  }

  /**
   * Writes a single record to the underlying dataset.
   *
   * @throws IOException if the dataset write fails
   */
  @Override
  public void write(final KEY key, final VALUE value) throws IOException {
    batchWritable.write(key, value);
  }

  /**
   * Flushes buffered dataset operations, then closes the context and stops the provider.
   * The nested try/finally guarantees the provider is stopped even if closing the context fails.
   *
   * @throws IOException if closing resources fails
   * @throws InterruptedException if the close is interrupted
   */
  @Override
  public void close(final TaskAttemptContext context) throws IOException, InterruptedException {
    // transaction is not finished, but we want all operations to be dispatched (some could be
    // buffered in memory by tx agent)
    try {
      mrContext.flushOperations();
    } catch (Exception e) {
      // Fix: pass the exception to the logger (the original concatenated the message and dropped
      // the throwable, losing the stack trace) and use SLF4J parameterized logging.
      LOG.error("Failed to flush operations at the end of reducer of {}", mrContext, e);
      throw Throwables.propagate(e);
    } finally {
      try {
        mrContext.close();
      } finally {
        // Always stop the provider, even if closing the context threw.
        mrContextProvider.stop();
      }
    }
  }
}