Example usage for org.apache.hadoop.mapreduce TaskInputOutputContext getConfiguration

Introduction

On this page you can find example usage of org.apache.hadoop.mapreduce TaskInputOutputContext getConfiguration.

Prototype

public Configuration getConfiguration();

Document

Return the configuration for the job.
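
The collected examples below come from Kiji MapReduce and SimSQL. As a minimal, hypothetical sketch of the pattern they all rely on: a task obtains the job Configuration through its context (Mapper.Context and Reducer.Context both extend TaskInputOutputContext) and reads job properties from it. The mapper class and the property name "my.example.separator" below are invented for illustration and do not appear in the examples on this page.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class ExampleSeparatorMapper extends Mapper<LongWritable, Text, Text, LongWritable> {

    private String separator;

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        // Mapper.Context is a TaskInputOutputContext, so getConfiguration()
        // returns the Configuration of the running job.
        Configuration conf = context.getConfiguration();
        // "my.example.separator" is a hypothetical job property, used only for illustration.
        separator = conf.get("my.example.separator", ",");
    }

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        for (String token : value.toString().split(separator)) {
            context.write(new Text(token), new LongWritable(1L));
        }
    }
}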

Usage

From source file: org.kiji.mapreduce.context.HFileWriterContext.java

License: Apache License

/**
 * Constructs a new context that can write cells to an HFile that can be loaded into an HBase
 * table.
 *
 * @param hadoopContext is the Hadoop {@link TaskInputOutputContext} that will be used to perform
 *     the writes.
 * @throws IOException on I/O error.
 */
public HFileWriterContext(TaskInputOutputContext<?, ?, ?, ?> hadoopContext) throws IOException {
    super(hadoopContext);
    final Configuration conf = new Configuration(hadoopContext.getConfiguration());
    final KijiURI outputURI = KijiURI.newBuilder(conf.get(KijiConfKeys.KIJI_OUTPUT_TABLE_URI)).build();
    mKiji = Kiji.Factory.open(outputURI, conf);
    mTable = mKiji.openTable(outputURI.getTable());
    mColumnNameTranslator = new ColumnNameTranslator(mTable.getLayout());
    mEntityIdFactory = mTable.getEntityIdFactory();
}

From source file: org.kiji.mapreduce.context.InternalKijiContext.java

License: Apache License

/**
 * Constructs a new implementation of {@link KijiContext}.
 *
 * @param context is the Hadoop {@link TaskInputOutputContext} that will back the new
 *    {@link KijiContext}
 * @throws IOException on I/O error.
 */
InternalKijiContext(TaskInputOutputContext context) throws IOException {
    mHadoopContext = context;
    mKeyValueStoreFactory = KeyValueStoreReaderFactory.create(context.getConfiguration());
}

From source file: org.kiji.mapreduce.impl.DirectKijiTableWriterContext.java

License: Apache License

/**
 * Constructs a new context that can write cells directly to a Kiji table.
 *
 * @param hadoopContext is the Hadoop {@link TaskInputOutputContext} that will be used to perform
 *     the writes.
 * @throws IOException on I/O error.
 */
public DirectKijiTableWriterContext(TaskInputOutputContext<?, ?, ?, ?> hadoopContext) throws IOException {
    super(hadoopContext);
    final Configuration conf = new Configuration(hadoopContext.getConfiguration());
    final KijiURI outputURI = KijiURI.newBuilder(conf.get(KijiConfKeys.KIJI_OUTPUT_TABLE_URI)).build();
    mKiji = Kiji.Factory.open(outputURI, conf);
    mTable = mKiji.openTable(outputURI.getTable());
    mPutter = mTable.getWriterFactory().openBufferedWriter();
    mEntityIdFactory = EntityIdFactory.getFactory(mTable.getLayout());
}

From source file: org.kiji.mapreduce.impl.HFileWriterContext.java

License: Apache License

/**
 * Constructs a new context that can write cells to an HFile that can be loaded into an HBase
 * table.
 *
 * @param hadoopContext is the Hadoop {@link TaskInputOutputContext} that will be used to perform
 *     the writes.
 * @throws IOException on I/O error.
 */
public HFileWriterContext(TaskInputOutputContext<?, ?, ?, ?> hadoopContext) throws IOException {
    super(hadoopContext);
    final Configuration conf = new Configuration(hadoopContext.getConfiguration());
    final KijiURI outputURI = KijiURI.newBuilder(conf.get(KijiConfKeys.KIJI_OUTPUT_TABLE_URI)).build();
    mKiji = Kiji.Factory.open(outputURI, conf);
    mTable = mKiji.openTable(outputURI.getTable());
    mColumnNameTranslator = new ColumnNameTranslator(mTable.getLayout());
    mEntityIdFactory = EntityIdFactory.getFactory(mTable.getLayout());
}

From source file: org.kiji.mapreduce.impl.InternalKijiContext.java

License: Apache License

/**
 * Constructs a new implementation of {@link KijiContext}.
 *
 * @param context is the Hadoop {@link TaskInputOutputContext} that will back the new
 *    {@link KijiContext}
 * @throws IOException on I/O error.
 */
protected InternalKijiContext(TaskInputOutputContext context) throws IOException {
    mHadoopContext = context;
    mKeyValueStoreFactory = KeyValueStoreReaderFactory.create(context.getConfiguration());
}

From source file: org.kiji.mapreduce.impl.KijiTableContextFactory.java

License: Apache License

/**
 * Instantiates the configured KijiTableContext.
 *
 * @param taskContext Hadoop task context.
 * @return the configured KijiTableContext.
 * @throws IOException on I/O error.
 */
public static KijiTableContext create(TaskInputOutputContext taskContext) throws IOException {
    final Configuration conf = taskContext.getConfiguration();
    final String className = conf.get(KijiConfKeys.KIJI_TABLE_CONTEXT_CLASS);
    if (className == null) {
        throw new IOException(String.format("KijiTableContext class missing from configuration (key '%s').",
                KijiConfKeys.KIJI_TABLE_CONTEXT_CLASS));
    }

    Throwable throwable = null;
    try {
        final Class<?> genericClass = Class.forName(className);
        final Class<? extends KijiTableContext> klass = genericClass.asSubclass(KijiTableContext.class);
        final Constructor<? extends KijiTableContext> constructor = klass
                .getConstructor(TaskInputOutputContext.class);
        final KijiTableContext context = constructor.newInstance(taskContext);
        return context;
    } catch (ClassCastException cce) {
        throwable = cce;
    } catch (ClassNotFoundException cnfe) {
        throwable = cnfe;
    } catch (NoSuchMethodException nsme) {
        throwable = nsme;
    } catch (InvocationTargetException ite) {
        throwable = ite;
    } catch (IllegalAccessException iae) {
        throwable = iae;
    } catch (InstantiationException ie) {
        throwable = ie;
    }
    throw new IOException(String.format("Error instantiating KijiTableWriter class '%s': %s.", className,
            throwable.getMessage()), throwable);
}

From source file: org.kiji.mapreduce.util.SerializeLoggerAspect.java

License: Apache License

/**
 * Logic to serialize collected profiling content to a file on HDFS. The files are stored
 * in the current working directory for this context, in a folder specified by STATS_DIR. Each
 * per-task file is named after the task attempt ID.
 * We obtain the profiling stats collected by the LogTimerAspect in KijiSchema. The format of the
 * file is as follows: Job Name, Job ID, Task Attempt, Function Signature,
 * Aggregate Time (nanoseconds), Number of Invocations, Time per call (nanoseconds)'\n'
 *
 * @param context The {@link TaskInputOutputContext} for this job.
 * @throws IOException If the writes to HDFS fail.
 */
private void serializeToFile(TaskInputOutputContext context) throws IOException {
    Path parentPath = new Path(context.getWorkingDirectory(), STATS_DIR);
    FileSystem fs = FileSystem.get(context.getConfiguration());
    fs.mkdirs(parentPath);
    Path path = new Path(parentPath, context.getTaskAttemptID().toString());
    OutputStreamWriter out = new OutputStreamWriter(fs.create(path, true), "UTF-8");
    try {
        out.write("Job Name, Job ID, Task Attempt, Function Signature, Aggregate Time (nanoseconds), "
                + "Number of Invocations, Time per call (nanoseconds)\n");

        ConcurrentHashMap<String, LoggingInfo> signatureTimeMap = mLogTimerAspect.getSignatureTimeMap();
        for (Map.Entry<String, LoggingInfo> entrySet : signatureTimeMap.entrySet()) {
            writeProfileInformation(out, context, entrySet.getKey(), entrySet.getValue());
        }

        signatureTimeMap = mMRLogTimerAspect.getSignatureTimeMap();
        for (Map.Entry<String, LoggingInfo> entrySet : signatureTimeMap.entrySet()) {
            writeProfileInformation(out, context, entrySet.getKey(), entrySet.getValue());
        }
    } finally {
        out.close();
    }
}

From source file: simsql.runtime.JoinReducerInnards.java

License: Apache License

public void setup(TaskInputOutputContext<?, ?, WritableKey, WritableValue> context, int whichMapper)
        throws IOException, InterruptedException {

    // get the join type
    String myJoinType = context.getConfiguration().get("simsql.joinType", "natural").toUpperCase();
    joinType = JoinOp.JoinType.valueOf(myJoinType);

    // if necessary, we set up the iterator over the sorted file here... see if we have a sorted file 
    String whichFile = context.getConfiguration().get("simsql.sortedFileName", "");
    if (!whichFile.equals("") || whichMapper >= 0) {

        // this will let the join know that we have a merge on one side
        myTable = new RecordHashTable(0.60);

        // now we need to figure out the suffix of the file that we are going to process
        // if we are on the mapper, then we will get the name of the file that is being mapped...
        // by extracting the suffix of that file, we can figure out the suffix of the file we need
        // to merge with
        int whichOne;
        if (whichMapper >= 0) {
            whichOne = whichMapper;
        } else {
            whichOne = context.getConfiguration().getInt("mapred.task.partition", -1);
        }

        // get the other info about the file to process
        short whichTypeCode = (short) context.getConfiguration().getInt("simsql.sortedFileTypeCode", -1);
        int howManyAtts = context.getConfiguration().getInt("simsql.sortedFileNumAtts", -1);

        // now we need to find the corresponding input file
        FileSystem fs = FileSystem.get(context.getConfiguration());
        Path path = new Path(whichFile);
        FileStatus[] fstatus = fs.listStatus(path, new TableFileFilter());

        // this is the file suffix we are trying to find
        String names = "";
        String suffix = RecordOutputFormat.getFileNumber(whichOne);
        for (FileStatus f : fstatus) {

            // first we see if this file name contains the correct suffix
            names += f.getPath().getName() + ";";
            if (f.getPath().getName().contains(suffix)) {

                // we found the file to open, so go ahead and do it
                IteratorFactory myFactory = new IteratorFactory();
                smallFile = myFactory.getIterableForFile(f.getPath().toString(), howManyAtts, whichTypeCode)
                        .iterator();
                return;
            }
        }

        smallFile = new EmptyRecordIterator();
    }

}