Usage examples for org.apache.hadoop.mapreduce.JobContext#getConfiguration:
public Configuration getConfiguration();
From source file: com.linkedin.json.JsonSequenceFileInputFormat.java
License: Apache License
@Override protected List<FileStatus> listStatus(JobContext job) throws IOException { String dirs = job.getConfiguration().get("mapred.input.dir", ""); String[] list = StringUtils.split(dirs); List<FileStatus> status = new ArrayList<FileStatus>(); for (int i = 0; i < list.length; i++) { status.addAll(getAllSubFileStatus(job, new Path(list[i]))); }//from ww w. jav a2 s .c o m return status; }
From source file: com.linkedin.json.JsonSequenceFileInputFormat.java
License: Apache License
private List<FileStatus> getAllSubFileStatus(JobContext jobContext, Path filterMemberPath) throws IOException { List<FileStatus> list = new ArrayList<FileStatus>(); FileSystem fs = filterMemberPath.getFileSystem(jobContext.getConfiguration()); FileStatus[] subFiles = fs.listStatus(filterMemberPath); if (null != subFiles) { if (fs.getFileStatus(filterMemberPath).isDir()) { for (FileStatus subFile : subFiles) { if (!subFile.getPath().getName().startsWith("_")) { list.addAll(getAllSubFileStatus(jobContext, subFile.getPath())); }// w w w . j a v a 2s.c om } } else { if (subFiles.length > 0 && !subFiles[0].getPath().getName().startsWith("_")) { list.add(subFiles[0]); } } } return list; }
From source file: com.linkedin.pinot.hadoop.io.JsonPinotOutputFormat.java
License: Apache License
public static void setJsonReaderClass(JobContext context, Class<?> clazz) { context.getConfiguration().set(JSON_READER_CLASS, clazz.getName()); }
From source file: com.linkedin.pinot.hadoop.io.PinotOutputFormat.java
License: Apache License
public static String getTempSegmentDir(JobContext job) { return job.getConfiguration().get(PinotOutputFormat.TEMP_SEGMENT_DIR, ".data_" + getTableName(job)); }
From source file: com.linkedin.pinot.hadoop.io.PinotOutputFormat.java
License: Apache License
public static String getTableName(JobContext job) { String table = job.getConfiguration().get(PinotOutputFormat.TABLE_NAME); if (table == null) { throw new RuntimeException("pinot table name not set."); }// w w w. j a va 2 s. co m return table; }
From source file: com.linkedin.pinot.hadoop.io.PinotOutputFormat.java
License: Apache License
public static String getSegmentName(JobContext context) { String segment = context.getConfiguration().get(PinotOutputFormat.SEGMENT_NAME); if (segment == null) { throw new RuntimeException("pinot segment name not set."); }/*from w w w . j a v a 2 s . c o m*/ return segment; }
From source file: com.linkedin.pinot.hadoop.io.PinotOutputFormat.java
License: Apache License
public static String getSchema(JobContext context) { String schemaFile = context.getConfiguration().get(PinotOutputFormat.SCHEMA); if (schemaFile == null) { throw new RuntimeException("pinot schema file not set"); }/* ww w .ja v a 2 s . c o m*/ return schemaFile; }
From source file: com.linkedin.pinot.hadoop.io.PinotOutputFormat.java
License: Apache License
public static String getReaderConfig(JobContext context) { return context.getConfiguration().get(PinotOutputFormat.READER_CONFIG); }
From source file: com.linkedin.pinot.hadoop.io.PinotOutputFormat.java
License: Apache License
public static boolean getEnableStarTreeIndex(JobContext context) { return context.getConfiguration().getBoolean(PinotOutputFormat.ENABLE_STAR_TREE_INDEX, false); }
From source file: com.linkedin.pinot.hadoop.io.PinotOutputFormat.java
License: Apache License
public static String getStarTreeIndexSpec(JobContext context) { return context.getConfiguration().get(PinotOutputFormat.STAR_TREE_INDEX_SPEC); }