List of usage examples for org.apache.hadoop.security UserGroupInformation doAs
@InterfaceAudience.Public @InterfaceStability.Evolving public <T> T doAs(PrivilegedExceptionAction<T> action) throws IOException, InterruptedException
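Before the full examples, a minimal sketch of the call pattern, assuming a secured cluster where a login user (for example from a keytab) is already established. The user name "alice" and the path below are hypothetical and not taken from any of the source files that follow.

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class DoAsSketch {
    public static void main(String[] args) throws Exception {
        final Configuration conf = new Configuration();
        // Impersonate the hypothetical user "alice" on top of the already-established
        // login user (requires the cluster's hadoop.proxyuser.* settings to allow it).
        UserGroupInformation ugi = UserGroupInformation.createProxyUser("alice",
                UserGroupInformation.getLoginUser());
        // Everything inside run() executes as alice. IOException and InterruptedException
        // thrown by run() are rethrown by doAs as-is; other checked exceptions arrive
        // wrapped in UndeclaredThrowableException.
        boolean exists = ugi.doAs(new PrivilegedExceptionAction<Boolean>() {
            @Override
            public Boolean run() throws Exception {
                FileSystem fs = FileSystem.get(conf);
                return fs.exists(new Path("/user/alice"));
            }
        });
        System.out.println("/user/alice exists: " + exists);
    }
}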
From source file:org.apache.nifi.processors.hadoop.PutHDFS.java
License:Apache License
@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    final FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final FileSystem hdfs = getFileSystem();
    final Configuration configuration = getConfiguration();
    final UserGroupInformation ugi = getUserGroupInformation();

    if (configuration == null || hdfs == null || ugi == null) {
        getLogger().error("HDFS not configured properly");
        session.transfer(flowFile, REL_FAILURE);
        context.yield();
        return;
    }

    ugi.doAs(new PrivilegedAction<Object>() {
        @Override
        public Object run() {
            Path tempDotCopyFile = null;
            FlowFile putFlowFile = flowFile;
            try {
                final String dirValue = context.getProperty(DIRECTORY).evaluateAttributeExpressions(putFlowFile)
                        .getValue();
                final Path configuredRootDirPath = new Path(dirValue);

                final String conflictResponse = context.getProperty(CONFLICT_RESOLUTION).getValue();

                final Double blockSizeProp = context.getProperty(BLOCK_SIZE).asDataSize(DataUnit.B);
                final long blockSize = blockSizeProp != null ? blockSizeProp.longValue()
                        : hdfs.getDefaultBlockSize(configuredRootDirPath);

                final Double bufferSizeProp = context.getProperty(BUFFER_SIZE).asDataSize(DataUnit.B);
                final int bufferSize = bufferSizeProp != null ? bufferSizeProp.intValue()
                        : configuration.getInt(BUFFER_SIZE_KEY, BUFFER_SIZE_DEFAULT);

                final Integer replicationProp = context.getProperty(REPLICATION_FACTOR).asInteger();
                final short replication = replicationProp != null ? replicationProp.shortValue()
                        : hdfs.getDefaultReplication(configuredRootDirPath);

                final CompressionCodec codec = getCompressionCodec(context, configuration);

                final String filename = codec != null
                        ? putFlowFile.getAttribute(CoreAttributes.FILENAME.key()) + codec.getDefaultExtension()
                        : putFlowFile.getAttribute(CoreAttributes.FILENAME.key());

                final Path tempCopyFile = new Path(configuredRootDirPath, "." + filename);
                final Path copyFile = new Path(configuredRootDirPath, filename);

                // Create destination directory if it does not exist
                try {
                    if (!hdfs.getFileStatus(configuredRootDirPath).isDirectory()) {
                        throw new IOException(
                                configuredRootDirPath.toString() + " already exists and is not a directory");
                    }
                } catch (FileNotFoundException fe) {
                    if (!hdfs.mkdirs(configuredRootDirPath)) {
                        throw new IOException(configuredRootDirPath.toString() + " could not be created");
                    }
                    changeOwner(context, hdfs, configuredRootDirPath, flowFile);
                }

                final boolean destinationExists = hdfs.exists(copyFile);

                // If destination file already exists, resolve that based on processor configuration
                if (destinationExists) {
                    switch (conflictResponse) {
                    case REPLACE_RESOLUTION:
                        if (hdfs.delete(copyFile, false)) {
                            getLogger().info("deleted {} in order to replace with the contents of {}",
                                    new Object[] { copyFile, putFlowFile });
                        }
                        break;
                    case IGNORE_RESOLUTION:
                        session.transfer(putFlowFile, REL_SUCCESS);
                        getLogger().info("transferring {} to success because file with same name already exists",
                                new Object[] { putFlowFile });
                        return null;
                    case FAIL_RESOLUTION:
                        session.transfer(session.penalize(putFlowFile), REL_FAILURE);
                        getLogger().warn(
                                "penalizing {} and routing to failure because file with same name already exists",
                                new Object[] { putFlowFile });
                        return null;
                    default:
                        break;
                    }
                }

                // Write FlowFile to temp file on HDFS
                final StopWatch stopWatch = new StopWatch(true);
                session.read(putFlowFile, new InputStreamCallback() {
                    @Override
                    public void process(InputStream in) throws IOException {
                        OutputStream fos = null;
                        Path createdFile = null;
                        try {
                            if (conflictResponse.equals(APPEND_RESOLUTION_AV.getValue()) && destinationExists) {
                                fos = hdfs.append(copyFile, bufferSize);
                            } else {
                                fos = hdfs.create(tempCopyFile, true, bufferSize, replication, blockSize);
                            }
                            if (codec != null) {
                                fos = codec.createOutputStream(fos);
                            }
                            createdFile = tempCopyFile;
                            BufferedInputStream bis = new BufferedInputStream(in);
                            StreamUtils.copy(bis, fos);
                            bis = null;
                            fos.flush();
                        } finally {
                            try {
                                if (fos != null) {
                                    fos.close();
                                }
                            } catch (RemoteException re) {
                                // when talking to remote HDFS clusters, we don't notice problems until fos.close()
                                if (createdFile != null) {
                                    try {
                                        hdfs.delete(createdFile, false);
                                    } catch (Throwable ignore) {
                                    }
                                }
                                throw re;
                            } catch (Throwable ignore) {
                            }
                            fos = null;
                        }
                    }
                });
                stopWatch.stop();
                final String dataRate = stopWatch.calculateDataRate(putFlowFile.getSize());
                final long millis = stopWatch.getDuration(TimeUnit.MILLISECONDS);
                tempDotCopyFile = tempCopyFile;

                if (!conflictResponse.equals(APPEND_RESOLUTION_AV.getValue())
                        || (conflictResponse.equals(APPEND_RESOLUTION_AV.getValue()) && !destinationExists)) {
                    boolean renamed = false;
                    for (int i = 0; i < 10; i++) { // try to rename multiple times.
                        if (hdfs.rename(tempCopyFile, copyFile)) {
                            renamed = true;
                            break; // rename was successful
                        }
                        Thread.sleep(200L); // try waiting to let whatever might cause rename failure to resolve
                    }
                    if (!renamed) {
                        hdfs.delete(tempCopyFile, false);
                        throw new ProcessException("Copied file to HDFS but could not rename dot file "
                                + tempCopyFile + " to its final filename");
                    }
                    changeOwner(context, hdfs, copyFile, flowFile);
                }

                getLogger().info("copied {} to HDFS at {} in {} milliseconds at a rate of {}",
                        new Object[] { putFlowFile, copyFile, millis, dataRate });

                final String newFilename = copyFile.getName();
                final String hdfsPath = copyFile.getParent().toString();
                putFlowFile = session.putAttribute(putFlowFile, CoreAttributes.FILENAME.key(), newFilename);
                putFlowFile = session.putAttribute(putFlowFile, ABSOLUTE_HDFS_PATH_ATTRIBUTE, hdfsPath);
                final Path qualifiedPath = copyFile.makeQualified(hdfs.getUri(), hdfs.getWorkingDirectory());
                session.getProvenanceReporter().send(putFlowFile, qualifiedPath.toString());

                session.transfer(putFlowFile, REL_SUCCESS);
            } catch (final Throwable t) {
                if (tempDotCopyFile != null) {
                    try {
                        hdfs.delete(tempDotCopyFile, false);
                    } catch (Exception e) {
                        getLogger().error("Unable to remove temporary file {} due to {}",
                                new Object[] { tempDotCopyFile, e });
                    }
                }
                getLogger().error("Failed to write to HDFS due to {}", new Object[] { t });
                session.transfer(session.penalize(putFlowFile), REL_FAILURE);
                context.yield();
            }
            return null;
        }
    });
}
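Note that PutHDFS uses the PrivilegedAction overload of doAs rather than PrivilegedExceptionAction: every failure is handled inside the action itself (the catch-all block deletes the temporary dot file, penalizes the FlowFile and routes it to REL_FAILURE), so no checked exception needs to escape, and the action simply returns null.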
From source file:org.apache.nifi.util.hive.HiveWriter.java
License:Apache License
protected RecordWriter getRecordWriter(HiveEndPoint endPoint, UserGroupInformation ugi, HiveConf hiveConf)
        throws StreamingException, IOException, InterruptedException {
    if (ugi == null) {
        return new StrictJsonWriter(endPoint, hiveConf);
    } else {
        try {
            return ugi.doAs(
                    (PrivilegedExceptionAction<StrictJsonWriter>) () -> new StrictJsonWriter(endPoint, hiveConf));
        } catch (UndeclaredThrowableException e) {
            Throwable cause = e.getCause();
            if (cause instanceof StreamingException) {
                throw (StreamingException) cause;
            } else {
                throw e;
            }
        }
    }
}
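This example shows the unwrap pattern that lambda-based actions often need: doAs rethrows IOException, InterruptedException, RuntimeException and Error from the action directly, but any other checked exception (here StreamingException) comes back wrapped in UndeclaredThrowableException, so the caller unwraps the cause to restore the declared exception type.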
From source file:org.apache.oozie.action.hadoop.FsActionExecutor.java
License:Apache License
/**
 * Delete path
 *
 * @param context
 * @param fsConf
 * @param nameNodePath
 * @param path
 * @throws ActionExecutorException
 */
public void delete(Context context, XConfiguration fsConf, Path nameNodePath, Path path, boolean skipTrash)
        throws ActionExecutorException {
    URI uri = path.toUri();
    URIHandler handler;
    try {
        handler = Services.get().get(URIHandlerService.class).getURIHandler(uri);
        if (handler instanceof FSURIHandler) {
            // Use legacy code to handle hdfs partition deletion
            path = resolveToFullPath(nameNodePath, path, true);
            final FileSystem fs = getFileSystemFor(path, context, fsConf);
            Path[] pathArr = FileUtil.stat2Paths(fs.globStatus(path));
            if (pathArr != null && pathArr.length > 0) {
                checkGlobMax(pathArr);
                for (final Path p : pathArr) {
                    if (fs.exists(p)) {
                        if (!skipTrash) {
                            // Moving directory/file to trash of user.
                            UserGroupInformationService ugiService = Services.get()
                                    .get(UserGroupInformationService.class);
                            UserGroupInformation ugi = ugiService
                                    .getProxyUser(fs.getConf().get(OozieClient.USER_NAME));
                            ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
                                @Override
                                public FileSystem run() throws Exception {
                                    Trash trash = new Trash(fs.getConf());
                                    if (!trash.moveToTrash(p)) {
                                        throw new ActionExecutorException(
                                                ActionExecutorException.ErrorType.ERROR, "FS005",
                                                "Could not move path [{0}] to trash on delete", p);
                                    }
                                    return null;
                                }
                            });
                        } else if (!fs.delete(p, true)) {
                            throw new ActionExecutorException(ActionExecutorException.ErrorType.ERROR, "FS005",
                                    "delete, path [{0}] could not delete path", p);
                        }
                    }
                }
            }
        } else {
            handler.delete(uri, handler.getContext(uri, fsConf, context.getWorkflow().getUser(), false));
        }
    } catch (Exception ex) {
        throw convertException(ex);
    }
}
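Here doAs is used so that the move to trash executes under a proxy UGI for the workflow user (taken from OozieClient.USER_NAME); per the inline comment, the intent is that the deleted path lands in that user's trash rather than in the trash of the Oozie service user.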
From source file:org.apache.oozie.action.hadoop.HDFSCredentials.java
License:Apache License
private void obtainTokensForNamenodes(final Credentials credentials, final Configuration config,
        final UserGroupInformation ugi, final Path[] paths) throws IOException, InterruptedException {
    LOG.info(String.format("\"%s\" is present in workflow configuration. Obtaining tokens for NameNode(s) [%s]",
            MRJobConfig.JOB_NAMENODES, config.get(MRJobConfig.JOB_NAMENODES)));
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            TokenCache.obtainTokensForNamenodes(credentials, paths, config);
            return null;
        }
    });
}
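The call to TokenCache.obtainTokensForNamenodes is wrapped in doAs so the NameNode delegation tokens are requested under the supplied UGI's identity and collected into the passed-in Credentials object.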
From source file:org.apache.oozie.action.hadoop.JHSCredentials.java
License:Apache License
/**
 * Create an MRClientProtocol to the JHS
 * Copied over from ClientCache in Hadoop.
 * @return the protocol that can be used to get a token with
 * @throws IOException
 */
private MRClientProtocol instantiateHistoryProxy(final Configuration configuration,
        final ActionExecutor.Context context) throws IOException {
    final String serviceAddr = configuration.get(JHAdminConfig.MR_HISTORY_ADDRESS);
    if (StringUtils.isEmpty(serviceAddr)) {
        return null;
    }
    LOG.debug("Connecting to JHS at: " + serviceAddr);
    final YarnRPC rpc = YarnRPC.create(configuration);
    LOG.debug("Connected to JHS at: " + serviceAddr);
    UserGroupInformation currentUser = Services.get().get(UserGroupInformationService.class)
            .getProxyUser(context.getWorkflow().getUser());
    return currentUser.doAs(new PrivilegedAction<MRClientProtocol>() {
        @Override
        public MRClientProtocol run() {
            return (MRClientProtocol) rpc.getProxy(HSClientProtocol.class,
                    NetUtils.createSocketAddr(serviceAddr), configuration);
        }
    });
}
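Since creating the RPC proxy throws no checked exceptions, this example uses the PrivilegedAction overload; the connection to the JobHistoryServer is established under the workflow user's proxy UGI so that, per the javadoc, a token can later be obtained through it for that user.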
From source file:org.apache.oozie.action.hadoop.KerberosDoAs.java
License:Open Source License
public Void call() throws Exception {
    final Callable<Void> callable = getCallable();
    UserGroupInformation ugi = UserGroupInformation.createProxyUser(getUser(),
            UserGroupInformation.getLoginUser());
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            callable.call();
            return null;
        }
    });
    return null;
}
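Unlike the Oozie examples that go through UserGroupInformationService, this one constructs the proxy UGI directly with UserGroupInformation.createProxyUser(user, getLoginUser()); on a secured cluster this still requires the login (Kerberos) user to be authorized for impersonation via the hadoop.proxyuser.* settings.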
From source file:org.apache.oozie.action.hadoop.LauncherAM.java
License:Apache License
public static void main(String[] args) throws Exception {
    final LocalFsOperations localFsOperations = new LocalFsOperations();
    final Configuration launcherConf = readLauncherConfiguration(localFsOperations);
    UserGroupInformation.setConfiguration(launcherConf);
    // MRAppMaster adds this call as well, but it's included only in Hadoop 2.9+
    // SecurityUtil.setConfiguration(launcherConf);
    UserGroupInformation ugi = getUserGroupInformation(launcherConf);
    printTokens("Executing Oozie Launcher with tokens:", ugi.getTokens());

    // Executing code inside a doAs with an ugi equipped with correct tokens.
    ugi.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
            LauncherAM launcher = new LauncherAM(new AMRMClientAsyncFactory(), new AMRMCallBackHandler(),
                    new HdfsOperations(new SequenceFileWriterFactory()), new LocalFsOperations(),
                    new PrepareActionsHandler(new LauncherURIHandlerFactory(null)),
                    new LauncherAMCallbackNotifierFactory(), new LauncherSecurityManager(),
                    sysenv.getenv(ApplicationConstants.Environment.CONTAINER_ID.name()), launcherConf);
            launcher.run();
            return null;
        }
    });
}
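Here getUserGroupInformation(launcherConf) yields a UGI carrying the launcher's delegation tokens, and the whole launcher runs inside a single doAs with that UGI; contrast this with LauncherAM.run() below, which filters one token kind out before executing user code.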
From source file:org.apache.oozie.action.hadoop.LauncherAM.java
License:Apache License
public void run() throws Exception {
    final ErrorHolder errorHolder = new ErrorHolder();
    OozieActionResult actionResult = OozieActionResult.FAILED;
    boolean backgroundAction = false;

    try {
        actionDir = new Path(launcherConf.get(OOZIE_ACTION_DIR_PATH));

        registerWithRM(amrmCallBackHandler);
        // Run user code without the AM_RM_TOKEN so users can't request containers
        UserGroupInformation ugi = getUserGroupInformation(launcherConf, AMRMTokenIdentifier.KIND_NAME);
        printTokens("Executing Action Main with tokens:", ugi.getTokens());
        ugi.doAs(new PrivilegedExceptionAction<Object>() {
            @Override
            public Object run() throws Exception {
                executePrepare(errorHolder);
                printDebugInfo();
                setupMainConfiguration();
                runActionMain(errorHolder);
                return null;
            }
        });

        if (!errorHolder.isPopulated()) {
            handleActionData();
            if (actionData.get(ACTION_DATA_OUTPUT_PROPS) != null) {
                System.out.println();
                System.out.println("Oozie Launcher, capturing output data:");
                System.out.println("=======================");
                System.out.println(actionData.get(ACTION_DATA_OUTPUT_PROPS));
                System.out.println();
                System.out.println("=======================");
                System.out.println();
            }
            if (actionData.get(ACTION_DATA_NEW_ID) != null) {
                System.out.println();
                System.out.println("Oozie Launcher, propagating new Hadoop job id to Oozie");
                System.out.println("=======================");
                System.out.println(actionData.get(ACTION_DATA_NEW_ID));
                System.out.println("=======================");
                System.out.println();
                backgroundAction = true;
            }
        }
    } catch (Exception e) {
        System.out.println("Launcher AM execution failed");
        System.err.println("Launcher AM execution failed");
        e.printStackTrace(System.out);
        e.printStackTrace(System.err);
        if (!errorHolder.isPopulated()) {
            errorHolder.setErrorCause(e);
            errorHolder.setErrorMessage(e.getMessage());
        }
        throw e;
    } finally {
        try {
            ErrorHolder callbackErrorHolder = amrmCallBackHandler.getError();

            if (!errorHolder.isPopulated()) {
                actionResult = backgroundAction ? OozieActionResult.RUNNING : OozieActionResult.SUCCEEDED;
            }

            if (errorHolder.isPopulated()) {
                updateActionDataWithFailure(errorHolder, actionData);
            } else if (callbackErrorHolder != null) { // async error from the callback
                actionResult = OozieActionResult.FAILED;
                updateActionDataWithFailure(callbackErrorHolder, actionData);
            }

            actionData.put(ACTION_DATA_FINAL_STATUS, actionResult.toString());
            hdfsOperations.uploadActionDataToHDFS(launcherConf, actionDir, actionData);
        } finally {
            try {
                unregisterWithRM(actionResult, errorHolder.getErrorMessage());
            } finally {
                LauncherAMCallbackNotifier cn = callbackNotifierFactory.createCallbackNotifier(launcherConf);
                cn.notifyURL(actionResult);
            }
        }
    }
}
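This is the second doAs in LauncherAM: as the inline comment explains, the UGI for user code is rebuilt without the AM-RM token (getUserGroupInformation(launcherConf, AMRMTokenIdentifier.KIND_NAME)), so the action's main class cannot use the launcher's ApplicationMaster credentials to request containers from the ResourceManager.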
From source file:org.apache.oozie.action.hadoop.LauncherHelper.java
License:Apache License
/**
 * Utility function to load the contents of action data sequence file into
 * memory object
 *
 * @param fs Action Filesystem
 * @param actionDir Path
 * @param conf Configuration
 * @return Map action data
 * @throws IOException if an IO error occurred
 * @throws InterruptedException if UGI action is interrupted
 */
public static Map<String, String> getActionData(final FileSystem fs, final Path actionDir,
        final Configuration conf) throws IOException, InterruptedException {
    UserGroupInformationService ugiService = Services.get().get(UserGroupInformationService.class);
    UserGroupInformation ugi = ugiService.getProxyUser(conf.get(OozieClient.USER_NAME));

    return ugi.doAs(new PrivilegedExceptionAction<Map<String, String>>() {
        @Override
        public Map<String, String> run() throws IOException {
            Map<String, String> ret = new HashMap<>();
            Path seqFilePath = getActionDataSequenceFilePath(actionDir);
            if (fs.exists(seqFilePath)) {
                SequenceFile.Reader seqFile = new SequenceFile.Reader(fs, seqFilePath, conf);
                Text key = new Text(), value = new Text();
                while (seqFile.next(key, value)) {
                    ret.put(key.toString(), value.toString());
                }
                seqFile.close();
            } else { // maintain backward-compatibility. to be deprecated
                org.apache.hadoop.fs.FileStatus[] files = fs.listStatus(actionDir);
                InputStream is;
                BufferedReader reader;
                Properties props;
                if (files != null && files.length > 0) {
                    for (FileStatus fileStatus : files) {
                        Path path = fileStatus.getPath();
                        if (path.equals(new Path(actionDir, "externalChildIds.properties"))) {
                            is = fs.open(path);
                            reader = new BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_8));
                            ret.put(LauncherAMUtils.ACTION_DATA_EXTERNAL_CHILD_IDS,
                                    IOUtils.getReaderAsString(reader, -1));
                        } else if (path.equals(new Path(actionDir, "newId.properties"))) {
                            is = fs.open(path);
                            reader = new BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_8));
                            props = PropertiesUtils.readProperties(reader, -1);
                            ret.put(LauncherAMUtils.ACTION_DATA_NEW_ID, props.getProperty("id"));
                        } else if (path.equals(new Path(actionDir, LauncherAMUtils.ACTION_DATA_OUTPUT_PROPS))) {
                            int maxOutputData = conf.getInt(LauncherAMUtils.CONF_OOZIE_ACTION_MAX_OUTPUT_DATA,
                                    2 * 1024);
                            is = fs.open(path);
                            reader = new BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_8));
                            ret.put(LauncherAMUtils.ACTION_DATA_OUTPUT_PROPS, PropertiesUtils
                                    .propertiesToString(PropertiesUtils.readProperties(reader, maxOutputData)));
                        } else if (path.equals(new Path(actionDir, LauncherAMUtils.ACTION_DATA_STATS))) {
                            int statsMaxOutputData = conf.getInt(
                                    LauncherAMUtils.CONF_OOZIE_EXTERNAL_STATS_MAX_SIZE, Integer.MAX_VALUE);
                            is = fs.open(path);
                            reader = new BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_8));
                            ret.put(LauncherAMUtils.ACTION_DATA_STATS, PropertiesUtils.propertiesToString(
                                    PropertiesUtils.readProperties(reader, statsMaxOutputData)));
                        } else if (path.equals(new Path(actionDir, LauncherAMUtils.ACTION_DATA_ERROR_PROPS))) {
                            is = fs.open(path);
                            reader = new BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_8));
                            ret.put(LauncherAMUtils.ACTION_DATA_ERROR_PROPS,
                                    IOUtils.getReaderAsString(reader, -1));
                        }
                    }
                }
            }
            return ret;
        }
    });
}
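The entire read, both the sequence-file path and the legacy per-file fallback that the code itself marks as "to be deprecated", runs as a proxy of the workflow user taken from OozieClient.USER_NAME, so HDFS permission checks are evaluated against that user.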
From source file:org.apache.oozie.action.hadoop.LauncherMapperHelper.java
License:Apache License
/**
 * Utility function to load the contents of action data sequence file into
 * memory object
 *
 * @param fs Action Filesystem
 * @param actionDir Path
 * @param conf Configuration
 * @return Map action data
 * @throws IOException
 * @throws InterruptedException
 */
public static Map<String, String> getActionData(final FileSystem fs, final Path actionDir,
        final Configuration conf) throws IOException, InterruptedException {
    UserGroupInformationService ugiService = Services.get().get(UserGroupInformationService.class);
    UserGroupInformation ugi = ugiService.getProxyUser(conf.get(OozieClient.USER_NAME));

    return ugi.doAs(new PrivilegedExceptionAction<Map<String, String>>() {
        @Override
        public Map<String, String> run() throws IOException {
            Map<String, String> ret = new HashMap<String, String>();
            Path seqFilePath = getActionDataSequenceFilePath(actionDir);
            if (fs.exists(seqFilePath)) {
                SequenceFile.Reader seqFile = new SequenceFile.Reader(fs, seqFilePath, conf);
                Text key = new Text(), value = new Text();
                while (seqFile.next(key, value)) {
                    ret.put(key.toString(), value.toString());
                }
                seqFile.close();
            } else { // maintain backward-compatibility. to be deprecated
                org.apache.hadoop.fs.FileStatus[] files = fs.listStatus(actionDir);
                InputStream is;
                BufferedReader reader = null;
                Properties props;
                if (files != null && files.length > 0) {
                    for (int x = 0; x < files.length; x++) {
                        Path file = files[x].getPath();
                        if (file.equals(new Path(actionDir, "externalChildIds.properties"))) {
                            is = fs.open(file);
                            reader = new BufferedReader(new InputStreamReader(is));
                            ret.put(LauncherMapper.ACTION_DATA_EXTERNAL_CHILD_IDS,
                                    IOUtils.getReaderAsString(reader, -1));
                        } else if (file.equals(new Path(actionDir, "newId.properties"))) {
                            is = fs.open(file);
                            reader = new BufferedReader(new InputStreamReader(is));
                            props = PropertiesUtils.readProperties(reader, -1);
                            ret.put(LauncherMapper.ACTION_DATA_NEW_ID, props.getProperty("id"));
                        } else if (file.equals(new Path(actionDir, LauncherMapper.ACTION_DATA_OUTPUT_PROPS))) {
                            int maxOutputData = conf.getInt(LauncherMapper.CONF_OOZIE_ACTION_MAX_OUTPUT_DATA,
                                    2 * 1024);
                            is = fs.open(file);
                            reader = new BufferedReader(new InputStreamReader(is));
                            ret.put(LauncherMapper.ACTION_DATA_OUTPUT_PROPS, PropertiesUtils
                                    .propertiesToString(PropertiesUtils.readProperties(reader, maxOutputData)));
                        } else if (file.equals(new Path(actionDir, LauncherMapper.ACTION_DATA_STATS))) {
                            int statsMaxOutputData = conf.getInt(
                                    LauncherMapper.CONF_OOZIE_EXTERNAL_STATS_MAX_SIZE, Integer.MAX_VALUE);
                            is = fs.open(file);
                            reader = new BufferedReader(new InputStreamReader(is));
                            ret.put(LauncherMapper.ACTION_DATA_STATS, PropertiesUtils.propertiesToString(
                                    PropertiesUtils.readProperties(reader, statsMaxOutputData)));
                        } else if (file.equals(new Path(actionDir, LauncherMapper.ACTION_DATA_ERROR_PROPS))) {
                            is = fs.open(file);
                            reader = new BufferedReader(new InputStreamReader(is));
                            ret.put(LauncherMapper.ACTION_DATA_ERROR_PROPS,
                                    IOUtils.getReaderAsString(reader, -1));
                        }
                    }
                }
            }
            return ret;
        }
    });
}
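As far as the constants suggest, this is the older LauncherMapper-era counterpart of LauncherHelper.getActionData above: the doAs structure is identical, while the newer variant switched the constants to LauncherAMUtils and added explicit UTF-8 charsets to the readers.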