Example usage for org.apache.hadoop.security UserGroupInformation getCurrentUser

Introduction

On this page you can find example usages of org.apache.hadoop.security.UserGroupInformation#getCurrentUser.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static UserGroupInformation getCurrentUser() throws IOException 

Document

Return the current user, including any doAs in the current stack.
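
Below is a minimal, self-contained sketch (not taken from the usage examples that follow) illustrating the doAs part of this contract: inside a doAs block, getCurrentUser() returns the proxied user rather than the login user. The proxy user name "alice" is purely illustrative.

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;

public class GetCurrentUserDemo {
    public static void main(String[] args) throws Exception {
        // Outside any doAs block, getCurrentUser() returns the login user.
        UserGroupInformation loginUser = UserGroupInformation.getCurrentUser();
        System.out.println("login user: " + loginUser.getUserName());

        // Inside doAs, getCurrentUser() returns the proxied user instead;
        // this is the "any doAs in the current stack" part of the contract.
        // The user name "alice" is hypothetical.
        UserGroupInformation proxyUser = UserGroupInformation.createProxyUser("alice", loginUser);
        proxyUser.doAs((PrivilegedExceptionAction<Void>) () -> {
            System.out.println("inside doAs: " + UserGroupInformation.getCurrentUser().getUserName());
            return null;
        });
    }
}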

Usage

From source file:gobblin.yarn.YarnSecurityManagerTest.java

License:Apache License

@Test(dependsOnMethods = "testWriteDelegationTokenToFile")
public void testYarnContainerSecurityManager() throws IOException {
    Collection<Token<?>> tokens = this.yarnContainerSecurityManager.readDelegationTokens(this.tokenFilePath);
    assertToken(tokens);
    this.yarnContainerSecurityManager.addDelegationTokens(tokens);
    assertToken(UserGroupInformation.getCurrentUser().getTokens());
}

From source file:gobblin.yarn.YarnService.java

License:Apache License

private ByteBuffer getSecurityTokens() throws IOException {
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    Closer closer = Closer.create();
    try {
        DataOutputBuffer dataOutputBuffer = closer.register(new DataOutputBuffer());
        credentials.writeTokenStorageToStream(dataOutputBuffer);

        // Remove the AM->RM token so that containers cannot access it
        Iterator<Token<?>> tokenIterator = credentials.getAllTokens().iterator();
        while (tokenIterator.hasNext()) {
            Token<?> token = tokenIterator.next();
            if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
                tokenIterator.remove();
            }
        }

        return ByteBuffer.wrap(dataOutputBuffer.getData(), 0, dataOutputBuffer.getLength());
    } catch (Throwable t) {
        throw closer.rethrow(t);
    } finally {
        closer.close();
    }
}
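
As a hedged counterpart to getSecurityTokens() above (not part of the Gobblin sources), a container could deserialize such a buffer back into its own UGI using org.apache.hadoop.io.DataInputBuffer and org.apache.hadoop.security.Credentials. The method name absorbSecurityTokens is hypothetical, and the sketch assumes an array-backed ByteBuffer such as the one produced by ByteBuffer.wrap above.

private void absorbSecurityTokens(ByteBuffer tokenBuffer) throws IOException {
    DataInputBuffer in = new DataInputBuffer();
    // Assumes an array-backed buffer, as produced by ByteBuffer.wrap.
    in.reset(tokenBuffer.array(), tokenBuffer.arrayOffset(), tokenBuffer.limit());
    Credentials credentials = new Credentials();
    credentials.readTokenStorageStream(in);
    // Merge the deserialized tokens into the current user's credentials.
    UserGroupInformation.getCurrentUser().addCredentials(credentials);
}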

From source file:hydrograph.engine.utilities.HiveMetastoreTokenProvider.java

License:Apache License

public static void obtainTokenForHiveMetastore(Configuration conf) throws TException, IOException {
    conf.addResource(new Path(HiveConfigurationMapping.getHiveConf("path_to_hive_site_xml")));
    HiveConf hiveConf = new HiveConf();
    hiveConf.addResource(conf);
    try {
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        HiveMetaStoreClient hiveMetaStoreClient = new HiveMetaStoreClient(hiveConf);

        if (UserGroupInformation.isSecurityEnabled()) {
            String metastore_uri = conf.get("hive.metastore.uris");

            LOG.trace("Metastore URI:" + metastore_uri);

            // Check for local metastore
            if (metastore_uri != null && metastore_uri.length() > 0) {
                String principal = conf.get("hive.metastore.kerberos.principal");
                String username = ugi.getUserName();

                if (principal != null && username != null) {
                    LOG.debug("username: " + username);
                    LOG.debug("principal: " + principal);

                    String tokenStr;
                    try {
                        // Get a delegation token from the Metastore.
                        tokenStr = hiveMetaStoreClient.getDelegationToken(username, principal);
                        // LOG.debug("Token String: " + tokenStr);
                    } catch (TException e) {
                        LOG.error(e.getMessage(), e);
                        throw new RuntimeException(e);
                    }

                    // Create the token from the token string.
                    Token<DelegationTokenIdentifier> hmsToken = new Token<DelegationTokenIdentifier>();
                    hmsToken.decodeFromUrlString(tokenStr);
                    // LOG.debug("Hive Token: " + hmsToken);

                    // Add the token to the credentials.
                    ugi.addToken(new Text("hive.metastore.delegation.token"), hmsToken);
                    LOG.trace("Added hive.metastore.delegation.token to conf.");
                } else {
                    LOG.debug("Username or principal == NULL");
                    LOG.debug("username= " + username);
                    LOG.debug("principal= " + principal);
                    throw new IllegalArgumentException("username and/or principal is equal to null!");
                }

            } else {
                LOG.info("HiveMetaStore configured in local mode");
            }
        }
    } catch (IOException e) {
        LOG.error(e.getMessage(), e);
        throw new RuntimeException(e);
    } catch (MetaException e) {
        LOG.error(e.getMessage(), e);
        throw new RuntimeException(e);
    }
}

From source file:io.druid.security.kerberos.DruidKerberosUtil.java

License:Apache License

public static void authenticateIfRequired(AuthenticationKerberosConfig config) throws IOException {
    String principal = config.getPrincipal();
    String keytab = config.getKeytab();
    if (!Strings.isNullOrEmpty(principal) && !Strings.isNullOrEmpty(keytab)) {
        Configuration conf = new Configuration();
        conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
        UserGroupInformation.setConfiguration(conf);
        try {
            if (!UserGroupInformation.getCurrentUser().hasKerberosCredentials()
                    || !UserGroupInformation.getCurrentUser().getUserName().equals(principal)) {
                log.info("trying to authenticate user [%s] with keytab [%s]", principal, keytab);
                UserGroupInformation.loginUserFromKeytab(principal, keytab);
            }
        } catch (IOException e) {
            throw new ISE(e, "Failed to authenticate user principal [%s] with keytab [%s]", principal, keytab);
        }
    }
}

From source file:io.druid.security.kerberos.KerberosEscalator.java

License:Apache License

@Override
public org.eclipse.jetty.client.HttpClient createEscalatedJettyClient(
        org.eclipse.jetty.client.HttpClient baseClient) {
    baseClient.getAuthenticationStore().addAuthentication(new Authentication() {
        @Override
        public boolean matches(String type, URI uri, String realm) {
            return true;
        }

        @Override
        public Result authenticate(final Request request, ContentResponse response,
                Authentication.HeaderInfo headerInfo, Attributes context) {
            return new Result() {
                @Override
                public URI getURI() {
                    return request.getURI();
                }

                @Override
                public void apply(Request request) {
                    try {
                        // No need to set cookies as they are handled by Jetty Http Client itself.
                        URI uri = request.getURI();
                        if (DruidKerberosUtil.needToSendCredentials(baseClient.getCookieStore(), uri)) {
                            log.debug(
                                    "No Auth Cookie found for URI[%s]. Existing Cookies[%s] Authenticating... ",
                                    uri, baseClient.getCookieStore().getCookies());
                            final String host = request.getHost();
                            DruidKerberosUtil.authenticateIfRequired(internalClientPrincipal,
                                    internalClientKeytab);
                            UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
                            String challenge = currentUser.doAs(new PrivilegedExceptionAction<String>() {
                                @Override
                                public String run() throws Exception {
                                    return DruidKerberosUtil.kerberosChallenge(host);
                                }
                            });
                            request.getHeaders().add(HttpHeaders.Names.AUTHORIZATION, "Negotiate " + challenge);
                        } else {
                            log.debug("Found Auth Cookie found for URI[%s].", uri);
                        }
                    } catch (Throwable e) {
                        Throwables.propagate(e);
                    }
                }
            };
        }
    });
    return baseClient;
}

From source file:io.druid.security.kerberos.KerberosHttpClient.java

License:Apache License

private <Intermediate, Final> void inner_go(final Request request,
        final HttpResponseHandler<Intermediate, Final> httpResponseHandler, final Duration duration,
        final SettableFuture<Final> future) {
    try {
        final String host = request.getUrl().getHost();
        final URI uri = request.getUrl().toURI();

        Map<String, List<String>> cookieMap = cookieManager.get(uri,
                Collections.<String, List<String>>emptyMap());
        for (Map.Entry<String, List<String>> entry : cookieMap.entrySet()) {
            request.addHeaderValues(entry.getKey(), entry.getValue());
        }
        final boolean should_retry_on_unauthorized_response;

        if (DruidKerberosUtil.needToSendCredentials(cookieManager.getCookieStore(), uri)) {
            // No Cookies for requested URI, authenticate user and add authentication header
            log.debug("No Auth Cookie found for URI[%s]. Existing Cookies[%s] Authenticating... ", uri,
                    cookieManager.getCookieStore().getCookies());
            DruidKerberosUtil.authenticateIfRequired(config);
            UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
            String challenge = currentUser.doAs(new PrivilegedExceptionAction<String>() {
                @Override
                public String run() throws Exception {
                    return DruidKerberosUtil.kerberosChallenge(host);
                }
            });
            request.setHeader(HttpHeaders.Names.AUTHORIZATION, "Negotiate " + challenge);
            should_retry_on_unauthorized_response = false;
        } else {
            should_retry_on_unauthorized_response = true;
            log.debug("Found Auth Cookie found for URI[%s].", uri);
        }

        ListenableFuture<RetryResponseHolder<Final>> internalFuture = delegate.go(request,
                new RetryIfUnauthorizedResponseHandler<Intermediate, Final>(new ResponseCookieHandler(
                        request.getUrl().toURI(), cookieManager, httpResponseHandler)),
                duration);

        Futures.addCallback(internalFuture, new FutureCallback<RetryResponseHolder<Final>>() {
            @Override
            public void onSuccess(RetryResponseHolder<Final> result) {
                if (should_retry_on_unauthorized_response && result.shouldRetry()) {
                    log.info("Preparing for Retry");
                    // remove Auth cookie
                    DruidKerberosUtil.removeAuthCookie(cookieManager.getCookieStore(), uri);
                    // clear existing cookie
                    request.setHeader("Cookie", "");
                    inner_go(request.copy(), httpResponseHandler, duration, future);
                } else {
                    log.info("Not retrying and returning future response");
                    future.set(result.getObj());
                }
            }

            @Override
            public void onFailure(Throwable t) {
                future.setException(t);
            }
        }, exec);
    } catch (Throwable e) {
        throw Throwables.propagate(e);
    }
}

From source file:io.druid.security.kerberos.KerberosJettyHttpClientProvider.java

License:Apache License

@Override
public HttpClient get() {
    final HttpClient httpClient = delegateProvider.get();
    httpClient.getAuthenticationStore().addAuthentication(new Authentication() {
        @Override
        public boolean matches(String type, URI uri, String realm) {
            return true;
        }

        @Override
        public Result authenticate(final Request request, ContentResponse response,
                Authentication.HeaderInfo headerInfo, Attributes context) {
            return new Result() {
                @Override
                public URI getURI() {
                    return request.getURI();
                }

                @Override
                public void apply(Request request) {
                    try {
                        // No need to set cookies as they are handled by Jetty Http Client itself.
                        URI uri = request.getURI();
                        if (DruidKerberosUtil.needToSendCredentials(httpClient.getCookieStore(), uri)) {
                            log.debug(
                                    "No Auth Cookie found for URI[%s]. Existing Cookies[%s] Authenticating... ",
                                    uri, httpClient.getCookieStore().getCookies());
                            final String host = request.getHost();
                            DruidKerberosUtil.authenticateIfRequired(config);
                            UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
                            String challenge = currentUser.doAs(new PrivilegedExceptionAction<String>() {
                                @Override
                                public String run() throws Exception {
                                    return DruidKerberosUtil.kerberosChallenge(host);
                                }
                            });
                            request.getHeaders().add(HttpHeaders.Names.AUTHORIZATION, "Negotiate " + challenge);
                        } else {
                            log.debug("Found Auth Cookie found for URI[%s].", uri);
                        }
                    } catch (Throwable e) {
                        Throwables.propagate(e);
                    }
                }
            };
        }
    });
    return httpClient;
}

From source file:io.druid.storage.hdfs.HdfsStorageAuthentication.java

License:Apache License

/**
 * Does authentication against a secured Hadoop cluster.
 * In case of any bug fix, make sure to fix the code in JobHelper#authenticate as well.
 */
@LifecycleStart
public void authenticate() {
    String principal = hdfsKerberosConfig.getPrincipal();
    String keytab = hdfsKerberosConfig.getKeytab();
    if (!Strings.isNullOrEmpty(principal) && !Strings.isNullOrEmpty(keytab)) {
        UserGroupInformation.setConfiguration(hadoopConf);
        if (UserGroupInformation.isSecurityEnabled()) {
            try {
                if (!UserGroupInformation.getCurrentUser().hasKerberosCredentials()
                        || !UserGroupInformation.getCurrentUser().getUserName().equals(principal)) {
                    log.info("Trying to authenticate user [%s] with keytab [%s]..", principal, keytab);
                    UserGroupInformation.loginUserFromKeytab(principal, keytab);
                }
            } catch (IOException e) {
                throw new ISE(e, "Failed to authenticate user principal [%s] with keytab [%s]", principal,
                        keytab);
            }
        }
    }
}

From source file:io.hops.hopsworks.common.jobs.flink.AbstractYarnClusterDescriptor.java

License:Apache License

/**
 * This method will block until the ApplicationMaster/JobManager have been
 * deployed on YARN.
 */
protected YarnClusterClient deployInternal() throws Exception {
    isReadyForDeployment();
    LOG.info("Using values:");
    LOG.info("\tTaskManager count = {}", taskManagerCount);
    LOG.info("\tJobManager memory = {}", jobManagerMemoryMb);
    LOG.info("\tTaskManager memory = {}", taskManagerMemoryMb);

    final YarnClient yarnClient = getYarnClient();

    // ------------------ Check if the specified queue exists --------------------
    try {
        List<QueueInfo> queues = yarnClient.getAllQueues();
        // check only if there are queues configured in yarn and for this session.
        if (queues.size() > 0 && this.yarnQueue != null) {
            boolean queueFound = false;
            for (QueueInfo queue : queues) {
                if (queue.getQueueName().equals(this.yarnQueue)) {
                    queueFound = true;
                    break;
                }
            }
            if (!queueFound) {
                String queueNames = "";
                for (QueueInfo queue : queues) {
                    queueNames += queue.getQueueName() + ", ";
                }
                LOG.warn("The specified queue '" + this.yarnQueue + "' does not exist. " + "Available queues: "
                        + queueNames);
            }
        } else {
            LOG.debug("The YARN cluster does not have any queues configured");
        }
    } catch (Throwable e) {
        LOG.warn("Error while getting queue information from YARN: " + e.getMessage());
        if (LOG.isDebugEnabled()) {
            LOG.debug("Error details", e);
        }
    }

    // Create application via yarnClient
    final YarnClientApplication yarnApplication = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = yarnApplication.getNewApplicationResponse();

    Map<String, String> jobSystemProperties = new HashMap<>(2);

    // Certificates are materialized locally so DFSClient can be set to null
    // LocalResources are not used by Flink, so set it null
    HopsUtils.copyUserKafkaCerts(services.getUserCerts(), project, username,
            services.getSettings().getHopsworksTmpCertDir(), services.getSettings().getHdfsTmpCertDir(),
            JobType.FLINK, null, null, jobSystemProperties, services.getSettings().getFlinkKafkaCertDir(),
            appResponse.getApplicationId().toString());

    StringBuilder tmpBuilder = new StringBuilder();
    for (Map.Entry<String, String> prop : jobSystemProperties.entrySet()) {
        String option = YarnRunner.escapeForShell("-D" + prop.getKey() + "=" + prop.getValue());
        javaOptions.add(option);
        addHopsworksParam(option);
        tmpBuilder.append(prop.getKey()).append("=").append(prop.getValue()).append("@@");
    }

    dynamicPropertiesEncoded += tmpBuilder.toString();

    // ------------------ Add dynamic properties to local flinkConfiguraton ------
    Map<String, String> dynProperties = getDynamicProperties(dynamicPropertiesEncoded);
    for (Map.Entry<String, String> dynProperty : dynProperties.entrySet()) {
        flinkConfiguration.setString(dynProperty.getKey(), dynProperty.getValue());
    }

    // ------------------ Set default file system scheme -------------------------
    try {
        org.apache.flink.core.fs.FileSystem.setDefaultScheme(flinkConfiguration);
    } catch (IOException e) {
        throw new IOException("Error while setting the default " + "filesystem scheme from configuration.", e);
    }

    // initialize file system
    // Copy the application master jar to the filesystem
    // Create a local resource to point to the destination jar path
    final FileSystem fs = FileSystem.get(conf);

    // Hard-coded check for the GoogleHDFS client because it does not override
    // the getScheme() method.
    if (!fs.getClass().getSimpleName().equals("GoogleHadoopFileSystem") && fs.getScheme().startsWith("file")) {
        LOG.warn("The file system scheme is '" + fs.getScheme() + "'. This indicates that the "
                + "specified Hadoop configuration path is wrong and the system is "
                + "using the default Hadoop configuration values. The Flink YARN "
                + "client needs to store its files in a distributed file system");
    }

    // ------ Check if the YARN ClusterClient has the requested resources ---
    // the yarnMinAllocationMB specifies the smallest possible container 
    // allocation size. all allocations below this value are automatically 
    // set to this value.
    final int yarnMinAllocationMB = conf.getInt("yarn.scheduler.minimum-allocation-mb", 0);
    if (jobManagerMemoryMb < yarnMinAllocationMB || taskManagerMemoryMb < yarnMinAllocationMB) {
        LOG.warn("The JobManager or TaskManager memory is below the smallest possible "
                + "YARN Container size. The value of 'yarn.scheduler.minimum-allocation-mb'" + " is "
                + yarnMinAllocationMB + "'. Please increase the memory size."
                + "YARN will allocate the smaller containers but the scheduler will"
                + " account for the minimum-allocation-mb, maybe not all instances "
                + "you requested will start.");
    }

    // set the memory to minAllocationMB to do the next checks correctly
    if (jobManagerMemoryMb < yarnMinAllocationMB) {
        jobManagerMemoryMb = yarnMinAllocationMB;
    }
    if (taskManagerMemoryMb < yarnMinAllocationMB) {
        taskManagerMemoryMb = yarnMinAllocationMB;
    }

    Resource maxRes = appResponse.getMaximumResourceCapability();
    final String NOTE = "Please check the 'yarn.scheduler.maximum-allocation-mb' and the "
            + "'yarn.nodemanager.resource.memory-mb' configuration values\n";
    if (jobManagerMemoryMb > maxRes.getMemory()) {
        failSessionDuringDeployment(yarnClient, yarnApplication);
        throw new YarnDeploymentException("The cluster does not have the requested resources for the JobManager"
                + " available!\n" + "Maximum Memory: " + maxRes.getMemory() + "MB Requested: "
                + jobManagerMemoryMb + "MB. " + NOTE);
    }

    if (taskManagerMemoryMb > maxRes.getMemory()) {
        failSessionDuringDeployment(yarnClient, yarnApplication);
        throw new YarnDeploymentException(
                "The cluster does not have the requested resources for the TaskManagers available!\n"
                        + "Maximum Memory: " + maxRes.getMemory() + " Requested: " + taskManagerMemoryMb
                        + "MB. " + NOTE);
    }

    final String NOTE_RSC = "\nThe Flink YARN client will try to allocate the YARN session, "
            + "but maybe not all TaskManagers are connecting from the beginning "
            + "because the resources are currently not available in the cluster. "
            + "The allocation might take more time than usual because the Flink "
            + "YARN client needs to wait until the resources become available.";
    int totalMemoryRequired = jobManagerMemoryMb + taskManagerMemoryMb * taskManagerCount;
    ClusterResourceDescription freeClusterMem = getCurrentFreeClusterResources(yarnClient);
    if (freeClusterMem.totalFreeMemory < totalMemoryRequired) {
        LOG.warn("This YARN session requires " + totalMemoryRequired + "MB of memory in the cluster. "
                + "There are currently only " + freeClusterMem.totalFreeMemory + "MB available." + NOTE_RSC);

    }
    if (taskManagerMemoryMb > freeClusterMem.containerLimit) {
        LOG.warn("The requested amount of memory for the TaskManagers (" + taskManagerMemoryMb
                + "MB) is more than " + "the largest possible YARN container: " + freeClusterMem.containerLimit
                + NOTE_RSC);
    }
    if (jobManagerMemoryMb > freeClusterMem.containerLimit) {
        LOG.warn(
                "The requested amount of memory for the JobManager (" + jobManagerMemoryMb + "MB) is more than "
                        + "the largest possible YARN container: " + freeClusterMem.containerLimit + NOTE_RSC);
    }

    // ----------------- check if the requested containers fit into the cluster.
    int[] nmFree = Arrays.copyOf(freeClusterMem.nodeManagersFree, freeClusterMem.nodeManagersFree.length);
    // first, allocate the jobManager somewhere.
    if (!allocateResource(nmFree, jobManagerMemoryMb)) {
        LOG.warn("Unable to find a NodeManager that can fit the JobManager/Application master. "
                + "The JobManager requires " + jobManagerMemoryMb + "MB. NodeManagers available: "
                + Arrays.toString(freeClusterMem.nodeManagersFree) + NOTE_RSC);
    }
    // allocate TaskManagers
    for (int i = 0; i < taskManagerCount; i++) {
        if (!allocateResource(nmFree, taskManagerMemoryMb)) {
            LOG.warn("There is not enough memory available in the YARN cluster. "
                    + "The TaskManager(s) require " + taskManagerMemoryMb + "MB each. "
                    + "NodeManagers available: " + Arrays.toString(freeClusterMem.nodeManagersFree) + "\n"
                    + "After allocating the JobManager (" + jobManagerMemoryMb + "MB) and (" + i + "/"
                    + taskManagerCount + ") TaskManagers, " + "the following NodeManagers are available: "
                    + Arrays.toString(nmFree) + NOTE_RSC);
        }
    }

    Set<File> effectiveShipFiles = new HashSet<>(shipFiles.size());
    for (File file : shipFiles) {
        effectiveShipFiles.add(file.getAbsoluteFile());
    }

    //check if there is a logback or log4j file
    File logbackFile = new File(configurationDirectory + File.separator + CONFIG_FILE_LOGBACK_NAME);
    final boolean hasLogback = logbackFile.exists();
    if (hasLogback) {
        effectiveShipFiles.add(logbackFile);
    }

    File log4jFile = new File(configurationDirectory + File.separator + CONFIG_FILE_LOG4J_NAME);
    final boolean hasLog4j = log4jFile.exists();
    if (hasLog4j) {
        effectiveShipFiles.add(log4jFile);
        if (hasLogback) {
            // this means there is already a logback configuration file --> fail
            LOG.warn("The configuration directory ('" + configurationDirectory + "') contains both LOG4J and "
                    + "Logback configuration files. Please delete or rename one of them.");
        }
    }

    addLibFolderToShipFiles(effectiveShipFiles);

    final ContainerLaunchContext amContainer = setupApplicationMasterContainer(hasLogback, hasLog4j);

    // Set-up ApplicationSubmissionContext for the application
    ApplicationSubmissionContext appContext = yarnApplication.getApplicationSubmissionContext();

    final ApplicationId appId = appContext.getApplicationId();

    // ------------------ Add Zookeeper namespace to local flinkConfiguraton ------
    String zkNamespace = getZookeeperNamespace();
    // no user specified cli argument for namespace?
    if (zkNamespace == null || zkNamespace.isEmpty()) {
        // namespace defined in config? else use applicationId as default.
        zkNamespace = flinkConfiguration.getString(ConfigConstants.ZOOKEEPER_NAMESPACE_KEY,
                String.valueOf(appId));
        setZookeeperNamespace(zkNamespace);
    }

    flinkConfiguration.setString(ConfigConstants.ZOOKEEPER_NAMESPACE_KEY, zkNamespace);

    if (RecoveryMode.isHighAvailabilityModeActivated(flinkConfiguration)) {
        // activate re-execution of failed applications
        appContext.setMaxAppAttempts(flinkConfiguration.getInteger(ConfigConstants.YARN_APPLICATION_ATTEMPTS,
                YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS));

        activateHighAvailabilitySupport(appContext);
    } else {
        // set number of application retries to 1 in the default case
        appContext
                .setMaxAppAttempts(flinkConfiguration.getInteger(ConfigConstants.YARN_APPLICATION_ATTEMPTS, 1));
    }

    // local resource map for Yarn
    final Map<String, LocalResource> localResources = new HashMap<>(2 + effectiveShipFiles.size());
    // list of remote paths (after upload)
    final List<Path> paths = new ArrayList<>(2 + effectiveShipFiles.size());
    // classpath assembler
    final StringBuilder classPathBuilder = new StringBuilder();
    // ship list that enables reuse of resources for task manager containers
    StringBuilder envShipFileList = new StringBuilder();

    // upload and register ship files
    for (File shipFile : effectiveShipFiles) {
        LocalResource shipResources = Records.newRecord(LocalResource.class);

        Path shipLocalPath = new Path("file://" + shipFile.getAbsolutePath());
        Path remotePath = Utils.setupLocalResource(fs, appId.toString(), shipLocalPath, shipResources,
                fs.getHomeDirectory());

        paths.add(remotePath);

        localResources.put(shipFile.getName(), shipResources);

        classPathBuilder.append(shipFile.getName());
        if (shipFile.isDirectory()) {
            // add directories to the classpath
            classPathBuilder.append(File.separator).append("*");
        }
        classPathBuilder.append(File.pathSeparator);

        envShipFileList.append(remotePath).append(",");
    }
    ////////////////////////////////////////////////////////////////////////////
    /*
     * Add Hops LocalResources paths here
     *
     */
    //Add it to localResources
    for (Entry<String, LocalResource> entry : hopsworksResources.entrySet()) {
        localResources.put(entry.getKey(), entry.getValue());
        //Append name to classPathBuilder
        classPathBuilder.append(entry.getKey());
        classPathBuilder.append(File.pathSeparator);
    }

    ////////////////////////////////////////////////////////////////////////////
    // Setup jar for ApplicationMaster
    LocalResource appMasterJar = Records.newRecord(LocalResource.class);
    LocalResource flinkConf = Records.newRecord(LocalResource.class);
    Path remotePathJar = Utils.setupLocalResource(fs, appId.toString(), flinkJarPath, appMasterJar,
            fs.getHomeDirectory());
    Path remotePathConf = Utils.setupLocalResource(fs, appId.toString(), flinkConfigurationPath, flinkConf,
            fs.getHomeDirectory());
    localResources.put("flink.jar", appMasterJar);
    localResources.put("flink-conf.yaml", flinkConf);

    paths.add(remotePathJar);
    classPathBuilder.append("flink.jar").append(File.pathSeparator);
    paths.add(remotePathConf);
    classPathBuilder.append("flink-conf.yaml").append(File.pathSeparator);

    sessionFilesDir = new Path(fs.getHomeDirectory(), ".flink/" + appId.toString() + "/");

    FsPermission permission = new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE);
    fs.setPermission(sessionFilesDir, permission); // set permission for path.

    // setup security tokens
    Utils.setTokensFor(amContainer, paths, conf);

    amContainer.setLocalResources(localResources);
    fs.close();

    // Setup CLASSPATH and environment variables for ApplicationMaster
    final Map<String, String> appMasterEnv = new HashMap<>();
    // set user specified app master environment variables
    appMasterEnv.putAll(Utils.getEnvironmentVariables(ConfigConstants.YARN_APPLICATION_MASTER_ENV_PREFIX,
            flinkConfiguration));
    // set Flink app class path
    appMasterEnv.put(YarnConfigKeys.ENV_FLINK_CLASSPATH, classPathBuilder.toString());

    // set Flink on YARN internal configuration values
    appMasterEnv.put(YarnConfigKeys.ENV_TM_COUNT, String.valueOf(taskManagerCount));
    appMasterEnv.put(YarnConfigKeys.ENV_TM_MEMORY, String.valueOf(taskManagerMemoryMb));
    appMasterEnv.put(YarnConfigKeys.FLINK_JAR_PATH, remotePathJar.toString());
    appMasterEnv.put(YarnConfigKeys.ENV_APP_ID, appId.toString());
    appMasterEnv.put(YarnConfigKeys.ENV_CLIENT_HOME_DIR, fs.getHomeDirectory().toString());
    appMasterEnv.put(YarnConfigKeys.ENV_CLIENT_SHIP_FILES, envShipFileList.toString());
    appMasterEnv.put(YarnConfigKeys.ENV_CLIENT_USERNAME,
            UserGroupInformation.getCurrentUser().getShortUserName());
    appMasterEnv.put(YarnConfigKeys.ENV_SLOTS, String.valueOf(slots));
    appMasterEnv.put(YarnConfigKeys.ENV_DETACHED, String.valueOf(detached));
    appMasterEnv.put(YarnConfigKeys.ENV_ZOOKEEPER_NAMESPACE, getZookeeperNamespace());

    if (dynamicPropertiesEncoded != null) {
        appMasterEnv.put(YarnConfigKeys.ENV_DYNAMIC_PROPERTIES, dynamicPropertiesEncoded);
    }

    // set classpath from YARN configuration
    Utils.setupYarnClassPath(conf, appMasterEnv);

    amContainer.setEnvironment(appMasterEnv);

    // Set up resource type requirements for ApplicationMaster
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(jobManagerMemoryMb);
    capability.setVirtualCores(1);

    String name;
    if (customName == null) {
        name = "Flink session with " + taskManagerCount + " TaskManagers";
        if (detached) {
            name += " (detached)";
        }
    } else {
        name = customName;
    }

    appContext.setApplicationName(name); // application name
    appContext.setApplicationType("Apache Flink");
    appContext.setAMContainerSpec(amContainer);
    appContext.setResource(capability);
    if (yarnQueue != null) {
        appContext.setQueue(yarnQueue);
    }

    // add a hook to clean up in case deployment fails
    Thread deploymentFailureHook = new DeploymentFailureHook(yarnClient, yarnApplication);
    Runtime.getRuntime().addShutdownHook(deploymentFailureHook);
    LOG.info("Submitting application master " + appId);
    yarnClient.submitApplication(appContext);

    LOG.info("Waiting for the cluster to be allocated");
    final long startTime = System.currentTimeMillis();
    ApplicationReport report;
    YarnApplicationState lastAppState = YarnApplicationState.NEW;
    loop: while (true) {
        try {
            report = yarnClient.getApplicationReport(appId);
        } catch (IOException e) {
            throw new YarnDeploymentException("Failed to deploy the cluster: " + e.getMessage());
        }
        YarnApplicationState appState = report.getYarnApplicationState();
        switch (appState) {
        case FAILED:
        case FINISHED:
        case KILLED:
            throw new YarnDeploymentException("The YARN application unexpectedly switched to state " + appState
                    + " during deployment. \n" + "Diagnostics from YARN: " + report.getDiagnostics() + "\n"
                    + "If log aggregation is enabled on your cluster, use this "
                    + "command to further investigate the issue:\n" + "yarn logs -applicationId " + appId);
            //break ..
        case RUNNING:
            LOG.info("YARN application has been deployed successfully.");
            break loop;
        default:
            if (appState != lastAppState) {
                LOG.info("Deploying cluster, current state " + appState);
            }
            if (System.currentTimeMillis() - startTime > 60000) {
                LOG.info("Deployment took more than 60 seconds. Please check if the "
                        + "requested resources are available in the YARN cluster");
            }

        }
        lastAppState = appState;
        Thread.sleep(250);
    }
    // print the application id for user to cancel themselves.
    if (isDetachedMode()) {
        LOG.info("The Flink YARN client has been started in detached mode. In order to stop "
                + "Flink on YARN, use the following command or a YARN web interface to stop "
                + "it:\nyarn application -kill " + appId + "\nPlease also note that the "
                + "temporary files of the YARN session in the home directoy will not be removed.");
    }
    // since deployment was successful, remove the hook
    try {
        Runtime.getRuntime().removeShutdownHook(deploymentFailureHook);
    } catch (IllegalStateException e) {
        // we're already in the shut down hook.
    }

    String host = report.getHost();
    int port = report.getRpcPort();

    // Correctly initialize the Flink config
    flinkConfiguration.setString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, host);
    flinkConfiguration.setInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, port);

    // the Flink cluster is deployed in YARN. Represent cluster
    return createYarnClusterClient(this, yarnClient, report, flinkConfiguration, sessionFilesDir, true);
}

From source file:io.hops.metadata.util.TestHopYarnAPIUtilities.java

License:Apache License

@Test(timeout = 60000)
public void testAppSubmissionAndNodeUpdate() throws Exception {
    MockRM rm = new MockRM(conf);
    rm.start();

    ClientRMService rmService = rm.getClientRMService();

    GetApplicationsRequest getRequest = GetApplicationsRequest
            .newInstance(EnumSet.of(YarnApplicationState.KILLED));

    ApplicationId appId1 = getApplicationId(100);
    ApplicationId appId2 = getApplicationId(101);

    ApplicationACLsManager mockAclsManager = mock(ApplicationACLsManager.class);
    when(mockAclsManager.checkAccess(UserGroupInformation.getCurrentUser(), ApplicationAccessType.VIEW_APP,
            null, appId1)).thenReturn(true);

    SubmitApplicationRequest submitRequest1 = mockSubmitAppRequest(appId1, null, null);

    SubmitApplicationRequest submitRequest2 = mockSubmitAppRequest(appId2, null, null);

    try {
        rmService.submitApplication(submitRequest1);
        rmService.submitApplication(submitRequest2);

    } catch (YarnException e) {
        Assert.fail("Exception is not expected.");
    }

    assertEquals("Incorrect number of apps in the RM", 0,
            rmService.getApplications(getRequest).getApplicationList().size());
    Thread.sleep(1000);

    // test persistence of SchedulerApplication
    Map<String, SchedulerApplication> schedulerApplications = RMUtilities.getSchedulerApplications();
    assertEquals("db does not contain good number of schedulerApplications", 2, schedulerApplications.size());

    MockNM nm1 = rm.registerNode("host1:1234", 5120);
    MockNM nm2 = rm.registerNode("host2:5678", 10240);

    NodeHeartbeatResponse nodeHeartbeat = nm1.nodeHeartbeat(true);
    Assert.assertEquals(4000, nodeHeartbeat.getNextHeartBeatInterval());

    NodeHeartbeatResponse nodeHeartbeat2 = nm2.nodeHeartbeat(true);
    Assert.assertEquals(4000, nodeHeartbeat2.getNextHeartBeatInterval());

    Thread.sleep(2000);
    rm.stop();
    Thread.sleep(2000);
}