Example usage for org.apache.hadoop.security UserGroupInformation setConfiguration

Introduction

On this page you can find example usages of org.apache.hadoop.security.UserGroupInformation.setConfiguration.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static void setConfiguration(Configuration conf) 

Document

Set the static configuration for UGI.
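
Before the usage examples, here is a minimal standalone sketch of the typical call order: build a Configuration, hand it to UserGroupInformation.setConfiguration, and only then log in if security is enabled. The resource path, principal, and keytab below are placeholders, not values taken from any example on this page.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class UgiSetup {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Pull in cluster settings such as hadoop.security.authentication.
        // Placeholder path; point it at your own configuration files.
        conf.addResource(new Path("/etc/hadoop/conf/core-site.xml"));

        // Make UGI use this configuration instead of lazily creating a default one.
        UserGroupInformation.setConfiguration(conf);

        if (UserGroupInformation.isSecurityEnabled()) {
            // Placeholder principal and keytab; replace with real values.
            UserGroupInformation.loginUserFromKeytab("user@EXAMPLE.COM", "/path/to/user.keytab");
        }

        System.out.println("Login user: " + UserGroupInformation.getLoginUser());
    }
}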

Usage

From source file:com.rim.logdriver.sawmill.Sawmill.java

License:Apache License

public void run(String[] args) {
    if (args.length < 1) {
        System.out.println("Usage: " + this.getClass().getSimpleName() + " <config.properties>");
        System.exit(1);
    }

    LOG.info("Starting {}", Sawmill.class.getSimpleName());

    // First arg is the config
    String configFile = args[0];

    // Load configuration.
    Properties conf = new Properties();
    try {
        conf.load(new FileInputStream(configFile));
    } catch (FileNotFoundException e) {
        LOG.error("Config file not found.", e);
        System.exit(1);
    } catch (Throwable t) {
        LOG.error("Error reading config file.", t);
        System.exit(1);
    }

    // Parse the configuration.

    // Load in any Hadoop config files.
    Configuration hConf = new Configuration();
    {
        String[] hadoopConfs = Configs.hadoopConfigPaths.getArray(conf);
        for (String confPath : hadoopConfs) {
            hConf.addResource(new Path(confPath));
        }
        // Also, don't shut down my FileSystem automatically!!!
        hConf.setBoolean("fs.automatic.close", false);
        for (Entry<Object, Object> e : System.getProperties().entrySet()) {
            if (e.getValue() instanceof Integer) {
                hConf.setInt(e.getKey().toString(), (Integer) e.getValue());
            } else if (e.getValue() instanceof Long) {
                hConf.setLong(e.getKey().toString(), (Long) e.getValue());
            } else {
                hConf.set(e.getKey().toString(), e.getValue().toString());
            }
        }
    }

    // Ensure that UserGroupInformation is set up, and knows if security is
    // enabled.
    UserGroupInformation.setConfiguration(hConf);

    // Kerberos credentials. If these are not present, then it just won't try to
    // authenticate.
    String kerbConfPrincipal = Configs.kerberosPrincipal.get(conf);
    String kerbKeytab = Configs.kerberosKeytab.get(conf);
    Authenticator.getInstance().setKerbConfPrincipal(kerbConfPrincipal);
    Authenticator.getInstance().setKerbKeytab(kerbKeytab);

    // Check the number of threads for workers, and create the threadpools
    // for both workers and stats updates.
    int threadCount = Configs.threadpoolSize.getInteger(conf);
    final ScheduledExecutorService executor = Executors.newScheduledThreadPool(threadCount);

    // Get the MBean server
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();

    // Set up the Mina Exception Monitor
    ExceptionMonitor.setInstance(new ExceptionLoggerExceptionMonitor());

    // For each port->output mapping, create a path (listener, queue, worker).
    // List<DataPath> paths = new ArrayList<DataPath>();
    final List<IoAcceptor> acceptors = new ArrayList<IoAcceptor>();
    final List<Writer> writers = new ArrayList<Writer>();
    {
        String[] pathStrings = Configs.paths.getArray(conf);
        for (String p : pathStrings) {
            Properties pathConf = Util.subProperties(conf, "path." + p);

            String name = Configs.name.get(pathConf);
            if (name == null) {
                LOG.info("Path has no name.  Using {}", p);
                name = p;
            }
            LOG.info("[{}] Configuring path {}", name, name);

            // Check the properties for this specific instance
            Integer maxLineLength = Configs.tcpMaxLineLength.getInteger(pathConf);
            if (maxLineLength == null) {
                maxLineLength = Configs.defaultTcpMaxLineLength.getInteger(conf);
            }
            LOG.info("[{}] Maximum line length is {}", name, maxLineLength);

            InetAddress bindAddress = null;
            try {
                String address = Configs.bindAddress.get(pathConf);
                bindAddress = InetAddress.getByName(address);
            } catch (UnknownHostException e) {
                LOG.error("[{}] Error getting bindAddress from string {}",
                        new Object[] { name, pathConf.getProperty("bindAddress") }, e);
            }

            Integer port = Configs.port.getInteger(pathConf);
            if (port == null) {
                LOG.error("[{}] Port not set.  Skipping this path.", name);
                continue;
            }

            int queueLength = Configs.queueCapacity.getInteger(pathConf);

            // Set up the actual processing chain
            IoAcceptor acceptor = new NioSocketAcceptor();
            SocketSessionConfig sessionConfig = (SocketSessionConfig) acceptor.getSessionConfig();
            sessionConfig.setReuseAddress(true);
            acceptors.add(acceptor);

            String charsetName = Configs.charset.getString(pathConf);
            Charset charset = null;
            try {
                charset = Charset.forName(charsetName);
            } catch (UnsupportedCharsetException e) {
                LOG.error("[{}] Charset '{}' is not supported.  Defaulting to UTF-8.", name, charsetName);
                charset = Charset.forName("UTF-8");
            }
            LOG.info("[{}] Using character set {}", name, charset.displayName());
            TextLineCodecFactory textLineCodecFactory = new TextLineCodecFactory(charset, LineDelimiter.UNIX,
                    LineDelimiter.AUTO);
            textLineCodecFactory.setDecoderMaxLineLength(maxLineLength);
            acceptor.getFilterChain().addLast("textLineCodec", new ProtocolCodecFilter(textLineCodecFactory));

            int numBuckets = Configs.outputBuckets.getInteger(pathConf);
            if (numBuckets > 1) {
                // Set up multiple writers for one MultiEnqueueHandler
                @SuppressWarnings("unchecked")
                BlockingQueue<String>[] queues = new BlockingQueue[numBuckets];

                for (int i = 0; i < numBuckets; i++) {
                    BlockingQueue<String> queue = new ArrayBlockingQueue<String>(queueLength);
                    queues[i] = queue;

                    // Set up the processor on the other end.
                    Writer writer = new Writer();
                    writer.setName(name);
                    writer.setConfig(pathConf);
                    writer.setHadoopConf(hConf);
                    writer.setQueue(queue);
                    writer.init();

                    // Set up MBean for the Writer
                    {
                        ObjectName mbeanName = null;
                        try {
                            mbeanName = new ObjectName(Writer.class.getPackage().getName() + ":type="
                                    + Writer.class.getSimpleName() + " [" + i + "]" + ",name=" + name);
                        } catch (MalformedObjectNameException e) {
                            LOG.error("[{}] Error creating MBean name.", name, e);
                        } catch (NullPointerException e) {
                            LOG.error("[{}] Error creating MBean name.", name, e);
                        }
                        try {
                            mbs.registerMBean(writer, mbeanName);
                        } catch (InstanceAlreadyExistsException e) {
                            LOG.error("[{}] Error registering MBean name.", name, e);
                        } catch (MBeanRegistrationException e) {
                            LOG.error("[{}] Error registering MBean name.", name, e);
                        } catch (NotCompliantMBeanException e) {
                            LOG.error("[{}] Error registering MBean name.", name, e);
                        }
                    }

                    executor.scheduleWithFixedDelay(writer, 0, 100, TimeUnit.MILLISECONDS);
                    writers.add(writer);
                }

                MultiEnqueueHandler handler = new MultiEnqueueHandler(queues);
                acceptor.setHandler(handler);

                // Set up MBean for the MultiEnqueueHandler
                {
                    ObjectName mbeanName = null;
                    try {
                        mbeanName = new ObjectName(MultiEnqueueHandler.class.getPackage().getName() + ":type="
                                + MultiEnqueueHandler.class.getSimpleName() + ",name=" + name);
                    } catch (MalformedObjectNameException e) {
                        LOG.error("[{}] Error creating MBean name.", name, e);
                    } catch (NullPointerException e) {
                        LOG.error("[{}] Error creating MBean name.", name, e);
                    }
                    try {
                        mbs.registerMBean(handler, mbeanName);
                    } catch (InstanceAlreadyExistsException e) {
                        LOG.error("[{}] Error registering MBean name.", name, e);
                    } catch (MBeanRegistrationException e) {
                        LOG.error("[{}] Error registering MBean name.", name, e);
                    } catch (NotCompliantMBeanException e) {
                        LOG.error("[{}] Error registering MBean name.", name, e);
                    }
                }
            } else {
                BlockingQueue<String> queue = new ArrayBlockingQueue<String>(queueLength);

                // Set up the processor on the other end.
                Writer writer = new Writer();
                writer.setName(name);
                writer.setConfig(pathConf);
                writer.setHadoopConf(hConf);
                writer.setQueue(queue);
                writer.init();

                // Set up MBean for the Writer
                {
                    ObjectName mbeanName = null;
                    try {
                        mbeanName = new ObjectName(Writer.class.getPackage().getName() + ":type="
                                + Writer.class.getSimpleName() + ",name=" + name);
                    } catch (MalformedObjectNameException e) {
                        LOG.error("[{}] Error creating MBean name.", name, e);
                    } catch (NullPointerException e) {
                        LOG.error("[{}] Error creating MBean name.", name, e);
                    }
                    try {
                        mbs.registerMBean(writer, mbeanName);
                    } catch (InstanceAlreadyExistsException e) {
                        LOG.error("[{}] Error registering MBean name.", name, e);
                    } catch (MBeanRegistrationException e) {
                        LOG.error("[{}] Error registering MBean name.", name, e);
                    } catch (NotCompliantMBeanException e) {
                        LOG.error("[{}] Error registering MBean name.", name, e);
                    }
                }

                executor.scheduleWithFixedDelay(writer, 0, 100, TimeUnit.MILLISECONDS);
                writers.add(writer);

                EnqueueHandler handler = new EnqueueHandler(queue);
                acceptor.setHandler(handler);

                // Set up MBean for the EnqueueHandler
                {
                    ObjectName mbeanName = null;
                    try {
                        mbeanName = new ObjectName(EnqueueHandler.class.getPackage().getName() + ":type="
                                + EnqueueHandler.class.getSimpleName() + ",name=" + name);
                    } catch (MalformedObjectNameException e) {
                        LOG.error("[{}] Error creating MBean name.", name, e);
                    } catch (NullPointerException e) {
                        LOG.error("[{}] Error creating MBean name.", name, e);
                    }
                    try {
                        mbs.registerMBean(handler, mbeanName);
                    } catch (InstanceAlreadyExistsException e) {
                        LOG.error("[{}] Error registering MBean name.", name, e);
                    } catch (MBeanRegistrationException e) {
                        LOG.error("[{}] Error registering MBean name.", name, e);
                    } catch (NotCompliantMBeanException e) {
                        LOG.error("[{}] Error registering MBean name.", name, e);
                    }
                }
            }

            acceptor.getSessionConfig().setReadBufferSize(Configs.tcpReadBufferSize.getInteger(pathConf));
            acceptor.getSessionConfig().setIdleTime(IdleStatus.BOTH_IDLE, 5);

            while (true) {
                try {
                    acceptor.bind(new InetSocketAddress(bindAddress, port));
                } catch (IOException e) {
                    LOG.error("Error binding to {}:{}.  Retrying...", bindAddress, port);

                    try {
                        Thread.sleep(2000);
                    } catch (InterruptedException e1) {
                        // nothing
                    }

                    continue;
                }

                break;
            }

        }
    }

    // Register a shutdown hook.
    Runtime.getRuntime().addShutdownHook(new Thread() {
        public void run() {
            LOG.info("Shutting down");

            LOG.info("Unbinding and disposing of all IoAcceptors");
            for (IoAcceptor acceptor : acceptors) {
                acceptor.unbind();
                acceptor.dispose(true);
            }

            LOG.info("Shutting down worker threadpools.  This could take a little while.");
            executor.shutdown();
            try {
                executor.awaitTermination(10, TimeUnit.MINUTES);
            } catch (InterruptedException e) {
                LOG.error("Interrupted waiting for writer threadpool termination.", e);
            }
            if (!executor.isTerminated()) {
                LOG.error("Threadpool did not terminate cleanly.");
            }

            LOG.info("Cleaning out any remaining messages from the queues.");
            List<Thread> threads = new ArrayList<Thread>();
            for (final Writer writer : writers) {
                Runnable r = new Runnable() {
                    @Override
                    public void run() {
                        try {
                            writer.runAndClose();
                        } catch (Throwable t) {
                            LOG.error("Error shutting down writer [{}]", writer.getName(), t);
                        }
                    }
                };
                Thread t = new Thread(r);
                t.setDaemon(false);
                t.start();
                threads.add(t);
            }

            for (Thread t : threads) {
                try {
                    t.join();
                } catch (InterruptedException e) {
                    LOG.error("Interrupted waiting for thread to finish.");
                }
            }

            LOG.info("Closing filesystems.");
            try {
                FileSystem.closeAll();
            } catch (Throwable t) {
                LOG.error("Error closing filesystems.", t);
            }

            LOG.info("Finished shutting down cleanly.");
        }
    });
}

From source file:com.streamsets.datacollector.security.DefaultLoginUgiProvider.java

License:Apache License

@Override
public UserGroupInformation getLoginUgi(Configuration hdfsConfiguration) throws IOException {
    AccessControlContext accessContext = AccessController.getContext();
    Subject subject = Subject.getSubject(accessContext);
    UserGroupInformation loginUgi;
    //HADOOP-13805
    HadoopConfigurationUtils.configureHadoopTreatSubjectExternal(hdfsConfiguration);
    UserGroupInformation.setConfiguration(hdfsConfiguration);
    if (UserGroupInformation.isSecurityEnabled()) {
        loginUgi = UserGroupInformation.getUGIFromSubject(subject);
    } else {
        UserGroupInformation.loginUserFromSubject(subject);
        loginUgi = UserGroupInformation.getLoginUser();
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("Subject = {}, Principals = {}, Login UGI = {}", subject,
                subject == null ? "null" : subject.getPrincipals(), loginUgi);
    }
    return loginUgi;
}

From source file:com.streamsets.datacollector.security.HadoopSecurityUtil.java

License:Apache License

public static UserGroupInformation getLoginUser(Configuration hdfsConfiguration) throws IOException {
    UserGroupInformation loginUgi;
    AccessControlContext accessContext = AccessController.getContext();
    Subject subject = Subject.getSubject(accessContext);
    // As per SDC-2917 doing this avoids deadlock
    synchronized (SecurityUtil.getSubjectDomainLock(accessContext)) {
        // call some method to force load static block in KerberosName
        KerberosName.hasRulesBeenSet();
    }
    // This should always stay outside the synchronized block
    UserGroupInformation.setConfiguration(hdfsConfiguration);
    synchronized (SecurityUtil.getSubjectDomainLock(accessContext)) {
        if (UserGroupInformation.isSecurityEnabled()) {
            loginUgi = UserGroupInformation.getUGIFromSubject(subject);
        } else {
            UserGroupInformation.loginUserFromSubject(subject);
            loginUgi = UserGroupInformation.getLoginUser();
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug("Subject = {}, Principals = {}, Login UGI = {}", subject,
                    subject == null ? "null" : subject.getPrincipals(), loginUgi);
        }
    }
    return loginUgi;
}

From source file:com.streamsets.datacollector.security.MapRLoginUgiProvider.java

License:Apache License

@Override
public UserGroupInformation getLoginUgi(Configuration hdfsConfiguration) throws IOException {
    // check system property to see if MapR U/P security is enabled
    String maprLoginEnabled = System.getProperty(MAPR_USERNAME_PASSWORD_SECURITY_ENABLED_KEY,
            MAPR_USERNAME_PASSWORD_SECURITY_ENABLED_DEFAULT);
    boolean isMapRLogin = Boolean.parseBoolean(maprLoginEnabled);
    AccessControlContext accessControlContext = AccessController.getContext();
    Subject subject = Subject.getSubject(accessControlContext);
    //HADOOP-13805
    HadoopConfigurationUtils.configureHadoopTreatSubjectExternal(hdfsConfiguration);
    // SDC-4015: since privateclassloader is false for MapR, UGI is shared and it also needs to be under the JVM lock
    UserGroupInformation.setConfiguration(hdfsConfiguration);
    UserGroupInformation loginUgi;

    if (UserGroupInformation.isSecurityEnabled() && !isMapRLogin) {
        // The code in this block must only be executed if Kerberos is enabled.
        // The MapR implementation of UserGroupInformation.isSecurityEnabled() returns true even when Kerberos is not enabled.
        // The system property helps to avoid this code path in that case.
        loginUgi = UserGroupInformation.getUGIFromSubject(subject);
    } else {
        UserGroupInformation.loginUserFromSubject(subject);
        loginUgi = UserGroupInformation.getLoginUser();
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("Subject = {}, Principals = {}, Login UGI = {}", subject,
                subject == null ? "null" : subject.getPrincipals(), loginUgi);
    }
    return loginUgi;

}

From source file:com.streamsets.pipeline.stage.destination.hbase.HBaseTarget.java

License:Apache License

private void validateSecurityConfigs(List<ConfigIssue> issues) {
    try {
        if (kerberosAuth) {
            hbaseConf.set(User.HBASE_SECURITY_CONF_KEY,
                    UserGroupInformation.AuthenticationMethod.KERBEROS.name());
            hbaseConf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
                    UserGroupInformation.AuthenticationMethod.KERBEROS.name());
            if (hbaseConf.get(MASTER_KERBEROS_PRINCIPAL) == null) {
                try {
                    hbaseConf.set(MASTER_KERBEROS_PRINCIPAL, "hbase/_HOST@" + KerberosUtil.getDefaultRealm());
                } catch (Exception e) {
                    issues.add(getContext().createConfigIssue(Groups.HBASE.name(), "masterPrincipal",
                            Errors.HBASE_22));
                }
            }
            if (hbaseConf.get(REGIONSERVER_KERBEROS_PRINCIPAL) == null) {
                try {
                    hbaseConf.set(REGIONSERVER_KERBEROS_PRINCIPAL,
                            "hbase/_HOST@" + KerberosUtil.getDefaultRealm());
                } catch (Exception e) {
                    issues.add(getContext().createConfigIssue(Groups.HBASE.name(), "regionServerPrincipal",
                            Errors.HBASE_23));
                }
            }
        }

        // Initialize UGI with the assembled HBase configuration before attempting to log in.
        UserGroupInformation.setConfiguration(hbaseConf);
        Subject subject = Subject.getSubject(AccessController.getContext());
        if (UserGroupInformation.isSecurityEnabled()) {
            loginUgi = UserGroupInformation.getUGIFromSubject(subject);
        } else {
            UserGroupInformation.loginUserFromSubject(subject);
            loginUgi = UserGroupInformation.getLoginUser();
        }
        LOG.info("Subject = {}, Principals = {}, Login UGI = {}", subject,
                subject == null ? "null" : subject.getPrincipals(), loginUgi);
        StringBuilder logMessage = new StringBuilder();
        if (kerberosAuth) {
            logMessage.append("Using Kerberos");
            if (loginUgi.getAuthenticationMethod() != UserGroupInformation.AuthenticationMethod.KERBEROS) {
                issues.add(getContext().createConfigIssue(Groups.HBASE.name(), "kerberosAuth", Errors.HBASE_16,
                        loginUgi.getAuthenticationMethod()));
            }
        } else {
            logMessage.append("Using Simple");
            hbaseConf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
                    UserGroupInformation.AuthenticationMethod.SIMPLE.name());
        }
        LOG.info("Authentication Config: " + logMessage);
    } catch (Exception ex) {
        LOG.info("Error validating security configuration: " + ex, ex);
        issues.add(
                getContext().createConfigIssue(Groups.HBASE.name(), null, Errors.HBASE_17, ex.toString(), ex));
    }
}

From source file:com.streamsets.pipeline.stage.destination.hdfs.BaseHdfsTargetIT.java

License:Apache License

@Before
public void setUpTest() {
    // Reset UGI to a fresh default Configuration before each test.
    UserGroupInformation.setConfiguration(new Configuration());
}

From source file:com.streamsets.pipeline.stage.destination.hdfs.BaseHdfsTargetIT.java

License:Apache License

@After
public void cleanUpTest() {
    // Reset UGI again after each test so later tests start from a clean state.
    UserGroupInformation.setConfiguration(new Configuration());
}

From source file:com.streamsets.pipeline.stage.destination.hdfs.HdfsTarget.java

License:Apache License

private boolean validateHadoopFS(List<ConfigIssue> issues) {
    boolean validHapoopFsUri = true;
    if (hdfsUri.contains("://")) {
        try {
            new URI(hdfsUri);
        } catch (Exception ex) {
            issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), null, Errors.HADOOPFS_22,
                    hdfsUri, ex.toString(), ex));
            validHapoopFsUri = false;
        }
    } else {
        issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hdfsUri", Errors.HADOOPFS_18,
                hdfsUri));
        validHapoopFsUri = false;
    }

    StringBuilder logMessage = new StringBuilder();
    try {
        hdfsConfiguration = getHadoopConfiguration(issues);

        hdfsConfiguration.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, hdfsUri);

        // forcing UGI to initialize with the security settings from the stage
        UserGroupInformation.setConfiguration(hdfsConfiguration);
        Subject subject = Subject.getSubject(AccessController.getContext());
        if (UserGroupInformation.isSecurityEnabled()) {
            loginUgi = UserGroupInformation.getUGIFromSubject(subject);
        } else {
            UserGroupInformation.loginUserFromSubject(subject);
            loginUgi = UserGroupInformation.getLoginUser();
        }
        LOG.info("Subject = {}, Principals = {}, Login UGI = {}", subject,
                subject == null ? "null" : subject.getPrincipals(), loginUgi);
        if (hdfsKerberos) {
            logMessage.append("Using Kerberos");
            if (loginUgi.getAuthenticationMethod() != UserGroupInformation.AuthenticationMethod.KERBEROS) {
                issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hdfsKerberos",
                        Errors.HADOOPFS_00, loginUgi.getAuthenticationMethod(),
                        UserGroupInformation.AuthenticationMethod.KERBEROS));
            }
        } else {
            logMessage.append("Using Simple");
            hdfsConfiguration.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
                    UserGroupInformation.AuthenticationMethod.SIMPLE.name());
        }
        if (validHapoopFsUri) {
            getUGI().doAs(new PrivilegedExceptionAction<Void>() {
                @Override
                public Void run() throws Exception {
                    try (FileSystem fs = getFileSystemForInitDestroy()) { //to trigger the close
                    }
                    return null;
                }
            });
        }
    } catch (Exception ex) {
        LOG.info("Validation Error: " + Errors.HADOOPFS_01.getMessage(), hdfsUri, ex.toString(), ex);
        issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), null, Errors.HADOOPFS_01, hdfsUri,
                String.valueOf(ex), ex));
    }
    LOG.info("Authentication Config: " + logMessage);
    return validHapoopFsUri;
}

From source file:com.streamsets.pipeline.stage.destination.hdfs.metadataexecutor.HdfsMetadataExecutorIT.java

License:Apache License

@Before
public void setUpTest() throws IOException {
    UserGroupInformation.setConfiguration(new Configuration());
    inputDir = new Path("/" + name.getMethodName() + "/input/");
    outputDir = new Path("/" + name.getMethodName() + "/output/");

    inputPath = new Path(inputDir, "input.file");
    writeFile(inputPath, "CONTENT");
}

From source file:com.streamsets.pipeline.stage.destination.hive.HiveTarget.java

License:Apache License

@Override
protected List<ConfigIssue> init() {
    List<ConfigIssue> issues = super.init();

    partitionsToFields = new HashMap<>();
    columnsToFields = new HashMap<>();

    hiveConf = new HiveConf();
    if (null != hiveConfDir && !hiveConfDir.isEmpty()) {
        File hiveConfDir = new File(this.hiveConfDir);

        if (!hiveConfDir.isAbsolute()) {
            hiveConfDir = new File(getContext().getResourcesDirectory(), this.hiveConfDir).getAbsoluteFile();
        }

        if (hiveConfDir.exists()) {
            File coreSite = new File(hiveConfDir.getAbsolutePath(), "core-site.xml");
            File hiveSite = new File(hiveConfDir.getAbsolutePath(), "hive-site.xml");
            File hdfsSite = new File(hiveConfDir.getAbsolutePath(), "hdfs-site.xml");

            if (!coreSite.exists()) {
                issues.add(getContext().createConfigIssue(Groups.HIVE.name(), "hiveConfDir", Errors.HIVE_06,
                        coreSite.getName(), this.hiveConfDir));
            } else {
                hiveConf.addResource(new Path(coreSite.getAbsolutePath()));
            }

            if (!hdfsSite.exists()) {
                issues.add(getContext().createConfigIssue(Groups.HIVE.name(), "hiveConfDir", Errors.HIVE_06,
                        hdfsSite.getName(), this.hiveConfDir));
            } else {
                hiveConf.addResource(new Path(hdfsSite.getAbsolutePath()));
            }

            if (!hiveSite.exists()) {
                issues.add(getContext().createConfigIssue(Groups.HIVE.name(), "hiveConfDir", Errors.HIVE_06,
                        hiveSite.getName(), this.hiveConfDir));
            } else {
                hiveConf.addResource(new Path(hiveSite.getAbsolutePath()));
            }
        } else {
            issues.add(getContext().createConfigIssue(Groups.HIVE.name(), "hiveConfDir", Errors.HIVE_07,
                    this.hiveConfDir));
        }
    } else if (hiveThriftUrl == null || hiveThriftUrl.isEmpty()) {
        issues.add(getContext().createConfigIssue(Groups.HIVE.name(), "hiveThriftUrl", Errors.HIVE_13));
    }

    // Specified URL overrides what's in the Hive Conf
    hiveConf.set(HIVE_METASTORE_URI, hiveThriftUrl);
    // Add any additional hive conf overrides
    for (Map.Entry<String, String> entry : additionalHiveProperties.entrySet()) {
        hiveConf.set(entry.getKey(), entry.getValue());
    }

    try {
        // forcing UGI to initialize with the security settings from the stage
        UserGroupInformation.setConfiguration(hiveConf);
        Subject subject = Subject.getSubject(AccessController.getContext());
        if (UserGroupInformation.isSecurityEnabled()) {
            loginUgi = UserGroupInformation.getUGIFromSubject(subject);
        } else {
            UserGroupInformation.loginUserFromSubject(subject);
            loginUgi = UserGroupInformation.getLoginUser();
        }
        LOG.info("Subject = {}, Principals = {}, Login UGI = {}", subject,
                subject == null ? "null" : subject.getPrincipals(), loginUgi);
        // Proxy users are not currently supported due to: https://issues.apache.org/jira/browse/HIVE-11089
    } catch (IOException e) {
        issues.add(getContext().createConfigIssue(Groups.HIVE.name(), null, Errors.HIVE_11, e.getMessage()));
    }

    try {
        issues.addAll(loginUgi.doAs(new PrivilegedExceptionAction<List<ConfigIssue>>() {
            @Override
            public List<ConfigIssue> run() {
                List<ConfigIssue> issues = new ArrayList<>();
                HiveMetaStoreClient client = null;
                try {
                    client = new HiveMetaStoreClient(hiveConf);

                    List<FieldSchema> columnNames = client.getFields(schema, tableName);
                    for (FieldSchema field : columnNames) {
                        columnsToFields.put(field.getName(), SDC_FIELD_SEP + field.getName());
                    }

                    Table table = client.getTable(schema, tableName);
                    List<FieldSchema> partitionKeys = table.getPartitionKeys();
                    for (FieldSchema field : partitionKeys) {
                        partitionsToFields.put(field.getName(), SDC_FIELD_SEP + field.getName());
                    }
                } catch (UnknownDBException e) {
                    issues.add(getContext().createConfigIssue(Groups.HIVE.name(), "schema", Errors.HIVE_02,
                            schema));
                } catch (UnknownTableException e) {
                    issues.add(getContext().createConfigIssue(Groups.HIVE.name(), "table", Errors.HIVE_03,
                            schema, tableName));
                } catch (MetaException e) {
                    issues.add(getContext().createConfigIssue(Groups.HIVE.name(), "hiveUrl", Errors.HIVE_05,
                            e.getMessage()));
                } catch (TException e) {
                    issues.add(getContext().createConfigIssue(Groups.HIVE.name(), "hiveUrl", Errors.HIVE_04,
                            e.getMessage()));
                } finally {
                    if (null != client) {
                        client.close();
                    }
                }
                return issues;
            }
        }));
    } catch (Error | IOException | InterruptedException e) {
        LOG.error("Received unknown error in validation: {}", e.toString(), e);
        issues.add(getContext().createConfigIssue(Groups.HIVE.name(), "", Errors.HIVE_01, e.toString()));
    } catch (UndeclaredThrowableException e) {
        LOG.error("Received unknown error in validation: {}", e.toString(), e);
        issues.add(getContext().createConfigIssue(Groups.HIVE.name(), "", Errors.HIVE_01,
                e.getUndeclaredThrowable().toString()));
    }

    // Now apply any custom mappings
    if (validColumnMappings(issues)) {
        for (FieldMappingConfig mapping : columnMappings) {
            LOG.debug("Custom mapping field {} to column {}", mapping.field, mapping.columnName);
            if (columnsToFields.containsKey(mapping.columnName)) {
                LOG.debug("Mapping field {} to column {}", mapping.field, mapping.columnName);
                columnsToFields.put(mapping.columnName, mapping.field);
            } else if (partitionsToFields.containsKey(mapping.columnName)) {
                LOG.debug("Mapping field {} to partition {}", mapping.field, mapping.columnName);
                partitionsToFields.put(mapping.columnName, mapping.field);
            }
        }
    }

    dataGeneratorFactory = createDataGeneratorFactory();

    // Note that cleanup is done synchronously by default while servicing .get
    hiveConnectionPool = CacheBuilder.newBuilder().maximumSize(10).expireAfterAccess(10, TimeUnit.MINUTES)
            .removalListener(new HiveConnectionRemovalListener()).build(new HiveConnectionLoader());

    recordWriterPool = CacheBuilder.newBuilder().maximumSize(10).expireAfterAccess(10, TimeUnit.MINUTES)
            .build(new HiveRecordWriterLoader());

    LOG.debug("Total issues: {}", issues.size());
    return issues;
}