Example usage for org.apache.hadoop.security UserGroupInformation getCurrentUser

List of usage examples for org.apache.hadoop.security UserGroupInformation getCurrentUser

Introduction

On this page you can find example usage of org.apache.hadoop.security.UserGroupInformation.getCurrentUser().

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static UserGroupInformation getCurrentUser() throws IOException 

Document

Return the current user, including any doAs in the current stack.
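
The following minimal sketch illustrates the doAs behavior described above; the class name CurrentUserDemo and the proxy user name "alice" are illustrative only and do not come from the examples below. Outside any doAs block, getCurrentUser() resolves to the login user; inside a doAs block, it returns the user on the current call stack.

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;

public class CurrentUserDemo {
    public static void main(String[] args) throws Exception {
        // Outside of any doAs block, getCurrentUser() resolves to the login user.
        UserGroupInformation login = UserGroupInformation.getCurrentUser();
        System.out.println("Login user: " + login.getUserName());

        // "alice" is an illustrative proxy user name. Inside doAs, getCurrentUser()
        // returns the user associated with the current call stack.
        UserGroupInformation proxy = UserGroupInformation.createProxyUser("alice", login);
        proxy.doAs((PrivilegedExceptionAction<Void>) () -> {
            System.out.println("Current user inside doAs: "
                    + UserGroupInformation.getCurrentUser().getUserName());
            return null;
        });
    }
}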

Usage

From source file:com.kylinolap.storage.hbase.PingHBaseCLI.java

License:Apache License

public static void main(String[] args) throws IOException {
    String metadataUrl = args[0];
    String hbaseTable = args[1];

    System.out.println("Hello friend.");

    Configuration hconf = HadoopUtil.newHBaseConfiguration(metadataUrl);
    if (User.isHBaseSecurityEnabled(hconf)) {
        try {
            System.out.println("--------------Getting kerberos credential for user "
                    + UserGroupInformation.getCurrentUser().getUserName());
            TokenUtil.obtainAndCacheToken(hconf, UserGroupInformation.getCurrentUser());
        } catch (InterruptedException e) {
            System.out.println("--------------Error while getting kerberos credential for user "
                    + UserGroupInformation.getCurrentUser().getUserName());
        }
    }

    Scan scan = new Scan();
    int limit = 20;

    HConnection conn = null;
    HTableInterface table = null;
    ResultScanner scanner = null;
    try {
        conn = HConnectionManager.createConnection(hconf);
        table = conn.getTable(hbaseTable);
        scanner = table.getScanner(scan);
        int count = 0;
        for (Result r : scanner) {
            byte[] rowkey = r.getRow();
            System.out.println(Bytes.toStringBinary(rowkey));
            count++;
            if (count == limit)
                break;
        }
    } finally {
        if (scanner != null) {
            scanner.close();
        }
        if (table != null) {
            table.close();
        }
        if (conn != null) {
            conn.close();
        }
    }

}

From source file:com.linkedin.pinot.common.segment.fetcher.HdfsSegmentFetcher.java

License:Apache License

private void authenticate(Configuration hadoopConf, org.apache.commons.configuration.Configuration configs) {
    String principal = configs.getString(PRINCIPLE);
    String keytab = configs.getString(KEYTAB);
    if (!Strings.isNullOrEmpty(principal) && !Strings.isNullOrEmpty(keytab)) {
        UserGroupInformation.setConfiguration(hadoopConf);
        if (UserGroupInformation.isSecurityEnabled()) {
            try {
                if (!UserGroupInformation.getCurrentUser().hasKerberosCredentials()
                        || !UserGroupInformation.getCurrentUser().getUserName().equals(principal)) {
                    LOGGER.info("Trying to authenticate user [%s] with keytab [%s]..", principal, keytab);
                    UserGroupInformation.loginUserFromKeytab(principal, keytab);
                }//from  w  w  w  . j a  va 2  s . c om
            } catch (IOException e) {
                throw new RuntimeException(String.format(
                        "Failed to authenticate user principal [%s] with keytab [%s]", principal, keytab), e);
            }
        }
    }
}

From source file:com.linkedin.pinot.filesystem.HadoopPinotFS.java

License:Apache License

private void authenticate(org.apache.hadoop.conf.Configuration hadoopConf,
        org.apache.commons.configuration.Configuration configs) {
    String principal = configs.getString(PRINCIPAL);
    String keytab = configs.getString(KEYTAB);
    if (!Strings.isNullOrEmpty(principal) && !Strings.isNullOrEmpty(keytab)) {
        UserGroupInformation.setConfiguration(hadoopConf);
        if (UserGroupInformation.isSecurityEnabled()) {
            try {
                if (!UserGroupInformation.getCurrentUser().hasKerberosCredentials()
                        || !UserGroupInformation.getCurrentUser().getUserName().equals(principal)) {
                    LOGGER.info("Trying to authenticate user [%s] with keytab [%s]..", principal, keytab);
                    UserGroupInformation.loginUserFromKeytab(principal, keytab);
                }//from   w ww .ja  va2 s .  c  o  m
            } catch (IOException e) {
                throw new RuntimeException(String.format(
                        "Failed to authenticate user principal [%s] with keytab [%s]", principal, keytab), e);
            }
        }
    }
}

From source file:com.mellanox.r4h.DFSClient.java

License:Apache License

/**
 * Create a new DFSClient connected to the given nameNodeUri or rpcNamenode.
 * If HA is enabled and a positive value is set for {@link DFSConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY} in the
 * configuration, the DFSClient will use {@link LossyRetryInvocationHandler} as its RetryInvocationHandler. Otherwise one of nameNodeUri or
 * rpcNamenode must be null.
 */
@VisibleForTesting
public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode, Configuration conf, FileSystem.Statistics stats)
        throws IOException {
    SpanReceiverHost.get(conf, DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX);
    traceSampler = new SamplerBuilder(TraceUtils.wrapHadoopConf(DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX, conf))
            .build();
    // Copy only the required DFSClient configuration
    this.dfsClientConf = new DFSClientConfBridge2_7(conf);
    if (this.dfsClientConf.isUseLegacyBlockReaderLocal()) {
        LOG.debug("Using legacy short-circuit local reads.");
    }
    this.conf = conf;
    this.stats = stats;
    this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
    this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);

    this.ugi = UserGroupInformation.getCurrentUser();

    this.authority = nameNodeUri == null ? "null" : nameNodeUri.getAuthority();
    this.clientName = "DFSClient_" + dfsClientConf.getTaskId() + "_" + DFSUtil.getRandom().nextInt() + "_"
            + Thread.currentThread().getId();
    provider = DFSUtil.createKeyProvider(conf);
    if (LOG.isDebugEnabled()) {
        if (provider == null) {
            LOG.debug("No KeyProvider found.");
        } else {
            LOG.debug("Found KeyProvider: " + provider.toString());
        }
    }
    int numResponseToDrop = conf.getInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
            DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT);
    NameNodeProxies.ProxyAndInfo<ClientProtocol> proxyInfo = null;
    AtomicBoolean nnFallbackToSimpleAuth = new AtomicBoolean(false);
    if (numResponseToDrop > 0) {
        // This case is used for testing.
        LOG.warn(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY + " is set to "
                + numResponseToDrop + ", this hacked client will proactively drop responses");
        proxyInfo = NameNodeProxies.createProxyWithLossyRetryHandler(conf, nameNodeUri, ClientProtocol.class,
                numResponseToDrop, nnFallbackToSimpleAuth);
    }

    if (proxyInfo != null) {
        this.dtService = proxyInfo.getDelegationTokenService();
        this.namenode = proxyInfo.getProxy();
    } else if (rpcNamenode != null) {
        // This case is used for testing.
        Preconditions.checkArgument(nameNodeUri == null);
        this.namenode = rpcNamenode;
        dtService = null;
    } else {
        Preconditions.checkArgument(nameNodeUri != null, "null URI");
        proxyInfo = NameNodeProxies.createProxy(conf, nameNodeUri, ClientProtocol.class,
                nnFallbackToSimpleAuth);
        this.dtService = proxyInfo.getDelegationTokenService();
        this.namenode = proxyInfo.getProxy();
    }

    String localInterfaces[] = conf.getTrimmedStrings(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES);
    localInterfaceAddrs = getLocalInterfaceAddrs(localInterfaces);
    if (LOG.isDebugEnabled() && 0 != localInterfaces.length) {
        LOG.debug("Using local interfaces [" + Joiner.on(',').join(localInterfaces) + "] with addresses ["
                + Joiner.on(',').join(localInterfaceAddrs) + "]");
    }

    Boolean readDropBehind = (conf.get(DFS_CLIENT_CACHE_DROP_BEHIND_READS) == null) ? null
            : conf.getBoolean(DFS_CLIENT_CACHE_DROP_BEHIND_READS, false);
    Long readahead = (conf.get(DFS_CLIENT_CACHE_READAHEAD) == null) ? null
            : conf.getLong(DFS_CLIENT_CACHE_READAHEAD, 0);
    Boolean writeDropBehind = (conf.get(DFS_CLIENT_CACHE_DROP_BEHIND_WRITES) == null) ? null
            : conf.getBoolean(DFS_CLIENT_CACHE_DROP_BEHIND_WRITES, false);
    this.defaultReadCachingStrategy = new CachingStrategy(readDropBehind, readahead);
    this.defaultWriteCachingStrategy = new CachingStrategy(writeDropBehind, readahead);
    this.clientContext = ClientContext.get(conf.get(DFS_CLIENT_CONTEXT, DFS_CLIENT_CONTEXT_DEFAULT),
            dfsClientConf);
    this.hedgedReadThresholdMillis = conf.getLong(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS,
            DFSConfigKeys.DEFAULT_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS);
    int numThreads = conf.getInt(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE,
            DFSConfigKeys.DEFAULT_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE);
    if (numThreads > 0) {
        this.initThreadsNumForHedgedReads(numThreads);
    }
    this.saslClient = new SaslDataTransferClient(conf, DataTransferSaslUtil.getSaslPropertiesResolver(conf),
            TrustedChannelResolver.getInstance(conf), nnFallbackToSimpleAuth);
}

From source file:com.mellanox.r4h.TestReadWhileWriting.java

License:Apache License

/** Test reading while writing. */
@Test
public void pipeline_02_03() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);

    // create cluster
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    try {
        //change the lease limits.
        cluster.setLeasePeriod(SOFT_LEASE_LIMIT, HARD_LEASE_LIMIT);

        //wait for the cluster
        cluster.waitActive();
        final FileSystem fs = cluster.getFileSystem();
        final Path p = new Path(DIR, "file1");
        final int half = BLOCK_SIZE / 2;

        //a. On Machine M1, Create file. Write half block of data.
        //   Invoke DFSOutputStream.hflush() on the dfs file handle.
        //   Do not close file yet.
        {
            final FSDataOutputStream out = fs.create(p, true,
                    fs.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), (short) 3,
                    BLOCK_SIZE);
            write(out, 0, half);

            //hflush
            ((DFSOutputStream) out.getWrappedStream()).hflush();
        }

        //b. On another machine M2, open file and verify that the half-block
        //   of data can be read successfully.
        checkFile(p, half, conf);
        MiniDFSClusterBridge.getAppendTestUtilLOG().info("leasechecker.interruptAndJoin()");
        ((DistributedFileSystem) fs).dfs.getLeaseRenewer().interruptAndJoin();

        //c. On M1, append another half block of data.  Close file on M1.
        {
            //sleep to let the lease expire.
            Thread.sleep(2 * SOFT_LEASE_LIMIT);

            final UserGroupInformation current = UserGroupInformation.getCurrentUser();
            final UserGroupInformation ugi = UserGroupInformation
                    .createUserForTesting(current.getShortUserName() + "x", new String[] { "supergroup" });
            final DistributedFileSystem dfs = ugi.doAs(new PrivilegedExceptionAction<DistributedFileSystem>() {
                @Override
                public DistributedFileSystem run() throws Exception {
                    return (DistributedFileSystem) FileSystem.newInstance(conf);
                }
            });
            final FSDataOutputStream out = append(dfs, p);
            write(out, 0, half);
            out.close();
        }

        //d. On M2, open file and read 1 block of data from it. Close file.
        checkFile(p, 2 * half, conf);
    } finally {
        cluster.shutdown();
    }
}

From source file:com.mellanox.r4h.TestReadWhileWriting.java

License:Apache License

static void checkFile(Path p, int expectedsize, final Configuration conf)
        throws IOException, InterruptedException {
    //open the file with another user account
    final String username = UserGroupInformation.getCurrentUser().getShortUserName() + "_" + ++userCount;

    UserGroupInformation ugi = UserGroupInformation.createUserForTesting(username,
            new String[] { "supergroup" });

    final FileSystem fs = DFSTestUtil.getFileSystemAs(ugi, conf);

    final HdfsDataInputStream in = (HdfsDataInputStream) fs.open(p);

    //Check visible length
    Assert.assertTrue(in.getVisibleLength() >= expectedsize);

    //Able to read?
    for (int i = 0; i < expectedsize; i++) {
        Assert.assertEquals((byte) i, (byte) in.read());
    }

    in.close();
}

From source file:com.moz.fiji.schema.impl.hbase.HBaseFijiTable.java

License:Apache License

/**
 * Loads partitioned HFiles directly into the regions of this Fiji table.
 *
 * @param hfilePath Path of the HFiles to load.
 * @throws IOException on I/O error.
 */
public void bulkLoad(Path hfilePath) throws IOException {
    final LoadIncrementalHFiles loader = createHFileLoader(mConf);

    final String hFileScheme = hfilePath.toUri().getScheme();
    Token<DelegationTokenIdentifier> hdfsDelegationToken = null;

    // If we're bulk loading from a secure HDFS, we should request and forward a delegation token.
    // LoadIncrementalHfiles will actually do this if none is provided, but because we call it
    // repeatedly in a short amount of time, this seems to trigger a possible race condition
    // where we ask to load the next HFile while there is a pending token cancellation request.
    // By requesting the token ourselves, it is re-used for each bulk load call.
    // Once we're done with the bulk loader we cancel the token.
    if (UserGroupInformation.isSecurityEnabled() && hFileScheme.equals(HDFS_SCHEME)) {
        final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        final DistributedFileSystem fileSystem = (DistributedFileSystem) hfilePath.getFileSystem(mConf);
        hdfsDelegationToken = fileSystem.getDelegationToken(RENEWER);
        ugi.addToken(hdfsDelegationToken);
    }

    try {
        // LoadIncrementalHFiles.doBulkLoad() requires an HTable instance, not an HTableInterface:
        final HTable htable = (HTable) mHTableFactory.create(mConf, mHBaseTableName);
        try {
            final List<Path> hfilePaths = Lists.newArrayList();

            // Try to find any hfiles for partitions within the passed in path
            final FileStatus[] hfiles = hfilePath.getFileSystem(mConf).globStatus(new Path(hfilePath, "*"));
            for (FileStatus hfile : hfiles) {
                String partName = hfile.getPath().getName();
                if (!partName.startsWith("_") && partName.endsWith(".hfile")) {
                    Path partHFile = new Path(hfilePath, partName);
                    hfilePaths.add(partHFile);
                }
            }
            if (hfilePaths.isEmpty()) {
                // If we didn't find any parts, add in the passed in parameter
                hfilePaths.add(hfilePath);
            }
            for (Path path : hfilePaths) {
                loader.doBulkLoad(path, htable);
                LOG.info("Successfully loaded: " + path.toString());
            }
        } finally {
            htable.close();
        }
    } catch (TableNotFoundException tnfe) {
        throw new InternalFijiError(tnfe);
    }

    // Cancel the HDFS delegation token if we requested one.
    if (null != hdfsDelegationToken) {
        try {
            hdfsDelegationToken.cancel(mConf);
        } catch (InterruptedException e) {
            LOG.warn("Failed to cancel HDFS delegation token.", e);
        }
    }
}

From source file:com.mycompany.app.TestStagingDirectoryPermissions.java

License:Apache License

@Test
public void perms() throws IOException, InterruptedException {
    MiniDFSCluster minidfs = null;
    FileSystem fs = null;
    MiniMRClientCluster minimr = null;
    try {
        Configuration conf = new Configuration(true);
        conf.set("fs.permission.umask-mode", "0077");
        minidfs = new MiniDFSCluster.Builder(conf).build();
        minidfs.waitActive();

        fs = minidfs.getFileSystem();
        conf.set(FileSystem.FS_DEFAULT_NAME_KEY, fs.getUri().toString());
        Path p = path("/in");
        fs.mkdirs(p);

        FSDataOutputStream os = fs.create(new Path(p, "input.txt"));
        os.write("hello!".getBytes("UTF-8"));
        os.close();

        String user = UserGroupInformation.getCurrentUser().getUserName();
        Path home = new Path("/User/" + user);
        fs.mkdirs(home);
        minimr = MiniMRClientClusterFactory.create(this.getClass(), 1, conf);
        JobConf job = new JobConf(minimr.getConfig());

        job.setJobName("PermsTest");
        JobClient client = new JobClient(job);
        FileInputFormat.addInputPath(job, p);
        FileOutputFormat.setOutputPath(job, path("/out"));
        job.setInputFormat(TextInputFormat.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        job.setMapperClass(MySleepMapper.class);

        job.setNumReduceTasks(1);
        RunningJob submittedJob = client.submitJob(job);

        // Sleep for a bit to let localization finish
        System.out.println("Sleeping...");
        Thread.sleep(3 * 1000L);
        System.out.println("Done sleeping...");
        assertFalse(UserGroupInformation.isSecurityEnabled());

        Path stagingRoot = path("/tmp/hadoop-yarn/staging/" + user + "/.staging/");
        assertTrue(fs.exists(stagingRoot));
        assertEquals(1, fs.listStatus(stagingRoot).length);
        Path staging = fs.listStatus(stagingRoot)[0].getPath();
        Path jobXml = path(staging + "/job.xml");

        assertTrue(fs.exists(jobXml));

        FileStatus fileStatus = fs.getFileStatus(jobXml);
        System.out.println("job.xml permission = " + fileStatus.getPermission());
        assertTrue(fileStatus.getPermission().getOtherAction().implies(FsAction.READ));
        assertTrue(fileStatus.getPermission().getGroupAction().implies(FsAction.READ));

        submittedJob.waitForCompletion();
    } finally {
        if (minimr != null) {
            minimr.stop();
        }
        if (fs != null) {
            fs.close();
        }
        if (minidfs != null) {
            minidfs.shutdown(true);
        }
    }
}

From source file:com.netflix.bdp.s3mper.alert.impl.CloudWatchAlertDispatcher.java

License:Apache License

private void buildMessage(AbstractMessage message) {
    String hostname = "unknown";

    try {
        hostname = InetAddress.getLocalHost().getHostName();
    } catch (UnknownHostException e) {
        log.warn("Failed to identify hostname", e);
    }

    message.setEpoch(System.currentTimeMillis());
    message.setTimestamp(new Date(message.getEpoch()).toString());
    message.setHostname(hostname);

    String username = conf.get("user.name", System.getProperty("user.name"));

    try {
        username = UserGroupInformation.getCurrentUser().getUserName();
    } catch (IOException e) {
        log.warn("Failed to identify user using hadoop library.", e);
    }

    message.setUsername(username);

    message.setGenieId(conf.get("genie.job.id"));
    message.setDataovenId(conf.get("dataoven.job.id"));
    String queryId = conf.get("hive.query.id");

    QueryType queryType = QueryType.Unknown;

    if (queryId != null) {
        queryType = QueryType.Hive;
        message.setLogFile(conf.get("hive.log.file"));
    } else {
        queryId = conf.get("pig.script.id");

        if (queryId != null) {
            queryType = QueryType.Pig;
            message.setLogFile(conf.get("pig.logfile"));
        }
    }

    message.setQueryId(queryId);
    message.setQueryType(queryType);

    message.setJobId(conf.get("mapred.job.id"));
    message.setTaskId(conf.get("mapred.tip.id"));
    message.setAttemptId(conf.get("mapred.task.id"));
    message.setInputFile(conf.get("mapred.input.file"));
    message.setEmail(conf.get("s3mper.email"));

    try {
        //We have to guess at this since it may not be explicitly in the config
        if (message.getJobId() == null) {
            String[] split = conf.get("mapreduce.job.dir").split("/");
            String jobId = split[split.length - 1];

            message.setJobId(jobId);
        }
    } catch (RuntimeException e) {
        log.debug("Failed to determine job id");
    }

    try {
        StackTraceElement[] stack = Thread.currentThread().getStackTrace();

        List<String> stackTrace = new ArrayList<String>(traceDepth);

        for (int i = 0; i < traceDepth && i < stack.length; i++) {
            stackTrace.add(stack[i].toString());
        }

        message.setStackTrace(stackTrace);
    } catch (Exception e) {
        log.debug("Stacktrace generation failed", e);
    }

}

From source file:com.netflix.lipstick.pigtolipstick.BasicP2LClient.java

License:Apache License

@Override
@SuppressWarnings("unused")
public void createPlan(MROperPlan plan) {
    if (plan != null && unopPlanGenerator != null && opPlanGenerator != null && context != null) {
        Configuration conf = null;
        for (MapReduceOper job : plan) {
            if (conf == null) {
                conf = new Configuration();
                ScriptState.get().addSettingsToConf(job, conf);
                break;
            }
        }
        try {
            Map<PhysicalOperator, Operator> p2lMap = Maps.newHashMap();
            Map<Operator, PhysicalOperator> l2pMap = context.getExecutionEngine().getLogToPhyMap();
            for (Entry<Operator, PhysicalOperator> i : l2pMap.entrySet()) {
                p2lMap.put(i.getValue(), i.getKey());
            }

            String script = null;

            // suppress getting script from conf for now - do something smarter later
            if (conf != null && false) {
                script = new String(Base64.decodeBase64(conf.get("pig.script")));
            }
            if ((script == null || script.length() == 0) && (ps != null)) {
                script = StringUtils.join(ps.getScriptCache(), '\n');
            }

            MRPlanCalculator opPlan = new MRPlanCalculator(opPlanGenerator.getP2jPlan(), plan, p2lMap,
                    opPlanGenerator.getReverseMap());
            MRPlanCalculator unopPlan = new MRPlanCalculator(unopPlanGenerator.getP2jPlan(), plan, p2lMap,
                    unopPlanGenerator.getReverseMap());

            P2jPlanPackage plans = new P2jPlanPackage(opPlan.getP2jPlan(), unopPlan.getP2jPlan(), script,
                    planId);

            Properties props = context.getProperties();
            plans.setUserName(UserGroupInformation.getCurrentUser().getUserName());
            if (props.containsKey(JOB_NAME_PROP)) {
                plans.setJobName(props.getProperty(JOB_NAME_PROP));
            } else {
                plans.setJobName("unknown");
            }

            if (props.containsKey(ENABLE_SAMPLE_OUTPUT_PROP)) {
                String strProp = props.getProperty(ENABLE_SAMPLE_OUTPUT_PROP).toLowerCase();
                if (strProp.equals("f") || strProp.equals("false")) {
                    enableSampleOutput = false;
                    LOG.warn("Sample Output has been disabled.");
                }
            }

            plans.getStatus().setStartTime();
            plans.getStatus().setStatusText(StatusText.running);
            invalidClient = (psClient.savePlan(plans) == null);

        } catch (Exception e) {
            LOG.error("Caught unexpected exception generating json plan.", e);
            invalidClient = true;
        }
    } else {
        LOG.warn("Not saving plan, missing necessary objects to do so");
        invalidClient = true;
    }

    if (invalidClient) {
        LOG.error("Failed to properly create lipstick client and save plan.  Lipstick will be disabled.");
    }
}