Example usage for org.apache.hadoop.io.Text Text(byte[])

Introduction

On this page you can find example usage for the org.apache.hadoop.io.Text constructor Text(byte[]).

Prototype

public Text(byte[] utf8) 

Document

Construct from a byte array.
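
A minimal, self-contained sketch of the constructor in isolation: the byte array must hold valid UTF-8, and Text copies the bytes on construction.

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.io.Text;

public class TextFromBytesExample {
    public static void main(String[] args) {
        // Text(byte[] utf8) copies the given UTF-8 bytes into the new instance.
        byte[] utf8 = "hello".getBytes(StandardCharsets.UTF_8);
        Text text = new Text(utf8);
        System.out.println(text);             // hello
        System.out.println(text.getLength()); // 5, the number of UTF-8 bytes
    }
}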

Usage

From source file:azkaban.common.web.JsonSequenceFileViewer.java

License:Apache License

public boolean canReadFile(Reader reader) {
    Text keySchema = reader.getMetadata().get(new Text("key.schema"));
    Text valueSchema = reader.getMetadata().get(new Text("value.schema"));

    return keySchema != null && valueSchema != null;
}
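
For context, the two metadata entries consulted above must be written when the SequenceFile is created. A hedged sketch using the Hadoop 1.x-era createWriter overload that accepts a Metadata argument; the output path and schema strings are hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.DefaultCodec;

public class SchemaMetadataWriter {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // Attach the schema entries that canReadFile() checks for.
        SequenceFile.Metadata meta = new SequenceFile.Metadata();
        meta.set(new Text("key.schema"), new Text("\"string\""));   // hypothetical schema
        meta.set(new Text("value.schema"), new Text("\"string\"")); // hypothetical schema

        SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf,
                new Path("/tmp/example.seq"), BytesWritable.class, BytesWritable.class,
                SequenceFile.CompressionType.NONE, new DefaultCodec(), null, meta);
        writer.close();
    }
}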

From source file:azkaban.common.web.JsonSequenceFileViewer.java

License:Apache License

public void displaySequenceFile(SequenceFile.Reader reader, PrintWriter output, int startLine, int endLine)
        throws IOException {

    if (logger.isDebugEnabled())
        logger.debug("display json file");

    try {
        BytesWritable keyWritable = new BytesWritable();
        BytesWritable valueWritable = new BytesWritable();
        Text keySchema = reader.getMetadata().get(new Text("key.schema"));
        Text valueSchema = reader.getMetadata().get(new Text("value.schema"));

        JsonTypeSerializer keySerializer = new JsonTypeSerializer(keySchema.toString());
        JsonTypeSerializer valueSerializer = new JsonTypeSerializer(valueSchema.toString());

        // skip lines before the start line
        for (int i = 1; i < startLine; i++)
            reader.next(keyWritable, valueWritable);

        // now actually output lines
        for (int i = startLine; i <= endLine; i++) {
            boolean readSomething = reader.next(keyWritable, valueWritable);
            if (!readSomething)
                break;
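            // Note: BytesWritable.getBytes() returns the full backing array, which
            // may be longer than getLength(); the JSON serializer is assumed to
            // tolerate any trailing padding bytes.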
            output.write(safeToString(keySerializer.toObject(keyWritable.getBytes())));
            output.write("\t=>\t");
            output.write(safeToString(valueSerializer.toObject(valueWritable.getBytes())));
            output.write("\n");
            output.flush();
        }
    } finally {
        reader.close();
    }
}

From source file:azkaban.jobtype.SecurePigWrapper.java

License:Apache License

public static void main(final String[] args) throws IOException, InterruptedException {
    final Logger logger = Logger.getRootLogger();
    final Properties p = System.getProperties();
    final Configuration conf = new Configuration();

    SecurityUtils.getProxiedUser(p, logger, conf).doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            prefetchToken();
            org.apache.pig.Main.main(args);
            return null;
        }

        // For Pig jobs that need to do extra communication with the
        // JobTracker, it's necessary to pre-fetch a token and include it in
        // the credentials cache
        private void prefetchToken() throws InterruptedException, IOException {
            String shouldPrefetch = p.getProperty(OBTAIN_BINARY_TOKEN);
            if (shouldPrefetch != null && shouldPrefetch.equals("true")) {
                logger.info("Pre-fetching token");
                Job job = new Job(conf, "totally phony, extremely fake, not real job");

                JobConf jc = new JobConf(conf);
                JobClient jobClient = new JobClient(jc);
                logger.info("Pre-fetching: Got new JobClient: " + jc);
                Token<DelegationTokenIdentifier> mrdt = jobClient.getDelegationToken(new Text("hi"));
                job.getCredentials().addToken(new Text("howdy"), mrdt);

                File temp = File.createTempFile("mr-azkaban", ".token");
                temp.deleteOnExit();

                FileOutputStream fos = null;
                DataOutputStream dos = null;
                try {
                    fos = new FileOutputStream(temp);
                    dos = new DataOutputStream(fos);
                    job.getCredentials().writeTokenStorageToStream(dos);
                } finally {
                    if (dos != null) {
                        dos.close();
                    }
                    if (fos != null) {
                        fos.close();
                    }
                }
                logger.info("Setting " + MAPREDUCE_JOB_CREDENTIALS_BINARY + " to " + temp.getAbsolutePath());
                System.setProperty(MAPREDUCE_JOB_CREDENTIALS_BINARY, temp.getAbsolutePath());
            } else {
                logger.info("Not pre-fetching token");
            }
        }
    });
}
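
On the consuming side, Hadoop reads the credentials file named by this property when the job runs. A hedged sketch of that pickup, assuming MAPREDUCE_JOB_CREDENTIALS_BINARY resolves to the standard "mapreduce.job.credentials.binary" property and that the Hadoop 2.x UserGroupInformation.addCredentials API is available:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;

public class CredentialsPickup {
    // Re-load pre-fetched tokens from the file written by SecurePigWrapper.
    public static void loadPrefetchedTokens() throws IOException {
        String binaryTokenFile = System.getProperty("mapreduce.job.credentials.binary");
        if (binaryTokenFile != null) {
            Credentials cred = Credentials.readTokenStorageFile(
                    new Path(binaryTokenFile), new Configuration());
            UserGroupInformation.getCurrentUser().addCredentials(cred);
        }
    }
}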

From source file:azkaban.security.commons.SecurityUtils.java

License:Apache License

public static synchronized void prefetchToken(final File tokenFile, final Props p, final Logger logger)
        throws InterruptedException, IOException {

    final Configuration conf = new Configuration();
    logger.info("Getting proxy user for " + p.getString(TO_PROXY));
    logger.info("Getting proxy user for " + p.toString());

    getProxiedUser(p.toProperties(), logger, conf).doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            getToken(p);
            return null;
        }

        private void getToken(Props p) throws InterruptedException, IOException {
            String shouldPrefetch = p.getString(OBTAIN_BINARY_TOKEN);
            if (shouldPrefetch != null && shouldPrefetch.equals("true")) {
                logger.info("Pre-fetching token");

                logger.info("Pre-fetching fs token");
                FileSystem fs = FileSystem.get(conf);
                Token<?> fsToken = fs.getDelegationToken(p.getString("user.to.proxy"));
                logger.info("Created token: " + fsToken.toString());

                Job job = new Job(conf, "totally phony, extremely fake, not real job");
                JobConf jc = new JobConf(conf);
                JobClient jobClient = new JobClient(jc);
                logger.info("Pre-fetching job token: Got new JobClient: " + jc);
                Token<DelegationTokenIdentifier> mrdt = jobClient.getDelegationToken(new Text("hi"));
                logger.info("Created token: " + mrdt.toString());

                job.getCredentials().addToken(new Text("howdy"), mrdt);
                job.getCredentials().addToken(fsToken.getService(), fsToken);

                FileOutputStream fos = null;
                DataOutputStream dos = null;
                try {
                    fos = new FileOutputStream(tokenFile);
                    dos = new DataOutputStream(fos);
                    job.getCredentials().writeTokenStorageToStream(dos);
                } finally {
                    if (dos != null) {
                        dos.close();
                    }
                    if (fos != null) {
                        fos.close();
                    }
                }
                logger.info("Loading hadoop tokens into " + tokenFile.getAbsolutePath());
                p.put("HadoopTokenFileLoc", tokenFile.getAbsolutePath());
            } else {
                logger.info("Not pre-fetching token");
            }
        }
    });
}

From source file:azkaban.security.HadoopSecurityManager_H_1_0.java

License:Apache License

@Override
public synchronized void prefetchToken(final File tokenFile, final String userToProxy, final Logger logger)
        throws HadoopSecurityManagerException {

    logger.info("Getting hadoop tokens for " + userToProxy);

    try {
        getProxiedUser(userToProxy).doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                getToken(userToProxy);
                return null;
            }

            private void getToken(String userToProxy)
                    throws InterruptedException, IOException, HadoopSecurityManagerException {

                FileSystem fs = FileSystem.get(conf);
                // check if we get the correct FS, and most importantly, the conf
                logger.info("Getting DFS token from " + fs.getCanonicalServiceName() + fs.getUri());
                Token<?> fsToken = fs.getDelegationToken(userToProxy);
                if (fsToken == null) {
                    logger.error("Failed to fetch DFS token for ");
                    throw new HadoopSecurityManagerException("Failed to fetch DFS token for " + userToProxy);
                }
                logger.info("Created DFS token: " + fsToken.toString());
                logger.info("Token kind: " + fsToken.getKind());
                logger.info("Token id: " + fsToken.getIdentifier());
                logger.info("Token service: " + fsToken.getService());

                JobConf jc = new JobConf(conf);
                JobClient jobClient = new JobClient(jc);
                logger.info("Pre-fetching JT token: Got new JobClient: " + jc);

                Token<DelegationTokenIdentifier> mrdt = jobClient.getDelegationToken(new Text("mr token"));
                if (mrdt == null) {
                    logger.error("Failed to fetch JT token for ");
                    throw new HadoopSecurityManagerException("Failed to fetch JT token for " + userToProxy);
                }
                logger.info("Created JT token: " + mrdt.toString());
                logger.info("Token kind: " + mrdt.getKind());
                logger.info("Token id: " + mrdt.getIdentifier());
                logger.info("Token service: " + mrdt.getService());

                jc.getCredentials().addToken(mrdt.getService(), mrdt);
                jc.getCredentials().addToken(fsToken.getService(), fsToken);

                FileOutputStream fos = null;
                DataOutputStream dos = null;
                try {
                    fos = new FileOutputStream(tokenFile);
                    dos = new DataOutputStream(fos);
                    jc.getCredentials().writeTokenStorageToStream(dos);
                } finally {
                    if (dos != null) {
                        dos.close();
                    }
                    if (fos != null) {
                        fos.close();
                    }
                }
                // stash them to cancel after use.
                logger.info("Tokens loaded in " + tokenFile.getAbsolutePath());
            }
        });
    } catch (Exception e) {
        e.printStackTrace();
        throw new HadoopSecurityManagerException(
                "Failed to get hadoop tokens! " + e.getMessage() + e.getCause());

    }
}

From source file:azkaban.security.HadoopSecurityManager_H_1_0.java

License:Apache License

@Override
public void cancelTokens(File tokenFile, String userToProxy, Logger logger)
        throws HadoopSecurityManagerException {
    // nntoken
    Credentials cred = null;
    try {
        cred = Credentials.readTokenStorageFile(new Path(tokenFile.toURI()), new Configuration());
        for (Token<? extends TokenIdentifier> t : cred.getAllTokens()) {
            logger.info("Got token: " + t.toString());
            logger.info("Token kind: " + t.getKind());
            logger.info("Token id: " + new String(t.getIdentifier()));
            logger.info("Token service: " + t.getService());
            if (t.getKind().equals(new Text("HIVE_DELEGATION_TOKEN"))) {
                logger.info("Cancelling hive token " + new String(t.getIdentifier()));
                cancelHiveToken(t, userToProxy);
            } else if (t.getKind().equals(new Text("MAPREDUCE_DELEGATION_TOKEN"))) {
                logger.info("Cancelling mr job tracker token " + new String(t.getIdentifier()));
                cancelMRJobTrackerToken(t, userToProxy);
            } else if (t.getKind().equals(new Text("HDFS_DELEGATION_TOKEN"))) {
                logger.info("Cancelling namenode token " + new String(t.getIdentifier()));
                cancelNameNodeToken(t, userToProxy);
            } else {
                logger.info("unknown token type " + t.getKind());
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
    }

}

From source file:azkaban.security.HadoopSecurityManager_H_1_0.java

License:Apache License

@Override
public synchronized void prefetchToken(final File tokenFile, final Props props, final Logger logger)
        throws HadoopSecurityManagerException {

    final String userToProxy = props.getString(USER_TO_PROXY);

    logger.info("Getting hadoop tokens for " + userToProxy);

    final Credentials cred = new Credentials();

    if (props.getBoolean(OBTAIN_HCAT_TOKEN, false)) {
        try {
            logger.info("Pre-fetching Hive MetaStore token from hive");

            HiveConf hiveConf = new HiveConf();
            logger.info("HiveConf.ConfVars.METASTOREURIS.varname "
                    + hiveConf.get(HiveConf.ConfVars.METASTOREURIS.varname));
            logger.info("HIVE_METASTORE_SASL_ENABLED " + hiveConf.get(HIVE_METASTORE_SASL_ENABLED));
            logger.info("HIVE_METASTORE_KERBEROS_PRINCIPAL " + hiveConf.get(HIVE_METASTORE_KERBEROS_PRINCIPAL));
            logger.info("HIVE_METASTORE_LOCAL " + hiveConf.get(HIVE_METASTORE_LOCAL));

            HiveMetaStoreClient hiveClient = new HiveMetaStoreClient(hiveConf);
            String hcatTokenStr = hiveClient.getDelegationToken(userToProxy,
                    UserGroupInformation.getLoginUser().getShortUserName());
            Token<DelegationTokenIdentifier> hcatToken = new Token<DelegationTokenIdentifier>();
            hcatToken.decodeFromUrlString(hcatTokenStr);
            logger.info("Created hive metastore token: " + hcatTokenStr);
            logger.info("Token kind: " + hcatToken.getKind());
            logger.info("Token id: " + hcatToken.getIdentifier());
            logger.info("Token service: " + hcatToken.getService());
            cred.addToken(hcatToken.getService(), hcatToken);
        } catch (Exception e) {
            e.printStackTrace();
            logger.error("Failed to get hive metastore token." + e.getMessage() + e.getCause());
        } catch (Throwable t) {
            t.printStackTrace();
            logger.error("Failed to get hive metastore token." + t.getMessage() + t.getCause());
        }
    }

    try {
        getProxiedUser(userToProxy).doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                getToken(userToProxy);
                return null;
            }

            private void getToken(String userToProxy)
                    throws InterruptedException, IOException, HadoopSecurityManagerException {
                logger.info("Here is the props for " + OBTAIN_NAMENODE_TOKEN + ": "
                        + props.getBoolean(OBTAIN_NAMENODE_TOKEN));
                if (props.getBoolean(OBTAIN_NAMENODE_TOKEN, false)) {
                    FileSystem fs = FileSystem.get(conf);
                    // check if we get the correct FS, and most importantly, the
                    // conf
                    logger.info("Getting DFS token from " + fs.getUri());
                    Token<?> fsToken = fs.getDelegationToken(userToProxy);
                    if (fsToken == null) {
                        logger.error("Failed to fetch DFS token for ");
                        throw new HadoopSecurityManagerException(
                                "Failed to fetch DFS token for " + userToProxy);
                    }
                    logger.info("Created DFS token: " + fsToken.toString());
                    logger.info("Token kind: " + fsToken.getKind());
                    logger.info("Token id: " + fsToken.getIdentifier());
                    logger.info("Token service: " + fsToken.getService());
                    cred.addToken(fsToken.getService(), fsToken);
                }

                if (props.getBoolean(OBTAIN_JOBTRACKER_TOKEN, false)) {
                    JobClient jobClient = new JobClient(new JobConf());
                    logger.info("Pre-fetching JT token from JobTracker");

                    Token<DelegationTokenIdentifier> mrdt = jobClient.getDelegationToken(new Text("mr token"));
                    if (mrdt == null) {
                        logger.error("Failed to fetch JT token");
                        throw new HadoopSecurityManagerException("Failed to fetch JT token for " + userToProxy);
                    }
                    logger.info("Created JT token: " + mrdt.toString());
                    logger.info("Token kind: " + mrdt.getKind());
                    logger.info("Token id: " + mrdt.getIdentifier());
                    logger.info("Token service: " + mrdt.getService());
                    cred.addToken(mrdt.getService(), mrdt);
                }
            }
        });

        FileOutputStream fos = null;
        DataOutputStream dos = null;
        try {
            fos = new FileOutputStream(tokenFile);
            dos = new DataOutputStream(fos);
            cred.writeTokenStorageToStream(dos);
        } finally {
            if (dos != null) {
                dos.close();
            }
            if (fos != null) {
                fos.close();
            }
        }

        // stash them to cancel after use.
        logger.info("Tokens loaded in " + tokenFile.getAbsolutePath());

    } catch (Exception e) {
        e.printStackTrace();
        throw new HadoopSecurityManagerException(
                "Failed to get hadoop tokens! " + e.getMessage() + e.getCause());
    } catch (Throwable t) {
        t.printStackTrace();
        throw new HadoopSecurityManagerException(
                "Failed to get hadoop tokens! " + t.getMessage() + t.getCause());
    }
}

From source file:azkaban.security.HadoopSecurityManager_H_2_0.java

License:Apache License

@Override
public synchronized void prefetchToken(final File tokenFile, final String userToProxy, final Logger logger)
        throws HadoopSecurityManagerException {

    logger.info("Getting hadoop tokens for " + userToProxy);

    try {
        getProxiedUser(userToProxy).doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                getToken(userToProxy);
                return null;
            }

            private void getToken(String userToProxy)
                    throws InterruptedException, IOException, HadoopSecurityManagerException {

                FileSystem fs = FileSystem.get(conf);
                // check if we get the correct FS, and most importantly, the conf
                logger.info("Getting DFS token from " + fs.getCanonicalServiceName() + fs.getUri());
                Token<?> fsToken = fs.getDelegationToken(userToProxy);
                if (fsToken == null) {
                    logger.error("Failed to fetch DFS token for " + userToProxy);
                    throw new HadoopSecurityManagerException("Failed to fetch DFS token for " + userToProxy);
                }
                logger.info("Created DFS token: " + fsToken.toString());
                logger.info("Token kind: " + fsToken.getKind());
                logger.info("Token id: " + fsToken.getIdentifier());
                logger.info("Token service: " + fsToken.getService());

                JobConf jc = new JobConf(conf);
                JobClient jobClient = new JobClient(jc);
                logger.info("Pre-fetching JT token: Got new JobClient: " + jc);

                Token<DelegationTokenIdentifier> mrdt = jobClient.getDelegationToken(new Text("mr token"));
                if (mrdt == null) {
                    logger.error("Failed to fetch JT token for " + userToProxy);
                    throw new HadoopSecurityManagerException("Failed to fetch JT token for " + userToProxy);
                }
                logger.info("Created JT token: " + mrdt.toString());
                logger.info("Token kind: " + mrdt.getKind());
                logger.info("Token id: " + mrdt.getIdentifier());
                logger.info("Token service: " + mrdt.getService());

                jc.getCredentials().addToken(mrdt.getService(), mrdt);
                jc.getCredentials().addToken(fsToken.getService(), fsToken);

                FileOutputStream fos = null;
                DataOutputStream dos = null;
                try {
                    fos = new FileOutputStream(tokenFile);
                    dos = new DataOutputStream(fos);
                    jc.getCredentials().writeTokenStorageToStream(dos);
                } finally {
                    if (dos != null) {
                        try {
                            dos.close();
                        } catch (Throwable t) {
                            // best effort
                            logger.error(
                                    "encountered exception while closing DataOutputStream of the tokenFile", t);
                        }
                    }
                    if (fos != null) {
                        fos.close();
                    }
                }
                // stash them to cancel after use.
                logger.info("Tokens loaded in " + tokenFile.getAbsolutePath());
            }
        });
    } catch (Exception e) {
        throw new HadoopSecurityManagerException(
                "Failed to get hadoop tokens! " + e.getMessage() + e.getCause());

    }
}

From source file:azkaban.security.HadoopSecurityManager_H_2_0.java

License:Apache License

@Override
public void cancelTokens(File tokenFile, String userToProxy, Logger logger)
        throws HadoopSecurityManagerException {
    // nntoken
    Credentials cred = null;
    try {
        cred = Credentials.readTokenStorageFile(new Path(tokenFile.toURI()), new Configuration());
        for (Token<? extends TokenIdentifier> t : cred.getAllTokens()) {

            logger.info("Got token: " + t.toString());
            logger.info("Token kind: " + t.getKind());
            logger.info("Token id: " + new String(t.getIdentifier()));
            logger.info("Token service: " + t.getService());

            if (t.getKind().equals(new Text("HIVE_DELEGATION_TOKEN"))) {
                logger.info("Cancelling hive token " + new String(t.getIdentifier()));
                cancelHiveToken(t, userToProxy);
            } else if (t.getKind().equals(new Text("RM_DELEGATION_TOKEN"))) {
                logger.info("Cancelling mr job tracker token " + new String(t.getIdentifier()));
                // cancelMRJobTrackerToken(t, userToProxy);
            } else if (t.getKind().equals(new Text("HDFS_DELEGATION_TOKEN"))) {
                logger.info("Cancelling namenode token " + new String(t.getIdentifier()));
                // cancelNameNodeToken(t, userToProxy);
            } else if (t.getKind().equals(new Text("MR_DELEGATION_TOKEN"))) {
                logger.info("Cancelling jobhistoryserver mr token " + new String(t.getIdentifier()));
                // cancelJhsToken(t, userToProxy);
            } else {
                logger.info("unknown token type " + t.getKind());
            }
        }
    } catch (Exception e) {
        throw new HadoopSecurityManagerException("Failed to cancel tokens " + e.getMessage() + e.getCause(), e);
    }

}

From source file:azkaban.security.HadoopSecurityManager_H_2_0.java

License:Apache License

/**
 * Function to fetch an hcat token per the specified hive configuration and
 * then store the token in the specified credential store.
 *
 * @param userToProxy String value indicating the name of the user the token
 *          will be fetched for.
 * @param hiveConf the configuration from which the hive client will be
 *          initialized.
 * @param tokenSignatureOverwrite if non-empty, overwrites the service field
 *          of the fetched token (trimmed and lower-cased).
 * @param logger the logger instance which writes the logging content to the
 *          job logs.
 *
 * @throws IOException
 * @throws TException
 * @throws MetaException
 */
private Token<DelegationTokenIdentifier> fetchHcatToken(String userToProxy, HiveConf hiveConf,
        String tokenSignatureOverwrite, final Logger logger) throws IOException, MetaException, TException {

    logger.info(HiveConf.ConfVars.METASTOREURIS.varname + ": "
            + hiveConf.get(HiveConf.ConfVars.METASTOREURIS.varname));

    logger.info(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname + ": "
            + hiveConf.get(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname));

    logger.info(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL.varname + ": "
            + hiveConf.get(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL.varname));

    HiveMetaStoreClient hiveClient = new HiveMetaStoreClient(hiveConf);
    String hcatTokenStr = hiveClient.getDelegationToken(userToProxy,
            UserGroupInformation.getLoginUser().getShortUserName());
    Token<DelegationTokenIdentifier> hcatToken = new Token<DelegationTokenIdentifier>();
    hcatToken.decodeFromUrlString(hcatTokenStr);

    // overwrite the value of the service property of the token if the signature
    // override is specified.
    if (tokenSignatureOverwrite != null && tokenSignatureOverwrite.trim().length() > 0) {
        hcatToken.setService(new Text(tokenSignatureOverwrite.trim().toLowerCase()));

        logger.info(HIVE_TOKEN_SIGNATURE_KEY + ": " + tokenSignatureOverwrite);
    }

    logger.info("Created hive metastore token: " + hcatTokenStr);
    logger.info("Token kind: " + hcatToken.getKind());
    logger.info("Token id: " + hcatToken.getIdentifier());
    logger.info("Token service: " + hcatToken.getService());
    return hcatToken;
}
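
A hypothetical call site for this helper; cred, userToProxy, logger, and HIVE_TOKEN_SIGNATURE_KEY are assumed to be provided by the enclosing class, as in the snippets above:

// Hypothetical usage; every identifier other than fetchHcatToken is assumed
// to come from the enclosing HadoopSecurityManager_H_2_0 instance.
HiveConf hiveConf = new HiveConf();
String tokenSignatureOverwrite = hiveConf.get(HIVE_TOKEN_SIGNATURE_KEY);
Token<DelegationTokenIdentifier> hcatToken =
        fetchHcatToken(userToProxy, hiveConf, tokenSignatureOverwrite, logger);
cred.addToken(hcatToken.getService(), hcatToken);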