List of usage examples for org.apache.commons.codec.binary.Base64#encodeBase64URLSafeString
public static String encodeBase64URLSafeString(final byte[] binaryData)
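Before the real-world examples, here is a minimal standalone sketch of what the method produces; the sample bytes are arbitrary and chosen so the standard alphabet would need '+', '/' and '=' padding:

import java.util.Arrays;
import org.apache.commons.codec.binary.Base64;

public class Base64UrlSafeDemo {
    public static void main(String[] args) {
        byte[] data = { (byte) 0xfb, (byte) 0xff, (byte) 0xbe, 0x01 };

        // Standard alphabet keeps '+', '/' and pads with '=': "+/++AQ=="
        System.out.println(Base64.encodeBase64String(data));

        // URL-safe alphabet substitutes '-' and '_' and drops padding: "-_--AQ"
        String urlSafe = Base64.encodeBase64URLSafeString(data);
        System.out.println(urlSafe);

        // decodeBase64 accepts both alphabets, so the round trip recovers the input
        System.out.println(Arrays.equals(data, Base64.decodeBase64(urlSafe))); // true
    }
}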
From source file: org.apache.hadoop.hive.metastore.hbase.HBaseReadWrite.java

/**
 * Print all of the storage descriptors. This doesn't take a regular expression since the key
 * is an md5 hash and it's hard to see how a regex on this would be useful.
 * @return list of all storage descriptors as strings
 * @throws IOException
 * @throws TException
 */
List<String> printStorageDescriptors() throws IOException, TException {
    Iterator<Result> results = scan(SD_TABLE, CATALOG_CF, CATALOG_COL);
    if (!results.hasNext())
        return Arrays.asList("No storage descriptors");
    List<String> lines = new ArrayList<>();
    while (results.hasNext()) {
        Result result = results.next();
        lines.add(Base64.encodeBase64URLSafeString(result.getRow()) + ": " + dumpThriftObject(
                HBaseUtils.deserializeStorageDescriptor(result.getValue(CATALOG_CF, CATALOG_COL))));
    }
    return lines;
}
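The row keys printed above are raw MD5 digests, so URL-safe encoding is what makes them printable. A hedged sketch of how such a key line could be rendered; the serializedSd payload and the line format are illustrative stand-ins, not the actual HBaseUtils logic:

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import org.apache.commons.codec.binary.Base64;

public class SdKeyDemo {
    public static void main(String[] args) throws Exception {
        // Illustrative stand-in for a serialized storage descriptor
        byte[] serializedSd = "thrift-serialized descriptor".getBytes(StandardCharsets.UTF_8);

        // Assume the row key is an MD5 digest of the serialized bytes
        byte[] rowKey = MessageDigest.getInstance("MD5").digest(serializedSd);

        // Raw digest bytes are unprintable; URL-safe Base64 gives a compact,
        // shell-friendly rendering (22 chars for 16 MD5 bytes, no padding)
        System.out.println(Base64.encodeBase64URLSafeString(rowKey) + ": <descriptor dump>");
    }
}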
From source file: org.apache.hadoop.hive.metastore.security.DBTokenStore.java

@Override
public boolean addToken(DelegationTokenIdentifier tokenIdentifier, DelegationTokenInformation token)
        throws TokenStoreException {
    try {
        String identifier = TokenStoreDelegationTokenSecretManager.encodeWritable(tokenIdentifier);
        String tokenStr = Base64.encodeBase64URLSafeString(
                MetastoreDelegationTokenSupport.encodeDelegationTokenInformation(token));
        boolean result = (Boolean) invokeOnTokenStore("addToken", new Object[] { identifier, tokenStr },
                String.class, String.class);
        LOG.trace("addToken: tokenIdentifier = {}, added = {}", tokenIdentifier, result);
        return result;
    } catch (IOException e) {
        throw new TokenStoreException(e);
    }
}
From source file: org.apache.hadoop.hive.thrift.DBTokenStore.java

@Override
public boolean addToken(DelegationTokenIdentifier tokenIdentifier, DelegationTokenInformation token)
        throws TokenStoreException {
    try {
        String identifier = TokenStoreDelegationTokenSecretManager.encodeWritable(tokenIdentifier);
        String tokenStr = Base64
                .encodeBase64URLSafeString(HiveDelegationTokenSupport.encodeDelegationTokenInformation(token));
        boolean result = (Boolean) invokeOnTokenStore("addToken", new Object[] { identifier, tokenStr },
                String.class, String.class);
        if (LOG.isTraceEnabled()) {
            LOG.trace("addToken: tokenIdentifier = " + tokenIdentifier + ", added = " + result);
        }
        return result;
    } catch (IOException e) {
        throw new TokenStoreException(e);
    }
}
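In both token-store variants the serialized token bytes go into a string column, so a matching decode is needed on the read path. A minimal sketch of that round trip; the tokenBytes payload is a placeholder for the encoded DelegationTokenInformation:

import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import org.apache.commons.codec.binary.Base64;

public class TokenRoundTripDemo {
    public static void main(String[] args) {
        // Placeholder for encodeDelegationTokenInformation(token)
        byte[] tokenBytes = "opaque-token-payload".getBytes(StandardCharsets.UTF_8);

        // Store side: binary token -> URL-safe string, safe for a VARCHAR column
        String tokenStr = Base64.encodeBase64URLSafeString(tokenBytes);

        // Read side: decodeBase64 handles the URL-safe alphabet too
        byte[] restored = Base64.decodeBase64(tokenStr);
        System.out.println(Arrays.equals(tokenBytes, restored)); // true
    }
}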
From source file: org.apache.hadoop.io.crypto.bee.key.sasl.KeySaslClient.java

public SaslParam connection(String strBaseUrl, SaslParam saslParam) throws Exception {
    String strParam = new Gson().toJson(saslParam);
    String strUrl = strBaseUrl + Base64.encodeBase64URLSafeString(strParam.getBytes());
    logger.debug("prepare to connect. strBaseUrl = " + strBaseUrl + ", strParam = " + strParam);
    URL url = new URL(strUrl);
    StringBuffer sb = new RestClient(url).getResult();
    return new Gson().fromJson(sb.toString(), SaslParam.class);
}
From source file: org.apache.hadoop.io.crypto.tool.BeeCli.java

private URL buildRequestUrl(String method, String parameter) throws Exception {
    // String strUrl = "http://" + this.strManagerUrl + BeeConstants.API_ROOT + method;
    String strUrl = "https://" + this.strManagerUrl + BeeConstants.API_ROOT + method;
    if (parameter != null && !parameter.isEmpty()) {
        strUrl = strUrl + "/" + Base64.encodeBase64URLSafeString(parameter.getBytes());
    }
    return new URL(strUrl);
}
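Both of the last two examples embed a payload directly in a URL path; the URL-safe alphabet avoids '/', '+' and '=', so no further percent-escaping is needed. A sketch of the pattern with a server-side decode; the host name and JSON are illustrative, and note that an explicit charset is safer than the platform-default getBytes() used above:

import java.net.URL;
import java.nio.charset.StandardCharsets;
import org.apache.commons.codec.binary.Base64;

public class UrlParamDemo {
    public static void main(String[] args) throws Exception {
        String json = "{\"user\":\"alice\",\"op\":\"getKey\"}";

        // URL-safe Base64 contains no '/', '+' or '=', so the payload can sit
        // in a path segment without further escaping
        String segment = Base64.encodeBase64URLSafeString(json.getBytes(StandardCharsets.UTF_8));
        URL url = new URL("https://manager.example.com/api/v1/connect/" + segment);
        System.out.println(url);

        // Server side: recover the JSON from the last path segment
        String path = url.getPath();
        String encoded = path.substring(path.lastIndexOf('/') + 1);
        System.out.println(new String(Base64.decodeBase64(encoded), StandardCharsets.UTF_8));
    }
}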
From source file: org.apache.hadoop.mapreduce.CryptoUtils.java

/**
 * Wraps a given FSDataOutputStream with a CryptoOutputStream. The size of the
 * data buffer required for the stream is specified by the
 * "mapreduce.job.encrypted-intermediate-data.buffer.kb" Job configuration
 * variable.
 *
 * @param conf
 * @param out
 * @return FSDataOutputStream
 * @throws IOException
 */
public static FSDataOutputStream wrapIfNecessary(Configuration conf, FSDataOutputStream out)
        throws IOException {
    if (isEncryptedSpillEnabled(conf)) {
        out.write(ByteBuffer.allocate(8).putLong(out.getPos()).array());
        byte[] iv = createIV(conf);
        out.write(iv);
        if (LOG.isDebugEnabled()) {
            LOG.debug("IV written to Stream [" + Base64.encodeBase64URLSafeString(iv) + "]");
        }
        return new CryptoFSDataOutputStream(out, CryptoCodec.getInstance(conf), getBufferSize(conf),
                getEncryptionKey(), iv);
    } else {
        return out;
    }
}
From source file: org.apache.hadoop.mapreduce.CryptoUtils.java

/**
 * Wraps a given InputStream with a CryptoInputStream. The size of the data
 * buffer required for the stream is specified by the
 * "mapreduce.job.encrypted-intermediate-data.buffer.kb" Job configuration
 * variable.
 *
 * If the value of 'length' is > -1, the InputStream is additionally
 * wrapped in a LimitInputStream. CryptoStreams are late buffering in nature.
 * This means they will always try to read ahead if they can. The
 * LimitInputStream will ensure that the CryptoStream does not read past the
 * provided length from the given Input Stream.
 *
 * @param conf
 * @param in
 * @param length
 * @return InputStream
 * @throws IOException
 */
public static InputStream wrapIfNecessary(Configuration conf, InputStream in, long length)
        throws IOException {
    if (isEncryptedSpillEnabled(conf)) {
        int bufferSize = getBufferSize(conf);
        if (length > -1) {
            in = new LimitInputStream(in, length);
        }
        byte[] offsetArray = new byte[8];
        IOUtils.readFully(in, offsetArray, 0, 8);
        long offset = ByteBuffer.wrap(offsetArray).getLong();
        CryptoCodec cryptoCodec = CryptoCodec.getInstance(conf);
        byte[] iv = new byte[cryptoCodec.getCipherSuite().getAlgorithmBlockSize()];
        IOUtils.readFully(in, iv, 0, cryptoCodec.getCipherSuite().getAlgorithmBlockSize());
        if (LOG.isDebugEnabled()) {
            LOG.debug("IV read from [" + Base64.encodeBase64URLSafeString(iv) + "]");
        }
        return new CryptoInputStream(in, cryptoCodec, bufferSize, getEncryptionKey(), iv,
                offset + cryptoPadding(conf));
    } else {
        return in;
    }
}
From source file: org.apache.hadoop.mapreduce.CryptoUtils.java

/**
 * Wraps a given FSDataInputStream with a CryptoInputStream. The size of the
 * data buffer required for the stream is specified by the
 * "mapreduce.job.encrypted-intermediate-data.buffer.kb" Job configuration
 * variable.
 *
 * @param conf
 * @param in
 * @return FSDataInputStream
 * @throws IOException
 */
public static FSDataInputStream wrapIfNecessary(Configuration conf, FSDataInputStream in) throws IOException {
    if (isEncryptedSpillEnabled(conf)) {
        CryptoCodec cryptoCodec = CryptoCodec.getInstance(conf);
        int bufferSize = getBufferSize(conf);
        // Not going to be used... but still has to be read...
        // Since the O/P stream always writes it..
        IOUtils.readFully(in, new byte[8], 0, 8);
        byte[] iv = new byte[cryptoCodec.getCipherSuite().getAlgorithmBlockSize()];
        IOUtils.readFully(in, iv, 0, cryptoCodec.getCipherSuite().getAlgorithmBlockSize());
        if (LOG.isDebugEnabled()) {
            LOG.debug("IV read from Stream [" + Base64.encodeBase64URLSafeString(iv) + "]");
        }
        return new CryptoFSDataInputStream(in, cryptoCodec, bufferSize, getEncryptionKey(), iv);
    } else {
        return in;
    }
}
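All three CryptoUtils methods above log the IV in URL-safe Base64, so the writer-side and reader-side debug lines can be compared textually. A small sketch of that logging pattern; the 16-byte random IV is a stand-in for what createIV(conf) returns:

import java.security.SecureRandom;
import org.apache.commons.codec.binary.Base64;

public class IvLogDemo {
    public static void main(String[] args) {
        // Stand-in for CryptoUtils.createIV(conf): one AES block of random bytes
        byte[] iv = new byte[16];
        new SecureRandom().nextBytes(iv);

        // Same rendering as the debug lines above; the URL-safe alphabet means
        // the logged value never contains '+', '/' or '=' and is easy to grep
        System.out.println("IV written to Stream [" + Base64.encodeBase64URLSafeString(iv) + "]");
    }
}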
From source file: org.apache.hadoop.mapreduce.v2.TestMRJobs.java

@Test(timeout = 120000)
public void testContainerRollingLog() throws IOException, InterruptedException, ClassNotFoundException {
    if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
        LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
        return;
    }

    final SleepJob sleepJob = new SleepJob();
    final JobConf sleepConf = new JobConf(mrCluster.getConfig());
    sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, Level.ALL.toString());
    final long userLogKb = 4;
    sleepConf.setLong(MRJobConfig.TASK_USERLOG_LIMIT, userLogKb);
    sleepConf.setInt(MRJobConfig.TASK_LOG_BACKUPS, 3);
    sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, Level.ALL.toString());
    final long amLogKb = 7;
    sleepConf.setLong(MRJobConfig.MR_AM_LOG_KB, amLogKb);
    sleepConf.setInt(MRJobConfig.MR_AM_LOG_BACKUPS, 7);
    sleepJob.setConf(sleepConf);

    final Job job = sleepJob.createJob(1, 0, 1L, 100, 0L, 0);
    job.setJarByClass(SleepJob.class);
    job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
    job.waitForCompletion(true);
    final JobId jobId = TypeConverter.toYarn(job.getJobID());
    final ApplicationId appID = jobId.getAppId();
    int pollElapsed = 0;
    while (true) {
        Thread.sleep(1000);
        pollElapsed += 1000;
        if (TERMINAL_RM_APP_STATES
                .contains(mrCluster.getResourceManager().getRMContext().getRMApps().get(appID).getState())) {
            break;
        }
        if (pollElapsed >= 60000) {
            LOG.warn("application did not reach terminal state within 60 seconds");
            break;
        }
    }
    Assert.assertEquals(RMAppState.FINISHED,
            mrCluster.getResourceManager().getRMContext().getRMApps().get(appID).getState());

    // Job finished, verify logs
    //
    final String appIdStr = appID.toString();
    final String appIdSuffix = appIdStr.substring("application_".length(), appIdStr.length());
    final String containerGlob = "container_" + appIdSuffix + "_*_*";
    final String syslogGlob = appIdStr + Path.SEPARATOR + containerGlob + Path.SEPARATOR
            + TaskLog.LogName.SYSLOG;
    int numAppMasters = 0;
    int numMapTasks = 0;
    String user = UserGroupInformation.getCurrentUser().getUserName();
    String userFolder;
    try {
        MessageDigest digest = MessageDigest
                .getInstance(mrCluster.getResourceManager().getRMContext().getUserFolderHashAlgo());
        byte[] userBytes = user.getBytes(StandardCharsets.UTF_8);
        byte[] hashBase = ArrayUtils.addAll(userBytes, mrCluster.getResourceManager().getRMContext().getSeed());
        byte[] hash = digest.digest(hashBase);
        userFolder = Base64.encodeBase64URLSafeString(hash);
    } catch (NoSuchAlgorithmException ex) {
        LOG.error("error while creating userFolder random string", ex);
        throw new Error("error while creating userFolder random string", ex);
    }
    for (int i = 0; i < NUM_NODE_MGRS; i++) {
        final Configuration nmConf = mrCluster.getNodeManager(i).getConfig();
        for (String logDir : nmConf.getTrimmedStrings(YarnConfiguration.NM_LOG_DIRS)) {
            Path userLogDir = new Path(logDir, userFolder);
            final Path absSyslogGlob = new Path(userLogDir + Path.SEPARATOR + syslogGlob);
            LOG.info("Checking for glob: " + absSyslogGlob);
            final FileStatus[] syslogs = localFs.globStatus(absSyslogGlob);
            for (FileStatus slog : syslogs) {
                boolean foundAppMaster = job.isUber();
                final Path containerPathComponent = slog.getPath().getParent();
                if (!foundAppMaster) {
                    final ContainerId cid = ContainerId.fromString(containerPathComponent.getName());
                    foundAppMaster = ((cid.getContainerId() & ContainerId.CONTAINER_ID_BITMASK) == 1);
                }
                final FileStatus[] sysSiblings = localFs
                        .globStatus(new Path(containerPathComponent, TaskLog.LogName.SYSLOG + "*"));
                // sort to ensure for i > 0 sysSiblings[i] == "syslog.i"
                Arrays.sort(sysSiblings);
                if (foundAppMaster) {
                    numAppMasters++;
                } else {
                    numMapTasks++;
                }
                if (foundAppMaster) {
                    Assert.assertSame("Unexpected number of AM sylog* files",
                            sleepConf.getInt(MRJobConfig.MR_AM_LOG_BACKUPS, 0) + 1, sysSiblings.length);
                    Assert.assertTrue("AM syslog.1 length kb should be >= " + amLogKb,
                            sysSiblings[1].getLen() >= amLogKb * 1024);
                } else {
                    Assert.assertSame("Unexpected number of MR task sylog* files",
                            sleepConf.getInt(MRJobConfig.TASK_LOG_BACKUPS, 0) + 1, sysSiblings.length);
                    Assert.assertTrue("MR syslog.1 length kb should be >= " + userLogKb,
                            sysSiblings[1].getLen() >= userLogKb * 1024);
                }
            }
        }
    }

    // Make sure we checked non-empty set
    //
    Assert.assertEquals("No AppMaster log found!", 1, numAppMasters);
    if (sleepConf.getBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false)) {
        Assert.assertEquals("MapTask log with uber found!", 0, numMapTasks);
    } else {
        Assert.assertEquals("No MapTask log found!", 1, numMapTasks);
    }
}
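The test above reconstructs the NodeManager's per-user log folder by hashing the user name concatenated with a cluster seed and URL-safe-encoding the digest; because the output can never contain '/', it is always a single valid path component. A sketch of that derivation; in the test the algorithm and seed come from the RM context (getUserFolderHashAlgo() / getSeed()), so the "SHA-256" and seed bytes here are illustrative stand-ins:

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.lang3.ArrayUtils;

public class UserFolderDemo {
    public static void main(String[] args) throws Exception {
        String user = "alice";
        byte[] seed = { 0x0c, 0x2f, 0x51, 0x7e }; // illustrative cluster seed

        // Hash(user bytes || seed), as in the test
        byte[] hashBase = ArrayUtils.addAll(user.getBytes(StandardCharsets.UTF_8), seed);
        byte[] hash = MessageDigest.getInstance("SHA-256").digest(hashBase);

        // URL-safe Base64 cannot contain '/', so the encoded digest is always
        // a single, filesystem-safe path component
        String userFolder = Base64.encodeBase64URLSafeString(hash);
        System.out.println(userFolder); // 43-char name for a 32-byte digest
    }
}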
From source file: org.apache.hadoop.mapreduce.v2.TestMRJobs.java

@Test(timeout = 120000)
public void testThreadDumpOnTaskTimeout() throws IOException, InterruptedException, ClassNotFoundException {
    if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
        LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
        return;
    }

    final SleepJob sleepJob = new SleepJob();
    final JobConf sleepConf = new JobConf(mrCluster.getConfig());
    sleepConf.setLong(MRJobConfig.TASK_TIMEOUT, 3 * 1000L);
    sleepConf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, 1);
    sleepJob.setConf(sleepConf);
    if (this instanceof TestUberAM) {
        sleepConf.setInt(MRJobConfig.MR_AM_TO_RM_HEARTBEAT_INTERVAL_MS, 30 * 1000);
    }
    // sleep for 10 seconds to trigger a kill with thread dump
    final Job job = sleepJob.createJob(1, 0, 10 * 60 * 1000L, 1, 0L, 0);
    job.setJarByClass(SleepJob.class);
    job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
    job.waitForCompletion(true);
    final JobId jobId = TypeConverter.toYarn(job.getJobID());
    final ApplicationId appID = jobId.getAppId();
    int pollElapsed = 0;
    while (true) {
        Thread.sleep(1000);
        pollElapsed += 1000;
        if (TERMINAL_RM_APP_STATES
                .contains(mrCluster.getResourceManager().getRMContext().getRMApps().get(appID).getState())) {
            break;
        }
        if (pollElapsed >= 60000) {
            LOG.warn("application did not reach terminal state within 60 seconds");
            break;
        }
    }

    // Job finished, verify logs
    //
    final String appIdStr = appID.toString();
    final String appIdSuffix = appIdStr.substring("application_".length(), appIdStr.length());
    final String containerGlob = "container_" + appIdSuffix + "_*_*";
    final String syslogGlob = appIdStr + Path.SEPARATOR + containerGlob + Path.SEPARATOR
            + TaskLog.LogName.SYSLOG;
    int numAppMasters = 0;
    int numMapTasks = 0;
    String user = UserGroupInformation.getCurrentUser().getUserName();
    String userFolder;
    try {
        MessageDigest digest = MessageDigest
                .getInstance(mrCluster.getResourceManager().getRMContext().getUserFolderHashAlgo());
        byte[] userBytes = user.getBytes(StandardCharsets.UTF_8);
        byte[] hashBase = ArrayUtils.addAll(userBytes, mrCluster.getResourceManager().getRMContext().getSeed());
        byte[] hash = digest.digest(hashBase);
        userFolder = Base64.encodeBase64URLSafeString(hash);
    } catch (NoSuchAlgorithmException ex) {
        LOG.error("error while creating userFolder random string", ex);
        throw new Error("error while creating userFolder random string", ex);
    }
    for (int i = 0; i < NUM_NODE_MGRS; i++) {
        final Configuration nmConf = mrCluster.getNodeManager(i).getConfig();
        for (String logDir : nmConf.getTrimmedStrings(YarnConfiguration.NM_LOG_DIRS)) {
            Path userLogFolder = new Path(logDir, userFolder);
            final Path absSyslogGlob = new Path(userLogFolder + Path.SEPARATOR + syslogGlob);
            LOG.info("Checking for glob: " + absSyslogGlob);
            for (FileStatus syslog : localFs.globStatus(absSyslogGlob)) {
                boolean foundAppMaster = false;
                boolean foundThreadDump = false;

                // Determine the container type
                final BufferedReader syslogReader = new BufferedReader(
                        new InputStreamReader(localFs.open(syslog.getPath())));
                try {
                    for (String line; (line = syslogReader.readLine()) != null;) {
                        if (line.contains(MRAppMaster.class.getName())) {
                            foundAppMaster = true;
                            break;
                        }
                    }
                } finally {
                    syslogReader.close();
                }

                // Check for thread dump in stdout
                final Path stdoutPath = new Path(syslog.getPath().getParent(),
                        TaskLog.LogName.STDOUT.toString());
                final BufferedReader stdoutReader = new BufferedReader(
                        new InputStreamReader(localFs.open(stdoutPath)));
                try {
                    for (String line; (line = stdoutReader.readLine()) != null;) {
                        if (line.contains("Full thread dump")) {
                            foundThreadDump = true;
                            break;
                        }
                    }
                } finally {
                    stdoutReader.close();
                }

                if (foundAppMaster) {
                    numAppMasters++;
                    if (this instanceof TestUberAM) {
                        Assert.assertTrue("No thread dump", foundThreadDump);
                    } else {
                        Assert.assertFalse("Unexpected thread dump", foundThreadDump);
                    }
                } else {
                    numMapTasks++;
                    Assert.assertTrue("No thread dump", foundThreadDump);
                }
            }
        }
    }

    // Make sure we checked non-empty set
    //
    Assert.assertEquals("No AppMaster log found!", 1, numAppMasters);
    if (sleepConf.getBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false)) {
        Assert.assertSame("MapTask log with uber found!", 0, numMapTasks);
    } else {
        Assert.assertSame("No MapTask log found!", 1, numMapTasks);
    }
}