List of usage examples for org.apache.hadoop.io.DataOutputBuffer — constructor
public DataOutputBuffer()
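All of the examples below share one idiom: write into a DataOutputBuffer, then pass the backing array along via getData() together with getLength(), because getData() returns the whole (usually larger) backing array. A minimal round-trip sketch of that idiom, not drawn from any of the projects below:

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;

public class DataOutputBufferRoundTrip {
    public static void main(String[] args) throws Exception {
        DataOutputBuffer out = new DataOutputBuffer();
        out.writeInt(42);
        out.writeUTF("hello");

        // getData() returns the whole backing array; only the first
        // getLength() bytes are valid.
        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), 0, out.getLength());

        System.out.println(in.readInt()); // 42
        System.out.println(in.readUTF()); // hello
    }
}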
From source file:com.tikal.fuseday.bigdata.FastaRecordReader.java
License:Apache License
public boolean readValue(Text value) throws IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    readUntilMatch(_RECORD_BEGIN, false, false, out);
    value.set(new String(out.getData(), 0, out.getLength()).replaceAll("\n", ""));
    return true;
}
From source file:com.tito.easyyarn.appmaster.ApplicationMaster.java
License:Apache License
private void extractTokens() {
    // Credentials, Token, UserGroupInformation, DataOutputBuffer
    try {
        Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        // Now remove the AM->RM token so that containers cannot access it.
        Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
        LOG.info("Executing with tokens:");
        while (iter.hasNext()) {
            Token<?> token = iter.next();
            LOG.info(token);
            if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
                iter.remove();
            }
        }
        allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
    } catch (IOException e) {
        LOG.error("extractTokens error", e);
    }
}
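The Credentials-to-ByteBuffer sequence in extractTokens() is the standard YARN idiom for handing security tokens to containers, and it recurs in several of the examples below. Distilled into a self-contained helper (a sketch; the class and method names here are invented for illustration, only the Hadoop calls are real):

import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;

public final class TokenSerialization {
    private TokenSerialization() {}

    /** Serializes the current user's tokens into the ByteBuffer form
     *  expected by ContainerLaunchContext.setTokens(). */
    public static ByteBuffer currentUserTokens() throws IOException {
        Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        // Wrap only the valid region of the backing array.
        return ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
    }
}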
From source file:com.toy.Client.java
License:Apache License
/**
 * Start a new Application Master and deploy the web application on 2 Tomcat containers
 *
 * @throws Exception
 */
void start() throws Exception {
    // Check tomcat dir
    final File tomcatHomeDir = new File(toyConfig.tomcat);
    final File tomcatLibraries = new File(tomcatHomeDir, "lib");
    final File tomcatBinaries = new File(tomcatHomeDir, "bin");
    Preconditions.checkState(tomcatLibraries.isDirectory(),
            tomcatLibraries.getAbsolutePath() + " does not exist");

    // Check war file
    final File warFile = new File(toyConfig.war);
    Preconditions.checkState(warFile.isFile(), warFile.getAbsolutePath() + " does not exist");

    yarn = YarnClient.createYarnClient();
    yarn.init(configuration);
    yarn.start();
    YarnClientApplication yarnApplication = yarn.createApplication();
    GetNewApplicationResponse newApplication = yarnApplication.getNewApplicationResponse();
    appId = newApplication.getApplicationId();
    ApplicationSubmissionContext appContext = yarnApplication.getApplicationSubmissionContext();
    appContext.setApplicationName("Tomcat : " + tomcatHomeDir.getName() + "\n War : " + warFile.getName());

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    // Register required libraries
    Map<String, LocalResource> localResources = new HashMap<>();
    FileSystem fs = FileSystem.get(configuration);
    uploadDepAndRegister(localResources, appId, fs, "lib-ext/curator-client-2.3.0.jar");
    uploadDepAndRegister(localResources, appId, fs, "lib-ext/curator-framework-2.3.0.jar");
    uploadDepAndRegister(localResources, appId, fs, "lib-ext/curator-recipes-2.3.0.jar");

    // Register application master jar
    registerLocalResource(localResources, appId, fs, new Path(appMasterJar));

    // Register the WAR that will be deployed on Tomcat
    registerLocalResource(localResources, appId, fs, new Path(warFile.getAbsolutePath()));

    // Register Tomcat libraries
    for (File lib : tomcatLibraries.listFiles()) {
        registerLocalResource(localResources, appId, fs, new Path(lib.getAbsolutePath()));
    }
    File juli = new File(tomcatBinaries, "tomcat-juli.jar");
    if (juli.exists()) {
        registerLocalResource(localResources, appId, fs, new Path(juli.getAbsolutePath()));
    }
    amContainer.setLocalResources(localResources);

    // Setup master environment
    Map<String, String> env = new HashMap<>();
    final String TOMCAT_LIBS = fs.getHomeDirectory() + "/" + Constants.TOY_PREFIX + appId.toString();
    env.put(Constants.TOMCAT_LIBS, TOMCAT_LIBS);
    if (toyConfig.zookeeper != null) {
        env.put(Constants.ZOOKEEPER_QUORUM, toyConfig.zookeeper);
    } else {
        env.put(Constants.ZOOKEEPER_QUORUM, NetUtils.getHostname());
    }

    // 1. Compute classpath
    StringBuilder classPathEnv = new StringBuilder(ApplicationConstants.Environment.CLASSPATH.$())
            .append(File.pathSeparatorChar).append("./*");
    for (String c : configuration.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
        classPathEnv.append(File.pathSeparatorChar);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(File.pathSeparatorChar).append("./log4j.properties");

    // add the runtime classpath needed for tests to work
    if (configuration.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }
    env.put("CLASSPATH", classPathEnv.toString());
    env.put(Constants.WAR, warFile.getName());
    // For unit test with YarnMiniCluster
    env.put(YarnConfiguration.RM_SCHEDULER_ADDRESS, configuration.get(YarnConfiguration.RM_SCHEDULER_ADDRESS));
    amContainer.setEnvironment(env);

    // 1.2 Set constraint for the app master
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(32);
    appContext.setResource(capability);

    // 2. Compute app master cmd line
    Vector<CharSequence> vargs = new Vector<>(10);
    // Set java executable command
    vargs.add(ApplicationConstants.Environment.JAVA_HOME.$() + "/bin/java");
    // Set Xmx based on am memory size
    vargs.add("-Xmx32m");
    // Set class name
    vargs.add(TOYMaster.class.getCanonicalName());
    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");

    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }
    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<>();
    commands.add(command.toString());
    amContainer.setCommands(commands);
    appContext.setAMContainerSpec(amContainer);

    // 3. Setup security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials credentials = new Credentials();
        String tokenRenewer = configuration.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new Exception("Can't get Master Kerberos principal for the RM to use as renewer");
        }
        // For now, only getting tokens for the default file-system.
        final org.apache.hadoop.security.token.Token<?>[] tokens = fs.addDelegationTokens(tokenRenewer,
                credentials);
        if (tokens != null) {
            for (org.apache.hadoop.security.token.Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }

    appContext.setQueue("default");
    LOG.info("Submitting TOY application {} to ASM", appId.toString());
    yarn.submitApplication(appContext);

    // Monitor the application and exit if it is RUNNING
    monitorApplication(appId);
}
From source file:com.tripadvisor.hadoop.ExternalHDFSChecksumGenerator.java
License:Apache License
/**
 * Calculates the hdfs-style checksum for a local file in the same way that
 * hdfs does it, in a parallel fashion, on all of the blocks in hdfs.
 *
 * @param strPath
 * @param bytesPerCRC
 * @param lBlockSize
 * @return
 * @throws IOException
 */
public MD5MD5CRC32FileChecksum getLocalFilesystemHDFSStyleChecksum(String strPath, int bytesPerCRC,
        long lBlockSize) throws IOException {
    long lFileSize = 0;
    int iBlockCount = 0;
    DataOutputBuffer md5outDataBuffer = new DataOutputBuffer();
    DataChecksum chksm = DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512);
    InputStream in = null;
    MD5MD5CRC32FileChecksum returnChecksum = null;
    long crc_per_block = lBlockSize / bytesPerCRC;

    java.io.File file = new java.io.File(strPath);
    lFileSize = file.length();
    iBlockCount = (int) Math.ceil((double) lFileSize / (double) lBlockSize);

    if (file.isDirectory()) {
        throw new IOException("Cannot compute local hdfs hash, " + strPath + " is a directory! ");
    }

    try {
        in = new FileInputStream(file);
        long lTotalBytesRead = 0;

        for (int x = 0; x < iBlockCount; x++) {
            ByteArrayOutputStream ar_CRC_Bytes = new ByteArrayOutputStream();
            byte crc[] = new byte[4];
            byte buf[] = new byte[512];

            try {
                int bytesRead = 0;
                while ((bytesRead = in.read(buf)) > 0) {
                    lTotalBytesRead += bytesRead;
                    chksm.reset();
                    chksm.update(buf, 0, bytesRead);
                    chksm.writeValue(crc, 0, true);
                    ar_CRC_Bytes.write(crc);
                    if (lTotalBytesRead >= (x + 1) * lBlockSize) {
                        break;
                    }
                }

                DataInputStream inputStream = new DataInputStream(
                        new ByteArrayInputStream(ar_CRC_Bytes.toByteArray()));
                // this actually computes one block's MD5 -- run on the server (DataXceiver) side
                final MD5Hash md5_dataxceiver = MD5Hash.digest(inputStream);
                md5_dataxceiver.write(md5outDataBuffer);
            } catch (IOException e) {
                e.printStackTrace();
            } catch (Exception e) {
                e.printStackTrace();
            }
        }

        // this is in 0.19.0 style with the extra padding bug
        final MD5Hash md5_of_md5 = MD5Hash.digest(md5outDataBuffer.getData());
        returnChecksum = new MD5MD5CRC32FileChecksum(bytesPerCRC, crc_per_block, md5_of_md5);
    } catch (IOException e) {
        e.printStackTrace();
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        if (in != null) {
            in.close();
        }
    }
    return returnChecksum;
}
From source file:com.vertica.hadoop.VerticaConfiguration.java
License:Apache License
/**
 * Sets a collection of lists. Each list is passed to an input split and used
 * as arguments to the input query.
 *
 * @param segment_params
 * @throws IOException
 */
public void setInputParams(Collection<List<Object>> segment_params) throws IOException {
    String[] values = new String[segment_params.size()];
    int i = 0;
    for (List<Object> params : segment_params) {
        DataOutputBuffer out = new DataOutputBuffer();
        out.writeInt(params.size());
        for (Object obj : params) {
            int type = VerticaRecord.getType(obj);
            out.writeInt(type);
            VerticaRecord.write(obj, type, out);
        }
        // Hex-encode only the valid region; getData() returns the whole backing array.
        values[i++] = StringUtils.byteToHexString(out.getData(), 0, out.getLength());
    }
    conf.setStrings(QUERY_PARAMS_PROP, values);
}
From source file:com.yahoo.storm.yarn.StormAMRMClient.java
License:Open Source License
public void launchSupervisorOnContainer(Container container) throws IOException {
    // create a container launch context
    ContainerLaunchContext launchContext = Records.newRecord(ContainerLaunchContext.class);
    UserGroupInformation user = UserGroupInformation.getCurrentUser();
    try {
        Credentials credentials = user.getCredentials();
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        launchContext.setTokens(securityTokens);
    } catch (IOException e) {
        LOG.warn("Getting current user info failed when trying to launch the container", e);
    }

    // CLC: env
    Map<String, String> env = new HashMap<String, String>();
    env.put("STORM_LOG_DIR", ApplicationConstants.LOG_DIR_EXPANSION_VAR);
    launchContext.setEnvironment(env);

    // CLC: local resources include storm and conf
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    String storm_zip_path = (String) storm_conf.get("storm.zip.path");
    Path zip = new Path(storm_zip_path);
    FileSystem fs = FileSystem.get(hadoopConf);
    String vis = (String) storm_conf.get("storm.zip.visibility");
    if (vis.equals("PUBLIC")) {
        localResources.put("storm",
                Util.newYarnAppResource(fs, zip, LocalResourceType.ARCHIVE, LocalResourceVisibility.PUBLIC));
    } else if (vis.equals("PRIVATE")) {
        localResources.put("storm",
                Util.newYarnAppResource(fs, zip, LocalResourceType.ARCHIVE, LocalResourceVisibility.PRIVATE));
    } else if (vis.equals("APPLICATION")) {
        localResources.put("storm",
                Util.newYarnAppResource(fs, zip, LocalResourceType.ARCHIVE, LocalResourceVisibility.APPLICATION));
    }

    String appHome = Util.getApplicationHomeForId(appAttemptId.toString());
    Path confDst = Util.createConfigurationFileInFs(fs, appHome, this.storm_conf, this.hadoopConf);
    localResources.put("conf", Util.newYarnAppResource(fs, confDst));
    launchContext.setLocalResources(localResources);

    // CLC: command
    List<String> supervisorArgs = Util.buildSupervisorCommands(this.storm_conf);
    launchContext.setCommands(supervisorArgs);

    try {
        LOG.info("Use NMClient to launch supervisors in container. ");
        nmClient.startContainer(container, launchContext);
        String userShortName = user.getShortUserName();
        if (userShortName != null) {
            LOG.info("Supervisor log: http://" + container.getNodeHttpAddress() + "/node/containerlogs/"
                    + container.getId().toString() + "/" + userShortName + "/supervisor.log");
        }
    } catch (Exception e) {
        LOG.error("Caught an exception while trying to start a container", e);
        System.exit(-1);
    }
}
From source file:com.yahoo.storm.yarn.StormOnYarn.java
License:Open Source License
private void launchApp(String appName, String queue, int amMB, String storm_zip_location) throws Exception {
    LOG.debug("StormOnYarn:launchApp() ...");
    YarnClientApplication client_app = _yarn.createApplication();
    GetNewApplicationResponse app = client_app.getNewApplicationResponse();
    _appId = app.getApplicationId();
    LOG.debug("_appId:" + _appId);

    if (amMB > app.getMaximumResourceCapability().getMemory()) {
        // TODO need some sanity checks
        amMB = app.getMaximumResourceCapability().getMemory();
    }
    ApplicationSubmissionContext appContext = Records.newRecord(ApplicationSubmissionContext.class);
    appContext.setApplicationId(app.getApplicationId());
    appContext.setApplicationName(appName);
    appContext.setQueue(queue);

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    // Set local resources for the application master: local files or archives as needed.
    // In this scenario, the jar file for the application master is part of the local resources.
    LOG.info("Copy App Master jar from local filesystem and add to local environment");
    // Copy the application master jar to the filesystem and create a local
    // resource pointing to the destination jar path
    String appMasterJar = findContainingJar(MasterServer.class);
    FileSystem fs = FileSystem.get(_hadoopConf);
    Path src = new Path(appMasterJar);
    String appHome = Util.getApplicationHomeForId(_appId.toString());
    Path dst = new Path(fs.getHomeDirectory(), appHome + Path.SEPARATOR + "AppMaster.jar");
    fs.copyFromLocalFile(false, true, src, dst);
    localResources.put("AppMaster.jar", Util.newYarnAppResource(fs, dst));

    String stormVersion = Util.getStormVersion();
    Path zip;
    if (storm_zip_location != null) {
        zip = new Path(storm_zip_location);
    } else {
        zip = new Path("/lib/storm/" + stormVersion + "/storm.zip");
    }
    _stormConf.put("storm.zip.path", zip.makeQualified(fs).toUri().getPath());
    LocalResourceVisibility visibility = LocalResourceVisibility.PUBLIC;
    _stormConf.put("storm.zip.visibility", "PUBLIC");
    if (!Util.isPublic(fs, zip)) {
        visibility = LocalResourceVisibility.APPLICATION;
        _stormConf.put("storm.zip.visibility", "APPLICATION");
    }
    localResources.put("storm", Util.newYarnAppResource(fs, zip, LocalResourceType.ARCHIVE, visibility));

    Path confDst = Util.createConfigurationFileInFs(fs, appHome, _stormConf, _hadoopConf);
    // establish a symbolic link to conf directory
    localResources.put("conf", Util.newYarnAppResource(fs, confDst));

    // Setup security tokens
    Path[] paths = new Path[3];
    paths[0] = dst;
    paths[1] = zip;
    paths[2] = confDst;
    Credentials credentials = new Credentials();
    TokenCache.obtainTokensForNamenodes(credentials, paths, _hadoopConf);
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    // security tokens for HDFS distributed cache
    ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
    amContainer.setTokens(securityTokens);

    // Set local resource info into app master container launch context
    amContainer.setLocalResources(localResources);

    // Set the env variables to be setup in the env where the application master will be run
    LOG.info("Set the environment for the application master");
    Map<String, String> env = new HashMap<String, String>();
    // add the runtime classpath needed for tests to work
    Apps.addToEnvironment(env, Environment.CLASSPATH.name(), "./conf");
    Apps.addToEnvironment(env, Environment.CLASSPATH.name(), "./AppMaster.jar");

    // Make sure that AppMaster has access to all YARN JARs
    List<String> yarn_classpath_cmd = java.util.Arrays.asList("yarn", "classpath");
    ProcessBuilder pb = new ProcessBuilder(yarn_classpath_cmd).redirectError(Redirect.INHERIT);
    LOG.info("YARN CLASSPATH COMMAND = [" + yarn_classpath_cmd + "]");
    pb.environment().putAll(System.getenv());
    Process proc = pb.start();
    BufferedReader reader = new BufferedReader(new InputStreamReader(proc.getInputStream(), "UTF-8"));
    String line = "";
    String yarn_class_path = (String) _stormConf.get("storm.yarn.yarn_classpath");
    if (yarn_class_path == null) {
        StringBuilder yarn_class_path_builder = new StringBuilder();
        while ((line = reader.readLine()) != null) {
            yarn_class_path_builder.append(line);
        }
        yarn_class_path = yarn_class_path_builder.toString();
    }
    LOG.info("YARN CLASSPATH = [" + yarn_class_path + "]");
    proc.waitFor();
    reader.close();
    Apps.addToEnvironment(env, Environment.CLASSPATH.name(), yarn_class_path);

    String stormHomeInZip = Util.getStormHomeInZip(fs, zip, stormVersion);
    Apps.addToEnvironment(env, Environment.CLASSPATH.name(), "./storm/" + stormHomeInZip + "/*");
    Apps.addToEnvironment(env, Environment.CLASSPATH.name(), "./storm/" + stormHomeInZip + "/lib/*");

    String java_home = (String) _stormConf.get("storm.yarn.java_home");
    if (java_home == null)
        java_home = System.getenv("JAVA_HOME");
    if (java_home != null && !java_home.isEmpty())
        env.put("JAVA_HOME", java_home);
    LOG.info("Using JAVA_HOME = [" + env.get("JAVA_HOME") + "]");

    env.put("appJar", appMasterJar);
    env.put("appName", appName);
    env.put("appId", Integer.toString(_appId.getId()));
    env.put("STORM_LOG_DIR", ApplicationConstants.LOG_DIR_EXPANSION_VAR);
    amContainer.setEnvironment(env);

    // Set the necessary command to execute the application master
    Vector<String> vargs = new Vector<String>();
    // Set java executable command
    if (java_home != null && !java_home.isEmpty())
        vargs.add(env.get("JAVA_HOME") + "/bin/java");
    else
        vargs.add("java");
    vargs.add("-Dstorm.home=./storm/" + stormHomeInZip + "/");
    vargs.add("-Dlogfile.name=" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/master.log");
    // vargs.add("-verbose:class");
    vargs.add("com.yahoo.storm.yarn.MasterServer");
    // Redirect the AM's stdout and stderr into the container log dir
    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr");
    LOG.info("Setting up app master command:" + vargs);
    amContainer.setCommands(vargs);

    // Set up resource type requirements.
    // For now, only memory is supported, so we set memory requirements.
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(amMB);
    appContext.setResource(capability);
    appContext.setAMContainerSpec(amContainer);

    _yarn.submitApplication(appContext);
}
From source file:Compress.TestLZO.java
License:Open Source License
public static void main(String[] argv) throws IOException {
    System.out.println(System.getProperty("java.library.path"));

    Configuration conf = new Configuration();
    conf.setInt("io.compression.codec.lzo.buffersize", 64 * 1024);
    LzoCodec codec = new LzoCodec();
    codec.setConf(conf);

    // Compress num longs into a DataOutputBuffer
    OutputStream out = new DataOutputBuffer();
    CompressionOutputStream out2 = codec.createOutputStream(out);
    byte[] str2 = new byte[20];
    int num = 10000;
    for (long i = 0; i < num; i++) {
        Util.long2bytes(str2, i);
        out2.write(str2, 0, 8);
    }
    out2.finish();
    System.out.println("org len:" + num * 8 + ", compressed len:" + ((DataOutputBuffer) out).getLength());

    // Decompress and verify
    InputStream in = new DataInputBuffer();
    ((DataInputBuffer) in).reset(((DataOutputBuffer) out).getData(), 0, ((DataOutputBuffer) out).getLength());
    CompressionInputStream in2 = codec.createInputStream(in);
    byte[] buf = new byte[100];
    for (long i = 0; i < num; i++) {
        int count = in2.read(buf, 0, 8);
        if (count > 0) {
            long value = Util.bytes2long(buf, 0, 8);
            if (value != i) {
                System.out.println(i + ",count:" + count + ",value:" + value);
            } else if (i > (num - 20)) {
                System.out.println(i + ",value:" + value);
            }
        } else {
            System.out.println("count:" + count + ", string " + i);
            break;
        }
    }
    in2.close();

    System.out.println("test compress array...");
    OutputStream out3 = new DataOutputBuffer();
    CompressionOutputStream out4 = codec.createOutputStream(out3);
    for (long i = 0; i < num; i++) {
        Util.long2bytes(str2, i);
        out4.write(str2, 0, 8);
    }
    out4.finish();
    System.out.println("org len:" + num * 8 + ", compressed len:" + ((DataOutputBuffer) out3).getLength());

    InputStream in3 = new DataInputBuffer();
    ((DataInputBuffer) in3).reset(((DataOutputBuffer) out3).getData(), 0, ((DataOutputBuffer) out3).getLength());
    CompressionInputStream in4 = codec.createInputStream(in3);
    for (long i = 0; i < num; i++) {
        int count = in4.read(buf, 0, 8);
        if (count > 0) {
            long value = Util.bytes2long(buf, 0, 8);
            if (value != i) {
                System.out.println(i + ",count:" + count + ",value:" + value);
            }
            if (i > (num - 20)) {
                System.out.println(i + ",value:" + value);
            }
        } else {
            System.out.println("count:" + count + ", string " + i);
            break;
        }
    }
    in4.close();
}
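The same compress-and-verify round trip in miniature. This sketch substitutes the built-in GzipCodec for LzoCodec so it runs without native libraries, and replaces the project's Util byte helpers with plain DataOutputStream/DataInputStream; it is an illustration, not part of the original test:

import java.io.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.util.ReflectionUtils;

public class CodecRoundTrip {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);

        // Compress 10,000 longs into a DataOutputBuffer.
        DataOutputBuffer compressed = new DataOutputBuffer();
        DataOutputStream out = new DataOutputStream(codec.createOutputStream(compressed));
        for (long i = 0; i < 10_000; i++) {
            out.writeLong(i);
        }
        out.close();
        System.out.println("raw: " + 10_000 * 8 + " bytes, compressed: " + compressed.getLength());

        // Decompress from the valid region of the backing array and verify.
        DataInputBuffer inBuf = new DataInputBuffer();
        inBuf.reset(compressed.getData(), 0, compressed.getLength());
        DataInputStream in = new DataInputStream(codec.createInputStream(inBuf));
        for (long i = 0; i < 10_000; i++) {
            if (in.readLong() != i) {
                throw new IllegalStateException("mismatch at " + i);
            }
        }
        in.close();
    }
}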
From source file:cosmos.records.impl.MapRecord.java
License:Apache License
@Override
public Value toValue() throws IOException {
    DataOutputBuffer buf = new DataOutputBuffer();
    this.write(buf);
    buf.close();

    // Copy only the valid bytes out of the (typically larger) backing array.
    byte[] bytes = new byte[buf.getLength()];
    System.arraycopy(buf.getData(), 0, bytes, 0, buf.getLength());
    return new Value(bytes);
}
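The copy in toValue() is the general recipe for turning any Writable into a right-sized byte array. A generic helper along those lines (hypothetical; the class and method names are ours, not part of the cosmos codebase):

import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Writable;

public final class Writables {
    private Writables() {}

    /** Serializes a Writable and returns a byte[] trimmed to the valid length. */
    public static byte[] toBytes(Writable w) throws IOException {
        DataOutputBuffer buf = new DataOutputBuffer();
        w.write(buf);
        return Arrays.copyOf(buf.getData(), buf.getLength());
    }
}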
From source file:cosmos.results.MultimapQueryResultTest.java
License:Apache License
@Test
public void identityWritableEquality() throws Exception {
    Multimap<Column, RecordValue<?>> data = HashMultimap.create();
    data.put(Column.create("TEXT"), RecordValue.create("foo", VIZ));
    data.put(Column.create("TEXT"), RecordValue.create("bar", VIZ));

    MultimapRecord mqr = new MultimapRecord(data, "1", VIZ);

    DataOutputBuffer out = new DataOutputBuffer();
    mqr.write(out);

    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());

    MultimapRecord mqr2 = MultimapRecord.recreate(in);
    Assert.assertEquals(mqr, mqr2);
}