List of usage examples for org.apache.hadoop.fs.FileSystem.copyFromLocalFile
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, Path dst) throws IOException
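Before the examples collected below, a minimal sketch of the call itself may help. The class name and both paths are illustrative placeholders, not taken from any of the source files that follow; with delSrc = false the local source file is kept, and with overwrite = true an existing destination is replaced.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CopyFromLocalFileExample {
    public static void main(String[] args) throws IOException {
        // Resolves to whatever fs.defaultFS the Configuration provides.
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        Path src = new Path("/tmp/local-input.txt");    // hypothetical local path
        Path dst = new Path("/user/example/input.txt"); // hypothetical HDFS path

        // delSrc = false: keep the local copy; overwrite = true: replace dst if present.
        fs.copyFromLocalFile(false, true, src, dst);
    }
}

The same pattern recurs throughout the examples below, usually followed by a getFileStatus(dst) call when the copied file is registered as a YARN local resource.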
From source file:org.etosha.cumulusonyarn.CumulusRDFRunner.java
License:Apache License
/**
 * Main run function for the client.
 *
 * @return true if application completed successfully
 * @throws IOException
 * @throws YarnException
 */
public boolean run() throws IOException, YarnException {
    LOG.info("Running Client");
    yarnClient.start();

    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    LOG.info("Got Cluster metric info from ASM" + ", numNodeManagers="
            + clusterMetrics.getNumNodeManagers());

    List<NodeReport> clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    LOG.info("Got Cluster node info from ASM");
    for (NodeReport node : clusterNodeReports) {
        LOG.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId()
                + ", nodeAddress" + node.getHttpAddress() + ", nodeRackName"
                + node.getRackName() + ", nodeNumContainers" + node.getNumContainers());
    }

    QueueInfo queueInfo = yarnClient.getQueueInfo(this.amQueue);
    LOG.info("Queue info" + ", queueName=" + queueInfo.getQueueName()
            + ", queueCurrentCapacity=" + queueInfo.getCurrentCapacity()
            + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity()
            + ", queueApplicationCount=" + queueInfo.getApplications().size()
            + ", queueChildQueueCount=" + queueInfo.getChildQueues().size());

    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    for (QueueUserACLInfo aclInfo : listAclInfo) {
        for (QueueACL userAcl : aclInfo.getUserAcls()) {
            LOG.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName()
                    + ", userAcl=" + userAcl.name());
        }
    }

    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();

    int maxMem = appResponse.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capability of resources in this cluster " + maxMem);
    if (amMemory > maxMem) {
        LOG.info("AM memory specified above max threshold of cluster. Using max value."
                + ", specified=" + amMemory + ", max=" + maxMem);
        amMemory = maxMem;
    }

    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();
    appContext.setApplicationName(appName);

    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    LOG.info("Copy App Master jar from local filesystem and add to local environment");
    // Copy the application master jar to HDFS and register it as a local resource.
    FileSystem fs = FileSystem.get(conf);
    Path src = new Path(appJar);
    String pathSuffix = appName + File.separator + appId.getId() + File.separator
            + CumulusConstants.JBOSS_ON_YARN_APP;
    Path dst = new Path(fs.getHomeDirectory(), pathSuffix);
    jbossAppUri = dst.toUri().toString();
    fs.copyFromLocalFile(false, true, src, dst);
    FileStatus destStatus = fs.getFileStatus(dst);

    LocalResource amJarRsrc = Records.newRecord(LocalResource.class);
    amJarRsrc.setType(LocalResourceType.FILE);
    amJarRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
    amJarRsrc.setResource(ConverterUtils.getYarnUrlFromPath(dst));
    amJarRsrc.setTimestamp(destStatus.getModificationTime());
    amJarRsrc.setSize(destStatus.getLen());
    localResources.put(CumulusConstants.JBOSS_ON_YARN_APP, amJarRsrc);

    if (!log4jPropFile.isEmpty()) {
        Path log4jSrc = new Path(log4jPropFile);
        Path log4jDst = new Path(fs.getHomeDirectory(), "log4j.props");
        fs.copyFromLocalFile(false, true, log4jSrc, log4jDst);
        FileStatus log4jFileStatus = fs.getFileStatus(log4jDst);
        LocalResource log4jRsrc = Records.newRecord(LocalResource.class);
        log4jRsrc.setType(LocalResourceType.FILE);
        log4jRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
        log4jRsrc.setResource(ConverterUtils.getYarnUrlFromURI(log4jDst.toUri()));
        log4jRsrc.setTimestamp(log4jFileStatus.getModificationTime());
        log4jRsrc.setSize(log4jFileStatus.getLen());
        localResources.put("log4j.properties", log4jRsrc);
    }

    amContainer.setLocalResources(localResources);

    LOG.info("Set the environment for the application master");
    Map<String, String> env = new HashMap<String, String>();
    StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$())
            .append(File.pathSeparatorChar).append("./*");
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
        classPathEnv.append(File.pathSeparatorChar);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(File.pathSeparatorChar).append("./log4j.properties");
    if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }
    env.put("CLASSPATH", classPathEnv.toString());
    amContainer.setEnvironment(env);

    Vector<CharSequence> vargs = new Vector<CharSequence>(30);
    LOG.info("Setting up app master command");
    vargs.add(Environment.JAVA_HOME.$() + "/bin/java");
    vargs.add("-Xmx" + amMemory + "m");
    vargs.add(appMasterMainClass);
    vargs.add("--container_memory " + String.valueOf(containerMemory));
    vargs.add("--num_containers " + String.valueOf(numContainers));
    vargs.add("--priority " + String.valueOf(shellCmdPriority));
    vargs.add("--admin_user " + adminUser);
    vargs.add("--admin_password " + adminPassword);
    vargs.add("--jar " + jbossAppUri);
    if (debugFlag) {
        vargs.add("--debug");
    }
    vargs.add("1>" + CumulusConstants.JBOSS_CONTAINER_LOG_DIR + "/JBossApplicationMaster.stdout");
    vargs.add("2>" + CumulusConstants.JBOSS_CONTAINER_LOG_DIR + "/JBossApplicationMaster.stderr");

    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }
    LOG.info("Completed setting up app master command " + command.toString());

    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());
    amContainer.setCommands(commands);

    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(amMemory);
    appContext.setResource(capability);
    appContext.setAMContainerSpec(amContainer);

    Priority pri = Records.newRecord(Priority.class);
    pri.setPriority(amPriority);
    appContext.setPriority(pri);
    appContext.setQueue(amQueue);

    LOG.info("Submitting the application to ASM");
    yarnClient.submitApplication(appContext);
    return monitorApplication(appId);
}
From source file:org.godhuli.rhipe.FileUtils.java
License:Apache License
public void copyFromLocalFile(String[] src, String dst, boolean overwrite) throws IOException {
    Path dstPath = new Path(dst);
    FileSystem dstFs = dstPath.getFileSystem(cfg);
    Path[] srcp = new Path[src.length];
    for (int i = 0; i < src.length; i++) {
        srcp[i] = new Path(src[i]);
    }
    dstFs.copyFromLocalFile(false, overwrite, srcp, dstPath);
}
From source file:org.goldenorb.OrbRunner.java
License:Apache License
/**
 * Distribute files through HDFS.
 *
 * @param orbConf
 *          OrbConfiguration containing the file paths to be distributed
 * @throws IOException
 */
public void distributeFiles(OrbConfiguration orbConf) throws IOException {
    try {
        FileSystem fs = FileSystem.get(orbConf);
        if (orbConf.getDistributedFilePaths() != null) {
            String[] filePaths = orbConf.getDistributedFilePaths().split(",");
            for (String localPath : filePaths) {
                if (!(localPath = localPath.trim()).equals("")) {
                    Path hdfsPath = createHDFSPath(localPath);
                    logger.info("Adding " + localPath + " to HDFS at " + hdfsPath.toString());
                    fs.copyFromLocalFile(false, true, new Path(localPath), hdfsPath);
                    orbConf.addHDFSDistributedFile(hdfsPath.toString());
                }
            }
        }
    } catch (IOException e) {
        logger.error("EXCEPTION: Error adding files to HDFS.");
        logger.error(e.getMessage());
        throw e;
    }
}
From source file:org.moya.core.yarn.Client.java
License:Apache License
/**
 * Main run function for the client.
 *
 * @return true if application completed successfully
 * @throws IOException
 * @throws YarnException
 */
public boolean run() throws IOException, YarnException {
    LOG.info("Running Client");
    yarnClient.start();

    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    LOG.info("Got Cluster metric info from ASM" + ", numNodeManagers="
            + clusterMetrics.getNumNodeManagers());

    List<NodeReport> clusterNodeReports = yarnClient.getNodeReports();
    LOG.info("Got Cluster node info from ASM");
    for (NodeReport node : clusterNodeReports) {
        LOG.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId()
                + ", nodeAddress" + node.getHttpAddress() + ", nodeRackName"
                + node.getRackName() + ", nodeNumContainers" + node.getNumContainers());
    }

    QueueInfo queueInfo = yarnClient.getQueueInfo(this.amQueue);
    LOG.info("Queue info" + ", queueName=" + queueInfo.getQueueName()
            + ", queueCurrentCapacity=" + queueInfo.getCurrentCapacity()
            + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity()
            + ", queueApplicationCount=" + queueInfo.getApplications().size()
            + ", queueChildQueueCount=" + queueInfo.getChildQueues().size());

    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    for (QueueUserACLInfo aclInfo : listAclInfo) {
        for (QueueACL userAcl : aclInfo.getUserAcls()) {
            LOG.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName()
                    + ", userAcl=" + userAcl.name());
        }
    }

    // Get a new application id
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();

    // TODO get min/max resource capabilities from RM and change memory ask if needed.
    // If we do not have min/max, we may not be able to correctly request the required
    // resources from the RM for the app master. The memory ask has to be a multiple of
    // min and less than max.
    // Dump out information about cluster capability as seen by the resource manager.
    int maxMem = appResponse.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capability of resources in this cluster " + maxMem);

    // A resource ask cannot exceed the max.
    if (amMemory > maxMem) {
        LOG.info("AM memory specified above max threshold of cluster. Using max value."
                + ", specified=" + amMemory + ", max=" + maxMem);
        amMemory = maxMem;
    }

    // Set the application name
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();
    appContext.setApplicationName(appName);

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    // Set local resources for the application master: local files or archives as needed.
    // In this scenario, the jar file for the application master is part of the local resources.
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    LOG.info("Copy App Master jar from local filesystem and add to local environment");
    // Copy the application master jar to the filesystem and create a local resource
    // pointing to the destination jar path
    FileSystem fs = FileSystem.get(conf);
    Path src = new Path(appMasterJar);
    String pathSuffix = appName + "/" + appId.getId() + "/AppMaster.jar";
    Path dst = new Path(fs.getHomeDirectory(), pathSuffix);
    fs.copyFromLocalFile(false, true, src, dst);
    FileStatus destStatus = fs.getFileStatus(dst);
    LocalResource amJarRsrc = Records.newRecord(LocalResource.class);

    // Set the type of resource - file or archive. Archives are untarred at the
    // destination; we don't need the jar file to be untarred.
    amJarRsrc.setType(LocalResourceType.FILE);
    // Set visibility of the resource to the most private option
    amJarRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
    // Set the resource to be copied over
    amJarRsrc.setResource(ConverterUtils.getYarnUrlFromPath(dst));
    // Set timestamp and length of file so that the framework can do basic sanity
    // checks for the local resource after it has been copied over, to ensure it is
    // the same resource the client intended to use with the application
    amJarRsrc.setTimestamp(destStatus.getModificationTime());
    amJarRsrc.setSize(destStatus.getLen());
    localResources.put("AppMaster.jar", amJarRsrc);

    // Setup App Master constants: record the jar's location, length, and timestamp
    // so we can add the jar to the App Master container path
    String amJarLocation = dst.toUri().toString();
    FileStatus shellFileStatus = fs.getFileStatus(dst);
    long amJarLen = shellFileStatus.getLen();
    long amJarTimestamp = shellFileStatus.getModificationTime();

    // Add the libs that will be needed. Keep it all archived for now, so add it as a file.
    src = new Path(localLibJar);
    pathSuffix = appName + "/" + appId.getId() + "/Runnable.jar";
    dst = new Path(fs.getHomeDirectory(), pathSuffix);
    fs.copyFromLocalFile(false, true, src, dst);
    destStatus = fs.getFileStatus(dst);
    LocalResource libsJarRsrc = Records.newRecord(LocalResource.class);
    libsJarRsrc.setType(LocalResourceType.FILE);
    libsJarRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
    libsJarRsrc.setResource(ConverterUtils.getYarnUrlFromPath(dst));
    libsJarRsrc.setTimestamp(destStatus.getModificationTime());
    localResources.put("Runnable.jar", libsJarRsrc);

    // Setup libs constants, likewise for the App Master container path
    String libsLocation = dst.toUri().toString();
    FileStatus libsFileStatus = fs.getFileStatus(dst);
    long libsLen = libsFileStatus.getLen();
    long libsTimestamp = libsFileStatus.getModificationTime();

    // Set the log4j properties if needed
    if (!log4jPropFile.isEmpty()) {
        Path log4jSrc = new Path(log4jPropFile);
        Path log4jDst = new Path(fs.getHomeDirectory(), "log4j.props");
        fs.copyFromLocalFile(false, true, log4jSrc, log4jDst);
        FileStatus log4jFileStatus = fs.getFileStatus(log4jDst);
        LocalResource log4jRsrc = Records.newRecord(LocalResource.class);
        log4jRsrc.setType(LocalResourceType.FILE);
        log4jRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
        log4jRsrc.setResource(ConverterUtils.getYarnUrlFromURI(log4jDst.toUri()));
        log4jRsrc.setTimestamp(log4jFileStatus.getModificationTime());
        log4jRsrc.setSize(log4jFileStatus.getLen());
        localResources.put("log4j.properties", log4jRsrc);
    }

    // Set local resource info into app master container launch context
    amContainer.setLocalResources(localResources);

    // Set the env variables to be setup in the env where the application master will run
    LOG.info("Set the environment for the application master");
    Map<String, String> env = new HashMap<String, String>();

    // Put the AM jar and MOYA Runnable info into the env. Using this info, the
    // application master will create the correct local resources for the eventual
    // containers that will be launched to execute the shell scripts.
    env.put(MConstants.APPLICATIONMASTERJARLOCATION, amJarLocation);
    env.put(MConstants.APPLICATIONMASTERJARTIMESTAMP, Long.toString(amJarTimestamp));
    env.put(MConstants.APPLICATIONMASTERJARLEN, Long.toString(amJarLen));
    env.put(MConstants.LIBSLOCATION, libsLocation);
    env.put(MConstants.LIBSTIMESTAMP, Long.toString(libsTimestamp));
    env.put(MConstants.LIBSLEN, Long.toString(libsLen));
    env.put(MConstants.ZOOKEEPERHOSTS, ZKHosts);

    // Add AppMaster.jar location to classpath. At some point we should not be required
    // to add the hadoop-specific classpaths to the env; it should be provided out of
    // the box. For now, set all required classpaths, including the classpath to "."
    // for the application jar.
    StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$())
            .append(File.pathSeparatorChar).append("./*");
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
        classPathEnv.append(File.pathSeparatorChar);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(File.pathSeparatorChar).append("./log4j.properties");

    // Add the runtime classpath needed for tests to work
    if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }
    env.put("CLASSPATH", classPathEnv.toString());
    amContainer.setEnvironment(env);

    // Set the necessary command to execute the application master
    Vector<CharSequence> vargs = new Vector<CharSequence>(30);

    // Set java executable command
    LOG.info("Setting up app master command");
    vargs.add(Environment.JAVA_HOME.$() + "/bin/java");
    // Set Xmx based on am memory size
    vargs.add("-Xmx" + amMemory + "m");
    // Set class name
    vargs.add(appMasterMainClass);
    // Set params for Application Master
    vargs.add("--container_memory " + String.valueOf(containerMemory));
    vargs.add("--num_containers " + String.valueOf(numContainers));
    vargs.add("--priority " + String.valueOf(moyaPriority));
    if (!localLibJar.isEmpty()) {
        vargs.add("--lib " + localLibJar);
    }
    if (debugFlag) {
        vargs.add("--debug");
    }
    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");

    // Get final command
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }
    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());
    amContainer.setCommands(commands);

    // Set up resource type requirements.
    // For now, only memory is supported, so we set memory requirements.
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(amMemory);
    appContext.setResource(capability);

    // Service data is a binary blob that can be passed to the application.
    // Not needed in this scenario.
    // amContainer.setServiceData(serviceData);

    // The following are not required for launching an application master:
    // amContainer.setContainerId(containerId);
    appContext.setAMContainerSpec(amContainer);

    // Set the priority for the application master
    Priority pri = Records.newRecord(Priority.class);
    // TODO - what is the range for priority? how to decide?
    pri.setPriority(amPriority);
    appContext.setPriority(pri);

    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue(amQueue);

    // Submit the application to the applications manager. Either a valid response
    // object is returned on success, or an exception is thrown to denote failure;
    // the response itself is ignored.
    LOG.info("Submitting application to ASM");
    yarnClient.submitApplication(appContext);

    // TODO: try submitting the same request again; handle app submission failure?

    // Monitor the application
    return monitorApplication(appId);
}
From source file:org.mrgeo.hdfs.utils.HadoopFileUtils.java
License:Apache License
public static void copyToHdfs(final Path fromDir, final Path toDir, final String fileName)
        throws IOException {
    final FileSystem fs = getFileSystem(toDir);
    fs.mkdirs(toDir);
    fs.copyFromLocalFile(false, true, new Path(fromDir, fileName), new Path(toDir, fileName));
}
From source file:org.mrgeo.hdfs.utils.HadoopFileUtils.java
License:Apache License
@SuppressWarnings("squid:S2095") // hadoop FileSystem cannot be closed, or else subsequent uses will fail public static void copyToHdfs(final String fromDir, final String toDir) throws IOException { final Path toPath = new Path(toDir); final Path fromPath = new Path(fromDir); final FileSystem fs = HadoopFileUtils.getFileSystem(toPath); fs.mkdirs(toPath);//from w w w. j a v a 2s.co m fs.copyFromLocalFile(false, true, fromPath, toPath); }
From source file:org.mrgeo.resources.wms.WmsGeneratorTestAbstract.java
License:Apache License
protected static void copyInputData() throws IOException {
    final FileSystem fileSystem = HadoopFileUtils.getFileSystem(inputHdfs);

    Properties mrgeoProperties = MrGeoProperties.getInstance();
    mrgeoProperties.put(MrGeoConstants.MRGEO_COMMON_HOME, inputHdfs.toString());
    mrgeoProperties.put(MrGeoConstants.MRGEO_HDFS_IMAGE, inputHdfs.toString());
    mrgeoProperties.put(MrGeoConstants.MRGEO_HDFS_COLORSCALE, inputHdfs.toString());
    mrgeoProperties.put("base.path", inputHdfs.toString());
    // WmsGenerator.setBasePath(inputHdfs);
    // WmsGenerator.setColorScaleBasePath(inputHdfs);

    // regular data with pyramids built
    fileSystem.copyFromLocalFile(false, true, new Path(input, "IslandsElevation-v2"), inputHdfs);
    // set up the system color scale
    fileSystem.copyFromLocalFile(false, true, new Path(input, "rainbow.xml"),
            new Path(inputHdfs, "Default.xml"));
    // copy a custom color scale
    fileSystem.copyFromLocalFile(false, true, new Path(input, "IslandsElevation-v2"),
            new Path(inputHdfs, "IslandsElevation-v2-color-scale"));
    fileSystem.copyFromLocalFile(false, true, new Path(input, "brewer-green-log.xml"),
            new Path(inputHdfs, "IslandsElevation-v2-color-scale/ColorScale.xml"));

    // same data as above with only the highest res image; metadata file accurately
    // represents directory contents
    fileSystem.copyFromLocalFile(false, true, new Path(input, "IslandsElevation-v2-no-pyramid"),
            inputHdfs);

    // no pyramid data with a metadata file showing the pyramid contains all zoom levels,
    // but it actually only contains the highest res image
    fileSystem.copyFromLocalFile(false, true, new Path(input, "IslandsElevation-v2-no-pyramid"),
            new Path(inputHdfs, "IslandsElevation-v2-no-pyramid-extra-metadata"));
    fileSystem.copyFromLocalFile(false, true, new Path(input, "metadata-no-pyramid-extra"),
            new Path(inputHdfs, "IslandsElevation-v2-no-pyramid-extra-metadata/metadata"));

    // no statistics have been calculated for this pyramid
    fileSystem.copyFromLocalFile(false, true, new Path(input, "IslandsElevation-v2"),
            new Path(inputHdfs, "IslandsElevation-v2-no-stats"));
    fileSystem.copyFromLocalFile(false, true, new Path(input, "metadata-no-stats"),
            new Path(inputHdfs, "IslandsElevation-v2-no-stats/metadata"));

    HadoopFileUtils.copyToHdfs(new Path(Defs.INPUT), inputHdfs, imageStretch);
    imageStretchUnqualified = HadoopFileUtils.unqualifyPath(new Path(inputHdfs, imageStretch)).toString();
    HadoopFileUtils.copyToHdfs(new Path(Defs.INPUT), inputHdfs, imageStretch2);
    imageStretch2Unqualified = HadoopFileUtils.unqualifyPath(new Path(inputHdfs, imageStretch2)).toString();
    HadoopFileUtils.copyToHdfs(new Path(Defs.INPUT), inputHdfs, small3band);
    small3bandUnqualified = HadoopFileUtils.unqualifyPath(new Path(inputHdfs, small3band)).toString();
}
From source file:org.mrgeo.services.wms.WmsGeneratorTestAbstract.java
License:Apache License
protected static void copyInputData() throws IOException {
    final FileSystem fileSystem = HadoopFileUtils.getFileSystem(inputHdfs);

    Properties mrgeoProperties = MrGeoProperties.getInstance();
    mrgeoProperties.put("MRGEO_HOME", inputHdfs.toString());
    mrgeoProperties.put(HadoopUtils.IMAGE_BASE, inputHdfs.toString());
    mrgeoProperties.put(HadoopUtils.COLOR_SCALE_BASE, inputHdfs.toString());
    mrgeoProperties.put("base.path", inputHdfs.toString());
    // WmsGenerator.setBasePath(inputHdfs);
    // WmsGenerator.setColorScaleBasePath(inputHdfs);

    // regular data with pyramids built
    fileSystem.copyFromLocalFile(false, true, new Path(input, "IslandsElevation-v2"), inputHdfs);
    // set up the system color scale
    fileSystem.copyFromLocalFile(false, true, new Path(input, "rainbow.xml"),
            new Path(inputHdfs, "Default.xml"));
    // copy a custom color scale
    fileSystem.copyFromLocalFile(false, true, new Path(input, "IslandsElevation-v2"),
            new Path(inputHdfs, "IslandsElevation-v2-color-scale"));
    fileSystem.copyFromLocalFile(false, true, new Path(input, "brewer-green-log.xml"),
            new Path(inputHdfs, "IslandsElevation-v2-color-scale/ColorScale.xml"));

    // same data as above with only the highest res image; metadata file accurately
    // represents directory contents
    fileSystem.copyFromLocalFile(false, true, new Path(input, "IslandsElevation-v2-no-pyramid"),
            inputHdfs);

    // no pyramid data with a metadata file showing the pyramid contains all zoom levels,
    // but it actually only contains the highest res image
    fileSystem.copyFromLocalFile(false, true, new Path(input, "IslandsElevation-v2-no-pyramid"),
            new Path(inputHdfs, "IslandsElevation-v2-no-pyramid-extra-metadata"));
    fileSystem.copyFromLocalFile(false, true, new Path(input, "metadata-no-pyramid-extra"),
            new Path(inputHdfs, "IslandsElevation-v2-no-pyramid-extra-metadata/metadata"));

    // no statistics have been calculated for this pyramid
    fileSystem.copyFromLocalFile(false, true, new Path(input, "IslandsElevation-v2"),
            new Path(inputHdfs, "IslandsElevation-v2-no-stats"));
    fileSystem.copyFromLocalFile(false, true, new Path(input, "metadata-no-stats"),
            new Path(inputHdfs, "IslandsElevation-v2-no-stats/metadata"));

    HadoopFileUtils.copyToHdfs(new Path(Defs.INPUT), inputHdfs, imageStretch);
    imageStretchUnqualified = HadoopFileUtils.unqualifyPath(new Path(inputHdfs, imageStretch)).toString();
    HadoopFileUtils.copyToHdfs(new Path(Defs.INPUT), inputHdfs, imageStretch2);
    imageStretch2Unqualified = HadoopFileUtils.unqualifyPath(new Path(inputHdfs, imageStretch2)).toString();
    HadoopFileUtils.copyToHdfs(new Path(Defs.INPUT), inputHdfs, small3band);
    small3bandUnqualified = HadoopFileUtils.unqualifyPath(new Path(inputHdfs, small3band)).toString();
}
From source file:org.openflamingo.collector.handler.LocalToHdfsHandler.java
License:Apache License
@Override
public void execute() throws Exception {
    // Move incoming files into the working directory.
    copyToWorkingDirectory();

    // Collect the files that are now in the working directory.
    List<FileStatus> files = getFilesFromWorkingDirectory();
    if (files.size() < 1) {
        logger.info("No files found in the working directory.");
        return;
    }

    // Copy each file in the working directory to HDFS.
    Iterator<FileStatus> iterator = files.iterator();
    while (iterator.hasNext()) {
        FileStatus workingFile = iterator.next();
        FileSystem workingFS = getFileSystem(workingFile.getPath());

        // Rename the working file to mark it as being processed.
        String processingFileName = workingFile.getPath().getName() + PROCESSING_FILE_QUALIFIER;
        String workingDirectory = correctPath(jobContext.getValue(local.getWorkingDirectory().trim()));
        Path processingFile = new Path(workingDirectory, processingFileName);
        boolean renamed = workingFS.rename(workingFile.getPath(), processingFile);
        logger.debug("Renamed working file '{}' to '{}'.", workingFile.getPath(), processingFile);

        if (renamed) {
            // Get the target HDFS settings from the egress policy.
            ToHdfs hdfs = job.getPolicy().getEgress().getToHdfs();

            // Get a FileSystem for the target HDFS cluster.
            String cluster = jobContext.getValue(hdfs.getCluster());
            Configuration configuration = getConfiguration(jobContext.getModel(), cluster);
            FileSystem targetFS = FileSystem.get(configuration);
            logger.info("Using Hadoop cluster '{}' as the target HDFS.", cluster);

            // Resolve the HDFS target and staging directories.
            String targetDirectory = jobContext.getValue(hdfs.getTargetPath());
            String stagingDirectory = jobContext.getValue(hdfs.getStagingPath());
            logger.info("HDFS target directory is '{}', staging directory is '{}'.",
                    targetDirectory, stagingDirectory);

            // Build a hash to generate a unique staging file name.
            int hash = Math.abs((workingFile.getPath().toString() + processingFile.toString()).hashCode())
                    + Integer.parseInt(JVMIDUtils.generateUUID());
            if (hash < 0)
                hash = -hash;
            logger.debug("Staging directory '{}', file '{}', hash '{}'.",
                    new Object[] { stagingDirectory, processingFile.getName(), hash });

            // Copy the file to the staging directory first.
            // FIXME
            Path stagingFile = new Path(stagingDirectory,
                    DateUtils.parseDate(jobContext.getStartDate(), "yyyyMMddHHmmss") + "_"
                            + String.valueOf(hash));
            try {
                targetFS.copyFromLocalFile(false, false, processingFile, stagingFile);
            } catch (Exception ex) {
                logger.warn("Failed to copy file '{}' to staging file '{}'.", processingFile,
                        stagingFile);
                copyToErrorDirectory(workingFile);
                continue;
            }
            logger.info("Copied file '{}' to staging file '{}'.", processingFile, stagingFile);

            // Move the staged file to its final target location.
            Path targetFile = new Path(targetDirectory, workingFile.getPath().getName());
            targetFS.rename(stagingFile, targetFile);
            logger.info("Moved staging file '{}' to target file '{}'.", stagingFile, targetFile);

            // Move the processed file to the completed directory.
            copyToCompleteDirectory(workingFS.getFileStatus(processingFile));
        }
    }
}
From source file:org.openflamingo.uploader.handler.LocalToHdfsHandler.java
License:Open Source License
@Override
public void execute() throws Exception {
    // Move incoming files into the working directory.
    copyToWorkingDirectory();

    // Collect the files that are now in the working directory.
    List<FileStatus> files = getFilesFromWorkingDirectory();
    if (files.size() < 1) {
        jobLogger.info("No files found in the working directory.");
        return;
    }

    // Copy each file in the working directory to HDFS.
    Iterator<FileStatus> iterator = files.iterator();
    while (iterator.hasNext()) {
        FileStatus workingFile = iterator.next();
        FileSystem workingFS = getFileSystem(workingFile.getPath());

        // Rename the working file to mark it as being processed.
        String processingFileName = workingFile.getPath().getName() + PROCESSING_FILE_QUALIFIER;
        String workingDirectory = correctPath(jobContext.getValue(local.getWorkingDirectory()));
        Path processingFile = new Path(workingDirectory, processingFileName);
        boolean renamed = workingFS.rename(workingFile.getPath(), processingFile);
        jobLogger.debug("Renamed working file '{}' to '{}'.", workingFile.getPath(), processingFile);

        if (renamed) {
            // Get the target HDFS settings from the outgress policy.
            Hdfs hdfs = job.getPolicy().getOutgress().getHdfs();

            // Get a FileSystem for the target HDFS cluster.
            String cluster = jobContext.getValue(hdfs.getCluster());
            Configuration configuration = getConfiguration(jobContext.getModel(), cluster);
            FileSystem targetFS = FileSystem.get(configuration);
            jobLogger.info("Using Hadoop cluster '{}' as the target HDFS.", cluster);

            // Resolve the HDFS target and staging directories.
            String targetDirectory = jobContext.getValue(hdfs.getTargetPath());
            String stagingDirectory = jobContext.getValue(hdfs.getStagingPath());
            jobLogger.info("HDFS target directory is '{}', staging directory is '{}'.",
                    targetDirectory, stagingDirectory);

            // Build a hash to generate a unique staging file name.
            int hash = Math.abs((workingFile.getPath().toString() + processingFile.toString()).hashCode())
                    + Integer.parseInt(JVMIDUtils.generateUUID());
            if (hash < 0)
                hash = -hash;
            jobLogger.debug("Staging directory '{}', file '{}', hash '{}'.",
                    new Object[] { stagingDirectory, processingFile.getName(), hash });

            // Copy the file to the staging directory first.
            // FIXME
            Path stagingFile = new Path(stagingDirectory,
                    DateUtils.parseDate(jobContext.getStartDate(), "yyyyMMddHHmmss") + "_"
                            + String.valueOf(hash));
            try {
                targetFS.copyFromLocalFile(false, false, processingFile, stagingFile);
            } catch (Exception ex) {
                jobLogger.warn("Failed to copy file '{}' to staging file '{}'.", processingFile,
                        stagingFile);
                copyToErrorDirectory(workingFile);
                continue;
            }
            jobLogger.info("Copied file '{}' to staging file '{}'.", processingFile, stagingFile);

            // Move the staged file to its final target location.
            Path targetFile = new Path(targetDirectory, workingFile.getPath().getName());
            targetFS.rename(stagingFile, targetFile);
            jobLogger.info("Moved staging file '{}' to target file '{}'.", stagingFile, targetFile);

            // Move the processed file to the completed directory.
            copyToCompleteDirectory(workingFS.getFileStatus(processingFile));
        }
    }
}