List of usage examples for org.apache.hadoop.fs.Path.makeQualified
@InterfaceAudience.LimitedPrivate({ "HDFS", "MapReduce" }) public Path makeQualified(URI defaultUri, Path workingDir)
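For orientation, here is a minimal sketch of the call before the examples below. Note the method is annotated LimitedPrivate; application code usually reaches it through FileSystem.makeQualified(Path), which delegates to this overload. The file name and the hdfs://namenode:8020 authority in the comments are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MakeQualifiedExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // A relative path has neither scheme nor authority.
        Path relative = new Path("data/input.txt");
        // Resolve against the filesystem URI and working directory, e.g.
        // "data/input.txt" -> "hdfs://namenode:8020/user/alice/data/input.txt"
        // (host, port, and user shown here are hypothetical).
        Path qualified = relative.makeQualified(fs.getUri(), fs.getWorkingDirectory());
        System.out.println(qualified);
    }
}

As the examples below show, the common idiom is path.makeQualified(fs.getUri(), fs.getWorkingDirectory()): the working directory resolves a relative path, and the filesystem URI fills in a missing scheme and authority.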
From source file:RawParascaleFileSystem.java
License:Apache License
/**
 * Get the virtual filesystem of a path <code>aPath</code>. The VFS is
 * determined by the qualified URI of the path. If the path is not qualified
 * it will be made qualified.
 *
 * @param aPath Path to virtual filesystem
 * @param isQualified set to <code>true</code> if <code>aPath</code> is already qualified
 *
 * @return virtual filesystem
 */
String getVirtualFSFromPath(final Path aPath, final boolean isQualified) {
    final Path makeQualified = isQualified ? aPath
            : aPath.makeQualified(this.getUri(), this.getWorkingDirectory());
    return parseAuthority(makeAbsolute(makeQualified).toUri())[0];
}
From source file:RawParascaleFileSystem.java
License:Apache License
/**
 * {@inheritDoc}
 */
@Override
public File pathToFile(final Path path) {
    // this has public modifier due to subclassing
    checkPath(path);
    final String mountPoint = getMountPoint();
    final File file;
    if (path.isAbsolute()) {
        final URI pathUri = path.makeQualified(this.getUri(), this.getWorkingDirectory()).toUri();
        final String[] parseAuthority = parseAuthority(pathUri);
        file = new File(mountPoint,
                String.format("/%s/%s/%s", parseAuthority[1], parseAuthority[0], pathUri.getPath()));
    } else {
        file = new File(mountPoint, String.format("/%s/%s/%s/%s", controlNode, virtualFs,
                getRawHomeDirectory(), path.toUri().getPath()));
    }
    return file;
}
From source file:backup.namenode.NameNodeBackupBlockCheckProcessor.java
License:Apache License
private Path getLocalSort(String name) throws IOException {
    Path sortDir = conf.getLocalPath(DFS_BACKUP_NAMENODE_LOCAL_DIR_KEY, name);
    LocalFileSystem local = FileSystem.getLocal(conf);
    sortDir = sortDir.makeQualified(local.getUri(), local.getWorkingDirectory());
    local.delete(sortDir, true);
    return sortDir;
}
From source file:com.cloudera.impala.common.FileSystemUtil.java
License:Apache License
/**
 * Fully-qualifies the given path based on the FileSystem configuration. If the given
 * path is already fully qualified, a new Path object with the same location will be
 * returned.
 */
public static Path createFullyQualifiedPath(Path location) {
    return location.makeQualified(FileSystem.getDefaultUri(CONF), location);
}
From source file:com.facebook.presto.hive.PrestoS3FileSystem.java
License:Apache License
private Path qualifiedPath(Path path) {
    return path.makeQualified(this.uri, getWorkingDirectory());
}
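Wrapping the call in a one-line private helper, as PrestoS3FileSystem does here, is a recurring pattern among the FileSystem implementations in this list: file statuses and listing results can then be qualified consistently against the filesystem's own URI and working directory.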
From source file:com.ibm.crail.hdfs.CrailHadoopFileSystem.java
License:Apache License
@Override
public FileStatus getFileStatus(Path path) throws IOException {
    CrailNode directFile = null;
    try {
        directFile = dfs.lookup(path.toUri().getRawPath()).get();
    } catch (Exception e) {
        throw new IOException(e);
    }
    if (directFile == null) {
        throw new FileNotFoundException("File does not exist: " + path);
    }
    FsPermission permission = FsPermission.getFileDefault();
    if (directFile.getType().isDirectory()) {
        permission = FsPermission.getDirDefault();
    }
    FileStatus status = new FileStatus(directFile.getCapacity(), directFile.getType().isContainer(),
            CrailConstants.SHADOW_REPLICATION, CrailConstants.BLOCK_SIZE, directFile.getModificationTime(),
            directFile.getModificationTime(), permission, CrailConstants.USER, CrailConstants.USER,
            path.makeQualified(this.getUri(), this.workingDir));
    return status;
}
From source file:com.ibm.crail.hdfs.CrailHDFS.java
License:Apache License
@Override
public FileStatus getFileStatus(Path path)
        throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException {
    CrailNode directFile = null;
    try {
        directFile = dfs.lookup(path.toUri().getRawPath()).get();
    } catch (Exception e) {
        throw new IOException(e);
    }
    if (directFile == null) {
        throw new FileNotFoundException("filename " + path);
    }
    FsPermission permission = FsPermission.getFileDefault();
    if (directFile.getType().isDirectory()) {
        permission = FsPermission.getDirDefault();
    }
    FileStatus status = new FileStatus(directFile.getCapacity(), directFile.getType().isContainer(),
            CrailConstants.SHADOW_REPLICATION, CrailConstants.BLOCK_SIZE, directFile.getModificationTime(),
            directFile.getModificationTime(), permission, CrailConstants.USER, CrailConstants.USER,
            path.makeQualified(this.getUri(), this.workingDir));
    return status;
}
From source file:com.kylinolap.dict.DictionaryManager.java
License:Apache License
private String unpackDataSet(String tempHDFSDir, String dataSetName) throws IOException {
    InputStream in = this.getClass().getResourceAsStream("/com/kylinolap/dict/" + dataSetName + ".txt");
    if (in == null) // data set resource not found
        return null;

    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    IOUtils.copy(in, buf);
    in.close();
    byte[] bytes = buf.toByteArray();
    Path tmpDataSetPath = new Path(
            tempHDFSDir + "/dict/temp_dataset/" + dataSetName + "_" + bytes.length + ".txt");

    FileSystem fs = HadoopUtil.getFileSystem(tempHDFSDir);
    boolean writtenNewFile = false;
    if (fs.exists(tmpDataSetPath) == false || fs.getFileStatus(tmpDataSetPath).getLen() != bytes.length) {
        fs.mkdirs(tmpDataSetPath.getParent());
        FSDataOutputStream out = fs.create(tmpDataSetPath);
        IOUtils.copy(new ByteArrayInputStream(bytes), out);
        out.close();
        writtenNewFile = true;
    }

    String qualifiedPath = tmpDataSetPath.makeQualified(fs.getUri(), new Path("/")).toString();
    if (writtenNewFile)
        logger.info("Dictionary temp data set file written to " + qualifiedPath);
    return qualifiedPath;
}
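Two details are worth noting here: the working-directory argument to makeQualified is new Path("/"), so the temp path is qualified against the filesystem root rather than a user home directory; and the data set's byte length is embedded in the file name, letting the existence-and-size check above skip the write when an identical file is already present.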
From source file:com.kylinolap.job.tools.DeployCoprocessorCLI.java
License:Apache License
public static Path uploadCoprocessorJar(String localCoprocessorJar, FileSystem fileSystem,
        Set<String> oldJarPaths) throws IOException {
    Path uploadPath = null;
    File localCoprocessorFile = new File(localCoprocessorJar);

    // check existing jars
    if (oldJarPaths == null) {
        oldJarPaths = new HashSet<String>();
    }
    Path coprocessorDir = getCoprocessorHDFSDir(fileSystem, KylinConfig.getInstanceFromEnv());
    for (FileStatus fileStatus : fileSystem.listStatus(coprocessorDir)) {
        // reuse an existing jar with the same size and modification time as the local file
        if (fileStatus.getLen() == localCoprocessorFile.length()
                && fileStatus.getModificationTime() == localCoprocessorFile.lastModified()) {
            uploadPath = fileStatus.getPath();
            break;
        }
        String filename = fileStatus.getPath().toString();
        if (filename.endsWith(".jar")) {
            oldJarPaths.add(filename);
        }
    }

    // upload if not existing
    if (uploadPath == null) {
        // figure out a unique new jar file name
        Set<String> oldJarNames = new HashSet<String>();
        for (String path : oldJarPaths) {
            oldJarNames.add(new Path(path).getName());
        }
        String baseName = getBaseFileName(localCoprocessorJar);
        String newName = null;
        int i = 0;
        while (newName == null) {
            newName = baseName + "-" + (i++) + ".jar";
            if (oldJarNames.contains(newName))
                newName = null;
        }

        // upload
        uploadPath = new Path(coprocessorDir, newName);
        FileInputStream in = null;
        FSDataOutputStream out = null;
        try {
            in = new FileInputStream(localCoprocessorFile);
            out = fileSystem.create(uploadPath);
            IOUtils.copy(in, out);
        } finally {
            IOUtils.closeQuietly(in);
            IOUtils.closeQuietly(out);
        }

        fileSystem.setTimes(uploadPath, localCoprocessorFile.lastModified(), System.currentTimeMillis());
    }

    uploadPath = uploadPath.makeQualified(fileSystem.getUri(), null);
    return uploadPath;
}
From source file:com.scaleoutsoftware.soss.hserver.hadoop.DistributedCacheManager.java
License:Apache License
/**
 * Set up the distributed cache by localizing the resources, and updating
 * the configuration with references to the localized resources.
 *
 * @param conf job configuration
 * @throws IOException
 */
public void setup(Configuration conf) throws IOException {
    // If we are not the 0th worker, wait for the 0th worker to set up the cache
    if (InvocationWorker.getIgWorkerIndex() > 0 && InvocationWorker.getNumberOfWorkers() > 1) {
        try {
            InvocationWorker.getSynchronizationBarrier().waitForComplete(ACTION_NAME,
                    SYNCHRONIZATION_WAIT_MS, WAIT_GRANULARITY_MS);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
        return;
    }

    File workDir = new File(System.getProperty("user.dir"));

    // Generate YARN local resources objects corresponding to the distributed
    // cache configuration
    Map<String, LocalResource> localResources = new LinkedHashMap<String, LocalResource>();
    MRApps.setupDistributedCache(conf, localResources);

    // CODE CHANGE FROM ORIGINAL FILE:
    // We need to clear the resources from jar files, since they are distributed through the IG.
    Iterator<Map.Entry<String, LocalResource>> iterator = localResources.entrySet().iterator();
    while (iterator.hasNext()) {
        Entry<String, LocalResource> entry = iterator.next();
        if (entry.getKey().endsWith(".jar")) {
            iterator.remove();
        }
    }

    // Generating unique numbers for FSDownload.
    AtomicLong uniqueNumberGenerator = new AtomicLong(System.currentTimeMillis());

    // Find which resources are to be put on the local classpath
    Map<String, Path> classpaths = new HashMap<String, Path>();
    Path[] archiveClassPaths = DistributedCache.getArchiveClassPaths(conf);
    if (archiveClassPaths != null) {
        for (Path p : archiveClassPaths) {
            FileSystem remoteFS = p.getFileSystem(conf);
            p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(), remoteFS.getWorkingDirectory()));
            classpaths.put(p.toUri().getPath().toString(), p);
        }
    }
    Path[] fileClassPaths = DistributedCache.getFileClassPaths(conf);
    if (fileClassPaths != null) {
        for (Path p : fileClassPaths) {
            FileSystem remoteFS = p.getFileSystem(conf);
            p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(), remoteFS.getWorkingDirectory()));
            classpaths.put(p.toUri().getPath().toString(), p);
        }
    }

    // Localize the resources
    LocalDirAllocator localDirAllocator = new LocalDirAllocator(MRConfig.LOCAL_DIR);
    FileContext localFSFileContext = FileContext.getLocalFSFileContext();
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

    ExecutorService exec = null;
    try {
        ThreadFactory tf = new ThreadFactoryBuilder()
                .setNameFormat("LocalDistributedCacheManager Downloader #%d").build();
        exec = Executors.newCachedThreadPool(tf);
        Path destPath = localDirAllocator.getLocalPathForWrite(".", conf);
        Map<LocalResource, Future<Path>> resourcesToPaths = Maps.newHashMap();
        for (LocalResource resource : localResources.values()) {
            Callable<Path> download = new FSDownload(localFSFileContext, ugi, conf,
                    new Path(destPath, Long.toString(uniqueNumberGenerator.incrementAndGet())), resource);
            Future<Path> future = exec.submit(download);
            resourcesToPaths.put(resource, future);
        }
        for (Entry<String, LocalResource> entry : localResources.entrySet()) {
            LocalResource resource = entry.getValue();
            Path path;
            try {
                path = resourcesToPaths.get(resource).get();
            } catch (InterruptedException e) {
                throw new IOException(e);
            } catch (ExecutionException e) {
                throw new IOException(e);
            }
            String pathString = path.toUri().toString();
            String link = entry.getKey();
            String target = new File(path.toUri()).getPath();
            symlink(workDir, target, link);

            if (resource.getType() == LocalResourceType.ARCHIVE) {
                localArchives.add(pathString);
            } else if (resource.getType() == LocalResourceType.FILE) {
                localFiles.add(pathString);
            } else if (resource.getType() == LocalResourceType.PATTERN) {
                // PATTERN is not currently used in local mode
                throw new IllegalArgumentException(
                        "Resource type PATTERN is not implemented yet. " + resource.getResource());
            }
            Path resourcePath;
            try {
                resourcePath = ConverterUtils.getPathFromYarnURL(resource.getResource());
            } catch (URISyntaxException e) {
                throw new IOException(e);
            }
            LOG.info(String.format("Localized %s as %s", resourcePath, path));
            String cp = resourcePath.toUri().getPath();
            if (classpaths.keySet().contains(cp)) {
                localClasspaths.add(path.toUri().getPath().toString());
            }
        }
    } finally {
        if (exec != null) {
            exec.shutdown();
        }
    }

    // Update the configuration object with localized data.
    if (!localArchives.isEmpty()) {
        conf.set(MRJobConfig.CACHE_LOCALARCHIVES,
                StringUtils.arrayToString(localArchives.toArray(new String[localArchives.size()])));
    }
    if (!localFiles.isEmpty()) {
        conf.set(MRJobConfig.CACHE_LOCALFILES,
                StringUtils.arrayToString(localFiles.toArray(new String[localFiles.size()])));
    }
    setupCalled = true;

    // If we are the 0th worker, signal that the action is complete
    if (InvocationWorker.getIgWorkerIndex() == 0 && InvocationWorker.getNumberOfWorkers() > 1) {
        try {
            InvocationWorker.getSynchronizationBarrier().signalComplete(ACTION_NAME);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
}