List of usage examples for org.apache.hadoop.fs Path SEPARATOR
String SEPARATOR
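Path.SEPARATOR is the separator string "/" (Path.SEPARATOR_CHAR is the char form), so it shows up wherever code builds or validates HDFS path strings by hand, as in the examples below. As a minimal sketch of the two most common patterns, joining segments and handling the trailing separator (the class and method names here are illustrative and not taken from any of the listed sources):

import org.apache.hadoop.fs.Path;

public class SeparatorSketch {
    // Join a parent directory and a child name, avoiding a doubled separator.
    static String join(String parent, String child) {
        if (!parent.endsWith(Path.SEPARATOR)) {
            parent += Path.SEPARATOR; // Path.SEPARATOR is "/"
        }
        return parent + child;
    }

    public static void main(String[] args) {
        System.out.println(join("/user/alice", "data"));      // /user/alice/data
        // Path can also do the joining itself via its (parent, child) constructor.
        System.out.println(new Path("/user/alice", "data"));  // /user/alice/data
    }
}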
From source file: io.hops.erasure_coding.Codec.java
License: Apache License
/**
 * Make sure the directory string has the format "/a/b/c".
 */
private void checkDirectory(String d) {
    if (!d.startsWith(Path.SEPARATOR)) {
        throw new IllegalArgumentException("Bad directory:" + d);
    }
    if (d.endsWith(Path.SEPARATOR)) {
        throw new IllegalArgumentException("Bad directory:" + d);
    }
}
From source file: io.hops.erasure_coding.Codec.java
License: Apache License
/** Returns the parity directory, guaranteed to end with a path separator. */
public String getParityPrefix() {
    String prefix = this.parityDirectory;
    if (!prefix.endsWith(Path.SEPARATOR)) {
        prefix += Path.SEPARATOR;
    }
    return prefix;
}
From source file: io.hops.erasure_coding.PolicyInfo.java
License: Apache License
/** Appends a trailing path separator if the path does not already end with one. */
private String normalizePath(String path) {
    if (!path.endsWith(Path.SEPARATOR)) {
        path += Path.SEPARATOR;
    }
    return path;
}
From source file: io.hops.hopsworks.common.security.CertificateMaterializer.java
License: Open Source License
private void materializeRemoteInternal(MaterialKey key, String ownerName, String groupName,
        FsPermission permissions, String remoteDirectory) throws IOException {
    RemoteMaterialReferences materialRef = null;
    RemoteMaterialRefID identifier = new RemoteMaterialRefID(key.getExtendedUsername(), remoteDirectory);
    int retries = 0;
    while (materialRef == null && retries < MAX_NUMBER_OF_RETRIES) {
        try {
            materialRef = remoteMaterialReferencesFacade.acquireLock(identifier, lock_id);
            // Managed to take the lock, proceed
            if (materialRef == null) {
                remoteMaterialReferencesFacade.createNewMaterialReference(identifier);
                materialRef = remoteMaterialReferencesFacade.acquireLock(identifier, lock_id);
                // First time request for this material in this directory
                // 1. Check if in cache otherwise fetch from DB
                CryptoMaterial material = materialCache.get(key);
                if (material == null) {
                    material = getMaterialFromDatabase(key);
                }
                // 2. Upload to HDFS
                DistributedFileSystemOps dfso = distributedFsService.getDfsOps();
                try {
                    Path keyStore = new Path(
                        remoteDirectory + Path.SEPARATOR + key.getExtendedUsername() + KEYSTORE_SUFFIX);
                    writeToHDFS(dfso, keyStore, material.getKeyStore().array());
                    dfso.setOwner(keyStore, ownerName, groupName);
                    dfso.setPermission(keyStore, permissions);

                    Path trustStore = new Path(
                        remoteDirectory + Path.SEPARATOR + key.getExtendedUsername() + TRUSTSTORE_SUFFIX);
                    writeToHDFS(dfso, trustStore, material.getTrustStore().array());
                    dfso.setOwner(trustStore, ownerName, groupName);
                    dfso.setPermission(trustStore, permissions);

                    Path passwordFile = new Path(
                        remoteDirectory + Path.SEPARATOR + key.getExtendedUsername() + CERT_PASS_SUFFIX);
                    writeToHDFS(dfso, passwordFile, new String(material.getPassword()).getBytes());
                    dfso.setOwner(passwordFile, ownerName, groupName);
                    dfso.setPermission(passwordFile, permissions);

                    // Cache should be flushed otherwise NN will raise permission exceptions
                    dfso.flushCache(ownerName, groupName);
                } finally {
                    if (dfso != null) {
                        distributedFsService.closeDfsClient(dfso);
                    }
                }
                // 3. Set the correct initial references and persist
                materialRef.setReferences(1);
                remoteMaterialReferencesFacade.update(materialRef);
            } else {
                materialRef.incrementReferences();
                remoteMaterialReferencesFacade.update(materialRef);
            }
        } catch (Exception ex) {
            if (ex instanceof AcquireLockException) {
                LOG.log(Level.WARNING, ex.getMessage(), ex);
                retries++;
                try {
                    TimeUnit.MILLISECONDS.sleep(RETRY_WAIT_TIMEOUT);
                } catch (InterruptedException iex) {
                    throw new IOException(iex);
                }
            } else {
                throw new IOException(ex);
            }
        } finally {
            try {
                remoteMaterialReferencesFacade.releaseLock(identifier, lock_id);
            } catch (AcquireLockException ex) {
                LOG.log(Level.SEVERE, "Cannot release lock for " + identifier, ex);
            }
        }
    }
    if (materialRef == null) {
        throw new IOException("Could not materialize certificates for " + key.getExtendedUsername()
            + " in remote directory " + remoteDirectory);
    }
}
From source file: io.hops.resolvingcache.OptimalMemcache.java
License: Apache License
@Override
protected void setInternal(MemcachedClient mc, String path, List<INode> inodes) {
    if (INode.getPathNames(path).length != inodes.size()) {
        return;
    }
    int lastIndex = path.lastIndexOf(Path.SEPARATOR);
    if (lastIndex <= 0) {
        return;
    }
    INode file = inodes.get(inodes.size() - 1);
    if (file.isDirectory()) {
        super.setInternal(mc, path, inodes);
        return;
    }
    String parentPath = path.substring(0, lastIndex);
    super.setInternal(mc, parentPath, inodes.subList(0, inodes.size() - 1));
    setInternal(mc, file);
}
From source file: io.hops.resolvingcache.OptimalMemcache.java
License: Apache License
@Override
protected int[] getInternal(MemcachedClient mc, String path) throws IOException {
    int lastIndex = path.lastIndexOf(Path.SEPARATOR);
    if (lastIndex <= 0) {
        return null;
    }
    String parentPath = path.substring(0, lastIndex);
    int[] inodeIds = super.getInternal(mc, parentPath);
    if (inodeIds == null) {
        return null;
    }
    String file = path.substring(lastIndex + 1, path.length());
    int fileParentId = inodeIds[inodeIds.length - 1];
    Integer fileInodeId = INodeMemcache.getInternal(mc, keyPrefix, file, fileParentId);
    if (fileInodeId != null) {
        inodeIds = Arrays.copyOf(inodeIds, inodeIds.length + 1);
        inodeIds[inodeIds.length - 1] = fileInodeId;
    }
    return inodeIds;
}
From source file: io.hops.resolvingcache.OptimalMemcache.java
License: Apache License
@Override
protected void deleteInternal(MemcachedClient mc, String path) {
    int lastIndex = path.lastIndexOf(Path.SEPARATOR);
    if (lastIndex == -1) {
        return;
    }
    String parentPath = path.substring(0, lastIndex);
    super.deleteInternal(mc, parentPath);
}
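The three OptimalMemcache methods above share one idiom: path.lastIndexOf(Path.SEPARATOR) splits a path into its parent directory and its last component, with an index of 0 or -1 meaning there is no usable parent. A stand-alone sketch of that split, assuming an absolute input path (the class and helper names are illustrative, not from the source above):

import org.apache.hadoop.fs.Path;

// Illustrative helper: split an absolute path into {parent, lastComponent},
// mirroring the lastIndexOf(Path.SEPARATOR) pattern used above.
public class SplitSketch {
    static String[] splitParentAndName(String path) {
        int lastIndex = path.lastIndexOf(Path.SEPARATOR);
        if (lastIndex <= 0) {
            // "/name": the parent is the root directory itself.
            return new String[] { Path.SEPARATOR, path.substring(lastIndex + 1) };
        }
        return new String[] { path.substring(0, lastIndex), path.substring(lastIndex + 1) };
    }

    public static void main(String[] args) {
        String[] parts = splitParentAndName("/a/b/c");
        System.out.println(parts[0] + " | " + parts[1]); // prints: /a/b | c
    }
}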
From source file: io.pravega.segmentstore.storage.impl.hdfs.FileSystemOperation.java
License: Open Source License
/**
 * Gets an HDFS-friendly path prefix for the given Segment name by pre-pending the HDFS root from the config.
 */
private String getPathPrefix(String segmentName) {
    return this.context.config.getHdfsRoot() + Path.SEPARATOR + segmentName;
}
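A hedged side note on the snippet above: plain concatenation with Path.SEPARATOR assumes getHdfsRoot() carries no trailing slash. An alternative sketch (the method name and the Path return type are hypothetical, not from the Pravega source) lets Path join the segments itself:

// Hypothetical variant of getPathPrefix: let Path handle the separator instead of
// string concatenation; note it returns a Path rather than a String.
private Path getSegmentPath(String segmentName) {
    return new Path(this.context.config.getHdfsRoot(), segmentName);
}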
From source file: it.crs4.pydoop.mapreduce.pipes.Application.java
License: Apache License
/**
 * Start the child process to handle the task for us.
 * @throws IOException
 * @throws InterruptedException
 */
Application(TaskInputOutputContext<K1, V1, K2, V2> context, DummyRecordReader input)
        throws IOException, InterruptedException {
    Configuration conf = context.getConfiguration();
    serverSocket = new ServerSocket(0);
    Map<String, String> env = new HashMap<String, String>();
    // add TMPDIR environment variable with the value of java.io.tmpdir
    env.put("TMPDIR", System.getProperty("java.io.tmpdir"));
    env.put(Submitter.PORT, Integer.toString(serverSocket.getLocalPort()));

    // Add token to the environment if security is enabled
    Token<JobTokenIdentifier> jobToken = TokenCache.getJobToken(context.getCredentials());
    // This password is used as shared secret key between this application and
    // child pipes process
    byte[] password = jobToken.getPassword();
    String localPasswordFile = new File(".") + Path.SEPARATOR + "jobTokenPassword";
    writePasswordToLocalFile(localPasswordFile, password, conf);
    // FIXME why is this not Submitter.SECRET_LOCATION ?
    env.put("hadoop.pipes.shared.secret.location", localPasswordFile);

    List<String> cmd = new ArrayList<String>();
    String interpretor = conf.get(Submitter.INTERPRETOR);
    if (interpretor != null) {
        cmd.add(interpretor);
    }
    String executable = context.getLocalCacheFiles()[0].toString();
    if (!(new File(executable).canExecute())) {
        // LinuxTaskController sets +x permissions on all distcache files already.
        // In case of DefaultTaskController, set permissions here.
        FileUtil.chmod(executable, "u+x");
    }
    cmd.add(executable);
    // wrap the command in a stdout/stderr capture
    // we are starting map/reduce task of the pipes job. this is not a cleanup
    // attempt.
    TaskAttemptID taskid = context.getTaskAttemptID();
    File stdout = TaskLog.getTaskLogFile(taskid, false, TaskLog.LogName.STDOUT);
    File stderr = TaskLog.getTaskLogFile(taskid, false, TaskLog.LogName.STDERR);
    long logLength = TaskLog.getTaskLogLength(conf);
    cmd = TaskLog.captureOutAndError(null, cmd, stdout, stderr, logLength, false);

    process = runClient(cmd, env);
    clientSocket = serverSocket.accept();

    String challenge = getSecurityChallenge();
    String digestToSend = createDigest(password, challenge);
    String digestExpected = createDigest(password, digestToSend);

    handler = new OutputHandler<K2, V2>(context, input, digestExpected);
    K2 outputKey = (K2) ReflectionUtils.newInstance(context.getOutputKeyClass(), conf);
    V2 outputValue = (V2) ReflectionUtils.newInstance(context.getOutputValueClass(), conf);
    downlink = new BinaryProtocol<K1, V1, K2, V2>(clientSocket, handler, outputKey, outputValue, conf);

    downlink.authenticate(digestToSend, challenge);
    waitForAuthentication();
    LOG.debug("Authentication succeeded");
    downlink.start();
    downlink.setJobConf(conf);
}
From source file: it.crs4.pydoop.pipes.LocalJobRunner.java
License: Apache License
/** Builds the local task directory path: <jobDir>/<user>/<JOBCACHE>/<jobid>/<taskid>. */
public static String getLocalTaskDir(String user, String jobid, String taskid, boolean isCleanupAttempt) {
    String taskDir = jobDir + Path.SEPARATOR + user + Path.SEPARATOR + JOBCACHE + Path.SEPARATOR + jobid
        + Path.SEPARATOR + taskid;
    return taskDir;
}