List of usage examples for org.apache.hadoop.fs FileSystem getUri
public abstract URI getUri();
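Returns a URI whose scheme and authority identify this FileSystem (for example hdfs://namenode:8020 or file:///). Before the per-project examples below, here is a minimal, self-contained sketch of typical usage; it is illustrative only and not taken from any of the listed projects, and the class name and paths are hypothetical.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class GetUriSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Resolve the default filesystem configured via fs.defaultFS.
        FileSystem fs = FileSystem.get(conf);
        // getUri() yields the scheme/authority identifying this filesystem,
        // e.g. hdfs://namenode:8020 or file:///.
        URI fsUri = fs.getUri();
        System.out.println("Default filesystem URI: " + fsUri);

        // A pattern that recurs in the examples below: qualify a relative path
        // against the filesystem URI and working directory.
        Path qualified = new Path("data/input").makeQualified(fsUri, fs.getWorkingDirectory());
        System.out.println("Qualified path: " + qualified);
    }
}

The examples that follow show the same idea in real projects: qualifying paths, logging which filesystem a delegation token or commit targets, and selecting a storage backend by URI.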
From source file:org.apache.tajo.conf.TajoConf.java
License:Apache License
public static Path getQueryHistoryDir(TajoConf conf) throws IOException {
    String historyDirString = conf.getVar(ConfVars.HISTORY_QUERY_DIR);
    if (!hasScheme(historyDirString)) {
        Path stagingPath = getDefaultRootStagingDir(conf);
        FileSystem fs = stagingPath.getFileSystem(conf);
        Path path = new Path(fs.getUri().toString(), historyDirString);
        conf.setVar(ConfVars.HISTORY_QUERY_DIR, path.toString());
        return path;
    }
    return new Path(historyDirString);
}
From source file:org.apache.tajo.storage.OldStorageManager.java
License:Apache License
/**
 * Returns the proper Tablespace instance according to the storeType.
 *
 * @param tajoConf Tajo system property.
 * @param storeType Storage type
 * @return
 * @throws IOException
 */
public static Tablespace getStorageManager(TajoConf tajoConf, String storeType) throws IOException {
    FileSystem fileSystem = TajoConf.getWarehouseDir(tajoConf).getFileSystem(tajoConf);
    if (fileSystem != null) {
        return getStorageManager(tajoConf, fileSystem.getUri(), storeType);
    } else {
        return getStorageManager(tajoConf, null, storeType);
    }
}
From source file:org.apache.tajo.storage.StorageManager.java
License:Apache License
/**
 * Returns the proper StorageManager instance according to the storeType.
 *
 * @param tajoConf Tajo system property.
 * @param storeType Storage type
 * @return
 * @throws java.io.IOException
 */
public static StorageManager getStorageManager(TajoConf tajoConf, StoreType storeType) throws IOException {
    FileSystem fileSystem = TajoConf.getWarehouseDir(tajoConf).getFileSystem(tajoConf);
    if (fileSystem != null) {
        return getStorageManager(tajoConf, storeType, fileSystem.getUri().toString());
    } else {
        return getStorageManager(tajoConf, storeType, null);
    }
}
From source file:org.apache.tajo.yarn.command.LaunchCommand.java
License:Apache License
private void setupSecurityTokens(ContainerLaunchContext amContainer, FileSystem fs) throws IOException {
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials credentials = new Credentials();
        String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }
        // For now, only getting tokens for the default file-system.
        final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
            for (Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }
}
From source file:org.apache.tez.common.security.TokenCache.java
License:Apache License
/**
 * get delegation token for a specific FS
 * @param fs
 * @param credentials
 * @param p
 * @param conf
 * @throws IOException
 */
static void obtainTokensForFileSystemsInternal(FileSystem fs, Credentials credentials, Configuration conf)
        throws IOException {
    // TODO Change this to use YARN utilities once YARN-1664 is fixed.
    String delegTokenRenewer = Master.getMasterPrincipal(conf);
    if (delegTokenRenewer == null || delegTokenRenewer.length() == 0) {
        throw new IOException("Can't get Master Kerberos principal for use as renewer");
    }
    final Token<?> tokens[] = fs.addDelegationTokens(delegTokenRenewer, credentials);
    if (tokens != null) {
        for (Token<?> token : tokens) {
            LOG.info("Got dt for " + fs.getUri() + "; " + token);
        }
    }
}
From source file:org.apache.tez.engine.common.security.TokenCache.java
License:Apache License
/**
 * get delegation token for a specific FS
 * @param fs
 * @param credentials
 * @param p
 * @param conf
 * @throws IOException
 */
static void obtainTokensForNamenodesInternal(FileSystem fs, Credentials credentials, Configuration conf)
        throws IOException {
    String delegTokenRenewer = Master.getMasterPrincipal(conf);
    if (delegTokenRenewer == null || delegTokenRenewer.length() == 0) {
        throw new IOException("Can't get Master Kerberos principal for use as renewer");
    }
    mergeBinaryTokens(credentials, conf);
    final Token<?> tokens[] = fs.addDelegationTokens(delegTokenRenewer, credentials);
    if (tokens != null) {
        for (Token<?> token : tokens) {
            LOG.info("Got dt for " + fs.getUri() + "; " + token);
        }
    }
}
From source file:org.apache.tez.mapreduce.output.TestMROutput.java
License:Apache License
public static LogicalIOProcessorRuntimeTask createLogicalTask(Configuration conf, TezUmbilical umbilical,
        String dagName, String vertexName) throws Exception {
    ProcessorDescriptor procDesc = ProcessorDescriptor.create(TestProcessor.class.getName());
    List<InputSpec> inputSpecs = Lists.newLinkedList();
    List<OutputSpec> outputSpecs = Lists.newLinkedList();
    outputSpecs.add(new OutputSpec("Null",
            MROutput.createConfigBuilder(conf, TestOutputFormat.class).build().getOutputDescriptor(), 1));
    TaskSpec taskSpec = new TaskSpec(TezTestUtils.getMockTaskAttemptId(0, 0, 0, 0), dagName, vertexName, -1,
            procDesc, inputSpecs, outputSpecs, null);
    FileSystem fs = FileSystem.getLocal(conf);
    Path workDir = new Path(new Path(System.getProperty("test.build.data", "/tmp")), "TestMapOutput")
            .makeQualified(fs.getUri(), fs.getWorkingDirectory());
    LogicalIOProcessorRuntimeTask task = new LogicalIOProcessorRuntimeTask(taskSpec, 0, conf,
            new String[] { workDir.toString() }, umbilical, null, new HashMap<String, String>(),
            HashMultimap.<String, String>create(), null, "", new ExecutionContextImpl("localhost"),
            Runtime.getRuntime().maxMemory(), true, new DefaultHadoopShim());
    return task;
}
From source file:org.apache.tez.test.TestFaultTolerance.java
License:Apache License
@BeforeClass
public static void setup() throws Exception {
    LOG.info("Starting mini clusters");
    FileSystem remoteFs = null;
    try {
        conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, TEST_ROOT_DIR);
        dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).racks(null).build();
        remoteFs = dfsCluster.getFileSystem();
    } catch (IOException io) {
        throw new RuntimeException("problem starting mini dfs cluster", io);
    }
    if (miniTezCluster == null) {
        miniTezCluster = new MiniTezCluster(TestFaultTolerance.class.getName(), 4, 1, 1);
        Configuration miniTezconf = new Configuration(conf);
        miniTezconf.set("fs.defaultFS", remoteFs.getUri().toString()); // use HDFS
        miniTezCluster.init(miniTezconf);
        miniTezCluster.start();
        Path remoteStagingDir = remoteFs
                .makeQualified(new Path(TEST_ROOT_DIR, String.valueOf(new Random().nextInt(100000))));
        TezClientUtils.ensureStagingDirExists(conf, remoteStagingDir);
        TezConfiguration tezConf = new TezConfiguration(miniTezCluster.getConfig());
        tezConf.set(TezConfiguration.TEZ_AM_STAGING_DIR, remoteStagingDir.toString());
        tezConf.setBoolean(TezConfiguration.TEZ_AM_NODE_BLACKLISTING_ENABLED, false);
        tezSession = TezClient.create("TestFaultTolerance", tezConf, true);
        tezSession.start();
    }
}
From source file:org.commoncrawl.mapred.ec2.parser.EC2ParserTask.java
License:Open Source License
public EC2ParserTask(Configuration conf) throws Exception {
    FileSystem fs = FileSystem.get(new URI("s3n://aws-publicdatasets"), conf);
    LOG.info("FileSystem is:" + fs.getUri() + " Scanning for candidates at path:" + CRAWL_LOG_INTERMEDIATE_PATH);
    TreeSet<Path> candidateSet = buildCandidateList(fs, new Path(CRAWL_LOG_INTERMEDIATE_PATH));
    LOG.info("Scanning for completed segments");
    List<Path> processedLogs = scanForCompletedSegments(fs);
    LOG.info("Found " + processedLogs.size() + " processed logs");
    // remove processed from candidate set ...
    candidateSet.removeAll(processedLogs);
    // ok we are ready to go ..
    LOG.info("There are: " + candidateSet.size() + " logs in need of parsing");
    int iteration = 0;
    while (candidateSet.size() != 0) {
        ImmutableList.Builder<Path> pathBuilder = new ImmutableList.Builder<Path>();
        Iterator<Path> iterator = Iterators.limit(candidateSet.iterator(), LOGS_PER_ITERATION);
        while (iterator.hasNext()) {
            pathBuilder.add(iterator.next());
            iterator.remove();
        }
        LOG.info("Queueing Parse");
        queue(fs, conf, pathBuilder.build());
        LOG.info("Queued Parse");
    }
    // queue shutdown items
    for (int i = 0; i < MAX_SIMULTANEOUS_JOBS; ++i) {
        _queue.put(new QueueItem());
    }
}
From source file:org.commoncrawl.mapred.ec2.parser.OutputCommitter.java
License:Open Source License
@Override
public void commitTask(TaskAttemptContext context) throws IOException {
    LOG.info("Commit Called on Task:" + context.getTaskAttemptID().toString());
    Path taskOutputPath = getTempTaskOutputPath(context);
    TaskAttemptID attemptId = context.getTaskAttemptID();
    JobConf job = context.getJobConf();
    if (taskOutputPath != null) {
        FileSystem fs = taskOutputPath.getFileSystem(job);
        LOG.info("FileSystem for commit for Task:" + attemptId + " is:" + fs.getUri());
        context.getProgressible().progress();
        if (fs.exists(taskOutputPath)) {
            Path jobOutputPath = taskOutputPath.getParent().getParent();
            // Move the task outputs to their final place
            moveTaskOutputs(context, fs, jobOutputPath, taskOutputPath);
            // Delete the temporary task-specific output directory
            if (!fs.delete(taskOutputPath, true)) {
                LOG.info("Failed to delete the temporary output" + " directory of task: " + attemptId + " - "
                        + taskOutputPath);
            }
            LOG.info("Saved output of task '" + attemptId + "' to " + jobOutputPath);
        }
    }
}