List of usage examples for org.apache.hadoop.fs.FileSystem#getUri
public abstract URI getUri();
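getUri() returns a URI whose scheme and authority identify this FileSystem instance: for example hdfs://namenode:8020 for an HDFS cluster, or file:/// for the local filesystem. A minimal sketch of calling it; with an empty Configuration the default filesystem is the local one, so the authority is null:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class GetUriExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // With no fs.defaultFS override this resolves to the local filesystem (file:///).
        FileSystem fs = FileSystem.get(conf);
        URI uri = fs.getUri();
        // For file:/// the authority prints as null; on HDFS it would be host:port.
        System.out.println("scheme=" + uri.getScheme() + ", authority=" + uri.getAuthority());
    }
}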
From source file:gobblin.util.HadoopUtils.java
License:Apache License
/**
 * This method is an additive implementation of the {@link FileSystem#rename(Path, Path)} method. It moves all the
 * files/directories under 'from' path to the 'to' path without overwriting existing directories in the 'to' path.
 *
 * <p>
 * The rename operation happens at the first non-existent sub-directory. If a directory at the destination path
 * already exists, it recursively tries to move sub-directories. If all the sub-directories also exist at the
 * destination, a file-level move is done.
 * </p>
 *
 * @param fileSystem on which the data needs to be moved
 * @param from path of the data to be moved
 * @param to path of the data to be moved
 */
public static void renameRecursively(FileSystem fileSystem, Path from, Path to) throws IOException {
    log.info(String.format("Recursively renaming %s in %s to %s.", from, fileSystem.getUri(), to));

    FileSystem throttledFS = getOptionallyThrottledFileSystem(fileSystem, 10000);

    ExecutorService executorService = ScalingThreadPoolExecutor.newScalingThreadPool(1, 100, 100,
        ExecutorsUtils.newThreadFactory(Optional.of(log), Optional.of("rename-thread-%d")));
    Queue<Future<?>> futures = Queues.newConcurrentLinkedQueue();

    try {
        if (!fileSystem.exists(from)) {
            throw new IOException("Trying to rename a path that does not exist! " + from);
        }

        futures.add(executorService.submit(
            new RenameRecursively(throttledFS, fileSystem.getFileStatus(from), to, executorService, futures)));
        int futuresUsed = 0;
        while (!futures.isEmpty()) {
            try {
                futures.poll().get();
                futuresUsed++;
            } catch (ExecutionException | InterruptedException ee) {
                throw new IOException(ee.getCause());
            }
        }
        log.info(String.format("Recursive renaming of %s to %s. (details: used %d futures)", from, to, futuresUsed));
    } finally {
        ExecutorsUtils.shutdownExecutorService(executorService, Optional.of(log), 1, TimeUnit.SECONDS);
    }
}
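A minimal, hypothetical caller for the utility above. The staging and publish paths, and the use of the default filesystem, are illustrative assumptions, not taken from the Gobblin source:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import gobblin.util.HadoopUtils;

public class RenameExample {
    public static void main(String[] args) throws Exception {
        // FileSystem.get(conf) resolves to fs.defaultFS, e.g. hdfs://namenode:8020 on a cluster.
        FileSystem fs = FileSystem.get(new Configuration());
        // Merge a staging directory into a publish directory without clobbering
        // directories that already exist at the destination.
        HadoopUtils.renameRecursively(fs, new Path("/data/staging/job_123"), new Path("/data/published"));
    }
}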
From source file:gobblin.util.HadoopUtilsTest.java
License:Apache License
@Test(groups = { "performance" }) public void testRenamePerformance() throws Exception { FileSystem fs = Mockito.mock(FileSystem.class); Path sourcePath = new Path("/source"); Path s1 = new Path(sourcePath, "d1"); FileStatus[] sourceStatuses = new FileStatus[10000]; FileStatus[] targetStatuses = new FileStatus[1000]; for (int i = 0; i < sourceStatuses.length; i++) { sourceStatuses[i] = getFileStatus(new Path(s1, "path" + i), false); }// w w w . jav a2 s .com for (int i = 0; i < targetStatuses.length; i++) { targetStatuses[i] = getFileStatus(new Path(s1, "path" + i), false); } Mockito.when(fs.getUri()).thenReturn(new URI("file:///")); Mockito.when(fs.getFileStatus(sourcePath)).thenAnswer(getDelayedAnswer(getFileStatus(sourcePath, true))); Mockito.when(fs.exists(sourcePath)).thenAnswer(getDelayedAnswer(true)); Mockito.when(fs.listStatus(sourcePath)) .thenAnswer(getDelayedAnswer(new FileStatus[] { getFileStatus(s1, true) })); Mockito.when(fs.exists(s1)).thenAnswer(getDelayedAnswer(true)); Mockito.when(fs.listStatus(s1)).thenAnswer(getDelayedAnswer(sourceStatuses)); Path target = new Path("/target"); Path s1Target = new Path(target, "d1"); Mockito.when(fs.exists(target)).thenAnswer(getDelayedAnswer(true)); Mockito.when(fs.exists(s1Target)).thenAnswer(getDelayedAnswer(true)); Mockito.when(fs.mkdirs(Mockito.any(Path.class))).thenAnswer(getDelayedAnswer(true)); Mockito.when(fs.rename(Mockito.any(Path.class), Mockito.any(Path.class))) .thenAnswer(getDelayedAnswer(true)); HadoopUtils.renameRecursively(fs, sourcePath, target); }
From source file:gobblin.util.JobLauncherUtils.java
License:Apache License
private static ParallelRunner getParallelRunner(FileSystem fs, Closer closer, int parallelRunnerThreads,
        Map<String, ParallelRunner> parallelRunners) {
    String uriAndHomeDir = new Path(new Path(fs.getUri()), fs.getHomeDirectory()).toString();
    if (!parallelRunners.containsKey(uriAndHomeDir)) {
        parallelRunners.put(uriAndHomeDir, closer.register(new ParallelRunner(parallelRunnerThreads, fs)));
    }
    return parallelRunners.get(uriAndHomeDir);
}
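The snippet above keys its ParallelRunner cache on fs.getUri() combined with the home directory, so one runner is shared per filesystem-and-user pair. A minimal sketch of building that same key, assuming only the Hadoop API; the hdfs:// value in the comment is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FsCacheKeyExample {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // Combining the filesystem URI with the home directory distinguishes
        // both different clusters and different users on the same cluster,
        // e.g. "hdfs://namenode:8020/user/alice".
        String key = new Path(new Path(fs.getUri()), fs.getHomeDirectory()).toString();
        System.out.println(key);
    }
}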
From source file:gobblin.util.ParallelRunnerTest.java
License:Apache License
@Test
public void testMovePath() throws IOException, URISyntaxException {
    String expected = "test";
    ByteArrayOutputStream actual = new ByteArrayOutputStream();

    Path src = new Path("/src/file.txt");
    Path dst = new Path("/dst/file.txt");

    FileSystem fs1 = Mockito.mock(FileSystem.class);
    Mockito.when(fs1.exists(src)).thenReturn(true);
    Mockito.when(fs1.isFile(src)).thenReturn(true);
    Mockito.when(fs1.getUri()).thenReturn(new URI("fs1:////"));
    Mockito.when(fs1.getFileStatus(src)).thenReturn(new FileStatus(1, false, 1, 1, 1, src));
    Mockito.when(fs1.open(src)).thenReturn(
        new FSDataInputStream(new SeekableFSInputStream(new ByteArrayInputStream(expected.getBytes()))));
    Mockito.when(fs1.delete(src, true)).thenReturn(true);

    FileSystem fs2 = Mockito.mock(FileSystem.class);
    Mockito.when(fs2.exists(dst)).thenReturn(false);
    Mockito.when(fs2.getUri()).thenReturn(new URI("fs2:////"));
    Mockito.when(fs2.getConf()).thenReturn(new Configuration());
    Mockito.when(fs2.create(dst, false)).thenReturn(new FSDataOutputStream(actual, null));

    try (ParallelRunner parallelRunner = new ParallelRunner(1, fs1)) {
        parallelRunner.movePath(src, fs2, dst, Optional.<String>absent());
    }

    Assert.assertEquals(actual.toString(), expected);
}
From source file:gobblin.util.ProxiedFileSystemCache.java
License:Apache License
private static URI resolveUri(URI uri, Configuration configuration, FileSystem fileSystem) throws IOException {
    if (uri != null) {
        return uri;
    }
    if (fileSystem != null) {
        return fileSystem.getUri();
    }
    if (configuration != null) {
        return FileSystem.getDefaultUri(configuration);
    }
    throw new IOException("FileSystem URI could not be determined from available inputs.");
}
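The helper prefers an explicit URI, then a live FileSystem's getUri(), then the configured default. That final fallback can be exercised on its own; a minimal sketch, where the hdfs:// value in the comment is illustrative:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class DefaultUriExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // With no fs.defaultFS override this prints file:///;
        // on a configured cluster it would print something like hdfs://namenode:8020.
        URI defaultUri = FileSystem.getDefaultUri(conf);
        System.out.println(defaultUri);
    }
}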
From source file:hadoop.yarn.distributedshell.DshellClient.java
License:Apache License
/**
 * Main run function for the client.
 *
 * @return true if application completed successfully
 * @throws IOException
 * @throws YarnException
 */
public boolean run() throws IOException, YarnException {
    LOG.info("Running Client");
    yarnClient.start();

    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    LOG.info("Got Cluster metric info from ASM" + ", numNodeManagers=" + clusterMetrics.getNumNodeManagers());

    List<NodeReport> clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    LOG.info("Got Cluster node info from ASM");
    for (NodeReport node : clusterNodeReports) {
        LOG.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress"
                + node.getHttpAddress() + ", nodeRackName" + node.getRackName() + ", nodeNumContainers"
                + node.getNumContainers());
    }

    QueueInfo queueInfo = yarnClient.getQueueInfo(this.amQueue);
    LOG.info("Queue info" + ", queueName=" + queueInfo.getQueueName() + ", queueCurrentCapacity="
            + queueInfo.getCurrentCapacity() + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity()
            + ", queueApplicationCount=" + queueInfo.getApplications().size() + ", queueChildQueueCount="
            + queueInfo.getChildQueues().size());

    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    for (QueueUserACLInfo aclInfo : listAclInfo) {
        for (QueueACL userAcl : aclInfo.getUserAcls()) {
            LOG.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl="
                    + userAcl.name());
        }
    }

    // Get a new application id
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    // TODO get min/max resource capabilities from RM and change memory ask if needed
    // If we do not have min/max, we may not be able to correctly request
    // the required resources from the RM for the app master
    // Memory ask has to be a multiple of min and less than max.
    // Dump out information about cluster capability as seen by the resource manager
    int maxMem = appResponse.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capability of resources in this cluster " + maxMem);

    // A resource ask cannot exceed the max.
    if (amMemory > maxMem) {
        LOG.info("AM memory specified above max threshold of cluster. Using max value." + ", specified="
                + amMemory + ", max=" + maxMem);
        amMemory = maxMem;
    }

    int maxVCores = appResponse.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max virtual cores capability of resources in this cluster " + maxVCores);

    if (amVCores > maxVCores) {
        LOG.info("AM virtual cores specified above max threshold of cluster. " + "Using max value."
                + ", specified=" + amVCores + ", max=" + maxVCores);
        amVCores = maxVCores;
    }

    // set the application name
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();

    appContext.setKeepContainersAcrossApplicationAttempts(keepContainers);
    appContext.setApplicationName(appName);

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    // set local resources for the application master
    // local files or archives as needed
    // In this scenario, the jar file for the application master is part of the local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    LOG.info("Copy App Master jar from local filesystem and add to local environment");
    // Copy the application master jar to the filesystem
    // Create a local resource to point to the destination jar path
    FileSystem fs = FileSystem.get(conf);
    addToLocalResources(fs, appMasterJar, appMasterJarPath, appId.toString(), localResources, null);

    // Set the log4j properties if needed
    if (!log4jPropFile.isEmpty()) {
        addToLocalResources(fs, log4jPropFile, log4jPath, appId.toString(), localResources, null);
    }

    // The shell script has to be made available on the final container(s)
    // where it will be executed.
    // To do this, we need to first copy into the filesystem that is visible
    // to the yarn framework.
    // We do not need to set this as a local resource for the application
    // master as the application master does not need it.
    String hdfsShellScriptLocation = "";
    long hdfsShellScriptLen = 0;
    long hdfsShellScriptTimestamp = 0;
    if (!shellScriptPath.isEmpty()) {
        Path shellSrc = new Path(shellScriptPath);
        String shellPathSuffix = appName + "/" + appId.toString() + "/" + SCRIPT_PATH;
        Path shellDst = new Path(fs.getHomeDirectory(), shellPathSuffix);
        fs.copyFromLocalFile(false, true, shellSrc, shellDst);
        hdfsShellScriptLocation = shellDst.toUri().toString();
        FileStatus shellFileStatus = fs.getFileStatus(shellDst);
        hdfsShellScriptLen = shellFileStatus.getLen();
        hdfsShellScriptTimestamp = shellFileStatus.getModificationTime();
    }

    if (!shellCommand.isEmpty()) {
        addToLocalResources(fs, null, shellCommandPath, appId.toString(), localResources, shellCommand);
    }

    if (shellArgs.length > 0) {
        addToLocalResources(fs, null, shellArgsPath, appId.toString(), localResources,
                StringUtils.join(shellArgs, " "));
    }

    // Set local resource info into app master container launch context
    amContainer.setLocalResources(localResources);

    // Set the necessary security tokens as needed
    // amContainer.setContainerTokens(containerToken);

    // Set the env variables to be setup in the env where the application master will be run
    LOG.info("Set the environment for the application master");
    Map<String, String> env = new HashMap<String, String>();

    // put location of shell script into env
    // using the env info, the application master will create the correct
    // local resource for the eventual containers that will be launched
    // to execute the shell scripts
    env.put(DshellDSConstants.DISTRIBUTEDSHELLSCRIPTLOCATION, hdfsShellScriptLocation);
    env.put(DshellDSConstants.DISTRIBUTEDSHELLSCRIPTTIMESTAMP, Long.toString(hdfsShellScriptTimestamp));
    env.put(DshellDSConstants.DISTRIBUTEDSHELLSCRIPTLEN, Long.toString(hdfsShellScriptLen));

    // ======================================== upload container jars and record their metadata in the env
    if (containerJarPaths.length != 0) {
        for (int i = 0; i < containerJarPaths.length; i++) {
            String hdfsJarLocation = "";
            String[] jarNameSplit = containerJarPaths[i].split("/");
            String jarName = jarNameSplit[jarNameSplit.length - 1];

            long hdfsJarLen = 0;
            long hdfsJarTimestamp = 0;
            if (!containerJarPaths[i].isEmpty()) {
                Path jarSrc = new Path(containerJarPaths[i]);
                String jarPathSuffix = appName + "/" + appId.toString() + "/" + jarName;
                Path jarDst = new Path(fs.getHomeDirectory(), jarPathSuffix);
                fs.copyFromLocalFile(false, true, jarSrc, jarDst);
                hdfsJarLocation = jarDst.toUri().toString();
                FileStatus jarFileStatus = fs.getFileStatus(jarDst);
                hdfsJarLen = jarFileStatus.getLen();
                hdfsJarTimestamp = jarFileStatus.getModificationTime();

                env.put(DshellDSConstants.DISTRIBUTEDJARLOCATION + i, hdfsJarLocation);
                env.put(DshellDSConstants.DISTRIBUTEDJARTIMESTAMP + i, Long.toString(hdfsJarTimestamp));
                env.put(DshellDSConstants.DISTRIBUTEDJARLEN + i, Long.toString(hdfsJarLen));
            }
        }
    }
    // ======================================== end container jars

    // ======================================== upload container archives and record their metadata in the env
    if (containerArchivePaths.length != 0) {
        for (int i = 0; i < containerArchivePaths.length; i++) {
            String hdfsArchiveLocation = "";
            String[] archiveNameSplit = containerArchivePaths[i].split("/");
            String archiveName = archiveNameSplit[archiveNameSplit.length - 1];

            long hdfsArchiveLen = 0;
            long hdfsArchiveTimestamp = 0;
            if (!containerArchivePaths[i].isEmpty()) {
                Path archiveSrc = new Path(containerArchivePaths[i]);
                String archivePathSuffix = appName + "/" + appId.toString() + "/" + archiveName;
                Path archiveDst = new Path(fs.getHomeDirectory(), archivePathSuffix);
                fs.copyFromLocalFile(false, true, archiveSrc, archiveDst);
                hdfsArchiveLocation = archiveDst.toUri().toString();
                FileStatus archiveFileStatus = fs.getFileStatus(archiveDst);
                hdfsArchiveLen = archiveFileStatus.getLen();
                hdfsArchiveTimestamp = archiveFileStatus.getModificationTime();

                env.put(DshellDSConstants.DISTRIBUTEDARCHIVELOCATION + i, hdfsArchiveLocation);
                env.put(DshellDSConstants.DISTRIBUTEDARCHIVETIMESTAMP + i, Long.toString(hdfsArchiveTimestamp));
                env.put(DshellDSConstants.DISTRIBUTEDARCHIVELEN + i, Long.toString(hdfsArchiveLen));
            }
        }
    }
    // ======================================== end container archives

    // Add AppMaster.jar location to classpath
    // At some point we should not be required to add
    // the hadoop specific classpaths to the env.
    // It should be provided out of the box.
    // For now setting all required classpaths including
    // the classpath to "." for the application jar
    StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$$())
            .append(ApplicationConstants.CLASS_PATH_SEPARATOR).append("./*");
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH)) {
        classPathEnv.append(ApplicationConstants.CLASS_PATH_SEPARATOR);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(ApplicationConstants.CLASS_PATH_SEPARATOR).append("./log4j.properties");

    // add the runtime classpath needed for tests to work
    if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }

    env.put("CLASSPATH", classPathEnv.toString());

    amContainer.setEnvironment(env);

    // Set the necessary command to execute the application master
    Vector<CharSequence> vargs = new Vector<CharSequence>(30);

    // Set java executable command
    LOG.info("Setting up app master command");
    vargs.add(Environment.JAVA_HOME.$$() + "/bin/java");
    // Set Xmx based on am memory size
    vargs.add("-Xmx" + amMemory + "m");
    // Set class name
    vargs.add(appMasterMainClass);
    // Set params for Application Master
    vargs.add("--container_memory " + String.valueOf(containerMemory));
    vargs.add("--container_vcores " + String.valueOf(containerVirtualCores));
    vargs.add("--num_containers " + String.valueOf(numContainers));
    vargs.add("--priority " + String.valueOf(shellCmdPriority));

    for (Map.Entry<String, String> entry : shellEnv.entrySet()) {
        vargs.add("--shell_env " + entry.getKey() + "=" + entry.getValue());
    }
    if (debugFlag) {
        vargs.add("--debug");
    }

    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");

    // Get final command
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }

    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());
    amContainer.setCommands(commands);

    // Set up resource type requirements
    // For now, both memory and vcores are supported, so we set memory and vcores requirements
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(amMemory);
    capability.setVirtualCores(amVCores);
    appContext.setResource(capability);

    // Service data is a binary blob that can be passed to the application
    // Not needed in this scenario
    // amContainer.setServiceData(serviceData);

    // Setup security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials credentials = new Credentials();
        String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }

        // For now, only getting tokens for the default file-system.
        final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
            for (Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }

    appContext.setAMContainerSpec(amContainer);

    // Set the priority for the application master
    Priority pri = Records.newRecord(Priority.class);
    // TODO - what is the range for priority? how to decide?
    pri.setPriority(amPriority);
    appContext.setPriority(pri);

    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue(amQueue);

    // Submit the application to the applications manager
    // SubmitApplicationResponse submitResp = applicationsManager.submitApplication(appRequest);
    // Ignore the response as either a valid response object is returned on success
    // or an exception thrown to denote some form of a failure
    LOG.info("Submitting application to ASM");

    yarnClient.submitApplication(appContext);

    // TODO
    // Try submitting the same request again
    // app submission failure?

    // Monitor the application
    return monitorApplication(appId);
}
From source file:io.druid.storage.hdfs.HdfsDataSegmentFinder.java
License:Apache License
@Override
public Set<DataSegment> findSegments(String workingDirPathStr, boolean updateDescriptor)
        throws SegmentLoadingException {
    final Set<DataSegment> segments = Sets.newHashSet();
    final Path workingDirPath = new Path(workingDirPathStr);
    FileSystem fs;
    try {
        fs = workingDirPath.getFileSystem(config);

        log.info(fs.getScheme());
        log.info("FileSystem URI:" + fs.getUri().toString());

        if (!fs.exists(workingDirPath)) {
            throw new SegmentLoadingException("Working directory [%s] doesn't exist.", workingDirPath);
        }

        if (!fs.isDirectory(workingDirPath)) {
            throw new SegmentLoadingException("Working directory [%s] is not a directory!?", workingDirPath);
        }

        final RemoteIterator<LocatedFileStatus> it = fs.listFiles(workingDirPath, true);
        while (it.hasNext()) {
            final LocatedFileStatus locatedFileStatus = it.next();
            final Path path = locatedFileStatus.getPath();
            if (path.getName().endsWith("descriptor.json")) {
                final Path indexZip;
                final String[] descriptorParts = path.getName().split("_");
                if (descriptorParts.length == 2 && descriptorParts[1].equals("descriptor.json")
                        && org.apache.commons.lang.StringUtils.isNumeric(descriptorParts[0])) {
                    indexZip = new Path(path.getParent(), StringUtils.format("%s_index.zip", descriptorParts[0]));
                } else {
                    indexZip = new Path(path.getParent(), "index.zip");
                }
                if (fs.exists(indexZip)) {
                    final DataSegment dataSegment = mapper.readValue(fs.open(path), DataSegment.class);
                    log.info("Found segment [%s] located at [%s]", dataSegment.getIdentifier(), indexZip);

                    final Map<String, Object> loadSpec = dataSegment.getLoadSpec();
                    final String pathWithoutScheme = indexZip.toUri().getPath();

                    if (!loadSpec.get("type").equals(HdfsStorageDruidModule.SCHEME)
                            || !loadSpec.get("path").equals(pathWithoutScheme)) {
                        loadSpec.put("type", HdfsStorageDruidModule.SCHEME);
                        loadSpec.put("path", pathWithoutScheme);
                        if (updateDescriptor) {
                            log.info("Updating loadSpec in descriptor.json at [%s] with new path [%s]", path,
                                    pathWithoutScheme);
                            mapper.writeValue(fs.create(path, true), dataSegment);
                        }
                    }
                    segments.add(dataSegment);
                } else {
                    throw new SegmentLoadingException(
                            "index.zip didn't exist at [%s] while descriptor.json exists!?", indexZip);
                }
            }
        }
    } catch (IOException e) {
        throw new SegmentLoadingException(e, "Problems interacting with filesystem[%s].", workingDirPath);
    }

    return segments;
}
From source file:io.dstream.tez.utils.HdfsSerializerUtils.java
License:Apache License
/**
 * Will serialize the given object to HDFS, returning its {@link Path}.
 *
 * @param source the object to serialize
 * @param fs the {@link FileSystem} to write to
 * @param targetPath the path to serialize the object to
 * @return the fully qualified {@link Path} of the serialized object
 */
public static Path serialize(Object source, FileSystem fs, Path targetPath) {
    Assert.notNull(targetPath, "'targetPath' must not be null");
    Assert.notNull(fs, "'fs' must not be null");
    Assert.notNull(source, "'source' must not be null");

    Path resultPath = targetPath.makeQualified(fs.getUri(), fs.getWorkingDirectory());
    OutputStream targetOutputStream = null;
    try {
        targetOutputStream = fs.create(targetPath);
        SerializationUtils.serialize(source, targetOutputStream);
    } catch (Exception e) {
        throw new IllegalStateException("Failed to serialize " + source + " to " + resultPath, e);
    }
    return resultPath;
}
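Path.makeQualified(URI, Path) is what turns a possibly relative target path into a fully qualified one here, by resolving it against the filesystem's getUri() and working directory. A minimal sketch of that call in isolation; the path and the printed URI in the comment are illustrative assumptions:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MakeQualifiedExample {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // A relative path is resolved against the filesystem URI and working
        // directory; on a cluster this might print
        // hdfs://namenode:8020/user/alice/checkpoints/model.ser
        Path qualified = new Path("checkpoints/model.ser").makeQualified(fs.getUri(), fs.getWorkingDirectory());
        System.out.println(qualified);
    }
}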
From source file:io.hops.erasure_coding.BaseEncodingManager.java
License:Apache License
static public void logRaidEncodingMetrics(String result, Codec codec, long delay, long numReadBytes,
        long numReadBlocks, long metaBlocks, long metaBytes, long savingBytes, Path srcPath, LOGTYPES type,
        FileSystem fs) {
    try {
        JSONObject json = new JSONObject();
        json.put("result", result);
        json.put("code", codec.id);
        json.put("delay", delay);
        json.put("readbytes", numReadBytes);
        json.put("readblocks", numReadBlocks);
        json.put("metablocks", metaBlocks);
        json.put("metabytes", metaBytes);
        json.put("savingbytes", savingBytes);
        json.put("path", srcPath.toString());
        json.put("type", type.name());
        json.put("cluster", fs.getUri().getAuthority());
        ENCODER_METRICS_LOG.info(json.toString());
    } catch (JSONException e) {
        LOG.warn("Exception when logging the Raid metrics: " + e.getMessage(), e);
    }
}
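The "cluster" field above comes from fs.getUri().getAuthority(): the authority is the host:port component of the filesystem URI, which conveniently names the cluster. A tiny sketch with a made-up URI:

import java.net.URI;

public class AuthorityExample {
    public static void main(String[] args) {
        // The authority component names the cluster; the URI here is illustrative.
        URI uri = URI.create("hdfs://namenode:8020/user/alice");
        System.out.println(uri.getAuthority()); // prints: namenode:8020
    }
}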
From source file:io.hops.erasure_coding.Decoder.java
License:Apache License
public void logRaidReconstructionMetrics(String result, long bytes, Codec codec, long delay,
        int numMissingBlocks, long numReadBytes, Path srcFile, long errorOffset, LOGTYPES type, FileSystem fs) {
    try {
        JSONObject json = new JSONObject();
        json.put("result", result);
        json.put("constructedbytes", bytes);
        json.put("code", codec.id);
        json.put("delay", delay);
        json.put("missingblocks", numMissingBlocks);
        json.put("readbytes", numReadBytes);
        json.put("file", srcFile.toString());
        json.put("offset", errorOffset);
        json.put("type", type.name());
        json.put("cluster", fs.getUri().getAuthority());
        DECODER_METRICS_LOG.info(json.toString());
    } catch (JSONException e) {
        LOG.warn("Exception when logging the Raid metrics: " + e.getMessage(), e);
    }
}