Example usage for org.apache.commons.io FilenameUtils removeExtension

Introduction

On this page you can find example usages of org.apache.commons.io FilenameUtils.removeExtension.

Prototype

public static String removeExtension(String filename) 

Document

Removes the extension from a filename.
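
The extension is everything after the last dot, provided that dot occurs after the last directory separator; a name without an extension is returned unchanged. A minimal standalone sketch (the file names below are made up for illustration):

import org.apache.commons.io.FilenameUtils;

public class RemoveExtensionDemo {
    public static void main(String[] args) {
        System.out.println(FilenameUtils.removeExtension("foo.txt"));   // foo
        System.out.println(FilenameUtils.removeExtension("a/b/c.jpg")); // a/b/c
        System.out.println(FilenameUtils.removeExtension("a/b/c"));     // a/b/c (no extension to remove)
        System.out.println(FilenameUtils.removeExtension("a.b/c"));     // a.b/c (the dot is in a directory name)
    }
}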

Usage

From source file:org.apache.ctakes.assertion.medfacts.cleartk.AssertionCleartkAnalysisEngine.java

@Override
@SuppressWarnings("deprecation")
public void initialize(UimaContext context) throws ResourceInitializationException {
    super.initialize(context);

    // Re-process the "directory" string for domains that were used in the data
    if (null != fileDomainMap) {
        String[] dirs = fileDomainMap.split("[;:]");
        for (String dir : dirs) {

            // TODO: normalize dir to real domainId
            String domainId = normalizeToDomain(dir);

            File dataDir = new File(dir);
            if (dataDir.listFiles() != null) {
                for (File f : dataDir.listFiles()) {
                    fileToDomain.put(FilenameUtils.removeExtension(f.getName()), domainId);
                }
            }
        }
    }

    if (this.isTraining() && this.goldViewName == null) {
        throw new IllegalArgumentException(PARAM_GOLD_VIEW_NAME + " must be defined during training");
    }

    // alias for NGram feature parameters
    //    int fromRight = CharacterNGramProliferator.RIGHT_TO_LEFT;

    // a list of feature extractors that require only the token:
    // the stem of the word, the text of the word itself, plus
    // features created from the word text like character ngrams
    this.entityFeatureExtractors = new ArrayList<>();

    // a list of feature extractors that require the token and the sentence
    //    this.contextFeatureExtractors = new ArrayList<CleartkExtractor>();

    this.tokenCleartkExtractors = new ArrayList<>();

    CleartkExtractor<IdentifiedAnnotation, BaseToken> tokenExtraction1 = new CleartkExtractor<>(BaseToken.class,
            //                new FeatureFunctionExtractor(new CoveredTextExtractor(), new LowerCaseFeatureFunction()),
            //            new FeatureFunctionExtractor(new CoveredTextExtractor(), new BrownClusterFeatureFunction()),
            new CoveredTextExtractor<BaseToken>(),
            //new CleartkExtractor.Covered(),
            new CleartkExtractor.LastCovered(2), new CleartkExtractor.Preceding(5),
            new CleartkExtractor.Following(4), new CleartkExtractor.Bag(new CleartkExtractor.Preceding(3)),
            new CleartkExtractor.Bag(new CleartkExtractor.Following(3)),
            new CleartkExtractor.Bag(new CleartkExtractor.Preceding(5)),
            new CleartkExtractor.Bag(new CleartkExtractor.Following(5)),
            new CleartkExtractor.Bag(new CleartkExtractor.Preceding(10)),
            new CleartkExtractor.Bag(new CleartkExtractor.Following(10)));

    //    CleartkExtractor posExtraction1 = 
    //          new CleartkExtractor(
    //                BaseToken.class,
    //                new TypePathExtractor(BaseToken.class, "partOfSpeech"),
    //                new CleartkExtractor.LastCovered(2),
    //                new CleartkExtractor.Preceding(3),
    //                new CleartkExtractor.Following(2)
    //                );

    this.tokenCleartkExtractors.add(tokenExtraction1);
    //this.tokenCleartkExtractors.add(posExtraction1);

    //    this.contextFeatureExtractors.add(new CleartkExtractor(IdentifiedAnnotation.class,
    //        new CoveredTextExtractor(),
    //        //new TypePathExtractor(IdentifiedAnnotation.class, "stem"),
    //        new Preceding(2),
    //        new Following(2)));

    // stab at dependency-based features
    //List<Feature> features = new ArrayList<Feature>();
    //ConllDependencyNode node1 = findAnnotationHead(jCas, arg1);

    //    CombinedExtractor1 baseExtractorCuePhraseCategory =
    //        new CombinedExtractor1
    //          (
    //           new CoveredTextExtractor<BaseToken>(),
    //           new TypePathExtractor(AssertionCuePhraseAnnotation.class, "cuePhrase"),
    //           new TypePathExtractor(AssertionCuePhraseAnnotation.class, "cuePhraseCategory"),
    //           new TypePathExtractor(AssertionCuePhraseAnnotation.class, "cuePhraseAssertionFamily")
    //          );

    cuePhraseInWindowExtractor = new CleartkExtractor<>(BaseToken.class, new CoveredTextExtractor<BaseToken>(),
            new CleartkExtractor.Bag(new CleartkExtractor.Covered())
    //          AssertionCuePhraseAnnotation.class,
    //          baseExtractorCuePhraseCategory,
    //          new CleartkExtractor.Bag(new CleartkExtractor.Preceding(3)),
    //          new CleartkExtractor.Bag(new CleartkExtractor.Following(3)),
    //          new CleartkExtractor.Bag(new CleartkExtractor.Preceding(5)),
    //          new CleartkExtractor.Bag(new CleartkExtractor.Following(5)),
    //          new CleartkExtractor.Bag(new CleartkExtractor.Preceding(10)),
    //          new CleartkExtractor.Bag(new CleartkExtractor.Following(10))
    );

    if (!fileToDomain.isEmpty()) {
        // set up FeatureFunction for all the laggard, non-Extractor features
        ffDomainAdaptor = new FedaFeatureFunction(new ArrayList<>(new HashSet<>(fileToDomain.values())));
    }
    entityTreeExtractors = new ArrayList<>();
}

From source file:org.apache.flex.compiler.clients.ASC.java

/**
 * Compile the given source files. Each source file has its own symbol table.
 *
 * @param workspace workspace
 * @param sourceFilenames the source files to compile
 * @throws InterruptedException compiler thread error
 * @return true if the files compiled without problems
 */
private boolean compileSourceFiles(final Workspace workspace, final List<String> sourceFilenames)
        throws InterruptedException {
    boolean success = true;
    long startTime = System.nanoTime();
    int problemCount = 0;

    //  Set up a problem query object to check the result of the compilation.
    //  Some problems found aren't ever relevant to ASC, and some depend on 
    //  the switches passed on the command line.
    problemQuery = new ProblemQuery();
    problemQuery.setShowProblemByClass(MultipleExternallyVisibleDefinitionsProblem.class, false);
    problemQuery.setShowProblemByClass(UnfoundPropertyProblem.class, false);
    problemQuery.setShowStrictSemantics(useStaticSemantics());
    problemQuery.setShowWarnings(getShowWarnings());

    // process source AS3 files
    Set<ICompilationUnit> mainUnits = new LinkedHashSet<ICompilationUnit>(getSourceFilenames().size());
    final HashMap<ICompilationUnit, Integer> unitOrdering = new HashMap<ICompilationUnit, Integer>();

    ASCProject applicationProject = createProject(workspace, problemQuery);

    // Add any problems from parsing config vars supplied on the command line
    List<ICompilerProblem> configProblems = new ArrayList<ICompilerProblem>();
    applicationProject.collectProblems(configProblems);
    problemQuery.addAll(configProblems);

    int i = 0;
    for (final String sourceFilename : sourceFilenames) {
        // If we are not merging then create a new project
        // and set the compilation units.
        if (i > 0 && !getMergeABCs()) {
            applicationProject = createProject(workspace, problemQuery);
            mainUnits.clear();
            unitOrdering.clear();
            problemQuery.clear();
        }

        final IFileSpecification sourceFileSpec = new FileSpecification(sourceFilename);
        workspace.fileAdded(sourceFileSpec);
        final ICompilationUnit cu = ASCompilationUnit.createMainCompilationUnitForASC(applicationProject,
                sourceFileSpec, this);
        mainUnits.add(cu);
        unitOrdering.put(cu, unitOrdering.size());

        // add compilation unit to project
        applicationProject.addCompilationUnit(cu);
        applicationProject.updatePublicAndInternalDefinitions(Collections.singletonList(cu));

        // The logic that re-parses a garbage-collected syntax tree does not
        // know about the files included with the -in option, so we'll pin
        // the syntax tree here so we know we will never need to re-parse
        // the syntax tree for the root compilation unit.
        rootedSyntaxTrees.add(cu.getSyntaxTreeRequest().get().getAST());

        // syntax errors
        for (final ICompilationUnit compilationUnit : applicationProject.getCompilationUnits()) {
            final ICompilerProblem[] problems = compilationUnit.getSyntaxTreeRequest().get().getProblems();
            problemQuery.addAll(problems);
        }

        //  Parse trees
        if (getShowParseTrees()) {
            final String outputSyntaxFilename = FilenameUtils.removeExtension(sourceFilename).concat(".p");
            try {
                PrintWriter syntaxFile = new PrintWriter(outputSyntaxFilename);
                final IASNode ast = cu.getSyntaxTreeRequest().get().getAST();
                if (ast instanceof FileNode) {
                    // Parse the full tree and add the new problems found in the
                    // function bodies into the problem collection.
                    final FileNode fileNode = (FileNode) ast;
                    final ImmutableSet<ICompilerProblem> skeletonProblems = ImmutableSet
                            .copyOf(fileNode.getProblems());
                    fileNode.populateFunctionNodes();
                    final ImmutableSet<ICompilerProblem> allProblems = ImmutableSet
                            .copyOf(fileNode.getProblems());

                    // Only add newly found problems. Otherwise, there will be
                    // duplicates in "problemQuery".
                    final SetView<ICompilerProblem> difference = Sets.difference(skeletonProblems, allProblems);
                    problemQuery.addAll(difference);
                }

                syntaxFile.println(ast);
                syntaxFile.flush();
                syntaxFile.close();
            } catch (FileNotFoundException e) {
                problemQuery.add(new FileWriteProblem(e));
            }
        }

        // Output.
        // For the merged case, wait until the last source file.
        // For the non-merged case, emit output for each source file individually.
        if (!getMergeABCs() || (getMergeABCs() && (i == sourceFilenames.size() - 1))) {

            // Let's start up all the compilation units to try and get more threads generating code
            // at the same time.
            for (final ICompilationUnit compilationUnit : applicationProject.getCompilationUnits()) {
                compilationUnit.startBuildAsync(TargetType.SWF);
            }

            //  Run the resolveRefs() logic for as long as it's relevant.
            for (final ICompilationUnit compilationUnit : applicationProject.getCompilationUnits()) {
                final ICompilerProblem[] problems = compilationUnit.getOutgoingDependenciesRequest().get()
                        .getProblems();
                problemQuery.addAll(problems);
            }

            String outputFileBaseName = FilenameUtils.getBaseName(sourceFilename);
            String outputDirectoryName = FilenameUtils.getFullPath(sourceFilename);

            // Apply user-specified basename and output directory. The
            // basename is only changed when ABCs are merged, since each
            // ABC needs a unique filename.
            if (getMergeABCs() && getOutputBasename() != null)
                outputFileBaseName = getOutputBasename();

            final String specifiedOutputDirectory = getOutputDirectory();
            if (!Strings.isNullOrEmpty(specifiedOutputDirectory))
                outputDirectoryName = normalizeDirectoryName(specifiedOutputDirectory);

            // Output to either a SWF or ABC file.
            if (isGenerateSWF()) {
                final boolean swfBuilt = generateSWF(outputDirectoryName, outputFileBaseName,
                        applicationProject, mainUnits, sourceFilename, problemQuery, startTime);
                if (!swfBuilt)
                    success = false;
            } else {
                Collection<ICompilationUnit> units = mainUnits;
                if (getMergeABCs()) {
                    // Run the topological sort to figure out which order to output the ABCs in.
                    // This falls back to command-line order rather than a filename-based lexical
                    // sort when there are no real dependencies between the scripts.
                    units = applicationProject.getDependencyGraph().topologicalSort(mainUnits,
                            new Comparator<ICompilationUnit>() {
                                @Override
                                public int compare(ICompilationUnit o1, ICompilationUnit o2) {
                                    return (unitOrdering.containsKey(o2) ? unitOrdering.get(o2) : 0)
                                            - (unitOrdering.containsKey(o1) ? unitOrdering.get(o1) : 0);
                                }
                            });
                    Collection<ICompilationUnit> sourceUnits = new ArrayList<ICompilationUnit>(
                            mainUnits.size());
                    for (ICompilationUnit unit : units) {
                        // The dependency graph will put all CompilationUnits in the results, but
                        // we only want the CUs for the source files, since the imports should not be merged
                        // into the resulting ABC
                        if (mainUnits.contains(unit)) {
                            sourceUnits.add(unit);
                        }
                    }
                    units = sourceUnits;
                }
                final boolean abcBuilt = generateABCFile(outputDirectoryName, outputFileBaseName,
                        applicationProject, units, sourceFilename, problemQuery, startTime);
                if (!abcBuilt)
                    success = false;
            }

            //*************************************
            // Report problems.
            //

            // let's make a categorizer, so we can differentiate errors and warnings
            CompilerProblemCategorizer compilerProblemCategorizer = new CompilerProblemCategorizer();
            problemFormatter = new WorkspaceProblemFormatter(workspace, compilerProblemCategorizer);
            ProblemPrinter printer = new ProblemPrinter(problemFormatter, err);
            problemCount += printer.printProblems(problemQuery.getFilteredProblems());

            startTime = System.nanoTime();
        }
        i++;
    }

    // If there were problems, print out the summary
    if (problemCount > 0) {
        Collection<ICompilerProblem> errors = new ArrayList<ICompilerProblem>();
        Collection<ICompilerProblem> warnings = new ArrayList<ICompilerProblem>();
        problemQuery.getErrorsAndWarnings(errors, warnings);

        int errorCount = errors.size();
        int warningCount = warnings.size();

        if (errorCount == 1) {
            err.println();
            err.println("1 error found");
        } else if (errorCount > 1) {
            err.println();
            err.println(errorCount + " errors found");
        }

        if (warningCount == 1) {
            err.println();
            err.println("1 warning found");
        } else if (warningCount > 1) {
            err.println();
            err.println(warningCount + " warnings found");
        }

        if (success && (errorCount > 0)) {
            success = false;
        }
    }

    return success;
}

From source file:org.apache.flex.compiler.clients.MXMLC.java

/**
 * Get the output file path. If {@code -output} is specified, use its value;
 * otherwise, use the same base name as the target file.
 *
 * @return output file path
 */
private String getOutputFilePath() {
    if (config.getOutput() == null)
        return FilenameUtils.removeExtension(config.getTargetFile()).concat(SWF_EXT);
    else
        return config.getOutput();
}

From source file:org.apache.flex.compiler.clients.MXMLJSC.java

/**
 * Get the output file path. If {@code -output} is specified, use its value;
 * otherwise, use the same base name as the target file.
 *
 * @return output file path
 */
private String getOutputFilePath() {
    if (config.getOutput() == null) {
        final String extension = "." + JSSharedData.OUTPUT_EXTENSION;
        return FilenameUtils.removeExtension(config.getTargetFile()).concat(extension);
    } else
        return config.getOutput();
}
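
Both compiler clients above use the same swap-the-extension idiom: strip the old extension, then append the new one. A minimal sketch, assuming a hypothetical target path:

    // Hypothetical input path, for illustration only.
    String targetFile = "src/Main.mxml";
    String swfPath = FilenameUtils.removeExtension(targetFile).concat(".swf");
    // swfPath == "src/Main.swf"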

From source file:org.apache.hadoop.gateway.services.topology.impl.DefaultTopologyService.java

private Topology loadTopologyAttempt(File file) throws IOException, SAXException, URISyntaxException {
    Topology topology;
    Digester digester = digesterLoader.newDigester();
    TopologyBuilder topologyBuilder = digester.parse(FileUtils.openInputStream(file));
    if (null == topologyBuilder) {
        return null;
    }
    topology = topologyBuilder.build();
    topology.setUri(file.toURI());
    topology.setName(FilenameUtils.removeExtension(file.getName()));
    topology.setTimestamp(file.lastModified());
    return topology;
}

From source file:org.apache.hadoop.gateway.services.topology.impl.DefaultTopologyService.java

public Map<String, List<String>> getServiceTestURLs(Topology t, GatewayConfig config) {
    File tFile = null;
    Map<String, List<String>> urls = new HashMap<>();
    if (directory.isDirectory() && directory.canRead()) {
        for (File f : directory.listFiles()) {
            if (FilenameUtils.removeExtension(f.getName()).equals(t.getName())) {
                tFile = f;
            }
        }
    }
    Set<ServiceDefinition> defs;
    if (tFile != null) {
        defs = ServiceDefinitionsLoader.getServiceDefinitions(new File(config.getGatewayServicesDir()));

        for (ServiceDefinition def : defs) {
            urls.put(def.getRole(), def.getTestURLs());
        }
    }
    return urls;
}

From source file:org.apache.hadoop.gateway.topology.file.FileTopologyProvider.java

private static Topology loadTopology(FileObject file) throws IOException, SAXException, URISyntaxException {
    log.loadingTopologyFile(file.getName().getFriendlyURI());
    Digester digester = digesterLoader.newDigester();
    FileContent content = file.getContent();
    TopologyBuilder topologyBuilder = digester.parse(content.getInputStream());
    Topology topology = topologyBuilder.build();
    topology.setUri(file.getURL().toURI());
    topology.setName(FilenameUtils.removeExtension(file.getName().getBaseName()));
    topology.setTimestamp(content.getLastModifiedTime());
    return topology;
}

From source file:org.apache.hadoop.hive.ql.exec.tez.TezSessionState.java

/**
 * Returns a local resource representing a jar.
 * This resource will be used to execute the plan on the cluster.
 * @param localJarPath Local path to the jar to be localized.
 * @return LocalResource corresponding to the localized hive exec resource.
 * @throws IOException when any file system related call fails.
 * @throws LoginException when we are unable to determine the user.
 * @throws URISyntaxException when current jar location cannot be determined.
 */
private LocalResource createJarLocalResource(String localJarPath)
        throws IOException, LoginException, IllegalArgumentException, FileNotFoundException {
    // TODO Reduce the number of lookups that happen here. This shouldn't go to HDFS for each call.
    // The hiveJarDir can be determined once per client.
    FileStatus destDirStatus = utils.getHiveJarDirectory(conf);
    assert destDirStatus != null;
    Path destDirPath = destDirStatus.getPath();

    Path localFile = new Path(localJarPath);
    String sha = getSha(localFile);

    String destFileName = localFile.getName();

    // Now, try to find the file based on SHA and name. Currently we require exact name match.
    // We could also allow cutting off versions and other stuff provided that SHA matches...
    destFileName = FilenameUtils.removeExtension(destFileName) + "-" + sha + FilenameUtils.EXTENSION_SEPARATOR
            + FilenameUtils.getExtension(destFileName);

    if (LOG.isDebugEnabled()) {
        LOG.debug("The destination file name for [" + localJarPath + "] is " + destFileName);
    }

    // TODO: if this method is ever called on more than one jar, getting the dir and the
    //       list need to be refactored out to be done only once.
    Path destFile = new Path(destDirPath.toString() + "/" + destFileName);
    return utils.localizeResource(localFile, destFile, LocalResourceType.FILE, conf);
}
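
The renaming step above splices the SHA digest between the base name and the extension, so differently-built jars that share a name land in distinct destination files. In isolation, with a made-up name and digest, it behaves like this:

    // Hypothetical file name and digest, for illustration only.
    String destFileName = "hive-exec-2.3.9.jar";
    String sha = "3f2c9a";
    destFileName = FilenameUtils.removeExtension(destFileName) + "-" + sha
            + FilenameUtils.EXTENSION_SEPARATOR + FilenameUtils.getExtension(destFileName);
    // destFileName == "hive-exec-2.3.9-3f2c9a.jar"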

From source file:org.apache.hadoop.hive.ql.udf.generic.GenericUDTFGetSplits.java

/**
 * Returns a local resource representing a jar. This resource will be used to
 * execute the plan on the cluster.
 * 
 * @param localJarPath
 *          Local path to the jar to be localized.
 * @return LocalResource corresponding to the localized hive exec resource.
 * @throws IOException
 *           when any file system related call fails.
 * @throws LoginException
 *           when we are unable to determine the user.
 * @throws URISyntaxException
 *           when current jar location cannot be determined.
 */
private LocalResource createJarLocalResource(String localJarPath, DagUtils utils, Configuration conf)
        throws IOException, LoginException, IllegalArgumentException, FileNotFoundException {
    FileStatus destDirStatus = utils.getHiveJarDirectory(conf);
    assert destDirStatus != null;
    Path destDirPath = destDirStatus.getPath();

    Path localFile = new Path(localJarPath);
    String sha = getSha(localFile, conf);

    String destFileName = localFile.getName();

    // Now, try to find the file based on SHA and name. Currently we require
    // exact name match.
    // We could also allow cutting off versions and other stuff provided that
    // SHA matches...
    destFileName = FilenameUtils.removeExtension(destFileName) + "-" + sha + FilenameUtils.EXTENSION_SEPARATOR
            + FilenameUtils.getExtension(destFileName);

    // TODO: if this method is ever called on more than one jar, getting the dir
    // and the list need to be refactored out to be done only once.
    Path destFile = new Path(destDirPath.toString() + "/" + destFileName);
    return utils.localizeResource(localFile, destFile, LocalResourceType.FILE, conf);
}

From source file:org.apache.jackrabbit.oak.spi.blob.FileBlobStore.java

@Override
public Iterator<String> getAllChunkIds(final long maxLastModifiedTime) throws Exception {
    FluentIterable<File> iterable = Files.fileTreeTraverser().postOrderTraversal(baseDir);
    final Iterator<File> iter = iterable.filter(new Predicate<File>() {
        // Ignore the directories and files newer than maxLastModifiedTime if specified
        @Override
        public boolean apply(@Nullable File input) {
            if (!input.isDirectory()
                    && ((maxLastModifiedTime <= 0) || FileUtils.isFileOlder(input, maxLastModifiedTime))) {
                return true;
            }
            return false;
        }
    }).iterator();
    return new AbstractIterator<String>() {
        @Override
        protected String computeNext() {
            if (iter.hasNext()) {
                File file = iter.next();
                return FilenameUtils.removeExtension(file.getName());
            }
            return endOfData();
        }
    };
}