List of usage examples for org.apache.commons.io.FileUtils.toFile.
Signature: public static File toFile(URL url)
Converts a file-protocol URL to the equivalent File.
From source file: org.chtijbug.drools.platform.runtime.utils.XpathQueryRunnerTest.java
/**
 * Verifies that {@link XpathQueryRunner} extracts the target namespace from the
 * bundled test WSDL. The classpath resource URL is converted to a {@link File}
 * via {@code FileUtils.toFile} and opened as a stream for the runner.
 */
@Test
public void should_get_target_namespace() throws Exception {
    URL wsdlUrl = this.getClass().getResource("/newWSDL1.wsdl");
    FileInputStream wsdlStream = FileUtils.openInputStream(FileUtils.toFile(wsdlUrl));
    XpathQueryRunner runner = new XpathQueryRunner(wsdlStream);
    String namespace = runner.executeXpath(XPATH_TARGET_NAMESPACE);
    assertThat(namespace).isEqualTo("http://j2ee.netbeans.org/wsdl/BpelModule1/src/newWSDL1");
}
From source file:org.chtijbug.drools.platform.runtime.utils.XpathQueryRunnerTest.java
/**
 * Verifies that {@link XpathQueryRunner} resolves the WSDL service name from the
 * bundled test WSDL, loaded from the classpath via {@code FileUtils.toFile}.
 */
@Test
public void should_get_service_name() throws Exception {
    URL wsdlUrl = this.getClass().getResource("/newWSDL1.wsdl");
    FileInputStream wsdlStream = FileUtils.openInputStream(FileUtils.toFile(wsdlUrl));
    XpathQueryRunner runner = new XpathQueryRunner(wsdlStream);
    String serviceName = runner.executeXpath(XPATH_EXECUTION_SERVICE);
    assertThat(serviceName).isEqualTo("newWSDL1Service");
}
From source file:org.codehaus.mojo.webminifier.WebMinifierMojo.java
/**
 * Main entry point for the MOJO: copies the source folder to the destination,
 * concatenates the JS resources referenced by each target HTML file into
 * split-point files, optionally minifies them, rewrites the HTML script
 * references, and finally removes the consumed originals.
 *
 * @throws MojoExecutionException if there's a problem in the normal course of execution.
 * @throws MojoFailureException if there's a problem with the MOJO itself.
 */
public void execute() throws MojoExecutionException, MojoFailureException {
    // Start off by copying all files over. We'll ultimately remove the js files that we don't
    // need from there, and create new ones in there (same goes for css files and anything else
    // we minify).
    FileUtils.deleteQuietly(destinationFolder);
    try {
        FileUtils.copyDirectory(sourceFolder, destinationFolder);
    } catch (IOException e) {
        throw new MojoExecutionException("Cannot copy file to target folder", e);
    }

    // Process each HTML source file and concatenate into unminified output scripts
    int minifiedCounter = 0;

    // If a split point already exists on disk then we've been through the minification process.
    // As minification can be expensive, we would like to avoid performing it multiple times.
    // Thus storing a set of what we've previously minified enables us.
    Set<File> existingConcatenatedJsResources = new HashSet<File>();
    Set<File> consumedJsResources = new HashSet<File>();

    for (String targetHTMLFile : getArrayOfTargetHTMLFiles()) {
        File targetHTML = new File(destinationFolder, targetHTMLFile);

        // Parse HTML file and locate SCRIPT elements
        DocumentResourceReplacer replacer;
        try {
            replacer = new DocumentResourceReplacer(targetHTML);
        } catch (SAXException e) {
            throw new MojoExecutionException("Problem reading html document", e);
        } catch (IOException e) {
            throw new MojoExecutionException("Problem opening html document", e);
        }

        List<File> jsResources = replacer.findJSResources();

        if (jsSplitPoints == null) {
            jsSplitPoints = new Properties();
        }

        File concatenatedJsResource = null;
        URI destinationFolderUri = destinationFolder.toURI();

        // Split the js resources into two lists: one containing all external dependencies, the
        // other containing project sources. We do this so that project sources can be minified
        // without the dependencies (libraries generally don't need to distribute the
        // dependencies).
        int jsDependencyProjectResourcesIndex;
        if (splitDependencies) {
            List<File> jsDependencyResources = new ArrayList<File>(jsResources.size());
            List<File> jsProjectResources = new ArrayList<File>(jsResources.size());
            for (File jsResource : jsResources) {
                // A resource is a project source iff the same relative path exists under the
                // project source folder; otherwise it is treated as an external dependency.
                String jsResourceUri = destinationFolderUri.relativize(jsResource.toURI()).toString();
                File jsResourceFile = new File(projectSourceFolder, jsResourceUri);
                if (jsResourceFile.exists()) {
                    jsProjectResources.add(jsResource);
                } else {
                    jsDependencyResources.add(jsResource);
                }
            }

            // Re-constitute the js resource list from dependency resources + project resources
            // and note the index in the list that represents the start of project sources in
            // the list. We need this information later.
            jsDependencyProjectResourcesIndex = jsDependencyResources.size();
            jsResources = jsDependencyResources;
            jsResources.addAll(jsProjectResources);
        } else {
            jsDependencyProjectResourcesIndex = 0;
        }

        // Walk backwards through the script declarations and note what files will map to what
        // split point.
        Map<File, File> jsResourceTargetFiles = new LinkedHashMap<File, File>(jsResources.size());
        ListIterator<File> jsResourcesIter = jsResources.listIterator(jsResources.size());

        boolean splittingDependencies = false;

        while (jsResourcesIter.hasPrevious()) {
            int jsResourceIterIndex = jsResourcesIter.previousIndex();
            File jsResource = jsResourcesIter.previous();

            String candidateSplitPointNameUri = destinationFolderUri.relativize(jsResource.toURI()).toString();
            String splitPointName = (String) jsSplitPoints.get(candidateSplitPointNameUri);

            // If we do not have a split point name and the resource is a dependency of this
            // project i.e. it is not within our src/main folder then we give it a split name.
            // Factoring out dependencies into their own split point is a useful thing to do
            // and will always be required when building libraries.
            if (splitDependencies && splitPointName == null && !splittingDependencies) {
                if (jsResourceIterIndex < jsDependencyProjectResourcesIndex) {
                    splitPointName = Integer.valueOf(++minifiedCounter).toString();
                    splittingDependencies = true;
                }
            }

            // If we have no name and we've not been in here before, then assign an initial name
            // based on a number.
            if (splitPointName == null && concatenatedJsResource == null) {
                splitPointName = Integer.valueOf(++minifiedCounter).toString();
            }

            // We have a new split name so use it for this file and upwards in the script
            // statements until we either hit another split point or there are no more script
            // statements.
            if (splitPointName != null) {
                concatenatedJsResource = new File(destinationFolder, splitPointName + ".js");
                // Note that we've previously created this.
                if (concatenatedJsResource.exists()) {
                    existingConcatenatedJsResources.add(concatenatedJsResource);
                }
            }

            jsResourceTargetFiles.put(jsResource, concatenatedJsResource);
        }

        for (File jsResource : jsResources) {
            concatenatedJsResource = jsResourceTargetFiles.get(jsResource);
            if (!existingConcatenatedJsResources.contains(concatenatedJsResource)) {
                // Concatenate input file onto output resource file
                try {
                    concatenateFile(jsResource, concatenatedJsResource);
                } catch (IOException e) {
                    throw new MojoExecutionException("Problem concatenating JS files", e);
                }

                // Finally, remove the JS resource from the target folder as it is no longer
                // required (we've concatenated it).
                consumedJsResources.add(jsResource);
            }
        }

        // Reduce the list of js resource target files to a distinct set
        LinkedHashSet<File> concatenatedJsResourcesSet = new LinkedHashSet<File>(
                jsResourceTargetFiles.values());
        File[] concatenatedJsResourcesArray = new File[concatenatedJsResourcesSet.size()];
        concatenatedJsResourcesSet.toArray(concatenatedJsResourcesArray);
        List<File> concatenatedJsResources = Arrays.asList(concatenatedJsResourcesArray);

        // Minify the concatenated JS resource files
        if (jsCompressorType != JsCompressorType.NONE) {
            List<File> minifiedJSResources = new ArrayList<File>(concatenatedJsResources.size());

            ListIterator<File> concatenatedJsResourcesIter = concatenatedJsResources
                    .listIterator(concatenatedJsResources.size());
            while (concatenatedJsResourcesIter.hasPrevious()) {
                concatenatedJsResource = concatenatedJsResourcesIter.previous();

                File minifiedJSResource;
                try {
                    // Derive the "-min.js" sibling name from the concatenated resource's URI.
                    String uri = concatenatedJsResource.toURI().toString();
                    int i = uri.lastIndexOf(".js");
                    String minUri;
                    if (i > -1) {
                        minUri = uri.substring(0, i) + "-min.js";
                    } else {
                        minUri = uri;
                    }
                    minifiedJSResource = FileUtils.toFile(new URL(minUri));
                } catch (MalformedURLException e) {
                    throw new MojoExecutionException("Problem determining file URL", e);
                }

                minifiedJSResources.add(minifiedJSResource);

                // If we've not actually performed the minification before... then do so. This is
                // the expensive bit so we like to avoid it if we can.
                if (!existingConcatenatedJsResources.contains(concatenatedJsResource)) {
                    boolean warningsFound;
                    try {
                        warningsFound = minifyJSFile(concatenatedJsResource, minifiedJSResource);
                    } catch (IOException e) {
                        throw new MojoExecutionException("Problem reading/writing JS", e);
                    }

                    logCompressionRatio(minifiedJSResource.getName(), concatenatedJsResource.length(),
                            minifiedJSResource.length());

                    // If there were warnings then the user may want to manually invoke the
                    // compressor for further investigation.
                    if (warningsFound) {
                        getLog().warn("Warnings were found. " + concatenatedJsResource
                                + " is available for your further investigations.");
                    }
                }
            }

            // Update source references
            replacer.replaceJSResources(destinationFolder, targetHTML, minifiedJSResources);
        } else {
            // No compression requested: reference the concatenated (unminified) resources,
            // restoring original declaration order by walking the reversed iterator.
            List<File> unminifiedJSResources = new ArrayList<File>(concatenatedJsResources.size());

            ListIterator<File> concatenatedJsResourcesIter = concatenatedJsResources
                    .listIterator(concatenatedJsResources.size());
            while (concatenatedJsResourcesIter.hasPrevious()) {
                concatenatedJsResource = concatenatedJsResourcesIter.previous();
                unminifiedJSResources.add(concatenatedJsResource);
            }

            replacer.replaceJSResources(destinationFolder, targetHTML, unminifiedJSResources);
            getLog().info("Concatenated resources with no compression");
        }

        // Write HTML file to output dir
        try {
            replacer.writeHTML(targetHTML, encoding);
        } catch (TransformerException e) {
            throw new MojoExecutionException("Problem transforming html", e);
        } catch (IOException e) {
            throw new MojoExecutionException("Problem writing html", e);
        }
    }

    // Clean up including the destination folder recursively where directories have nothing left
    // in them.
    for (File consumedJsResource : consumedJsResources) {
        consumedJsResource.delete();
    }
    removeEmptyFolders(destinationFolder);
}
From source file:org.codice.solr.factory.EmbeddedSolrFactory.java
/**
 * Resolves a Solr configuration file by name through the configuration proxy.
 *
 * @param configFileName name of the configuration file to look up
 * @param configProxy proxy used to locate the resource URL
 * @return the resolved {@link File}, or {@code null} if the URL is not a file URL
 *         (per {@code FileUtils.toFile} semantics)
 */
public static File getConfigFile(String configFileName, ConfigurationFileProxy configProxy) {
    final URL resourceUrl = configProxy.getResource(configFileName);
    return FileUtils.toFile(resourceUrl);
}
From source file:org.codice.solr.factory.impl.EmbeddedSolrFiles.java
/** * Constructor./*www.j a v a 2 s.c om*/ * * @param coreName name of the Solr core * @param configXml name of the Solr configuration file * @param schemaXmls file names of the Solr core schemas to attempt to load (will start with the * first and fallback to the others in order if unavailable) * @param configProxy {@link ConfigurationFileProxy} instance to use * @throws IllegalArgumentException if <code>coreName</code>, <code>configXml</code>, <code> * schemaXmls</code>, or <code>configProxy</code> is <code>null</code> or if unable to find * any files */ public EmbeddedSolrFiles(String coreName, String configXml, String[] schemaXmls, ConfigurationFileProxy configProxy) { Validate.notNull(coreName, "invalid null Solr core name"); Validate.notNull(configXml, "invalid null Solr config file"); Validate.notNull(schemaXmls, "invalid null Solr schema files"); Validate.notEmpty(schemaXmls, "missing Solr schema files"); Validate.noNullElements(schemaXmls, "invalid null Solr schema file"); Validate.notNull(configProxy, "invalid null Solr config proxy"); this.coreName = coreName; this.configName = configXml; this.configProxy = configProxy; this.configFile = FileUtils.toFile(configProxy.getResource(configXml, coreName)); Validate.notNull(configFile, "Unable to find Solr configuration file: " + configXml); File solrSchemaFile = null; String schemaXml = null; for (final String s : schemaXmls) { schemaXml = s; solrSchemaFile = FileUtils.toFile(configProxy.getResource(schemaXml, coreName)); if (solrSchemaFile != null) { break; } } Validate.notNull(solrSchemaFile, "Unable to find Solr schema file(s): " + Arrays.toString(schemaXmls)); this.schemaFile = solrSchemaFile; this.schemaName = schemaXml; }
From source file:org.diffkit.common.DKRuntime.java
/**
 * Locates the directory containing this application on disk.
 * <p>
 * Returns {@code null} when the application name is unknown, the class location cannot be
 * determined, or the class location's external form does not contain the application name
 * (case-insensitive).
 */
private File findApplicationLocation() {
    final String appName = this.getApplicationName();
    if (appName == null) {
        return null;
    }
    final URL classLocation = this.getClassLocation();
    if (classLocation == null) {
        return null;
    }
    // Case-insensitive containment check of the app name within the location URL.
    if (!classLocation.toExternalForm().toUpperCase().contains(appName.toUpperCase())) {
        return null;
    }
    // The application lives in the parent directory of the located class container.
    return FileUtils.toFile(classLocation).getParentFile();
}
From source file:org.dishevelled.variation.adam.AdamVariationServiceTest.java
/**
 * Copies a classpath resource directory into the test working directory {@code file}.
 *
 * @param resourceName classpath-relative name of the resource directory to copy
 * @throws Exception if the resource cannot be resolved or copied
 */
private void copyResources(final String resourceName) throws Exception {
    final File sourceDirectory = FileUtils.toFile(getClass().getResource(resourceName));
    FileUtils.copyDirectory(sourceDirectory, file);
}
From source file:org.dswarm.graph.delta.util.GraphDBPrintUtil.java
public static void writeDeltaRelationships(final GraphDatabaseService graphDB, final URL fileURL) throws DMPGraphException { try (final Transaction tx = graphDB.beginTx()) { final Iterable<Relationship> relationships = GlobalGraphOperations.at(graphDB).getAllRelationships(); final StringBuilder sb = new StringBuilder(); for (final Relationship relationship : relationships) { final String printedRel = printDeltaRelationship(relationship); sb.append(printedRel).append("\n"); }//from w w w . j a v a2 s . com final File file = FileUtils.toFile(fileURL); FileUtils.writeStringToFile(file, sb.toString()); tx.success(); } catch (final Exception e) { final String message = "couldn't write relationships"; GraphDBPrintUtil.LOG.error(message, e); throw new DMPGraphException(message); } }
From source file:org.ebayopensource.turmeric.eclipse.ui.util.SOADomainUtil.java
private static Map<String, List<String>> loadDomainsFromCache() { final Map<String, List<String>> result = new LinkedHashMap<String, List<String>>(); if (SOA_DOMAIN_PREF_LOCATION != null) { if (FileUtils.toFile(SOA_DOMAIN_PREF_LOCATION).canRead() == false) { logger.warning("The cache file does not exist->", SOA_DOMAIN_PREF_LOCATION); return result; }//from w w w .j a va2 s. c om InputStream input = null; try { input = SOA_DOMAIN_PREF_LOCATION.openStream(); final Properties props = new Properties(); props.load(input); final String data = props.getProperty(SOA_DOMAIN_PREFERENCE_NODE, ""); logger.info("Domain names loaded from cache->", data); return SOADomainAccessor.parseStringToDomainMap(data); } catch (Exception e) { logger.warning(e); } finally { IOUtils.closeQuietly(input); } } return result; }
From source file:org.ebayopensource.turmeric.eclipse.ui.util.SOADomainUtil.java
private static void saveDomainsToCache(Map<String, List<String>> domains) { if (SOA_DOMAIN_PREF_LOCATION != null) { InputStream input = null; OutputStream output = null; try {//from www . j a v a 2 s . c om File file = FileUtils.toFile(SOA_DOMAIN_PREF_LOCATION); if (file.exists() == false) { if (file.getParentFile().exists() == false) { FileUtils.forceMkdir(file.getParentFile()); } file.createNewFile(); } else if (file.canWrite() == false) { logger.warning("The SOA domain cache file is not writable->", file); return; } final String data = SOADomainAccessor.paseDomainMapToString(domains); final Properties props = new Properties(); output = new FileOutputStream(file); props.setProperty(SOA_DOMAIN_PREFERENCE_NODE, data); props.store(output, "SOA Domain names.\n"); logger.info("Storing domain names->", data); } catch (Exception e) { logger.warning(e); } finally { IOUtils.closeQuietly(input); IOUtils.closeQuietly(output); } } }