List of usage examples for org.apache.commons.io.FileUtils#forceMkdir, collected from open-source projects.
public static void forceMkdir(File directory) throws IOException
From source file:ml.shifu.shifu.core.processor.ExportModelProcessor.java
@Override public int run() throws Exception { setUp(ModelStep.EXPORT);// w w w. j ava2 s . com int status = 0; File pmmls = new File("pmmls"); FileUtils.forceMkdir(pmmls); if (StringUtils.isBlank(type)) { type = PMML; } String modelsPath = pathFinder.getModelsPath(SourceType.LOCAL); if (type.equalsIgnoreCase(ONE_BAGGING_MODEL)) { if (!"nn".equalsIgnoreCase(modelConfig.getAlgorithm()) && !CommonUtils.isTreeModel(modelConfig.getAlgorithm())) { log.warn("Currently one bagging model is only supported in NN/GBT/RF algorithm."); } else { List<BasicML> models = ModelSpecLoaderUtils.loadBasicModels(modelsPath, ALGORITHM.valueOf(modelConfig.getAlgorithm().toUpperCase())); if (models.size() < 1) { log.warn("No model is found in {}.", modelsPath); } else { log.info("Convert nn models into one binary bagging model."); Configuration conf = new Configuration(); Path output = new Path(pathFinder.getBaggingModelPath(SourceType.LOCAL), "model.b" + modelConfig.getAlgorithm()); if ("nn".equalsIgnoreCase(modelConfig.getAlgorithm())) { BinaryNNSerializer.save(modelConfig, columnConfigList, models, FileSystem.getLocal(conf), output); } else if (CommonUtils.isTreeModel(modelConfig.getAlgorithm())) { List<List<TreeNode>> baggingTrees = new ArrayList<List<TreeNode>>(); for (int i = 0; i < models.size(); i++) { TreeModel tm = (TreeModel) models.get(i); // TreeModel only has one TreeNode instance although it is list inside baggingTrees.add(tm.getIndependentTreeModel().getTrees().get(0)); } int[] inputOutputIndex = DTrainUtils .getNumericAndCategoricalInputAndOutputCounts(this.columnConfigList); // numerical + categorical = # of all input int inputCount = inputOutputIndex[0] + inputOutputIndex[1]; BinaryDTSerializer.save(modelConfig, columnConfigList, baggingTrees, modelConfig.getParams().get("Loss").toString(), inputCount, FileSystem.getLocal(conf), output); } log.info("Please find one unified bagging model in local {}.", output); } } } else if (type.equalsIgnoreCase(PMML)) { // typical pmml 
generation List<BasicML> models = ModelSpecLoaderUtils.loadBasicModels(modelsPath, ALGORITHM.valueOf(modelConfig.getAlgorithm().toUpperCase())); PMMLTranslator translator = PMMLConstructorFactory.produce(modelConfig, columnConfigList, isConcise(), false); for (int index = 0; index < models.size(); index++) { String path = "pmmls" + File.separator + modelConfig.getModelSetName() + Integer.toString(index) + ".pmml"; log.info("\t Start to generate " + path); PMML pmml = translator.build(Arrays.asList(new BasicML[] { models.get(index) })); PMMLUtils.savePMML(pmml, path); } } else if (type.equalsIgnoreCase(ONE_BAGGING_PMML_MODEL)) { // one unified bagging pmml generation log.info("Convert models into one bagging pmml model {} format", type); if (!"nn".equalsIgnoreCase(modelConfig.getAlgorithm())) { log.warn("Currently one bagging pmml model is only supported in NN algorithm."); } else { List<BasicML> models = ModelSpecLoaderUtils.loadBasicModels(modelsPath, ALGORITHM.valueOf(modelConfig.getAlgorithm().toUpperCase())); PMMLTranslator translator = PMMLConstructorFactory.produce(modelConfig, columnConfigList, isConcise(), true); String path = "pmmls" + File.separator + modelConfig.getModelSetName() + ".pmml"; log.info("\t Start to generate one unified model to: " + path); PMML pmml = translator.build(models); PMMLUtils.savePMML(pmml, path); } } else if (type.equalsIgnoreCase(COLUMN_STATS)) { saveColumnStatus(); } else if (type.equalsIgnoreCase(WOE_MAPPING)) { List<ColumnConfig> exportCatColumns = new ArrayList<ColumnConfig>(); List<String> catVariables = getRequestVars(); for (ColumnConfig columnConfig : this.columnConfigList) { if (CollectionUtils.isEmpty(catVariables) || isRequestColumn(catVariables, columnConfig)) { exportCatColumns.add(columnConfig); } } if (CollectionUtils.isNotEmpty(exportCatColumns)) { List<String> woeMappings = new ArrayList<String>(); for (ColumnConfig columnConfig : exportCatColumns) { String woeMapText = rebinAndExportWoeMapping(columnConfig); 
woeMappings.add(woeMapText); } FileUtils.write(new File("woemapping.txt"), StringUtils.join(woeMappings, ",\n")); } } else if (type.equalsIgnoreCase(WOE)) { List<String> woeInfos = new ArrayList<String>(); for (ColumnConfig columnConfig : this.columnConfigList) { if (columnConfig.getBinLength() > 1 && ((columnConfig.isCategorical() && CollectionUtils.isNotEmpty(columnConfig.getBinCategory())) || (columnConfig.isNumerical() && CollectionUtils.isNotEmpty(columnConfig.getBinBoundary()) && columnConfig.getBinBoundary().size() > 1))) { List<String> varWoeInfos = generateWoeInfos(columnConfig); if (CollectionUtils.isNotEmpty(varWoeInfos)) { woeInfos.addAll(varWoeInfos); woeInfos.add(""); } } FileUtils.writeLines(new File("varwoe_info.txt"), woeInfos); } } else if (type.equalsIgnoreCase(CORRELATION)) { // export correlation into mapping list if (!ShifuFileUtils.isFileExists(pathFinder.getLocalCorrelationCsvPath(), SourceType.LOCAL)) { log.warn("The correlation file doesn't exist. Please make sure you have ran `shifu stats -c`."); return 2; } return exportVariableCorr(); } else { log.error("Unsupported output format - {}", type); status = -1; } clearUp(ModelStep.EXPORT); log.info("Done."); return status; }
From source file:net.sourceforge.atunes.kernel.modules.webservices.lastfm.LastFmCache.java
/**
 * Returns the last.fm submission cache directory, creating it (including any
 * missing parents) on first use.
 *
 * @return the submission cache directory, guaranteed to exist
 * @throws IOException if the directory cannot be created
 */
private synchronized File getSubmissionDataDir() throws IOException {
    if (!submissionCacheDir.exists()) {
        // Lazily materialize the directory the first time it is requested.
        FileUtils.forceMkdir(submissionCacheDir);
    }
    return submissionCacheDir;
}
From source file:com.alibaba.otter.shared.common.utils.extension.classpath.FileSystemClassScanner.java
/**
 * Sets the extension-class directory and eagerly creates it on disk when it
 * does not yet exist. Creation failures are logged rather than propagated so
 * the setter never throws.
 *
 * @param extendsDir path of the directory to scan for extension classes
 */
public void setExtendsDir(String extendsDir) {
    this.extendsDir = extendsDir;
    File extendsDirFile = new File(extendsDir);
    if (extendsDirFile.exists()) {
        return;
    }
    try {
        FileUtils.forceMkdir(extendsDirFile);
    } catch (IOException e) {
        // Best-effort creation: log and continue, keeping the setter non-throwing.
        logger.error("##ERROR", e);
    }
}
From source file:com.github.hadoop.maven.plugin.PackMojo.java
/** * Create the hadoop deploy artifacts/*www . j ava2 s . c om*/ * * @throws IOException * @return File that contains the root of jar file to be packed. * @throws InvalidDependencyVersionException * @throws ArtifactNotFoundException * @throws ArtifactResolutionException */ private File createHadoopDeployArtifacts() throws IOException { FileUtils.deleteDirectory(outputDirectory); File rootDir = new File(outputDirectory.getAbsolutePath() + File.separator + "root"); FileUtils.forceMkdir(rootDir); File jarlibdir = new File(rootDir.getAbsolutePath() + File.separator + "lib"); FileUtils.forceMkdir(jarlibdir); File classesdir = new File(project.getBuild().getDirectory() + File.separator + "classes"); FileUtils.copyDirectory(classesdir, rootDir); Set<Artifact> filteredArtifacts = this.filterArtifacts(this.artifacts); getLog().info(""); getLog().info("Dependencies of this project independent of hadoop classpath " + filteredArtifacts); getLog().info(""); for (Artifact artifact : filteredArtifacts) { FileUtils.copyFileToDirectory(artifact.getFile(), jarlibdir); } return rootDir; }
From source file:com.btoddb.fastpersitentqueue.MemorySegmentSerializer.java
/**
 * Prepares the serializer by ensuring the segment directory exists, creating
 * it (and any missing parents) when absent.
 *
 * @throws IOException if the directory cannot be created
 */
public void init() throws IOException { FileUtils.forceMkdir(directory); }
From source file:de.tudarmstadt.ukp.dkpro.core.io.tgrep.TGrepWriter.java
/**
 * Initializes the writer: validates the configured compression method,
 * creates the output directory, and prepares the per-corpus writer map.
 *
 * @throws ResourceInitializationException if an unsupported compression
 *         method was configured or the output directory cannot be created
 */
@Override
public void initialize(UimaContext aContext) throws ResourceInitializationException {
    super.initialize(aContext);

    // TGrep2 only understands plain, gzip, or bzip2 corpora — fail fast otherwise.
    boolean supported = compression == CompressionMethod.NONE || compression == CompressionMethod.GZIP
            || compression == CompressionMethod.BZIP2;
    if (!supported) {
        throw new ResourceInitializationException(
                new IllegalArgumentException("Only gzip and bzip2 compression are supported by TGrep2, but ["
                        + compression + "] was specified."));
    }

    try {
        FileUtils.forceMkdir(outputPath);
    } catch (IOException e) {
        throw new ResourceInitializationException(e);
    }

    writers = new HashMap<String, PrintWriter>();
}
From source file:com.googlecode.fascinator.storage.jclouds.BlobStoreClient.java
/** * Establish a connection to the BlobStore, then return the instantiated * BlobStore client used to connect.//w ww. j av a 2s.co m * * @return BlobStore: The client used to connect to the API * @throws StorageException * if there was an error */ private static BlobStore blobStoreConnect() throws StorageException { if (blobStore != null && connectCount < 100) { return blobStore; } connectCount = 0; ContextBuilder contextBuilder = ContextBuilder.newBuilder(provider); // If we're using filesystem, set local directory to write objects to if ("filesystem".equals(provider)) { if (supportsUserMetadataSetting != null) { supportsUserMetadata = supportsUserMetadataSetting; } else { File storageDir = new File(fileSystemLocation); if (!storageDir.exists()) { try { FileUtils.forceMkdir(storageDir); // Java doesn't support extended attributes in some file // systems like FAT32 and HFS. As JClouds use them to // store // user metadata we'll need to store them differently on // these file systems. if (!Files.getFileStore(storageDir.toPath()) .supportsFileAttributeView(UserDefinedFileAttributeView.class)) { supportsUserMetadata = false; } } catch (IOException e) { throw new StorageException("Failed to create storage directory", e); } } } Properties properties = new Properties(); properties.setProperty(FilesystemConstants.PROPERTY_BASEDIR, fileSystemLocation); contextBuilder.overrides(properties); } else if ("gridfs".equals(provider)) { Properties properties = new Properties(); properties.setProperty(Constants.PROPERTY_ENDPOINT, gridFsConnectionString); contextBuilder.overrides(properties); } context = contextBuilder.credentials(identity, credential) .endpoint("https://keystone.rc.nectar.org.au:5000/v2.0").buildView(BlobStoreContext.class); blobStore = context.getBlobStore(); Location loc = null; if (StringUtils.isNotEmpty(location)) { for (Location assignableLoc : blobStore.listAssignableLocations()) { if (assignableLoc.getId().equalsIgnoreCase(location)) { loc = assignableLoc; 
break; } } if (loc == null) { throw new StorageException(location + " location not found in Blobstore"); } } blobStore.createContainerInLocation(loc, containerName); return blobStore; }
From source file:com.cloudbees.hudson.plugins.folder.computed.FolderComputation.java
/** * {@inheritDoc}/*ww w.j a v a 2 s. com*/ */ @Override public void run() { StreamBuildListener listener; try { File logFile = getLogFile(); FileUtils.forceMkdir(logFile.getParentFile()); OutputStream os; if (BACKUP_LOG_COUNT != null) { os = new ReopenableRotatingFileOutputStream(logFile, BACKUP_LOG_COUNT); ((ReopenableRotatingFileOutputStream) os).rewind(); } else { os = new FileOutputStream(logFile); } listener = new StreamBuildListener(os, Charsets.UTF_8); } catch (IOException x) { LOGGER.log(Level.WARNING, null, x); result = Result.FAILURE; return; } timestamp = System.currentTimeMillis(); // TODO print start time listener.started(getCauses()); Result _result = Result.NOT_BUILT; // cf. isLogUpdated, do not set this.result until listener closed try { folder.updateChildren(listener); _result = Result.SUCCESS; } catch (InterruptedException x) { LOGGER.log(Level.FINE, "recomputation of " + folder.getFullName() + " was aborted", x); listener.getLogger().println("Aborted"); _result = Result.ABORTED; } catch (Exception x) { LOGGER.log(Level.FINE, "recomputation of " + folder.getFullName() + " failed", x); if (x instanceof AbortException) { listener.fatalError(x.getMessage()); } else { x.printStackTrace( listener.fatalError("Failed to recompute children of " + folder.getFullDisplayName())); } _result = Result.FAILURE; } finally { duration = System.currentTimeMillis() - timestamp; if (durations == null) { durations = new ArrayList<Long>(); } while (durations.size() > 32) { durations.remove(0); } durations.add(duration); listener.finished(_result); listener.closeQuietly(); result = _result; try { save(); } catch (IOException x) { LOGGER.log(Level.WARNING, null, x); } } }
From source file:com.alibaba.jstorm.cluster.StormConfig.java
public static String default_worker_shared_dir(Map conf) throws IOException { String ret = String.valueOf(conf.get(Config.STORM_LOCAL_DIR)) + FILE_SEPERATEOR + WORKER_DATA_SUBDIR; FileUtils.forceMkdir(new File(ret)); return ret;//from w ww .ja v a2s . c o m }
From source file:hu.bme.mit.sette.common.descriptors.eclipse.EclipseProject.java
/** * Saves the project files to the specified directory. * * @param directory/* w w w .ja v a2 s. c o m*/ * The directory. If it does not exist, it will be created. * @throws IOException * If the directory cannot be created or the file already exists * but is not a directory. * @throws ParserConfigurationException * If a DocumentBuilder cannot be created which satisfies the * configuration requested or when it is not possible to create * a Transformer instance. * @throws TransformerException * If an unrecoverable error occurs during the course of the * transformation. */ public void save(final File directory) throws IOException, ParserConfigurationException, TransformerException { Validate.notNull(directory, "The directory must not be null"); // create directory if not exists if (!directory.exists()) { FileUtils.forceMkdir(directory); } // save .project file File projectFile = new File(directory, ".project"); XmlUtils.writeXml(projectDescriptor.createXmlDocument(), projectFile); // save .classpath file if classpath is specified if (classpathDescriptor != null) { File classpathFile = new File(directory, ".classpath"); XmlUtils.writeXml(classpathDescriptor.createXmlDocument(), classpathFile); } }