Example usage for org.apache.hadoop.fs FileSystem mkdirs

List of usage examples for org.apache.hadoop.fs FileSystem mkdirs

Introduction

On this page you can find example usages of org.apache.hadoop.fs.FileSystem#mkdirs.

Prototype

public boolean mkdirs(Path f) throws IOException 

Document

Call #mkdirs(Path, FsPermission) with default permission.
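
Before the project examples below, here is a minimal, self-contained sketch of both overloads; the paths and class name are illustrative, not taken from any of the projects listed under Usage:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class MkdirsExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // mkdirs(Path) creates the directory and any missing parents with
        // the default permission. Like "mkdir -p", it returns true on
        // success, including when the directory already exists.
        Path dir = new Path("/tmp/mkdirs-example"); // illustrative path
        if (!fs.mkdirs(dir)) {
            throw new IOException("Failed to create directory: " + dir);
        }

        // The two-argument overload applies an explicit permission
        // instead of the default one.
        fs.mkdirs(new Path(dir, "restricted"), new FsPermission((short) 0750));
    }
}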

Usage

From source file:co.cask.tephra.hbase94.coprocessor.TransactionProcessorTest.java

License:Apache License

private HRegion createRegion(String tableName, byte[] family, long ttl) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    HColumnDescriptor cfd = new HColumnDescriptor(family);
    if (ttl > 0) {
        cfd.setValue(TxConstants.PROPERTY_TTL, String.valueOf(ttl));
    }
    cfd.setMaxVersions(10);
    htd.addFamily(cfd);
    htd.addCoprocessor(TransactionProcessor.class.getName());
    Path tablePath = new Path("/tmp/" + tableName);
    Path hlogPath = new Path("/tmp/hlog-" + tableName);
    Path oldPath = new Path("/tmp/.oldLogs-" + tableName);
    Configuration hConf = conf;
    FileSystem fs = FileSystem.get(hConf);
    assertTrue(fs.mkdirs(tablePath));
    HLog hlog = new HLog(fs, hlogPath, oldPath, hConf);
    return new HRegion(tablePath, hlog, fs, hConf, new HRegionInfo(Bytes.toBytes(tableName)), htd,
            new MockRegionServerServices());
}

From source file:co.cask.tephra.hbase96.coprocessor.TransactionProcessorTest.java

License:Apache License

private HRegion createRegion(String tableName, byte[] family, long ttl) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
    HColumnDescriptor cfd = new HColumnDescriptor(family);
    if (ttl > 0) {
        cfd.setValue(TxConstants.PROPERTY_TTL, String.valueOf(ttl));
    }
    cfd.setMaxVersions(10);
    htd.addFamily(cfd);
    htd.addCoprocessor(TransactionProcessor.class.getName());
    Path tablePath = FSUtils.getTableDir(FSUtils.getRootDir(conf), htd.getTableName());
    Path hlogPath = new Path(FSUtils.getRootDir(conf) + "/hlog");
    FileSystem fs = FileSystem.get(conf);
    assertTrue(fs.mkdirs(tablePath));
    HLog hLog = HLogFactory.createHLog(fs, hlogPath, tableName, conf);
    HRegionInfo regionInfo = new HRegionInfo(TableName.valueOf(tableName));
    HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tablePath, regionInfo);
    return new HRegion(regionFS, hLog, conf, htd, new MockRegionServerServices(conf, null));
}

From source file:co.cask.tigon.data.hbase.HBaseTestBase.java

License:Apache License

public Path createHBaseRootDir(Configuration conf) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    Path hbaseRootdir = new Path(fs.makeQualified(fs.getHomeDirectory()), "hbase");
    conf.set(HConstants.HBASE_DIR, hbaseRootdir.toString());
    fs.mkdirs(hbaseRootdir);
    FSUtils.setVersion(fs, hbaseRootdir);
    return hbaseRootdir;
}

From source file:co.cask.tigon.data.increment.hbase96.IncrementSummingScannerTest.java

License:Apache License

private HRegion createRegion(TableName tableName, byte[] family) throws Exception {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    HColumnDescriptor cfd = new HColumnDescriptor(family);
    cfd.setMaxVersions(Integer.MAX_VALUE);
    htd.addFamily(cfd);
    htd.addCoprocessor(IncrementHandler.class.getName());
    Path tablePath = new Path("/tmp/" + tableName.getNameAsString());
    Path hlogPath = new Path("/tmp/hlog-" + tableName.getNameAsString());
    Configuration hConf = conf;
    FileSystem fs = FileSystem.get(hConf);
    assertTrue(fs.mkdirs(tablePath));
    HLog hLog = HLogFactory.createHLog(fs, hlogPath, tableName.getNameAsString(), hConf);
    HRegionInfo regionInfo = new HRegionInfo(tableName);
    HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(hConf, fs, tablePath, regionInfo);
    return new HRegion(regionFS, hLog, hConf, htd, new MockRegionServerServices(hConf, null));
}

From source file:colossal.pipe.ColFile.java

License:Apache License

public void clearAndPrepareOutput(Configuration conf) {
    try {
        Path dfsPath = new Path(path);
        FileSystem fs = dfsPath.getFileSystem(conf);
        if (fs.exists(dfsPath)) {
            FileStatus[] statuses = fs.listStatus(dfsPath);
            for (FileStatus status : statuses) {
                if (status.isDir()) {
                    if (!status.getPath().toString().endsWith("/_logs")
                            && !status.getPath().toString().endsWith("/_temporary")) {
                        throw new IllegalArgumentException(
                                "Trying to overwrite directory with child directories: " + path);
                    }
                }
            }
        } else {
            fs.mkdirs(dfsPath);
        }
        fs.delete(dfsPath, true);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}

From source file:com.alexholmes.hdfsslurper.Configurator.java

License:Apache License

public static void testCreateDir(Path p, Configuration conf)
        throws IOException, ConfigSettingException, FileSystemMkdirFailed {
    FileSystem fs = p.getFileSystem(conf);
    if (fs.exists(p) && !fs.getFileStatus(p).isDir()) {
        throw new ConfigSettingException("Directory appears to be a file: '" + p + "'");
    }

    if (!fs.exists(p)) {
        log.info("Attempting creation of directory: " + p);
        if (!fs.mkdirs(p)) {
            throw new FileSystemMkdirFailed("Failed to create directory: '" + p + "'");
        }
    }
}

From source file:com.alexholmes.hdfsslurper.WorkerThread.java

License:Apache License

private void process(FileStatus srcFileStatus) throws IOException, InterruptedException {

    Path stagingFile = null;
    FileSystem destFs = null;
    String filenameBatchidDelimiter = config.getFileNameBatchIdDelimiter();

    try {
        FileSystem srcFs = srcFileStatus.getPath().getFileSystem(config.getConfig());

        // run a script which can change the name of the file as well as
        // write out a new version of the file
        //
        if (config.getWorkScript() != null) {
            Path newSrcFile = stageSource(srcFileStatus);
            srcFileStatus = srcFileStatus.getPath().getFileSystem(config.getConfig()).getFileStatus(newSrcFile);
        }

        Path srcFile = srcFileStatus.getPath();

        // get the target HDFS file
        //
        Path destFile = getHdfsTargetPath(srcFileStatus);

        if (config.getCodec() != null) {
            String ext = config.getCodec().getDefaultExtension();
            if (!destFile.getName().endsWith(ext)) {
                destFile = new Path(destFile.toString() + ext);
            }
        }

        destFs = destFile.getFileSystem(config.getConfig());

        // get the staging HDFS file
        //
        stagingFile = fileSystemManager.getStagingFile(srcFileStatus, destFile);
        String batchId = srcFile.toString().substring(
                srcFile.toString().lastIndexOf(filenameBatchidDelimiter) + 1, srcFile.toString().length());

        log.info("event#Copying source file '" + srcFile + "' to staging destination '" + stagingFile + "'"
                + "$batchId#" + batchId);

        // if the directory of the target file doesn't exist, attempt to
        // create it
        //
        Path destParentDir = destFile.getParent();
        if (!destFs.exists(destParentDir)) {
            log.info("event#Attempting creation of target directory: " + destParentDir.toUri());
            if (!destFs.mkdirs(destParentDir)) {
                throw new IOException("event#Failed to create target directory: " + destParentDir.toUri());
            }
        }

        // if the staging directory doesn't exist, attempt to create it
        //
        Path destStagingParentDir = stagingFile.getParent();
        if (!destFs.exists(destStagingParentDir)) {
            log.info("event#Attempting creation of staging directory: " + destStagingParentDir.toUri());
            if (!destFs.mkdirs(destStagingParentDir)) {
                throw new IOException("event#Failed to create staging directory: " + destParentDir.toUri());
            }
        }

        // copy the file
        //
        InputStream is = null;
        OutputStream os = null;
        CRC32 crc = new CRC32();
        try {
            is = new BufferedInputStream(srcFs.open(srcFile));
            if (config.isVerify()) {
                is = new CheckedInputStream(is, crc);
            }
            os = destFs.create(stagingFile);

            if (config.getCodec() != null) {
                os = config.getCodec().createOutputStream(os);
            }

            IOUtils.copyBytes(is, os, 4096, false);
        } finally {
            IOUtils.closeStream(is);
            IOUtils.closeStream(os);
        }

        long srcFileSize = srcFs.getFileStatus(srcFile).getLen();
        long destFileSize = destFs.getFileStatus(stagingFile).getLen();
        if (config.getCodec() == null && srcFileSize != destFileSize) {
            throw new IOException(
                    "event#File sizes don't match, source = " + srcFileSize + ", dest = " + destFileSize);
        }

        log.info("event#Local file size = " + srcFileSize + ", HDFS file size = " + destFileSize + "$batchId#"
                + batchId);

        if (config.isVerify()) {
            verify(stagingFile, crc.getValue());
        }

        if (destFs.exists(destFile)) {
            destFs.delete(destFile, false);
        }

        log.info("event#Moving staging file '" + stagingFile + "' to destination '" + destFile + "'"
                + "$batchId#" + batchId);
        if (!destFs.rename(stagingFile, destFile)) {
            throw new IOException("event#Failed to rename file");
        }

        if (config.isCreateLzopIndex() && destFile.getName().endsWith(lzopExt)) {
            Path lzoIndexPath = new Path(destFile.toString() + LzoIndex.LZO_INDEX_SUFFIX);
            if (destFs.exists(lzoIndexPath)) {
                log.info("event#Deleting index file as it already exists");
                destFs.delete(lzoIndexPath, false);
            }
            indexer.index(destFile);
        }

        fileSystemManager.fileCopyComplete(srcFileStatus);

    } catch (Throwable t) {
        log.error("event#Caught exception working on file " + srcFileStatus.getPath(), t);

        // delete the staging file if it still exists
        //
        try {
            if (destFs != null && stagingFile != null && destFs.exists(stagingFile)) {
                destFs.delete(stagingFile, false);
            }
        } catch (Throwable t2) {
            log.error("event#Failed to delete staging file " + stagingFile, t2);
        }

        fileSystemManager.fileCopyError(srcFileStatus);
    }

}

From source file:com.alibaba.jstorm.hdfs.spout.HdfsSpout.java

License:Apache License

private static void validateOrMakeDir(FileSystem fs, Path dir, String dirDescription) {
    try {
        if (fs.exists(dir)) {
            if (!fs.isDirectory(dir)) {
                LOG.error(dirDescription + " directory is a file, not a dir. " + dir);
                throw new RuntimeException(dirDescription + " directory is a file, not a dir. " + dir);
            }
        } else if (!fs.mkdirs(dir)) {
            LOG.error("Unable to create " + dirDescription + " directory " + dir);
            throw new RuntimeException("Unable to create " + dirDescription + " directory " + dir);
        }
    } catch (IOException e) {
        LOG.error("Unable to create " + dirDescription + " directory " + dir, e);
        throw new RuntimeException("Unable to create " + dirDescription + " directory " + dir, e);
    }
}

From source file:com.anhth12.lambda.ml.MLUpdate.java

@Override
public void runUpdate(JavaSparkContext sparkContext, long timestamp, JavaPairRDD<String, M> newKeyMessageData,
        JavaPairRDD<String, M> pastKeyMessageData, String modelDirString,
        TopicProducer<String, String> modelUpdateTopic) throws IOException, InterruptedException {

    Preconditions.checkNotNull(newKeyMessageData);

    JavaRDD<M> newData = newKeyMessageData.values();
    JavaRDD<M> pastData = pastKeyMessageData == null ? null : pastKeyMessageData.values();

    if (newData != null) {
        newData.cache();
        newData.foreachPartition(Functions.<Iterator<M>>noOp());
    }
    if (pastData != null) {
        pastData.cache();
        pastData.foreachPartition(Functions.<Iterator<M>>noOp());
    }

    List<HyperParamValues<?>> hyperParamValues = getHyperParamValues();

    int valuesPerHyperParam = HyperParams.chooseValuesPerHyperParam(hyperParamValues.size(), candidates);

    List<List<?>> hyperParameterCombos = HyperParams.chooseHyperParameterCombos(hyperParamValues, candidates,
            valuesPerHyperParam);

    FileSystem fs = FileSystem.get(sparkContext.hadoopConfiguration());

    Path modelDir = new Path(modelDirString);
    Path tempModelPath = new Path(modelDir, ".temporary");
    Path candidatesPath = new Path(tempModelPath, Long.toString(System.currentTimeMillis()));
    fs.mkdirs(candidatesPath);

    Path bestCandidatePath = findBestCandidatePath(sparkContext, newData, pastData, hyperParameterCombos,
            candidatesPath);

    Path finalPath = new Path(modelDir, Long.toString(System.currentTimeMillis()));
    if (bestCandidatePath == null) {
        log.info("Unable to build any model");
    } else {
        fs.rename(bestCandidatePath, finalPath);
    }

    fs.delete(candidatesPath, true);

    Path bestModelPath = new Path(finalPath, MODEL_FILE_NAME);

    if (fs.exists(bestModelPath)) {
        PMML bestModel;
        try (InputStream in = new GZIPInputStream(fs.open(bestModelPath), 1 << 16)) {
            bestModel = PMMLUtils.read(in);
        }

        modelUpdateTopic.send("MODEL", PMMLUtils.toString(bestModel));
        publishAdditionalModelData(sparkContext, bestModel, newData, pastData, candidatesPath, modelUpdateTopic);
    }

    if (newData != null) {
        newData.unpersist();
    }

    if (pastData != null) {
        pastData.unpersist();
    }

}

From source file:com.asakusafw.operation.tools.directio.file.FileMakeDirectoryCommand.java

License:Apache License

private void mkdir(PrintWriter writer, Path path) {
    LOG.debug("mkdir: {}", path);
    try {
        FileSystem fs = path.getFileSystem(dataSourceParameter.getConfiguration());
        if (stat(fs, path).filter(FileStatus::isDirectory).isPresent()) {
            verboseParameter.printf(writer, "already exists: %s%n", path);
        } else {
            if (fs.mkdirs(path) == false
                    && stat(fs, path).filter(s -> s.isDirectory() == false).isPresent() == false) {
                throw new CommandException(MessageFormat.format("cannot create directory: {0}", path));
            }
            verboseParameter.printf(writer, "create directory: %s%n", path);
        }
    } catch (IOException e) {
        throw new CommandException(MessageFormat.format("error occurred while creating directory: {0}", path),
                e);
    }
}