Example usage for org.apache.hadoop.fs FileSystem mkdirs

List of usage examples for org.apache.hadoop.fs FileSystem mkdirs

Introduction

On this page you can find example usage for org.apache.hadoop.fs FileSystem mkdirs.

Prototype

public boolean mkdirs(Path f) throws IOException 

Document

Call #mkdirs(Path,FsPermission) with default permission.
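
The single-argument form behaves like Unix mkdir -p: it creates the directory together with any missing parents, and an already-existing directory is not an error. Before the real-world usages below, here is a minimal standalone sketch of both overloads (the paths used are illustrative assumptions, not taken from the examples that follow):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class MkdirsExample {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // Creates /tmp/example/nested and all missing parent directories;
        // returns true on success, including when the directory already exists.
        Path dir = new Path("/tmp/example/nested"); // hypothetical path
        if (fs.mkdirs(dir)) {
            System.out.println("Created " + dir);
        }
        // The two-argument overload applies an explicit permission
        // instead of the default.
        fs.mkdirs(new Path("/tmp/example/other"), new FsPermission((short) 0755));
    }
}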

Usage

From source file:com.inmobi.databus.local.LocalStreamService.java

License:Apache License

private void commit(Map<Path, Path> commitPaths) throws Exception {
    LOG.info("Committing " + commitPaths.size() + " paths.");
    FileSystem fs = FileSystem.get(cluster.getHadoopConf());
    for (Map.Entry<Path, Path> entry : commitPaths.entrySet()) {
        LOG.info("Renaming " + entry.getKey() + " to " + entry.getValue());
        fs.mkdirs(entry.getValue().getParent());
        if (!fs.rename(entry.getKey(), entry.getValue())) {
            LOG.warn("Rename failed, aborting transaction COMMIT to avoid "
                    + "data loss. Partial data replay could happen in the next run");
            throw new Exception("Abort transaction Commit. Rename failed from [" + entry.getKey() + "] to ["
                    + entry.getValue() + "]");
        }
    }

}

From source file:com.inmobi.databus.purge.DataPurgerServiceTest.java

License:Apache License

private void createTestPurgefiles(FileSystem fs, Cluster cluster, Calendar date) throws Exception {
    for (String streamname : cluster.getSourceStreams()) {
        String[] files = new String[NUM_OF_FILES];
        String datapath = Cluster.getDateAsYYYYMMDDHHMNPath(date.getTime());
        String commitpath = cluster.getLocalFinalDestDirRoot() + File.separator + streamname + File.separator
                + datapath;
        String mergecommitpath = cluster.getFinalDestDirRoot() + File.separator + streamname + File.separator
                + datapath;
        String trashpath = cluster.getTrashPath() + File.separator + CalendarHelper.getDateAsString(date)
                + File.separator;
        fs.mkdirs(new Path(commitpath));

        for (int j = 0; j < NUM_OF_FILES; ++j) {
            files[j] = cluster.getName() + "-"
                    + TestLocalStreamService.getDateAsYYYYMMDDHHmm(new Date()) + "_" + idFormat.format(j);
            {
                Path path = new Path(commitpath + File.separator + files[j]);
                // LOG.info("Creating streams_local File " + path.getName());
                FSDataOutputStream streamout = fs.create(path);
                streamout.writeBytes("Creating Test data for teststream " + path.toString());
                streamout.close();
                Assert.assertTrue(fs.exists(path));
            }
            {
                Path path = new Path(mergecommitpath + File.separator + files[j]);
                // LOG.info("Creating streams File " + path.getName());
                FSDataOutputStream streamout = fs.create(path);
                streamout.writeBytes("Creating Test data for teststream " + path.toString());
                streamout.close();
                Assert.assertTrue(fs.exists(path));
            }

            {
                Path path = new Path(trashpath + File.separator + String.valueOf(date.get(Calendar.HOUR_OF_DAY))
                        + File.separator + files[j]);
                // LOG.info("Creating trash File " + path.toString());
                FSDataOutputStream streamout = fs.create(path);
                streamout.writeBytes("Creating Test trash data for teststream " + path.getName());
                streamout.close();
                Assert.assertTrue(fs.exists(path));
            }
        }
    }

}

From source file:com.inmobi.messaging.consumer.databus.TestAbstractDatabusConsumer.java

License:Apache License

public void setup(int numFileToMove) throws Exception {

    ClientConfig config = loadConfig();
    config.set(DatabusConsumerConfig.hadoopConfigFileKey, "hadoop-conf.xml");
    testConsumer = getConsumerInstance();
    //System.out.println(testConsumer.getClass().getCanonicalName());
    testConsumer.initializeConfig(config);
    conf = testConsumer.getHadoopConf();
    Assert.assertEquals(conf.get("myhadoop.property"), "myvalue");
    // setup stream, collector dirs and data files
    Set<String> sourceNames = new HashSet<String>();
    sourceNames.add(testStream);
    chkpointPathPrefix = config.getString(DatabusConsumerConfig.checkpointDirConfig);
    setUpCheckpointPaths();
    rootDirs = testConsumer.getRootDirs();
    for (int i = 0; i < rootDirs.length; i++) {
        Map<String, String> clusterConf = new HashMap<String, String>();
        FileSystem fs = rootDirs[i].getFileSystem(conf);
        clusterConf.put("hdfsurl", fs.getUri().toString());
        clusterConf.put("jturl", "local");
        clusterConf.put("name", "databusCluster" + i);
        clusterConf.put("jobqueue", "default");

        String rootDir = rootDirs[i].toUri().toString();
        if (rootDirs[i].toString().startsWith("file:")) {
            String[] rootDirSplit = rootDirs[i].toString().split("file:");
            rootDir = rootDirSplit[1];
        }
        ClusterUtil cluster = new ClusterUtil(clusterConf, rootDir, sourceNames);
        fs.delete(new Path(cluster.getRootDir()), true);
        Path streamDir = new Path(cluster.getDataDir(), testStream);
        fs.delete(streamDir, true);
        fs.mkdirs(streamDir);
        for (String collector : collectors) {
            Path collectorDir = new Path(streamDir, collector);
            fs.delete(collectorDir, true);
            fs.mkdirs(collectorDir);
            TestUtil.setUpFiles(cluster, collector, dataFiles, null, null, numFileToMove, numFileToMove);
        }
    }
}

From source file:com.inmobi.messaging.consumer.databus.TestDatabusConsumer.java

License:Apache License

@Test
public void testConsumerWithStopTimeBeyondCheckpoint() throws Exception {
    ClientConfig config = loadConfig();
    config.set(DatabusConsumerConfig.databusRootDirsConfig, rootDirs[0].toUri().toString());

    FileSystem fs = rootDirs[0].getFileSystem(conf);
    try {
        // Delete the dummy collector (COLLECTOR_PREFIX), i.e. the one which does
        // not have any files to read.
        // A collector won't have any checkpoint if there are no files to read.
        // This test verifies that the consumer is stopped if the stop time is
        // beyond the checkpoint.
        // If the checkpoint is not present, the consumer won't be closed completely.
        fs.delete(new Path(rootDirs[0].toUri().toString(), "data/" + testStream + "/" + COLLECTOR_PREFIX), true);
        Date absoluteStartTime = CollectorStreamReader.getDateFromCollectorFile(dataFiles[0]);
        config.set(MessageConsumerFactory.ABSOLUTE_START_TIME,
                AbstractMessageConsumer.minDirFormat.get().format(absoluteStartTime));
        config.set(DatabusConsumerConfig.checkpointDirConfig, ck12);
        Date stopDate = CollectorStreamReader.getDateFromCollectorFile(dataFiles[1]);
        Date stopDateForCheckpoint = CollectorStreamReader.getDateFromCollectorFile(dataFiles[0]);
        config.set(DatabusConsumerConfig.stopDateConfig,
                AbstractMessageConsumer.minDirFormat.get().format(stopDate));
        ConsumerUtil.testConsumerWithStopTimeBeyondCheckpoint(config, testStream, consumerName,
                absoluteStartTime, false, stopDateForCheckpoint);
    } finally {
        // create a dummy collector directory back
        fs.mkdirs(new Path(rootDirs[0].toUri().toString(), "data/" + testStream + "/" + COLLECTOR_PREFIX));
    }
}

From source file:com.inmobi.messaging.consumer.util.HadoopUtil.java

License:Apache License

public static void setUpHadoopFiles(Path streamDirPrefix, Configuration conf, String[] files,
        String[] suffixDirs, Path[] finalFiles, boolean alternateEmptyFiles, Date minuteDirTimeStamp, int index,
        int startIndex) throws Exception {
    FileSystem fs = streamDirPrefix.getFileSystem(conf);
    Path rootDir = streamDirPrefix.getParent();
    Path tmpDataDir = new Path(rootDir, "data");
    boolean emptyFile = false;
    // setup data dirs
    if (files != null) {
        int i = startIndex;
        int j = index;
        for (String file : files) {
            if (alternateEmptyFiles && emptyFile) {
                MessageUtil.createEmptySequenceFile(file, fs, tmpDataDir, conf);
                emptyFile = false;
            } else {
                MessageUtil.createMessageSequenceFile(file, fs, tmpDataDir, i, conf);
                emptyFile = true;
                i += 100;
            }
            Path srcPath = new Path(tmpDataDir, file);
            Date commitTime = getCommitDateForFile(file, minuteDirTimeStamp);
            TestUtil.publishMissingPaths(fs, streamDirPrefix, lastCommitTime, commitTime);
            lastCommitTime = commitTime;
            Path targetDateDir = getTargetDateDir(streamDirPrefix, commitTime);
            List<Path> targetDirs = new ArrayList<Path>();
            if (suffixDirs != null) {
                for (String suffixDir : suffixDirs) {
                    targetDirs.add(new Path(targetDateDir, suffixDir));
                }
            } else {
                targetDirs.add(targetDateDir);
            }
            for (Path targetDir : targetDirs) {
                fs.mkdirs(targetDir);
                Path targetPath = new Path(targetDir, file);
                fs.copyFromLocalFile(srcPath, targetPath);
                LOG.info("Copied " + srcPath + " to " + targetPath);
                if (finalFiles != null) {
                    finalFiles[j] = targetPath;
                    j++;
                }
                Thread.sleep(1000);
            }
            fs.delete(srcPath, true);
        }
        TestUtil.publishLastPath(fs, streamDirPrefix, lastCommitTime);
    }
}

From source file:com.inmobi.messaging.consumer.util.HadoopUtil.java

License:Apache License

public static void setupHadoopCluster(Configuration conf, String[] files, String[] suffixDirs,
        Path[] finalFiles, Path finalDir, boolean withEmptyFiles, boolean createFilesInNextHour)
        throws Exception {
    FileSystem fs = finalDir.getFileSystem(conf);

    Path rootDir = finalDir.getParent();
    fs.delete(rootDir, true);
    Path tmpDataDir = new Path(rootDir, "data");
    fs.mkdirs(tmpDataDir);

    if (!createFilesInNextHour) {
        setUpHadoopFiles(finalDir, conf, files, suffixDirs, finalFiles, withEmptyFiles, null, 0, 0);
    } else {
        // start from 1 hour back as we need files in two different hours.
        Calendar cal = Calendar.getInstance();
        cal.setTime(startCommitTime);
        cal.add(Calendar.HOUR_OF_DAY, -1);

        setUpHadoopFiles(finalDir, conf, files, suffixDirs, finalFiles, withEmptyFiles, cal.getTime(), 0, 0);
        // go to next hour
        cal.add(Calendar.HOUR_OF_DAY, 1);
        int index = files.length;
        // find the number of non-empty (i.e. data) files in 1 hour
        int numberOfNonEmptyFiles = withEmptyFiles ? (int) Math.ceil(index / 2.0) : index;
        int startIndex = numberOfNonEmptyFiles * 100;
        setUpHadoopFiles(finalDir, conf, files, suffixDirs, finalFiles, withEmptyFiles, cal.getTime(), index,
                startIndex);
    }
}

From source file:com.inmobi.messaging.consumer.util.TestUtil.java

License:Apache License

private static ClusterUtil setupCluster(String className, String testStream, PartitionId pid, String hdfsUrl,
        String[] collectorFiles, String[] emptyFiles, Path[] databusFiles, int numFilesToMoveToStreamLocal,
        int numFilesToMoveToStreams, String testRootDir) throws Exception {
    Set<String> sourceNames = new HashSet<String>();
    sourceNames.add(testStream);
    Map<String, String> clusterConf = new HashMap<String, String>();
    clusterConf.put("hdfsurl", hdfsUrl);
    clusterConf.put("jturl", "local");
    clusterConf.put("name", pid.getCluster());
    clusterConf.put("jobqueue", "default");

    ClusterUtil cluster = new ClusterUtil(clusterConf, new Path(testRootDir, className).toString(),
            sourceNames);

    // setup stream and collector dirs
    FileSystem fs = FileSystem.get(cluster.getHadoopConf());
    Path collectorDir = getCollectorDir(cluster, testStream, pid.getCollector());
    fs.delete(collectorDir, true);
    fs.delete(new Path(cluster.getLocalFinalDestDirRoot()), true);
    fs.delete(new Path(cluster.getFinalDestDirRoot()), true);
    fs.mkdirs(collectorDir);

    setUpFiles(cluster, pid.getCollector(), collectorFiles, emptyFiles, databusFiles,
            numFilesToMoveToStreamLocal, numFilesToMoveToStreams);

    return cluster;
}

From source file:com.inmobi.messaging.consumer.util.TestUtil.java

License:Apache License

static void publishMissingPaths(FileSystem fs, Path baseDir, Date lastCommitTime, Date uptoCommit)
        throws IOException {
    LOG.debug("publishMissingPaths lastCommitTime:" + lastCommitTime + " uptoCommit:" + uptoCommit);
    if (lastCommitTime != null) {
        Calendar cal = Calendar.getInstance();
        cal.setTime(lastCommitTime);
        cal.add(Calendar.MINUTE, 1);
        while (cal.getTime().before(uptoCommit)) {
            Path minDir = DatabusStreamReader.getMinuteDirPath(baseDir, cal.getTime());
            fs.mkdirs(minDir);
            LOG.info("Created minDir:" + minDir);
            cal.add(Calendar.MINUTE, 1);
        }
    } else {
        LOG.info("Nothing to publish");
    }
}

From source file:com.inmobi.messaging.consumer.util.TestUtil.java

License:Apache License

static void publishLastPath(FileSystem fs, Path baseDir, Date lastCommitTime) throws IOException {
    if (lastCommitTime != null) {
        Calendar cal = Calendar.getInstance();
        cal.setTime(lastCommitTime);
        cal.add(Calendar.MINUTE, 1);
        Path minDir = DatabusStreamReader.getMinuteDirPath(baseDir, cal.getTime());
        fs.mkdirs(minDir);
        LOG.info("Created minDir:" + minDir);
    }
}

From source file:com.jointhegrid.hive_test.HiveTestBase.java

License:Apache License

public void setUp() throws Exception {
    super.setUp();

    String jarFile = org.apache.hadoop.hive.ql.exec.MapRedTask.class.getProtectionDomain().getCodeSource()
            .getLocation().getFile();
    System.setProperty(HiveConf.ConfVars.HIVEJAR.toString(), jarFile);

    Path rootDir = getDir(ROOT_DIR);
    Configuration conf = createJobConf();
    FileSystem fs = FileSystem.get(conf);
    fs.delete(rootDir, true);
    Path metastorePath = new Path("/tmp/metastore_db");
    fs.delete(metastorePath, true);
    Path warehouse = new Path("/tmp/warehouse");
    fs.delete(warehouse, true);
    fs.mkdirs(warehouse);
}