Example usage for org.apache.hadoop.fs FileSystem mkdirs

List of usage examples for org.apache.hadoop.fs FileSystem mkdirs

Introduction

This page lists usage examples for org.apache.hadoop.fs.FileSystem#mkdirs.

Prototype

public boolean mkdirs(Path f) throws IOException 

Document

Calls #mkdirs(Path, FsPermission) with the default permission.
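
Before the per-project examples below, here is a minimal, self-contained sketch of the call; the path and permission values are illustrative assumptions and are not taken from any of the source files listed on this page.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class MkdirsExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // mkdirs(Path) creates the directory and any missing parent
        // directories using the default permission, and returns true
        // when the directory hierarchy exists afterwards.
        Path dir = new Path("/tmp/mkdirs-example"); // illustrative path
        if (!fs.mkdirs(dir)) {
            throw new IOException("Mkdirs failed to create " + dir);
        }

        // The two-argument overload accepts an explicit FsPermission.
        Path restricted = new Path(dir, "restricted");
        fs.mkdirs(restricted, new FsPermission((short) 0700));
    }
}

Several of the examples below ignore the boolean return value; others, such as com.cloudera.CacheTool, treat a false return as an error.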

Usage

From source file:com.blackberry.logtools.logmultisearch.java

License:Apache License

@SuppressWarnings("static-access")
public int run(String[] argv) throws Exception {

    //Configuring configuration and filesystem to work on HDFS
    final Configuration conf = getConf(); //Configuration processed by ToolRunner
    FileSystem fs = FileSystem.get(conf);
    //Initiate tools used for running search
    LogTools tools = new LogTools();

    //Other options
    String date_format = "RFC5424";
    String field_separator = "";
    ArrayList<String> D_options = new ArrayList<String>();
    boolean quiet = true;
    boolean silent = false;
    boolean log = false;
    boolean forcelocal = false;
    boolean forceremote = false;

    //The arguments are 
    // - strings
    // - dc number
    // - service
    // - component
    // - startTime (Something 'date' can parse, or just a time in ms from epoch)
    // - endTime (Same as start)
    // - outputDir

    //Indexing for arguments to be passed for Mapreduce
    int stringsNum = 0;
    int dcNum = 1;
    int svcNum = 2;
    int compNum = 3;
    int startNum = 4;
    int endNum = 5;
    int outNum = 6;

    //Parsing through user arguments
    String[] args = new String[7];
    int count = 0; //Count created to track the parse of all arguments
    int argcount = 0; //Count created to track number of arguments to be passed on
    while (count < argv.length) {
        String arg = argv[count];
        count++;
        if (arg.equals("--")) {
            break;
        } else if (arg.startsWith("-")) {
            if (arg.equals("--v")) {
                quiet = tools.parseV(silent);
            } else if (arg.equals("--i")) {
                conf.set("logdriver.search.case.insensitive", "true");
            } else if (arg.equals("--a")) {
                LogTools.logConsole(quiet, silent, warn, "AND searching selected");
                conf.set("logdriver.search.and", "true");
            } else if (arg.startsWith("--dateFormat=")) {
                arg = arg.replace("--dateFormat=", "");
                date_format = arg;
            } else if (arg.startsWith("--fieldSeparator=")) {
                arg = arg.replace("--fieldSeparator=", "");
                field_separator = arg;
            } else if (arg.startsWith("-strings=")) {
                arg = arg.replace("-strings=", "");
                args[stringsNum] = arg;
                argcount++;
            } else if (arg.startsWith("-dc=")) {
                arg = arg.replace("-dc=", "");
                args[dcNum] = arg;
                argcount++;
            } else if (arg.startsWith("-svc=")) {
                arg = arg.replace("-svc=", "");
                args[svcNum] = arg;
                argcount++;
            } else if (arg.startsWith("-comp=")) {
                arg = arg.replace("-comp=", "");
                args[compNum] = arg;
                argcount++;
            } else if (arg.startsWith("-start=")) {
                arg = arg.replace("-start=", "");
                args[startNum] = arg;
                argcount++;
            } else if (arg.startsWith("-end=")) {
                arg = arg.replace("-end=", "");
                args[endNum] = arg;
                argcount++;
            }
            //User inputs output directory that is to be created
            //Check to see if parent directory exists && output directory does not exist
            else if (arg.startsWith("--out=")) {
                args[outNum] = tools.parseOut(arg, fs);
                argcount++;
            } else if (arg.startsWith("-D")) {
                D_options.add(arg);
            } else if (arg.equals("--silent")) {
                silent = tools.parseSilent(quiet);
            } else if (arg.equals("--log")) {
                log = true;
            } else if (arg.equals("--l")) {
                forcelocal = tools.parsePigMode(forceremote);
            } else if (arg.equals("--r")) {
                forceremote = tools.parsePigMode(forcelocal);
            } else {
                LogTools.logConsole(quiet, silent, error, "Unrecognized option: " + arg);
                System.exit(1);
            }
        } else {
            LogTools.logConsole(quiet, silent, error, "Unrecognized option: " + arg);
            System.exit(1);
        }
    }

    //Default output should be stdout represented by "-"
    if (args[outNum] == null) {
        args[outNum] = "-";
        argcount++;
        LogTools.logConsole(quiet, silent, info, "Output set to default stdout.");
    }

    if (argcount < 7) {
        System.err.println(";****************************************" + "\n\t\t\t NOT ENOUGH ARGUMENTS\n"
                + "\n\tUSAGE: logmultisearch [REQUIRED ARGUMENTS] [OPTIONS] (Order does not matter)"
                + "\n\tREQUIRED ARGUMENTS:"
                + "\n\t\t-strings=[STRINGS_DIR|STRINGS_FILE|STRING]   String/file/directory of strings to search."
                + "\n\t\t-dc=[DATACENTER]            Data Center."
                + "\n\t\t-svc=[SERVICE]               Service."
                + "\n\t\t-comp=[COMPONENT]                Component."
                + "\n\t\t-start=[START]               Start time." + "\n\t\t-end=[END]               End time."
                + "\n\tOptions:"
                + "\n\t\t--out=[DIRECTORY]                  Desired output directory. If not defined, output to stdout."
                + "\n\t\t--v                              Verbose output."
                + "\n\t\t--r                              Force remote sort."
                + "\n\t\t--l                              Force local sort."
                + "\n\t\t--dateFormat=[FORMAT]              Valid formats are RFC822, RFC3164 (zero padded day),"
                + "\n\t                                   RFC5424 (default), or any valid format string for FastDateFormat."
                + "\n\t\t--fieldSeparator=X                  The separator to use to separate fields in intermediate"
                + "\n\t                                      files.  Defaults to 'INFORMATION SEPARATOR ONE' (U+001F)."
                + "\n\t\t--silent               Output only the data."
                + "\n\t\t--i                              Make search case insensitive."
                + "\n\t\t--a                              Enable AND searching."
                + "\n\t\t--log                  Save all the logs.\n"
                + ";****************************************");
        System.exit(1);
    }

    //Parse time inputs for start and end of search
    args[startNum] = tools.parseDate(args[startNum]);
    args[endNum] = tools.parseDate(args[endNum]);
    tools.checkTime(args[startNum], args[endNum]);

    //Retrieve 'strings' argument to be able to pass search strings to HDFS
    //Retrieve 'out' argument to determine where output of results should be sent
    String strings = args[stringsNum];
    String out = args[outNum];

    //Generate files to temporarily store output of mapreduce jobs and pig logs locally                
    File local_output = File.createTempFile("tmp.", RandomStringUtils.randomAlphanumeric(10));
    if (log != true) {
        local_output.deleteOnExit();
    }
    File pig_tmp = File.createTempFile("tmp.", RandomStringUtils.randomAlphanumeric(10));
    if (log != true) {
        pig_tmp.deleteOnExit();
    }

    //Name the temp directory for storing results in HDFS
    String tmp = "tmp/logmultisearch-" + RandomStringUtils.randomAlphanumeric(10);

    //Set args[stringsNum] to be location of search strings to be used for the Multisearch
    args[stringsNum] = (StringEscapeUtils.escapeJava(tmp) + "/strings");

    //Set args[outNum] to be temp output directory to be passed onto MultiSearchByTime instead of UserInput argument
    args[outNum] = (StringEscapeUtils.escapeJava(tmp) + "/rawlines");

    //Managing console output - deal with --v/--silent
    Logger LOG = LoggerFactory.getLogger(logmultisearch.class);
    tools.setConsoleOutput(local_output, quiet, silent);

    //Create temp directory in HDFS to store logsearch logs before sorting
    tools.tmpDirHDFS(quiet, silent, fs, conf, tmp, log);

    //If the strings argument is the path of a file, copy the file to HDFS.
    //If the strings argument is the path of a directory, copy all files in the directory to HDFS.
    //If the strings argument is not a path to a file/directory, write to a newly created file in HDFS.
    try {
        File f = new File(strings);
        if (f.isFile()) {
            LogTools.logConsole(quiet, silent, warn, "Strings input is a File...");

            //dos2unix file conversion
            File dos2unix = File.createTempFile("tmp.", RandomStringUtils.randomAlphanumeric(10));
            dos2unix.deleteOnExit();
            tools.dosTounix(f, dos2unix);

            //Copy over temp directory into a new directory in HDFS to be used for logmultisearch
            fs.copyFromLocalFile(new Path(dos2unix.getAbsolutePath()), new Path(tmp + "/strings"));
        } else if (f.isDirectory()) {
            LogTools.logConsole(quiet, silent, warn, "Strings input is a Directory...");

            //Get list of all files in directory to convert from dos2unix
            String[] fileList = f.list();

            //Create temp directory to store all converted files
            File tempDir = Files.createTempDir();
            tempDir.deleteOnExit();

            //Convert all files from dos2unix and write to temp directory
            for (int i = 0; i < fileList.length; i++) {
                File dos2unix = File.createTempFile("unix", fileList[i], tempDir);
                dos2unix.deleteOnExit();
                tools.dosTounix(new File(f.getAbsolutePath() + "/" + fileList[i]), dos2unix);
            }

            //Copy over temp directory into a new directory in HDFS to be used for logmultisearch
            fs.copyFromLocalFile(new Path(tempDir.getAbsolutePath()), new Path(tmp + "/strings"));
        } else {
            LogTools.logConsole(quiet, silent, warn, "Strings input is a search string...");
            //Make directory and file for strings
            fs.mkdirs(new Path(tmp + "/strings"));
            fs.createNewFile(new Path(tmp + "/strings/strings"));
            //Write search strings to file
            FSDataOutputStream hdfsOut = fs.create(new Path(tmp + "/strings/strings"));
            hdfsOut.writeUTF(strings);
            hdfsOut.close();
        }
    } catch (Exception e) {
        e.printStackTrace();
        System.exit(1);
    }

    LogTools.logConsole(quiet, silent, warn, "Searching...");
    LogTools.logConsole(quiet, silent, warn,
            "Passing Arguments: SearchStrings=" + strings + " DC=" + args[dcNum] + " Service=" + args[svcNum]
                    + " Component=" + args[compNum] + " StartTime=" + args[startNum] + " EndTime="
                    + args[endNum] + " Output=" + out);

    //Set standard configuration for running Mapreduce and PIG
    String queue_name = "logsearch";

    //Start Mapreduce job
    tools.runMRJob(quiet, silent, conf, D_options, out, LOG, field_separator, queue_name, args,
            "MultiSearchByTime", new MultiSearchByTime());

    //Before sorting, determine the number of records and size of the results found
    long foundresults = tools.getResults(local_output);
    long size = tools.getSize(foundresults, tmp, fs);

    //Run PIG job if results found
    tools.runPig(silent, quiet, foundresults, size, tmp, out, D_options, queue_name, date_format,
            field_separator, pig_tmp, fs, conf, forcelocal, forceremote);

    //Display location of tmp files if log enabled
    tools.logs(log, local_output, pig_tmp, tmp);

    return 0;
}

From source file:com.blackberry.logtools.LogTools.java

License:Apache License

public void tmpDirHDFS(boolean quiet, boolean silent, FileSystem fs, Configuration conf, String tmp,
        boolean log) {
    logConsole(quiet, silent, info, "Creating new Temp Directory in HDFS: " + tmp);

    try {
        Path path = new Path(tmp);
        if (!(fs.exists(path))) {
            //Create directory
            fs.mkdirs(path);
            if (log != true) {
                fs.deleteOnExit(path);
            }
        }
    } catch (IOException e) {
        if (e.toString().contains("Failed to find any Kerberos")) {
            logConsole(true, true, error, "No/bad Kerberos ticket - please authenticate.");
            System.exit(1);
        } else if (e.toString().contains("quota") && e.toString().contains("exceeded")) {
            logConsole(true, true, error, "Disk quota Exceeded.");
            System.exit(1);
        }
        e.printStackTrace();
        System.exit(1);
    }
}

From source file:com.chinacache.robin.format.action.SimpleMoveFileAction.java

License:Apache License

@Override
public void execute(FileSystem fileSystem, Path filePath) throws IOException {
    String newDestPath = destination;
    newDestPath += "/BILog/src/" + dfs.format(new Date()) + "/" + userName + "/" + channelId + "/";
    Path destPath = new Path(newDestPath);
    LOG.info("Moving file {} to {}", filePath, destPath);
    fileSystem.mkdirs(destPath);
    fileSystem.rename(filePath, destPath);
}

From source file:com.ci.backports.hadoop.hbase.ZHFileOutputFormat.java

License:Apache License

public RecordWriter<ImmutableBytesWritable, KeyValue> getRecordWriter(final TaskAttemptContext context)
        throws IOException, InterruptedException {
    // Get the path of the temporary output file
    final Path outputPath = FileOutputFormat.getOutputPath(context);
    final Path outputdir = new FileOutputCommitter(outputPath, context).getWorkPath();
    Configuration conf = context.getConfiguration();
    final FileSystem fs = outputdir.getFileSystem(conf);
    // These configs. are from hbase-*.xml
    final long maxsize = conf.getLong("hbase.hregion.max.filesize", HConstants.DEFAULT_MAX_FILE_SIZE);
    final int blocksize = conf.getInt("hfile.min.blocksize.size", HFile.DEFAULT_BLOCKSIZE);
    // Invented config.  Add to hbase-*.xml if other than default compression.
    final String compression = conf.get("hfile.compression", Compression.Algorithm.NONE.getName());

    return new RecordWriter<ImmutableBytesWritable, KeyValue>() {
        // Map of families to writers and how much has been output on the writer.
        private final Map<byte[], WriterLength> writers = new TreeMap<byte[], WriterLength>(
                Bytes.BYTES_COMPARATOR);
        private byte[] previousRow = HConstants.EMPTY_BYTE_ARRAY;
        private final byte[] now = Bytes.toBytes(System.currentTimeMillis());
        private boolean rollRequested = false;

        public void write(ImmutableBytesWritable row, KeyValue kv) throws IOException {
            // null input == user explicitly wants to flush
            if (row == null && kv == null) {
                rollWriters();
                return;
            }

            byte[] rowKey = kv.getRow();
            long length = kv.getLength();
            byte[] family = kv.getFamily();
            WriterLength wl = this.writers.get(family);

            // If this is a new column family, verify that the directory exists
            if (wl == null) {
                fs.mkdirs(new Path(outputdir, Bytes.toString(family)));
            }

            // If any of the HFiles for the column families has reached
            // maxsize, we need to roll all the writers
            if (wl != null && wl.written + length >= maxsize) {
                this.rollRequested = true;
            }

            // This can only happen once a row is finished though
            if (rollRequested && Bytes.compareTo(this.previousRow, rowKey) != 0) {
                rollWriters();
            }

            // create a new HLog writer, if necessary
            if (wl == null || wl.writer == null) {
                wl = getNewWriter(family);
            }

            // we now have the proper HLog writer. full steam ahead
            kv.updateLatestStamp(this.now);
            wl.writer.append(kv);
            wl.written += length;

            // Copy the row so we know when a row transitions.
            this.previousRow = rowKey;
        }

        private void rollWriters() throws IOException {
            for (WriterLength wl : this.writers.values()) {
                if (wl.writer != null) {
                    LOG.info("Writer=" + wl.writer.getPath()
                            + ((wl.written == 0) ? "" : ", wrote=" + wl.written));
                    close(wl.writer);
                }
                wl.writer = null;
                wl.written = 0;
            }
            this.rollRequested = false;
        }

        /* Create a new HFile.Writer.
         * @param family
         * @return A WriterLength, containing a new HFile.Writer.
         * @throws IOException
         */
        private WriterLength getNewWriter(byte[] family) throws IOException {
            WriterLength wl = new WriterLength();
            Path familydir = new Path(outputdir, Bytes.toString(family));
            wl.writer = new HFile.Writer(fs, StoreFile.getUniqueFile(fs, familydir), blocksize, compression,
                    KeyValue.KEY_COMPARATOR);
            this.writers.put(family, wl);
            return wl;
        }

        private void close(final HFile.Writer w) throws IOException {
            if (w != null) {
                w.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY, Bytes.toBytes(System.currentTimeMillis()));
                w.appendFileInfo(StoreFile.BULKLOAD_TASK_KEY,
                        Bytes.toBytes(context.getTaskAttemptID().toString()));
                w.appendFileInfo(StoreFile.MAJOR_COMPACTION_KEY, Bytes.toBytes(true));
                w.close();
            }
        }

        public void close(TaskAttemptContext c) throws IOException, InterruptedException {
            for (WriterLength wl : this.writers.values()) {
                close(wl.writer);
            }
        }
    };
}

From source file:com.citic.zxyjs.zwlscx.mapreduce.lib.input.HFileOutputFormatBase.java

License:Apache License

public RecordWriter<ImmutableBytesWritable, KeyValue> getRecordWriter(final TaskAttemptContext context)
        throws IOException, InterruptedException {
    // Get the path of the temporary output file
    final Path outputPath = FileOutputFormat.getOutputPath(context);
    final Path outputdir = new FileOutputCommitter(outputPath, context).getWorkPath();
    final Path ignoreOutputPath = new Path(outputPath + "_ignore");

    final Configuration conf = context.getConfiguration();
    final FileSystem fs = outputdir.getFileSystem(conf);
    // These configs. are from hbase-*.xml
    final long maxsize = conf.getLong(HConstants.HREGION_MAX_FILESIZE, HConstants.DEFAULT_MAX_FILE_SIZE);
    // Invented config. Add to hbase-*.xml if other than default
    // compression.
    final String defaultCompression = conf.get("hfile.compression", Compression.Algorithm.NONE.getName());
    final boolean compactionExclude = conf.getBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
            false);

    if (fs.exists(ignoreOutputPath)) {
        LOG.info("Deleted " + ignoreOutputPath.toString() + " success.");
        fs.delete(ignoreOutputPath, true);
    }

    // create a map from column family to the compression algorithm
    final Map<byte[], String> compressionMap = createFamilyCompressionMap(conf);
    final Map<byte[], String> bloomTypeMap = createFamilyBloomMap(conf);
    final Map<byte[], String> blockSizeMap = createFamilyBlockSizeMap(conf);

    String dataBlockEncodingStr = conf.get(DATABLOCK_ENCODING_CONF_KEY);
    final HFileDataBlockEncoder encoder;
    if (dataBlockEncodingStr == null) {
        encoder = NoOpDataBlockEncoder.INSTANCE;
    } else {
        try {
            encoder = new HFileDataBlockEncoderImpl(DataBlockEncoding.valueOf(dataBlockEncodingStr));
        } catch (IllegalArgumentException ex) {
            throw new RuntimeException("Invalid data block encoding type configured for the param "
                    + DATABLOCK_ENCODING_CONF_KEY + " : " + dataBlockEncodingStr);
        }
    }

    return new RecordWriter<ImmutableBytesWritable, KeyValue>() {
        // Map of families to writers and how much has been output on the
        // writer.
        private final Map<byte[], WriterLength> writers = new TreeMap<byte[], WriterLength>(
                Bytes.BYTES_COMPARATOR);
        private final FSDataOutputStream dos = fs.create(ignoreOutputPath);
        private byte[] previousRow = HConstants.EMPTY_BYTE_ARRAY;
        private final byte[] now = Bytes.toBytes(System.currentTimeMillis());
        private boolean rollRequested = false;

        public void write(ImmutableBytesWritable row, KeyValue kv) throws IOException {
            // null input == user explicitly wants to flush
            if (row == null && kv == null) {
                rollWriters();
                return;
            }

            byte[] rowKey = kv.getRow();
            long length = kv.getLength();
            byte[] family = kv.getFamily();

            if (ignore(kv)) {
                byte[] readBuf = rowKey;
                dos.write(readBuf, 0, readBuf.length);
                dos.write(Bytes.toBytes("\n"));
                return;
            }
            WriterLength wl = this.writers.get(family);

            // If this is a new column family, verify that the directory
            // exists
            if (wl == null) {
                Path path = null;
                path = new Path(outputdir, Bytes.toString(family));
                fs.mkdirs(path);
            }

            // If any of the HFiles for the column families has reached
            // maxsize, we need to roll all the writers
            if (wl != null && wl.written + length >= maxsize) {
                this.rollRequested = true;
            }

            // This can only happen once a row is finished though
            if (rollRequested && Bytes.compareTo(this.previousRow, rowKey) != 0) {
                rollWriters();
            }

            // create a new HLog writer, if necessary
            if (wl == null || wl.writer == null) {
                wl = getNewWriter(family, conf);
            }

            // we now have the proper HLog writer. full steam ahead
            kv.updateLatestStamp(this.now);
            wl.writer.append(kv);
            wl.written += length;

            // Copy the row so we know when a row transitions.
            this.previousRow = rowKey;
        }

        private void rollWriters() throws IOException {
            for (WriterLength wl : this.writers.values()) {
                if (wl.writer != null) {
                    LOG.info("Writer=" + wl.writer.getPath()
                            + ((wl.written == 0) ? "" : ", wrote=" + wl.written));
                    close(wl.writer);
                }
                wl.writer = null;
                wl.written = 0;
            }
            this.rollRequested = false;
        }

        /*
         * Create a new StoreFile.Writer.
         * @param family
         * @return A WriterLength, containing a new StoreFile.Writer.
         * @throws IOException
         */
        private WriterLength getNewWriter(byte[] family, Configuration conf) throws IOException {
            WriterLength wl = new WriterLength();
            Path familydir = new Path(outputdir, Bytes.toString(family));
            String compression = compressionMap.get(family);
            compression = compression == null ? defaultCompression : compression;
            String bloomTypeStr = bloomTypeMap.get(family);
            BloomType bloomType = BloomType.NONE;
            if (bloomTypeStr != null) {
                bloomType = BloomType.valueOf(bloomTypeStr);
            }
            String blockSizeString = blockSizeMap.get(family);
            int blockSize = blockSizeString == null ? HConstants.DEFAULT_BLOCKSIZE
                    : Integer.parseInt(blockSizeString);
            Configuration tempConf = new Configuration(conf);
            tempConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
            wl.writer = new StoreFile.WriterBuilder(conf, new CacheConfig(tempConf), fs, blockSize)
                    .withOutputDir(familydir)
                    .withCompression(AbstractHFileWriter.compressionByName(compression))
                    .withBloomType(bloomType).withComparator(KeyValue.COMPARATOR).withDataBlockEncoder(encoder)
                    .withChecksumType(HStore.getChecksumType(conf))
                    .withBytesPerChecksum(HStore.getBytesPerChecksum(conf)).build();

            this.writers.put(family, wl);
            return wl;
        }

        private void close(final StoreFile.Writer w) throws IOException {
            if (w != null) {
                w.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY, Bytes.toBytes(System.currentTimeMillis()));
                w.appendFileInfo(StoreFile.BULKLOAD_TASK_KEY,
                        Bytes.toBytes(context.getTaskAttemptID().toString()));
                w.appendFileInfo(StoreFile.MAJOR_COMPACTION_KEY, Bytes.toBytes(true));
                w.appendFileInfo(StoreFile.EXCLUDE_FROM_MINOR_COMPACTION_KEY, Bytes.toBytes(compactionExclude));
                w.appendTrackedTimestampsToMetadata();
                w.close();
            }
        }

        public void close(TaskAttemptContext c) throws IOException, InterruptedException {
            dos.flush();
            dos.close();
            for (WriterLength wl : this.writers.values()) {
                close(wl.writer);
            }
        }
    };
}

From source file:com.cloudera.beeswax.Server.java

License:Apache License

/**
 * Hive won't work unless /tmp and /user/hive/warehouse are usable,
 * so we create them for the user.
 */
private static void createDirectoriesAsNecessary() {
    try {
        LOG.debug("Classpath: " + System.getProperty("java.class.path"));
        HiveConf conf = new HiveConf(Driver.class);
        FileSystem fs = FileSystem.get(conf);
        Path tmpDir = new Path("/tmp");
        Path metaDir = new Path(conf.get(HiveConf.ConfVars.METASTOREWAREHOUSE.varname));
        for (Path dir : new Path[] { tmpDir, metaDir }) {
            if (!fs.exists(dir)) {
                if (fs.mkdirs(dir)) {
                    fs.setPermission(dir, new FsPermission((short) 0777));
                    LOG.info("Created " + dir + " with world-writable permissions.");
                } else {
                    LOG.error("Could not create " + dir);
                }
            }
        }
    } catch (IOException e) {
        HiveConf conf = new HiveConf(Driver.class);
        LOG.error("Error while trying to check/create /tmp and warehouse directory "
                + conf.get(HiveConf.ConfVars.METASTOREWAREHOUSE.varname), e);
    }
}

From source file:com.cloudera.CacheTool.java

License:Apache License

public static void createFile(FileSystem fs, Path fileName, long fileLen) throws IOException {
    int bufferLen = 1024;
    assert bufferLen > 0;
    if (!fs.mkdirs(fileName.getParent())) {
        throw new IOException("Mkdirs failed to create " + fileName.getParent().toString());
    }
    FSDataOutputStream out = null;
    try {
        out = fs.create(fileName, true,
                fs.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), (short) 1,
                fs.getDefaultBlockSize(fileName));
        if (fileLen > 0) {
            byte[] toWrite = new byte[bufferLen];
            Random rb = new Random(0);
            long bytesToWrite = fileLen;
            while (bytesToWrite > 0) {
                rb.nextBytes(toWrite);
                int bytesToWriteNext = (bufferLen < bytesToWrite) ? bufferLen : (int) bytesToWrite;

                out.write(toWrite, 0, bytesToWriteNext);
                bytesToWrite -= bytesToWriteNext;
            }
        }
    } finally {
        if (out != null) {
            out.close();
        }
    }
}

From source file:com.cloudera.cdk.data.filesystem.FileSystemDatasetRepository.java

License:Apache License

/**
 * Creates, if necessary, the given the location for {@code descriptor}.
 *
 * @param conf A Configuration
 * @param descriptor A DatasetDescriptor
 */
static void ensureExists(DatasetDescriptor descriptor, Configuration conf) {
    Preconditions.checkArgument(descriptor.getLocation() != null,
            "Cannot get FileSystem for a descriptor with no location");
    final Path dataPath = new Path(descriptor.getLocation());

    final FileSystem fs = fsForPath(dataPath, conf);

    try {
        if (!fs.exists(dataPath)) {
            fs.mkdirs(dataPath);
        }
    } catch (IOException ex) {
        throw new DatasetRepositoryException("Cannot access data location", ex);
    }
}

From source file:com.cloudera.cdk.data.filesystem.FileSystemWriters.java

License:Apache License

@SuppressWarnings("unchecked") // See https://github.com/Parquet/parquet-mr/issues/106
public static <E> DatasetWriter<E> newFileWriter(FileSystem fs, Path path, DatasetDescriptor descriptor) {
    // ensure the path exists
    try {
        fs.mkdirs(path);
    } catch (IOException ex) {
        throw new DatasetWriterException("Could not create path:" + path, ex);
    }

    final Format format = descriptor.getFormat();
    final Path file = new Path(path, uniqueFilename(descriptor.getFormat()));

    if (Formats.PARQUET.equals(format)) {
        return new ParquetFileSystemDatasetWriter(fs, file, descriptor.getSchema());
    } else if (Formats.AVRO.equals(format)) {
        return new FileSystemDatasetWriter.Builder().fileSystem(fs).path(file).schema(descriptor.getSchema())
                .build();
    } else {
        throw new UnknownFormatException("Unknown format:" + format);
    }
}

From source file:com.cloudera.circus.test.TestXTest.java

License:Open Source License

@Test
@TestHadoop
public void testHadoopMapReduce() throws Exception {
    JobConf conf = getHadoopConf();
    FileSystem fs = FileSystem.get(conf);
    JobClient jobClient = new JobClient(conf);
    try {
        Path inputDir = new Path(getHadoopTestDir(), "input");
        Path outputDir = new Path(getHadoopTestDir(), "output");

        fs.mkdirs(inputDir);
        Writer writer = new OutputStreamWriter(fs.create(new Path(inputDir, "data.txt")));
        writer.write("a\n");
        writer.write("b\n");
        writer.write("c\n");
        writer.close();

        JobConf jobConf = getHadoopConf();
        jobConf.setInt("mapred.map.tasks", 1);
        jobConf.setInt("mapred.map.max.attempts", 1);
        jobConf.setInt("mapred.reduce.max.attempts", 1);
        jobConf.set("mapred.input.dir", inputDir.toString());
        jobConf.set("mapred.output.dir", outputDir.toString());
        final RunningJob runningJob = jobClient.submitJob(jobConf);
        waitFor(60 * 1000, true, new Predicate() {
            @Override
            public boolean evaluate() throws Exception {
                return runningJob.isComplete();
            }
        });
        Assert.assertTrue(runningJob.isSuccessful());
        Assert.assertTrue(fs.exists(new Path(outputDir, "part-00000")));
        BufferedReader reader = new BufferedReader(
                new InputStreamReader(fs.open(new Path(outputDir, "part-00000"))));
        Assert.assertTrue(reader.readLine().trim().endsWith("a"));
        Assert.assertTrue(reader.readLine().trim().endsWith("b"));
        Assert.assertTrue(reader.readLine().trim().endsWith("c"));
        Assert.assertNull(reader.readLine());
        reader.close();
    } finally {
        fs.close();
        jobClient.close();
    }
}