Example usage for org.apache.hadoop.fs FileSystem setPermission

Introduction

This page collects example usage of org.apache.hadoop.fs.FileSystem#setPermission.

Prototype

public void setPermission(Path p, FsPermission permission) throws IOException 

Document

Set permission of a path.
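
Before the source-file examples below, here is a minimal, self-contained sketch of the call in isolation. It assumes the default Configuration on the classpath points at your cluster and that the target path already exists; the path name /tmp/example and the 0755 mode are illustrative only, not taken from any of the examples.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class SetPermissionExample {

    public static void main(String[] args) throws IOException {
        // Assumes connection settings come from core-site.xml / hdfs-site.xml on the classpath.
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // Illustrative path; it must already exist on the file system.
        Path target = new Path("/tmp/example");

        // 0755 = rwxr-xr-x; the short-valued constructor takes the octal mode.
        fs.setPermission(target, new FsPermission((short) 0755));

        fs.close();
    }
}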

Usage

From source file: org.wso2.carbon.hdfs.mgt.HDFSAdmin.java

License: Open Source License

public boolean moveFile(String srcPath, String dstPath) throws HDFSServerManagementException {

    FsPermission fp = HDFSConstants.DEFAULT_FILE_PERMISSION;
    FileSystem hdfsFS = null;
    try {
        hdfsFS = hdfsAdminHelperInstance.getFSforUser();
    } catch (IOException e) {
        String msg = "Error occurred while trying to mount file system.";
        handleException(msg, e);
    }

    try {
        if (hdfsFS != null) {
            hdfsFS.rename(new Path(srcPath), new Path(dstPath));
            hdfsFS.setPermission(new Path(dstPath), fp);
            return true;
        }

    } catch (IOException e) {
        String msg = "Error occurred while trying to move file.";
        handleException(msg, e);
    }

    return false;
}

From source file: org.wso2.carbon.hdfs.mgt.HDFSAdmin.java

License: Open Source License

/**
 * Creates a folder in the file system.
 * @param folderPath   the folder path of the folder to be created.
 * @return true - if creation is successful.
 *         false - if creation is unsuccessful.
 * @throws HDFSServerManagementException
 */
public boolean makeDirectory(String folderPath) throws HDFSServerManagementException {

    FsPermission fp = HDFSConstants.DEFAULT_FILE_PERMISSION;
    FileSystem hdfsFS = null;
    boolean folderExists = false;
    try {
        Path folder = new Path(folderPath);
        hdfsFS = hdfsAdminHelperInstance.getFSforUser();

        if (hdfsFS != null && !hdfsFS.exists(folder)) {
            hdfsFS.mkdirs(folder, fp);
            hdfsFS.setPermission(folder, fp);
            HDFSAdminHelper.getInstance().setOwnerOfPath(folder);
        } else {
            folderExists = true;
        }
    } catch (IOException e) {
        String msg = "Error occurred while trying to make a directory.";
        handleException(msg, e);
        return false;
    }
    handleItemExistState(folderExists, true, true);
    return true;
}

From source file: org.wso2.carbon.hdfs.mgt.HDFSAdmin.java

License: Open Source License

public void setPermission(String fsPath, String fsPermission) throws HDFSServerManagementException {
    FileSystem hdfsFS = null;
    try {
        hdfsFS = hdfsAdminHelperInstance.getFSforUser();
        if (hdfsFS != null) {
            hdfsFS.setPermission(new Path(fsPath), new FsPermission(fsPermission));
        }

    } catch (IOException e) {
        String msg = "Error occurred while trying to mount file system.";
        handleException(msg, e);
    }
}
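
The examples on this page construct the FsPermission argument in several different ways: from a mode string (as in the setPermission method above), from a triple of FsAction values, and from a numeric constant. A short sketch of a few interchangeable forms, assuming the imports from the earlier sketch plus org.apache.hadoop.fs.permission.FsAction, and assuming fs is an open FileSystem and p an existing Path:

// All three express rwxr-xr-x (0755); fs and p are assumed to exist already.
FsPermission fromOctalShort = new FsPermission((short) 0755);
FsPermission fromActions = new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.READ_EXECUTE);
FsPermission fromSymbolic = FsPermission.valueOf("-rwxr-xr-x");

fs.setPermission(p, fromOctalShort);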

From source file: testjar.GenerateTaskChildProcess.java

License: Apache License

/** 
 * Used to create the child processes for a task.
 * @param conf configuration for a job.
 * @param jobName the name of the mapper job.
 * @throws IOException if an I/O error occurs.
 */
private static void createChildProcess(JobConf conf, String jobName) throws IOException {
    FileSystem fs = FileSystem.getLocal(conf);
    File TMP_ROOT_DIR = new File("/tmp");
    String TEST_ROOT_DIR = TMP_ROOT_DIR.getAbsolutePath() + Path.SEPARATOR + "ChildProc_" + jobName;
    Path scriptDir = new Path(TEST_ROOT_DIR);
    int numOfChildProcesses = 2;

    if (fs.exists(scriptDir)) {
        fs.delete(scriptDir, true);
    }
    fs.mkdirs(scriptDir);
    fs.setPermission(scriptDir, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));

    String scriptDirName = scriptDir.toUri().getPath();
    Random rm = new Random();
    String scriptName = "ShellScript_" + jobName + "_" + rm.nextInt() + ".sh";
    Path scriptPath = new Path(scriptDirName, scriptName);
    String shellScript = scriptPath.toString();
    String script = null;
    if (jobName.equals("AppendStr")) {
        script = "#!/bin/sh\n" + "umask 000\n" + "StrVal=\"Hadoop is framework for data intensive "
                + "distributed applications.\"\n" + "StrVal=\"${StrVal}Hadoop enables applications to work "
                + "with thousands of nodes.\"\n" + "echo $StrVal\n" + "if [ \"X$1\" != \"X0\" ]\nthen\n"
                + "  sh " + shellScript + " $(($1-1))\n" + "else\n" + "  while(true)\n" + "  do\n"
                + "    StrVal=\"$StrVal Hadoop \"\n" + "  done\n" + "fi";
    } else if (jobName.equals("DispStr")) {
        script = "#!/bin/sh\n" + "umask 000\n" + "msg=Welcome\n" + "echo $msg\n"
                + " if [ \"X$1\" != \"X0\" ]\nthen\n" + "  sh " + shellScript + " $(($1-1))\n" + "else\n"
                + "  while(true)\n" + "  do\n" + "    sleep 2 \n" + "  done\n" + "fi";
    } else {
        script = "#!/bin/sh\n" + "umask 000\n" + "msg=Welcome\n" + "echo $msg\n"
                + " if [ \"X$1\" != \"X0\" ]\nthen\n" + "  sh " + shellScript + " $(($1-1))\n" + "else\n"
                + "  for count in {1..1000}\n" + "  do\n" + "    echo \"$msg_$count\" \n" + "  done\n" + "fi";
    }
    DataOutputStream file = fs.create(scriptPath);
    file.writeBytes(script);
    file.close();
    File scriptFile = new File(scriptDirName, scriptName);
    scriptFile.setExecutable(true);
    LOG.info("script absolute path:" + scriptFile.getAbsolutePath());
    String[] cmd = new String[] { scriptFile.getAbsolutePath(), String.valueOf(numOfChildProcesses) };
    ShellCommandExecutor shellExec = new ShellCommandExecutor(cmd);
    shellExec.execute();
}

From source file: voldemort.store.readonly.disk.HadoopStoreWriter.java

License: Apache License

@Override
public void close() throws IOException {

    this.indexFileStream.close();
    this.valueFileStream.close();

    if (this.nodeId == -1 || this.chunkId == -1 || this.partitionId == -1) {
        // Issue 258 - No data was read in the reduce phase, do not create
        // any output
        return;
    }

    // If the replica type read was not valid, shout out
    if (getSaveKeys() && this.replicaType == -1) {
        throw new RuntimeException("Could not read the replica type correctly for node " + nodeId
                + " ( partition - " + this.partitionId + " )");
    }

    String fileNamePrefix = null;
    if (getSaveKeys()) {
        fileNamePrefix = new String(Integer.toString(this.partitionId) + "_"
                + Integer.toString(this.replicaType) + "_" + Integer.toString(this.chunkId));
    } else {
        fileNamePrefix = new String(Integer.toString(this.partitionId) + "_" + Integer.toString(this.chunkId));
    }

    // Initialize the node directory
    Path nodeDir = new Path(this.outputDir, "node-" + this.nodeId);

    // Create output directory, if it doesn't exist
    FileSystem outputFs = nodeDir.getFileSystem(this.conf);
    outputFs.mkdirs(nodeDir);
    outputFs.setPermission(nodeDir, new FsPermission(HadoopStoreBuilder.HADOOP_FILE_PERMISSION));
    logger.info("Setting permission to 755 for " + nodeDir);

    // Write the checksum and output files
    if (this.checkSumType != CheckSumType.NONE) {

        if (this.checkSumDigestIndex != null && this.checkSumDigestValue != null) {
            Path checkSumIndexFile = new Path(nodeDir, fileNamePrefix + ".index.checksum");
            Path checkSumValueFile = new Path(nodeDir, fileNamePrefix + ".data.checksum");

            if (outputFs.exists(checkSumIndexFile)) {
                outputFs.delete(checkSumIndexFile);
            }
            FSDataOutputStream output = outputFs.create(checkSumIndexFile);
            outputFs.setPermission(checkSumIndexFile,
                    new FsPermission(HadoopStoreBuilder.HADOOP_FILE_PERMISSION));
            output.write(this.checkSumDigestIndex.getCheckSum());
            output.close();

            if (outputFs.exists(checkSumValueFile)) {
                outputFs.delete(checkSumValueFile);
            }
            output = outputFs.create(checkSumValueFile);
            outputFs.setPermission(checkSumValueFile,
                    new FsPermission(HadoopStoreBuilder.HADOOP_FILE_PERMISSION));
            output.write(this.checkSumDigestValue.getCheckSum());
            output.close();
        } else {
            throw new RuntimeException("Failed to open checksum digest for node " + nodeId + " ( partition - "
                    + this.partitionId + ", chunk - " + chunkId + " )");
        }
    }

    // Generate the final chunk files
    Path indexFile = new Path(nodeDir, fileNamePrefix + ".index");
    Path valueFile = new Path(nodeDir, fileNamePrefix + ".data");

    logger.info("Moving " + this.taskIndexFileName + " to " + indexFile);
    if (outputFs.exists(indexFile)) {
        outputFs.delete(indexFile);
    }
    outputFs.rename(taskIndexFileName, indexFile);

    logger.info("Moving " + this.taskValueFileName + " to " + valueFile);
    if (outputFs.exists(valueFile)) {
        outputFs.delete(valueFile);
    }
    outputFs.rename(this.taskValueFileName, valueFile);
}

From source file: voldemort.store.readonly.disk.HadoopStoreWriterPerBucket.java

License: Apache License

@Override
public void close() throws IOException {

    for (int chunkId = 0; chunkId < getNumChunks(); chunkId++) {
        this.indexFileStream[chunkId].close();
        this.valueFileStream[chunkId].close();
    }

    if (this.nodeId == -1 || this.partitionId == -1) {
        // Issue 258 - No data was read in the reduce phase, do not create
        // any output
        return;
    }

    // If the replica type read was not valid, shout out
    if (getSaveKeys() && this.replicaType == -1) {
        throw new RuntimeException("Could not read the replica type correctly for node " + nodeId
                + " ( partition - " + this.partitionId + " )");
    }

    String fileNamePrefix = null;
    if (getSaveKeys()) {
        fileNamePrefix = new String(
                Integer.toString(this.partitionId) + "_" + Integer.toString(this.replicaType) + "_");
    } else {
        fileNamePrefix = new String(Integer.toString(this.partitionId) + "_");
    }

    // Initialize the node directory
    Path nodeDir = new Path(this.outputDir, "node-" + this.nodeId);

    // Create output directory, if it doesn't exist
    FileSystem outputFs = nodeDir.getFileSystem(this.conf);
    outputFs.mkdirs(nodeDir);
    outputFs.setPermission(nodeDir, new FsPermission(HadoopStoreBuilder.HADOOP_FILE_PERMISSION));
    logger.info("Setting permission to 755 for " + nodeDir);

    // Write the checksum and output files
    for (int chunkId = 0; chunkId < getNumChunks(); chunkId++) {

        String chunkFileName = fileNamePrefix + Integer.toString(chunkId);
        if (this.checkSumType != CheckSumType.NONE) {

            if (this.checkSumDigestIndex[chunkId] != null && this.checkSumDigestValue[chunkId] != null) {
                Path checkSumIndexFile = new Path(nodeDir, chunkFileName + ".index.checksum");
                Path checkSumValueFile = new Path(nodeDir, chunkFileName + ".data.checksum");

                if (outputFs.exists(checkSumIndexFile)) {
                    outputFs.delete(checkSumIndexFile);
                }
                FSDataOutputStream output = outputFs.create(checkSumIndexFile);
                outputFs.setPermission(checkSumIndexFile,
                        new FsPermission(HadoopStoreBuilder.HADOOP_FILE_PERMISSION));
                output.write(this.checkSumDigestIndex[chunkId].getCheckSum());
                output.close();

                if (outputFs.exists(checkSumValueFile)) {
                    outputFs.delete(checkSumValueFile);
                }
                output = outputFs.create(checkSumValueFile);
                outputFs.setPermission(checkSumValueFile,
                        new FsPermission(HadoopStoreBuilder.HADOOP_FILE_PERMISSION));
                output.write(this.checkSumDigestValue[chunkId].getCheckSum());
                output.close();
            } else {
                throw new RuntimeException("Failed to open checksum digest for node " + nodeId
                        + " ( partition - " + this.partitionId + ", chunk - " + chunkId + " )");
            }
        }

        // Generate the final chunk files
        Path indexFile = new Path(nodeDir, chunkFileName + ".index");
        Path valueFile = new Path(nodeDir, chunkFileName + ".data");

        logger.info("Moving " + this.taskIndexFileName[chunkId] + " to " + indexFile);
        if (outputFs.exists(indexFile)) {
            outputFs.delete(indexFile);
        }
        outputFs.rename(taskIndexFileName[chunkId], indexFile);

        logger.info("Moving " + this.taskValueFileName[chunkId] + " to " + valueFile);
        if (outputFs.exists(valueFile)) {
            outputFs.delete(valueFile);
        }
        outputFs.rename(this.taskValueFileName[chunkId], valueFile);

    }

}

From source file: voldemort.store.readonly.mr.HadoopStoreBuilder.java

License: Apache License

/**
 * Run the job.
 */
public void build() {
    try {
        JobConf conf = new JobConf(config);
        conf.setInt("io.file.buffer.size", DEFAULT_BUFFER_SIZE);
        conf.set("cluster.xml", new ClusterMapper().writeCluster(cluster));
        conf.set("stores.xml",
                new StoreDefinitionsMapper().writeStoreList(Collections.singletonList(storeDef)));
        conf.setBoolean("save.keys", saveKeys);
        conf.setBoolean("reducer.per.bucket", reducerPerBucket);
        if (!isAvro) {
            conf.setPartitionerClass(HadoopStoreBuilderPartitioner.class);
            conf.setMapperClass(mapperClass);
            conf.setMapOutputKeyClass(BytesWritable.class);
            conf.setMapOutputValueClass(BytesWritable.class);
            if (reducerPerBucket) {
                conf.setReducerClass(HadoopStoreBuilderReducerPerBucket.class);
            } else {
                conf.setReducerClass(HadoopStoreBuilderReducer.class);
            }
        }
        conf.setInputFormat(inputFormatClass);
        conf.setOutputFormat(SequenceFileOutputFormat.class);
        conf.setOutputKeyClass(BytesWritable.class);
        conf.setOutputValueClass(BytesWritable.class);
        conf.setJarByClass(getClass());
        conf.setReduceSpeculativeExecution(false);
        FileInputFormat.setInputPaths(conf, inputPath);
        conf.set("final.output.dir", outputDir.toString());
        conf.set("checksum.type", CheckSum.toString(checkSumType));
        FileOutputFormat.setOutputPath(conf, tempDir);

        FileSystem outputFs = outputDir.getFileSystem(conf);
        if (outputFs.exists(outputDir)) {
            throw new IOException("Final output directory already exists.");
        }

        // delete output dir if it already exists
        FileSystem tempFs = tempDir.getFileSystem(conf);
        tempFs.delete(tempDir, true);

        long size = sizeOfPath(tempFs, inputPath);
        logger.info("Data size = " + size + ", replication factor = " + storeDef.getReplicationFactor()
                + ", numNodes = " + cluster.getNumberOfNodes() + ", chunk size = " + chunkSizeBytes);

        // Derive "rough" number of chunks and reducers
        int numReducers;
        if (saveKeys) {

            if (this.numChunks == -1) {
                this.numChunks = Math.max((int) (storeDef.getReplicationFactor() * size
                        / cluster.getNumberOfPartitions() / storeDef.getReplicationFactor() / chunkSizeBytes),
                        1);
            } else {
                logger.info(
                        "Overriding chunk size byte and taking num chunks (" + this.numChunks + ") directly");
            }

            if (reducerPerBucket) {
                numReducers = cluster.getNumberOfPartitions() * storeDef.getReplicationFactor();
            } else {
                numReducers = cluster.getNumberOfPartitions() * storeDef.getReplicationFactor() * numChunks;
            }
        } else {

            if (this.numChunks == -1) {
                this.numChunks = Math.max((int) (storeDef.getReplicationFactor() * size
                        / cluster.getNumberOfPartitions() / chunkSizeBytes), 1);
            } else {
                logger.info(
                        "Overriding chunk size byte and taking num chunks (" + this.numChunks + ") directly");
            }

            if (reducerPerBucket) {
                numReducers = cluster.getNumberOfPartitions();
            } else {
                numReducers = cluster.getNumberOfPartitions() * numChunks;
            }
        }
        conf.setInt("num.chunks", numChunks);
        conf.setNumReduceTasks(numReducers);

        if (isAvro) {
            conf.setPartitionerClass(AvroStoreBuilderPartitioner.class);
            // conf.setMapperClass(mapperClass);
            conf.setMapOutputKeyClass(ByteBuffer.class);
            conf.setMapOutputValueClass(ByteBuffer.class);

            conf.setInputFormat(inputFormatClass);

            conf.setOutputFormat((Class<? extends OutputFormat>) AvroOutputFormat.class);
            conf.setOutputKeyClass(ByteBuffer.class);
            conf.setOutputValueClass(ByteBuffer.class);

            // AvroJob confs for the avro mapper
            AvroJob.setInputSchema(conf, Schema.parse(config.get("avro.rec.schema")));

            AvroJob.setOutputSchema(conf,
                    Pair.getPairSchema(Schema.create(Schema.Type.BYTES), Schema.create(Schema.Type.BYTES)));

            AvroJob.setMapperClass(conf, mapperClass);

            if (reducerPerBucket) {
                conf.setReducerClass(AvroStoreBuilderReducerPerBucket.class);
            } else {
                conf.setReducerClass(AvroStoreBuilderReducer.class);
            }

        }

        logger.info("Number of chunks: " + numChunks + ", number of reducers: " + numReducers + ", save keys: "
                + saveKeys + ", reducerPerBucket: " + reducerPerBucket);
        logger.info("Building store...");
        RunningJob job = JobClient.runJob(conf);

        // Once the job has completed log the counter
        Counters counters = job.getCounters();

        if (saveKeys) {
            if (reducerPerBucket) {
                logger.info("Number of collisions in the job - "
                        + counters.getCounter(KeyValueWriter.CollisionCounter.NUM_COLLISIONS));
                logger.info("Maximum number of collisions for one entry - "
                        + counters.getCounter(KeyValueWriter.CollisionCounter.MAX_COLLISIONS));
            } else {
                logger.info("Number of collisions in the job - "
                        + counters.getCounter(KeyValueWriter.CollisionCounter.NUM_COLLISIONS));
                logger.info("Maximum number of collisions for one entry - "
                        + counters.getCounter(KeyValueWriter.CollisionCounter.MAX_COLLISIONS));
            }
        }

        // Do a CheckSumOfCheckSum - Similar to HDFS
        CheckSum checkSumGenerator = CheckSum.getInstance(this.checkSumType);
        if (!this.checkSumType.equals(CheckSumType.NONE) && checkSumGenerator == null) {
            throw new VoldemortException("Could not generate checksum digest for type " + this.checkSumType);
        }

        // Check if all folder exists and with format file
        for (Node node : cluster.getNodes()) {

            ReadOnlyStorageMetadata metadata = new ReadOnlyStorageMetadata();

            if (saveKeys) {
                metadata.add(ReadOnlyStorageMetadata.FORMAT, ReadOnlyStorageFormat.READONLY_V2.getCode());
            } else {
                metadata.add(ReadOnlyStorageMetadata.FORMAT, ReadOnlyStorageFormat.READONLY_V1.getCode());
            }

            Path nodePath = new Path(outputDir.toString(), "node-" + node.getId());

            if (!outputFs.exists(nodePath)) {
                logger.info("No data generated for node " + node.getId() + ". Generating empty folder");
                outputFs.mkdirs(nodePath); // Create empty folder
                outputFs.setPermission(nodePath, new FsPermission(HADOOP_FILE_PERMISSION));
                logger.info("Setting permission to 755 for " + nodePath);
            }

            if (checkSumType != CheckSumType.NONE) {

                FileStatus[] storeFiles = outputFs.listStatus(nodePath, new PathFilter() {

                    public boolean accept(Path arg0) {
                        if (arg0.getName().endsWith("checksum") && !arg0.getName().startsWith(".")) {
                            return true;
                        }
                        return false;
                    }
                });

                if (storeFiles != null && storeFiles.length > 0) {
                    Arrays.sort(storeFiles, new IndexFileLastComparator());
                    FSDataInputStream input = null;

                    for (FileStatus file : storeFiles) {
                        try {
                            input = outputFs.open(file.getPath());
                            byte fileCheckSum[] = new byte[CheckSum.checkSumLength(this.checkSumType)];
                            input.read(fileCheckSum);
                            logger.debug("Checksum for file " + file.toString() + " - "
                                    + new String(Hex.encodeHex(fileCheckSum)));
                            checkSumGenerator.update(fileCheckSum);
                        } catch (Exception e) {
                            logger.error("Error while reading checksum file " + e.getMessage(), e);
                        } finally {
                            if (input != null)
                                input.close();
                        }
                        outputFs.delete(file.getPath(), false);
                    }

                    metadata.add(ReadOnlyStorageMetadata.CHECKSUM_TYPE, CheckSum.toString(checkSumType));

                    String checkSum = new String(Hex.encodeHex(checkSumGenerator.getCheckSum()));
                    logger.info("Checksum for node " + node.getId() + " - " + checkSum);

                    metadata.add(ReadOnlyStorageMetadata.CHECKSUM, checkSum);
                }
            }

            // Write metadata
            Path metadataPath = new Path(nodePath, ".metadata");
            FSDataOutputStream metadataStream = outputFs.create(metadataPath);
            outputFs.setPermission(metadataPath, new FsPermission(HADOOP_FILE_PERMISSION));
            logger.info("Setting permission to 755 for " + metadataPath);
            metadataStream.write(metadata.toJsonString().getBytes());
            metadataStream.flush();
            metadataStream.close();

        }

    } catch (Exception e) {
        logger.error("Error in Store builder", e);
        throw new VoldemortException(e);
    }

}