Example usage for org.apache.hadoop.fs FileSystem setPermission

List of usage examples for org.apache.hadoop.fs FileSystem setPermission

Introduction

On this page you can find example usage for org.apache.hadoop.fs FileSystem setPermission.

Prototype

public void setPermission(Path p, FsPermission permission) throws IOException 

Document

Set permission of a path.
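
Below is a minimal, self-contained sketch (not taken from the examples that follow) showing a typical call; the path /tmp/example and the rwxr-xr-x mode are arbitrary placeholders.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class SetPermissionExample {
    public static void main(String[] args) throws IOException {
        // Obtain the default FileSystem for the current configuration
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // Placeholder path; it must already exist on the file system
        Path path = new Path("/tmp/example");

        // rwxr-xr-x for owner/group/other
        FsPermission perm = new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.READ_EXECUTE);
        fs.setPermission(path, perm);

        fs.close();
    }
}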

Usage

From source file:com.cloudera.beeswax.Server.java

License:Apache License

/**
 * Hive won't work unless /tmp and /user/hive/warehouse are usable,
 * so we create them for the user.
 */
private static void createDirectoriesAsNecessary() {
    try {
        LOG.debug("Classpath: " + System.getProperty("java.class.path"));
        HiveConf conf = new HiveConf(Driver.class);
        FileSystem fs = FileSystem.get(conf);
        Path tmpDir = new Path("/tmp");
        Path metaDir = new Path(conf.get(HiveConf.ConfVars.METASTOREWAREHOUSE.varname));
        for (Path dir : new Path[] { tmpDir, metaDir }) {
            if (!fs.exists(dir)) {
                if (fs.mkdirs(dir)) {
                    fs.setPermission(dir, new FsPermission((short) 0777));
                    LOG.info("Created " + dir + " with world-writable permissions.");
                } else {
                    LOG.error("Could not create " + dir);
                }
            }
        }
    } catch (IOException e) {
        HiveConf conf = new HiveConf(Driver.class);
        LOG.error("Error while trying to check/create /tmp and warehouse directory "
                + conf.get(HiveConf.ConfVars.METASTOREWAREHOUSE.varname), e);
    }
}

From source file:com.cloudera.hadoop.hdfs.nfs.nfs4.attrs.ModeHandler.java

License:Apache License

@Override
public boolean set(NFS4Handler server, Session session, FileSystem fs, FileStatus fileStatus, StateID stateID,
        Mode attr) throws NFS4Exception, IOException {
    FsPermission perm = new FsPermission((short) attr.getMode());
    fs.setPermission(fileStatus.getPath(), perm);
    return true;
}

From source file:com.cloudera.hoop.client.fs.TestHoopFileSystem.java

License:Open Source License

private void testSetPermission() throws Exception {
    FileSystem fs = FileSystem.get(getHadoopConf());
    Path path = new Path(getHadoopTestDir(), "foo.txt");
    OutputStream os = fs.create(path);
    os.write(1);
    os.close();
    fs.close();

    Configuration conf = new Configuration();
    conf.set("fs.http.impl", HoopFileSystem.class.getName());
    fs = FileSystem.get(getJettyURL().toURI(), conf);
    FsPermission permission1 = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
    fs.setPermission(path, permission1);
    fs.close();

    fs = FileSystem.get(getHadoopConf());
    FileStatus status1 = fs.getFileStatus(path);
    fs.close();
    FsPermission permission2 = status1.getPermission();
    Assert.assertEquals(permission2, permission1);
}

From source file:com.cloudera.hoop.fs.FSSetPermission.java

License:Open Source License

/**
 * Executes the filesystem operation.
 *
 * @param fs filesystem instance to use.
 * @return void.
 * @throws IOException thrown if an IO error occurred.
 */
@Override
public Void execute(FileSystem fs) throws IOException {
    FsPermission fsPermission = FSUtils.getPermission(permission);
    fs.setPermission(path, fsPermission);
    return null;
}

From source file:com.cloudera.recordbreaker.analyzer.DataQuery.java

License:Open Source License

String grabTable(DataDescriptor desc) throws SQLException, IOException {
    // Set up Hive table
    Path p = desc.getFilename();
    String tablename = tableCache.get(p);
    if (tablename == null) {
        tablename = "datatable" + Math.abs(r.nextInt());
        Statement stmt = hiveCon.createStatement();
        try {
            String creatTxt = desc.getHiveCreateTableStatement(tablename);
            LOG.info("Create: " + creatTxt);
            stmt.execute(creatTxt);
            tables.put(p, tablename);
        } finally {
            stmt.close();
        }

        // Copy avro version of data into secret location prior to Hive import
        FileSystem fs = FileSystem.get(conf);
        Path tmpTables = new Path(tmpTablesDir);
        if (!fs.exists(tmpTables)) {
            fs.mkdirs(tmpTables, new FsPermission("-rwxrwxrwx"));
        }
        Path secretDst = new Path(tmpTables, "r" + r.nextInt());
        LOG.info("Preparing Avro data at " + secretDst);
        desc.prepareAvroFile(fs, fs, secretDst, conf);
        fs.setPermission(secretDst, new FsPermission("-rwxrwxrwx"));

        // Import data
        stmt = hiveCon.createStatement();
        try {
            LOG.info("Import data into Hive: " + desc.getHiveImportDataStatement(tablename, secretDst));
            stmt.execute(desc.getHiveImportDataStatement(tablename, secretDst));
            isLoaded.add(p);
        } finally {
            stmt.close();
        }

        // Refresh impala metadata
        stmt = impalaCon.createStatement();
        try {
            try {
                LOG.info("Rebuilding Impala metadata...");
                stmt.execute("INVALIDATE METADATA");
            } catch (Exception iex) {
                LOG.info("Impala metadata rebuild failed: " + iex.toString());
            }
        } finally {
            stmt.close();
        }

        // Insert into table cache
        tableCache.put(p, tablename);
    }
    return tablename;
}

From source file:com.collective.celos.ci.testing.fixtures.deploy.hive.HiveTableDeployer.java

License:Apache License

private Path createTempHdfsFileForInsertion(FixTable fixTable, TestRun testRun) throws Exception {

    Path pathToParent = new Path(testRun.getHdfsPrefix(), ".hive");
    Path pathTo = new Path(pathToParent, UUID.randomUUID().toString());
    FileSystem fileSystem = testRun.getCiContext().getFileSystem();
    fileSystem.mkdirs(pathTo.getParent());
    FSDataOutputStream outputStream = fileSystem.create(pathTo);

    CSVWriter writer = new CSVWriter(new OutputStreamWriter(outputStream), '\t', CSVWriter.NO_QUOTE_CHARACTER);

    for (FixTable.FixRow fixRow : fixTable.getRows()) {
        List<String> rowData = Lists.newArrayList();
        for (String colName : fixTable.getColumnNames()) {
            rowData.add(fixRow.getCells().get(colName));
        }
        String[] dataArray = rowData.toArray(new String[rowData.size()]);
        writer.writeNext(dataArray);
    }

    writer.close();

    fileSystem.setPermission(pathToParent, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
    fileSystem.setPermission(pathTo, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
    return pathTo;
}

From source file:com.datatorrent.stram.util.FSUtil.java

License:Apache License

/**
 * Copied from FileUtil to transfer ownership
 *
 * @param srcFS
 * @param srcStatus
 * @param dstFS
 * @param dst
 * @param deleteSource
 * @param overwrite
 * @param conf
 * @return
 * @throws IOException
 */
public static boolean copy(FileSystem srcFS, FileStatus srcStatus, FileSystem dstFS, Path dst,
        boolean deleteSource, boolean overwrite, Configuration conf) throws IOException {
    Path src = srcStatus.getPath();
    //dst = checkDest(src.getName(), dstFS, dst, overwrite);
    if (srcStatus.isDirectory()) {
        //checkDependencies(srcFS, src, dstFS, dst);
        if (!mkdirs(dstFS, dst)) {
            return false;
        }

        FileStatus contents[] = srcFS.listStatus(src);
        for (int i = 0; i < contents.length; i++) {
            copy(srcFS, contents[i], dstFS, new Path(dst, contents[i].getPath().getName()), deleteSource,
                    overwrite, conf);
        }
    } else {
        InputStream in = null;
        OutputStream out = null;
        try {
            in = srcFS.open(src);
            out = dstFS.create(dst, overwrite);
            org.apache.hadoop.io.IOUtils.copyBytes(in, out, conf, true);
        } catch (IOException e) {
            org.apache.hadoop.io.IOUtils.closeStream(out);
            org.apache.hadoop.io.IOUtils.closeStream(in);
            throw e;
        }
    }

    // TODO: change group and limit write to group
    if (srcStatus.isDirectory()) {
        dstFS.setPermission(dst, new FsPermission((short) 0777));
    } else {
        dstFS.setPermission(dst, new FsPermission((short) 0777)/*"ugo+w"*/);
    }
    //dstFS.setOwner(dst, null, srcStatus.getGroup());

    /*
        try {
          // transfer owner
          // DOES NOT WORK only super user can change file owner
          dstFS.setOwner(dst, srcStatus.getOwner(), srcStatus.getGroup());
        } catch (IOException e) {
          LOG.warn("Failed to change owner on {} to {}", dst, srcStatus.getOwner(), e);
          throw e;
        }
    */
    if (deleteSource) {
        return srcFS.delete(src, true);
    } else {
        return true;
    }

}

From source file:com.datatorrent.stram.util.FSUtil.java

License:Apache License

public static void setPermission(FileSystem fs, Path dst, FsPermission permission) throws IOException {
    FileStatus contents[] = fs.listStatus(dst);
    for (int i = 0; i < contents.length; i++) {
        fs.setPermission(contents[i].getPath(), permission);
    }
    fs.setPermission(dst, permission);
}
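
A hypothetical call to the helper above (assuming fs is an already-open FileSystem and the path and mode are placeholders); note that the helper only updates the directory and its direct children, not deeper levels.

FSUtil.setPermission(fs, new Path("/user/app/staging"), new FsPermission((short) 0775));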

From source file:com.indeed.imhotep.builder.tsv.TsvConverter.java

License:Apache License

private static void makeWorldWritable(FileSystem fs, Path path) throws IOException {
    fs.setPermission(path, FsPermission.valueOf("-rwxrwxrwx"));
}
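
For reference, the symbolic string used above can also be written with the other FsPermission constructors seen in earlier examples; the lines below are a sketch, not part of the original source, and all build the same world-writable permission.

FsPermission a = FsPermission.valueOf("-rwxrwxrwx");                       // 10-character symbolic form
FsPermission b = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL); // per-class FsAction triples
FsPermission c = new FsPermission((short) 0777);                            // octal short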

From source file:com.inmobi.conduit.distcp.tools.mapred.TestCopyMapper.java

License:Apache License

private static void changeUserGroup(String user, String group) throws IOException {
    FileSystem fs = cluster.getFileSystem();
    FsPermission changedPermission = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL);
    for (Path path : pathList)
        if (fs.isFile(path)) {
            fs.setOwner(path, user, group);
            fs.setPermission(path, changedPermission);
        }
}