Example usage for org.apache.hadoop.fs FileSystem getConf

List of usage examples for org.apache.hadoop.fs FileSystem getConf

Introduction

On this page you can find example usages of org.apache.hadoop.fs.FileSystem.getConf().

Prototype

@Override
public Configuration getConf()
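
A minimal, self-contained sketch of how getConf() is typically used is shown below: the Configuration returned by a FileSystem instance reflects the settings it was created with (fs.defaultFS, buffer sizes, and so on). The property keys are standard Hadoop configuration names; the class name GetConfExample is illustrative and not taken from the examples that follow.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class GetConfExample {
    public static void main(String[] args) throws IOException {
        // Loads core-site.xml / hdfs-site.xml from the classpath, if present.
        Configuration conf = new Configuration();
        // FileSystem instance for the default URI (fs.defaultFS).
        FileSystem fs = FileSystem.get(conf);

        // getConf() returns the Configuration this FileSystem was initialized with,
        // which is useful when only the FileSystem reference is passed around.
        Configuration fsConf = fs.getConf();
        System.out.println("fs.defaultFS        = " + fsConf.get("fs.defaultFS"));
        System.out.println("io.file.buffer.size = " + fsConf.getInt("io.file.buffer.size", 4096));
    }
}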

Usage

From source file:org.apache.oozie.action.hadoop.FsActionExecutor.java

License:Apache License

/**
 * Delete path
 *
 * @param context
 * @param fsConf
 * @param nameNodePath
 * @param path
 * @param skipTrash
 * @throws ActionExecutorException
 */
public void delete(Context context, XConfiguration fsConf, Path nameNodePath, Path path, boolean skipTrash)
        throws ActionExecutorException {
    URI uri = path.toUri();
    URIHandler handler;
    try {
        handler = Services.get().get(URIHandlerService.class).getURIHandler(uri);
        if (handler instanceof FSURIHandler) {
            // Use legacy code to handle hdfs partition deletion
            path = resolveToFullPath(nameNodePath, path, true);
            final FileSystem fs = getFileSystemFor(path, context, fsConf);
            Path[] pathArr = FileUtil.stat2Paths(fs.globStatus(path));
            if (pathArr != null && pathArr.length > 0) {
                checkGlobMax(pathArr);
                for (final Path p : pathArr) {
                    if (fs.exists(p)) {
                        if (!skipTrash) {
                            // Moving directory/file to trash of user.
                            UserGroupInformationService ugiService = Services.get()
                                    .get(UserGroupInformationService.class);
                            UserGroupInformation ugi = ugiService
                                    .getProxyUser(fs.getConf().get(OozieClient.USER_NAME));
                            ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
                                @Override
                                public FileSystem run() throws Exception {
                                    Trash trash = new Trash(fs.getConf());
                                    if (!trash.moveToTrash(p)) {
                                        throw new ActionExecutorException(
                                                ActionExecutorException.ErrorType.ERROR, "FS005",
                                                "Could not move path [{0}] to trash on delete", p);
                                    }
                                    return null;
                                }
                            });
                        } else if (!fs.delete(p, true)) {
                            throw new ActionExecutorException(ActionExecutorException.ErrorType.ERROR, "FS005",
                                    "delete, path [{0}] could not delete path", p);
                        }
                    }
                }
            }
        } else {
            handler.delete(uri, handler.getContext(uri, fsConf, context.getWorkflow().getUser(), false));
        }
    } catch (Exception ex) {
        throw convertException(ex);
    }
}

From source file:org.apache.oozie.action.hadoop.TestPipesMain.java

License:Apache License

public Void call() throws Exception {

    Path programPath = new Path(getFsTestCaseDir(), "wordcount-simple");

    FileSystem fs = getFileSystem();

    InputStream is = Thread.currentThread().getContextClassLoader().getResourceAsStream("wordcount-simple");
    if (is != null) {
        OutputStream os = fs.create(programPath);
        IOUtils.copyStream(is, os);

        Path inputDir = new Path(getFsTestCaseDir(), "input");
        fs.mkdirs(inputDir);
        Writer writer = new OutputStreamWriter(fs.create(new Path(inputDir, "data.txt")));
        writer.write("hello");
        writer.close();

        Path outputDir = new Path(getFsTestCaseDir(), "output");

        XConfiguration jobConf = new XConfiguration();
        XConfiguration.copy(createJobConf(), jobConf);

        jobConf.set("user.name", getTestUser());

        jobConf.setInt("mapred.map.tasks", 1);
        jobConf.setInt("mapred.map.max.attempts", 1);
        jobConf.setInt("mapred.reduce.max.attempts", 1);

        jobConf.set("mapred.input.dir", inputDir.toString());
        jobConf.set("mapred.output.dir", outputDir.toString());

        jobConf.set("oozie.pipes.program", programPath.toUri().getPath());
        jobConf.setBoolean("hadoop.pipes.java.recordreader", true);

        DistributedCache.addCacheFile(new URI(programPath.toUri().getPath()), fs.getConf());

        File actionXml = new File(getTestCaseDir(), "action.xml");
        os = new FileOutputStream(actionXml);
        jobConf.writeXml(os);
        os.close();

        File newId = new File(getTestCaseDir(), "newId");

        System.setProperty("oozie.action.conf.xml", actionXml.getAbsolutePath());
        System.setProperty("oozie.action.newId", newId.getAbsolutePath());

        String[] args = {};

        PipesMain.main(args);

        assertTrue(newId.exists());

        is = new FileInputStream(newId);
        Properties props = new Properties();
        props.load(is);
        is.close();

        assertTrue(props.containsKey("id"));
    } else {
        System.out.println(
                "SKIPPING TEST: TestPipesMain, binary 'wordcount-simple' not available in the classpath");
    }
    return null;
}

From source file:org.apache.oozie.test.XTestCase.java

License:Apache License

private void setUpEmbeddedHadoop2() throws Exception {
    if (dfsCluster != null && dfsCluster2 == null) {
        // Trick dfs location for MiniDFSCluster since it doesn't accept location as input
        String testBuildDataSaved = System.getProperty("test.build.data", "build/test/data");
        try {
            System.setProperty("test.build.data", FilenameUtils.concat(testBuildDataSaved, "2"));
            // Only DFS cluster is created based upon current need
            dfsCluster2 = new MiniDFSCluster(createDFSConfig(), 2, true, null);
            FileSystem fileSystem = dfsCluster2.getFileSystem();
            fileSystem.mkdirs(new Path("target/test-data"));
            fileSystem.mkdirs(new Path("/user"));
            fileSystem.mkdirs(new Path("/tmp"));
            fileSystem.setPermission(new Path("target/test-data"), FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
            System.setProperty(OOZIE_TEST_NAME_NODE2, fileSystem.getConf().get("fs.default.name"));
        } catch (Exception ex) {
            shutdownMiniCluster2();
            throw ex;
        } finally {
            // Restore previous value
            System.setProperty("test.build.data", testBuildDataSaved);
        }
    }
}

From source file:org.apache.parquet.cli.util.Schemas.java

License:Apache License

public static Schema fromParquet(Configuration conf, URI location) throws IOException {
    Path path = new Path(location);
    FileSystem fs = path.getFileSystem(conf);

    ParquetMetadata footer = ParquetFileReader.readFooter(fs.getConf(), path);

    String schemaString = footer.getFileMetaData().getKeyValueMetaData().get("parquet.avro.schema");
    if (schemaString == null) {
        // try the older property
        schemaString = footer.getFileMetaData().getKeyValueMetaData().get("avro.schema");
    }

    if (schemaString != null) {
        return new Schema.Parser().parse(schemaString);
    } else {
        return new AvroSchemaConverter().convert(footer.getFileMetaData().getSchema());
    }
}

From source file:org.apache.sentry.core.common.utils.PolicyFiles.java

License:Apache License

/**
 * Save the specified Sentry configuration file to the desired location
 *
 * @param iniFile The Sentry configuration ini file to be saved
 * @param fileSystem The {@linkplain FileSystem} instance to be used
 * @param path The path on the {@linkplain FileSystem} where the configuration file should be stored.
 * @throws IOException in case of I/O errors
 */
public static void writeToPath(Ini iniFile, FileSystem fileSystem, Path path) throws IOException {
    if (fileSystem.exists(path)) {
        throw new IllegalArgumentException("The specified path " + path + " already exists!");
    }

    List<String> sectionStrs = new ArrayList<>();
    for (String sectionName : PolicyFileConstants.SECTION_NAMES) {
        sectionStrs.add(toString(sectionName, iniFile.getSection(sectionName)));
    }

    String contents = Joiner.on(NL).join(sectionStrs.iterator());
    try (OutputStream out = fileSystem.create(path)) {
        ByteArrayInputStream in = new ByteArrayInputStream(contents.getBytes(StandardCharsets.UTF_8));
        IOUtils.copyBytes(in, out, fileSystem.getConf());
    }
}

From source file:org.apache.sentry.tests.e2e.hive.hiveserver.HiveServerFactory.java

License:Apache License

public static HiveServer create(HiveServer2Type type, Map<String, String> properties, File baseDir,
        File confDir, File logDir, String policyFile, FileSystem fileSystem) throws Exception {

    if (type.equals(HiveServer2Type.UnmanagedHiveServer2)) {
        LOGGER.info("Creating UnmanagedHiveServer");
        return new UnmanagedHiveServer();
    }
    if (!properties.containsKey(WAREHOUSE_DIR)) {
        LOGGER.info("fileSystem " + fileSystem.getClass().getSimpleName());
        if (fileSystem instanceof DistributedFileSystem) {
            @SuppressWarnings("static-access")
            String dfsUri = fileSystem.getDefaultUri(fileSystem.getConf()).toString();
            LOGGER.info("dfsUri " + dfsUri);
            properties.put(WAREHOUSE_DIR, dfsUri + "/data");
            fileSystem.mkdirs(new Path("/data/"), new FsPermission((short) 0777));
        } else {
            properties.put(WAREHOUSE_DIR, new File(baseDir, "warehouse").getPath());
            fileSystem.mkdirs(new Path("/", "warehouse"), new FsPermission((short) 0777));
        }
    }
    Boolean policyOnHDFS = Boolean.valueOf(System.getProperty("sentry.e2etest.policyonhdfs", "false"));
    if (policyOnHDFS) {
        // Initialize "hive.exec.scratchdir"; according to the description of
        // "hive.exec.scratchdir", the permission should be (733).
        // <description>HDFS root scratch dir for Hive jobs which gets created with write
        // all (733) permission. For each connecting user, an HDFS scratch dir:
        // ${hive.exec.scratchdir}/&lt;username&gt; is created,
        // with ${hive.scratch.dir.permission}.</description>
        fileSystem.mkdirs(new Path("/tmp/hive/"));
        fileSystem.setPermission(new Path("/tmp/hive/"), new FsPermission((short) 0733));
    } else {
        LOGGER.info("Setting an readable path to hive.exec.scratchdir");
        properties.put("hive.exec.scratchdir", new File(baseDir, "scratchdir").getPath());
    }
    if (!properties.containsKey(METASTORE_CONNECTION_URL)) {
        properties.put(METASTORE_CONNECTION_URL,
                String.format("jdbc:derby:;databaseName=%s;create=true;createDatabaseIfNotExist=true",
                        new File(baseDir, "metastore").getPath()));
        properties.put("datanucleus.schema.autoCreateTables", "true");
    }
    if (!properties.containsKey(ACCESS_TESTING_MODE)) {
        properties.put(ACCESS_TESTING_MODE, "true");
    }
    if (!properties.containsKey(AUTHZ_PROVIDER_RESOURCE)) {
        LOGGER.info("Policy File location: " + policyFile);
        properties.put(AUTHZ_PROVIDER_RESOURCE, policyFile);
    }
    if (!properties.containsKey(AUTHZ_PROVIDER)) {
        properties.put(AUTHZ_PROVIDER, LocalGroupResourceAuthorizationProvider.class.getName());
    }
    if (!properties.containsKey(AUTHZ_SERVER_NAME)) {
        properties.put(AUTHZ_SERVER_NAME, DEFAULT_AUTHZ_SERVER_NAME);
    }
    if (!properties.containsKey(HS2_PORT)) {
        properties.put(HS2_PORT, String.valueOf(findPort()));
    }
    if (!properties.containsKey(SUPPORT_CONCURRENCY)) {
        properties.put(SUPPORT_CONCURRENCY, "false");
    }
    if (!properties.containsKey(HADOOPBIN)) {
        properties.put(HADOOPBIN, "./target/test-classes/hadoop");
    }

    // Modify the test resource to have executable permission
    java.nio.file.Path hadoopPath = FileSystems.getDefault().getPath("target/test-classes", "hadoop");
    if (hadoopPath != null) {
        hadoopPath.toFile().setExecutable(true);
    }

    if (HiveServer2Type.InternalMetastore.equals(type)) {
        // The configuration sentry.metastore.service.users is for the user who
        // has all access to get the metadata.
        properties.put(METASTORE_BYPASS, "accessAllMetaUser");

        if (!properties.containsKey(METASTORE_URI)) {
            properties.put(METASTORE_URI, "thrift://localhost:" + String.valueOf(findPort()));
        }

        if (!properties.containsKey(METASTORE_HOOK)) {
            properties.put(METASTORE_HOOK, "org.apache.sentry.binding.metastore.MetastoreAuthzBinding");
        }
        properties.put(ConfVars.METASTORESERVERMINTHREADS.varname, "5");
    }

    properties.put(ConfVars.HIVE_AUTHORIZATION_ENABLED.varname, "true");
    properties.put(ConfVars.HIVE_AUTHORIZATION_MANAGER.varname, SentryHiveAuthorizerFactory.class.getName());

    // CBO has a bug on Hive 2.0.0 with VIEWS because ReadIdentity objects are sent without
    // parent information for partitioned columns
    properties.put(ConfVars.HIVE_CBO_ENABLED.varname, "false");

    // Hive 2.x set the following configuration to TRUE by default and it causes test issues on
    // Sentry because we're trying to change columns with different column types
    properties.put(ConfVars.METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES.varname, "false");

    // This configuration will avoid starting the HS2 WebUI that was causing test failures when
    // HS2 is configured for concurrency
    properties.put(ConfVars.HIVE_IN_TEST.varname, "true");

    // This configuration is used by SentryHiveAuthorizerFactory to change the client type
    // to HIVESERVER2 if we're using the authorization V2 in test mode.
    properties.put(ConfVars.HIVE_TEST_AUTHORIZATION_SQLSTD_HS2_MODE.varname, "true");

    // Sets the hadoop temporary directory specified by java.io.tmpdir (already set to the
    // maven build directory) to avoid writing to /tmp directly.
    String hadoopTempDir = System.getProperty("java.io.tmpdir") + File.separator + "hadoop-tmp";
    properties.put("hadoop.tmp.dir", hadoopTempDir);

    // This configuration prevents the HMS from failing if the metastore schema has no version
    // information. For some reason, HMS does not set a version initially in our tests.
    properties.put(ConfVars.METASTORE_SCHEMA_VERIFICATION.varname, "false");

    // Disable join cartesian checks to allow Sentry tests to pass
    properties.put(ConfVars.HIVE_STRICT_CHECKS_CARTESIAN.varname, "false");

    // Disable capability checks (these checks do not work when Hive is in testing mode)
    properties.put(ConfVars.METASTORE_CAPABILITY_CHECK.varname, "false");

    if (!properties.containsKey(METASTORE_BYPASS)) {
        properties.put(METASTORE_BYPASS, "hive,impala," + System.getProperty("user.name", ""));
    } else {
        String tempByPass = properties.get(METASTORE_BYPASS);
        tempByPass = "hive,impala," + System.getProperty("user.name", "") + "," + tempByPass;
        properties.put(METASTORE_BYPASS, tempByPass);
    }

    if (!properties.containsKey(HiveAuthzConf.AuthzConfVars.AUTHZ_SERVER_NAME.getVar())) {
        properties.put(HiveAuthzConf.AuthzConfVars.AUTHZ_SERVER_NAME.getVar(), "server1");
    }

    properties.put(METASTORE_SETUGI, "true");
    properties.put(METASTORE_CLIENT_TIMEOUT, "100");
    properties.put(ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS.varname, "true");

    properties.put(ConfVars.HIVESTATSAUTOGATHER.varname, "false");
    properties.put(ConfVars.HIVE_STATS_COLLECT_SCANCOLS.varname, "true");
    String hadoopBinPath = properties.get(HADOOPBIN);
    Assert.assertNotNull(hadoopBinPath, "Hadoop Bin");
    File hadoopBin = new File(hadoopBinPath);
    if (!hadoopBin.isFile()) {
        Assert.fail("Path to hadoop bin " + hadoopBin.getPath() + " is invalid. "
                + "Perhaps you missed the download-hadoop profile.");
    }

    /*
     * This hack, setting the hiveSiteURL field removes a previous hack involving
     * setting of system properties for each property. Although both are hacks,
     * I prefer this hack because once the system properties are set they can
     * affect later tests unless those tests clear them. This hack allows for
     * a clean switch to a new set of defaults when a new HiveConf object is created.
     */
    Reflection.staticField("hiveSiteURL").ofType(URL.class).in(HiveConf.class).set(null);
    HiveConf hiveConf = new HiveConf();
    HiveAuthzConf authzConf = new HiveAuthzConf(Resources.getResource("sentry-site.xml"));
    for (Map.Entry<String, String> entry : properties.entrySet()) {
        LOGGER.info(entry.getKey() + " => " + entry.getValue());
        hiveConf.set(entry.getKey(), entry.getValue());
        authzConf.set(entry.getKey(), entry.getValue());
    }
    File hiveSite = new File(confDir, "hive-site.xml");
    File accessSite = new File(confDir, HiveAuthzConf.AUTHZ_SITE_FILE);
    OutputStream out = new FileOutputStream(accessSite);
    authzConf.writeXml(out);
    out.close();
    // points hive-site.xml at access-site.xml
    hiveConf.set(HiveAuthzConf.HIVE_SENTRY_CONF_URL, "file:///" + accessSite.getPath());

    if (!properties.containsKey(HiveConf.ConfVars.HIVE_SERVER2_SESSION_HOOK.varname)) {
        hiveConf.set(HiveConf.ConfVars.HIVE_SERVER2_SESSION_HOOK.varname,
                "org.apache.sentry.binding.hive.HiveAuthzBindingSessionHook");
    }
    hiveConf.set(HIVESERVER2_IMPERSONATION, "false");
    out = new FileOutputStream(hiveSite);
    hiveConf.writeXml(out);
    out.close();

    Reflection.staticField("hiveSiteURL").ofType(URL.class).in(HiveConf.class).set(hiveSite.toURI().toURL());

    switch (type) {
    case EmbeddedHiveServer2:
        LOGGER.info("Creating EmbeddedHiveServer");
        return new EmbeddedHiveServer();
    case InternalHiveServer2:
        LOGGER.info("Creating InternalHiveServer");
        return new InternalHiveServer(hiveConf);
    case InternalMetastore:
        LOGGER.info("Creating InternalMetastoreServer");
        return new InternalMetastoreServer(hiveConf);
    case ExternalHiveServer2:
        LOGGER.info("Creating ExternalHiveServer");
        return new ExternalHiveServer(hiveConf, confDir, logDir);
    default:
        throw new UnsupportedOperationException(type.name());
    }
}

From source file:org.apache.sentry.tests.e2e.hiveserver.HiveServerFactory.java

License:Apache License

private static HiveServer create(HiveServer2Type type, Map<String, String> properties, File baseDir,
        File confDir, File logDir, File policyFile, FileSystem fileSystem) throws Exception {
    if (!properties.containsKey(WAREHOUSE_DIR)) {
        LOGGER.error("fileSystem " + fileSystem.getClass().getSimpleName());
        if (fileSystem instanceof DistributedFileSystem) {
            @SuppressWarnings("static-access")
            String dfsUri = fileSystem.getDefaultUri(fileSystem.getConf()).toString();
            LOGGER.error("dfsUri " + dfsUri);
            properties.put(WAREHOUSE_DIR, dfsUri + "/data");
        } else {
            properties.put(WAREHOUSE_DIR, new File(baseDir, "warehouse").getPath());
        }
    }
    if (!properties.containsKey(METASTORE_CONNECTION_URL)) {
        properties.put(METASTORE_CONNECTION_URL, String.format("jdbc:derby:;databaseName=%s;create=true",
                new File(baseDir, "metastore").getPath()));
    }
    if (policyFile.exists()) {
        LOGGER.info("Policy file " + policyFile + " exists");
    } else {
        LOGGER.info("Creating policy file " + policyFile);
        FileOutputStream to = new FileOutputStream(policyFile);
        Resources.copy(Resources.getResource(AUTHZ_PROVIDER_FILENAME), to);
        to.close();
    }
    if (!properties.containsKey(ACCESS_TESTING_MODE)) {
        properties.put(ACCESS_TESTING_MODE, "true");
    }
    if (!properties.containsKey(AUTHZ_PROVIDER_RESOURCE)) {
        properties.put(AUTHZ_PROVIDER_RESOURCE, policyFile.getPath());
    }
    if (!properties.containsKey(AUTHZ_PROVIDER)) {
        properties.put(AUTHZ_PROVIDER, LocalGroupResourceAuthorizationProvider.class.getName());
    }
    if (!properties.containsKey(AUTHZ_SERVER_NAME)) {
        properties.put(AUTHZ_SERVER_NAME, DEFAULT_AUTHZ_SERVER_NAME);
    }
    if (!properties.containsKey(HS2_PORT)) {
        properties.put(HS2_PORT, String.valueOf(findPort()));
    }
    if (!properties.containsKey(SUPPORT_CONCURRENCY)) {
        properties.put(SUPPORT_CONCURRENCY, "false");
    }
    if (!properties.containsKey(HADOOPBIN)) {
        properties.put(HADOOPBIN, "./target/hadoop/bin/hadoop");
    }
    String hadoopBinPath = properties.get(HADOOPBIN);
    Assert.assertNotNull(hadoopBinPath, "Hadoop Bin");
    File hadoopBin = new File(hadoopBinPath);
    if (!hadoopBin.isFile()) {
        Assert.fail("Path to hadoop bin " + hadoopBin.getPath() + " is invalid. "
                + "Perhaps you missed the download-hadoop profile.");
    }
    /*
     * This hack, setting the hiveSiteURL field removes a previous hack involving
     * setting of system properties for each property. Although both are hacks,
     * I prefer this hack because once the system properties are set they can
     * affect later tests unless those tests clear them. This hack allows for
     * a clean switch to a new set of defaults when a new HiveConf object is created.
     */
    Reflection.staticField("hiveSiteURL").ofType(URL.class).in(HiveConf.class).set(null);
    HiveConf hiveConf = new HiveConf();
    HiveAuthzConf authzConf = new HiveAuthzConf(Resources.getResource("sentry-site.xml"));
    for (Map.Entry<String, String> entry : properties.entrySet()) {
        LOGGER.info(entry.getKey() + " => " + entry.getValue());
        hiveConf.set(entry.getKey(), entry.getValue());
        authzConf.set(entry.getKey(), entry.getValue());
    }
    File hiveSite = new File(confDir, "hive-site.xml");
    File accessSite = new File(confDir, HiveAuthzConf.AUTHZ_SITE_FILE);
    OutputStream out = new FileOutputStream(accessSite);
    authzConf.writeXml(out);
    out.close();
    // points hive-site.xml at access-site.xml
    hiveConf.set(HiveAuthzConf.HIVE_ACCESS_CONF_URL, accessSite.toURI().toURL().toExternalForm());
    if (!properties.containsKey(HiveConf.ConfVars.HIVE_SERVER2_SESSION_HOOK.varname)) {
        hiveConf.set(HiveConf.ConfVars.HIVE_SERVER2_SESSION_HOOK.varname,
                "org.apache.sentry.binding.hive.HiveAuthzBindingSessionHook");
    }
    out = new FileOutputStream(hiveSite);
    hiveConf.writeXml(out);
    out.close();

    Reflection.staticField("hiveSiteURL").ofType(URL.class).in(HiveConf.class).set(hiveSite.toURI().toURL());

    switch (type) {
    case EmbeddedHiveServer2:
        LOGGER.info("Creating EmbeddedHiveServer");
        return new EmbeddedHiveServer();
    case InternalHiveServer2:
        LOGGER.info("Creating InternalHiveServer");
        return new InternalHiveServer(hiveConf);
    case ExternalHiveServer2:
        LOGGER.info("Creating ExternalHiveServer");
        return new ExternalHiveServer(hiveConf, confDir, logDir);
    case UnmanagedHiveServer2:
        LOGGER.info("Creating UnmanagedHiveServer");
        return new UnmanagedHiveServer(hiveConf);
    default:
        throw new UnsupportedOperationException(type.name());
    }
}

From source file:org.apache.slider.test.ContractTestUtils.java

License:Apache License

/**
 * Write a file.
 * Optional flags control
 * whether file overwrite operations should be enabled.
 * @param fs filesystem
 * @param path path to write to
 * @param src source byte array containing the data to write
 * @param len length of data
 * @param buffersize buffer size to use for the output stream
 * @param overwrite should the create option allow overwrites?
 * @throws IOException IO problems
 */
public static void writeDataset(FileSystem fs, Path path, byte[] src, int len, int buffersize,
        boolean overwrite) throws IOException {
    assertTrue("Not enough data in source array to write " + len + " bytes", src.length >= len);
    FSDataOutputStream out = fs.create(path, overwrite, fs.getConf().getInt(IO_FILE_BUFFER_SIZE, 4096),
            (short) 1, buffersize);
    out.write(src, 0, len);
    out.close();
    assertFileHasLength(fs, path, len);
}

From source file:org.apache.slider.test.ContractTestUtils.java

License:Apache License

/**
 * Creates and reads a file with the given size. The test file is generated
 * according to a specific pattern so it can be easily verified even if it's
 * a multi-GB one.
 * During the read phase the incoming data stream is also checked against
 * this pattern.
 *
 * @param fs FileSystem
 * @param parent Test file parent dir path
 * @param fileSize size of the test file to create and verify
 * @throws IOException
 *    thrown if an I/O error occurs while writing or reading the test file
 */
public static void createAndVerifyFile(FileSystem fs, Path parent, final long fileSize) throws IOException {
    int testBufferSize = fs.getConf().getInt(IO_CHUNK_BUFFER_SIZE, DEFAULT_IO_CHUNK_BUFFER_SIZE);
    int modulus = fs.getConf().getInt(IO_CHUNK_MODULUS_SIZE, DEFAULT_IO_CHUNK_MODULUS_SIZE);

    final String objectName = UUID.randomUUID().toString();
    final Path objectPath = new Path(parent, objectName);

    // Write test file in a specific pattern
    assertEquals(fileSize, generateTestFile(fs, objectPath, fileSize, testBufferSize, modulus));
    assertPathExists(fs, "not created successful", objectPath);

    // Now read the same file back and verify its content
    try {
        verifyReceivedData(fs, objectPath, fileSize, testBufferSize, modulus);
    } finally {
        // Delete test file
        fs.delete(objectPath, false);
    }
}

From source file:org.apache.solr.store.hdfs.HdfsFileWriter.java

License:Apache License

public HdfsFileWriter(FileSystem fileSystem, Path path) throws IOException {
    LOG.debug("Creating writer on {}", path);
    this.path = path;

    Configuration conf = fileSystem.getConf();
    FsServerDefaults fsDefaults = fileSystem.getServerDefaults(path);
    EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE);
    if (Boolean.getBoolean(HDFS_SYNC_BLOCK)) {
        flags.add(CreateFlag.SYNC_BLOCK);
    }
    outputStream = fileSystem.create(path, FsPermission.getDefault().applyUMask(FsPermission.getUMask(conf)),
            flags, fsDefaults.getFileBufferSize(), fsDefaults.getReplication(), fsDefaults.getBlockSize(),
            null);
}