Example usage for org.apache.hadoop.fs.permission FsPermission getDefault

List of usage examples for org.apache.hadoop.fs.permission FsPermission getDefault

Introduction

On this page you can find example usages of org.apache.hadoop.fs.permission FsPermission getDefault.

Prototype

public static FsPermission getDefault() 

Source Link

Document

Get the default permission for directory and symlink.

Usage

From source file:audr.text.utils.FileUtils.java

License:Open Source License

/**
 * Creates the HDFS index directory tree: for every text category, the
 * original-input directory and the LF-input directory, plus one directory
 * per index shard.  All directories are created with the default permission.
 *
 * @throws IOException if any directory cannot be created
 */
public static void makeIndexDirs() throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    FsPermission defaultPerm = FsPermission.getDefault();
    for (String category : TextCategoryFields.TEXT_CATEGOTIES_ENUM) {
        // Substitute the category name into the configured path templates.
        String oriDir = Constants.INPUT_PATH.replace("%Category%", category);
        String lfDir = Constants.INPUT_PATH_LF.replace("%Category%", category);
        FileSystem.mkdirs(fs, new Path(oriDir), defaultPerm);
        FileSystem.mkdirs(fs, new Path(lfDir), defaultPerm);

        for (String shardTemplate : Constants.INDEX_SHARDS) {
            String indexDir = shardTemplate.replace("%Category%", category);
            FileSystem.mkdirs(fs, new Path(indexDir), defaultPerm);
        }
    }
}

From source file:com.baifendian.swordfish.common.hadoop.HdfsClient.java

License:Apache License

/**
 * Creates a directory in HDFS with the default permission
 * ({@link FsPermission#getDefault()}).  Delegates to
 * {@code mkdir(String, FsPermission)}.
 *
 * @param dir path of the directory to create
 * @throws HdfsException if the directory cannot be created
 */
public void mkdir(String dir) throws HdfsException {
    mkdir(dir, FsPermission.getDefault());
}

From source file:com.btoddb.chronicle.apps.AvroTools.java

License:Open Source License

/**
 * Reads an Avro data file record-by-record while copying the records into a
 * sibling file suffixed ".fixing".  If every record reads cleanly, the copy
 * is deleted; if corruption was encountered, the salvaged copy is kept and
 * renamed with the ".fixed" suffix.
 *
 * @param inFile HDFS path of the Avro file to check
 * @throws IOException if the input cannot be opened or the output created
 */
private void testFileAndFix(Path inFile) throws IOException {
    FileContext context = FileContext.getFileContext(hdfsConfig);
    AvroFSInput input = new AvroFSInput(context, inFile);

    ReflectDatumReader<Object> reader = new ReflectDatumReader<>();
    FileReader<Object> fileReader = DataFileReader.openReader(input, reader);

    Path outFile = inFile.suffix(".fixing");
    FSDataOutputStream output = FileSystem.create(outFile.getFileSystem(hdfsConfig), outFile,
            FsPermission.getDefault());
    // Records are re-serialized generically, compressed with snappy.
    DataFileWriter<Object> writer = new DataFileWriter<>(new GenericDatumWriter<>());
    writer.setCodec(CodecFactory.snappyCodec());

    boolean corrupted = false;
    long count = 0;

    try {
        // The output file reuses the schema of the file being repaired.
        Schema schema = fileReader.getSchema();
        writer.create(schema, output);

        for (;;) {
            try {
                if (fileReader.hasNext()) {
                    Object obj = fileReader.next();
                    count++;
                    writer.append(obj);
                } else {
                    break;
                }
            } catch (AvroRuntimeException e) {
                // Avro-level read failure: decide whether to stop (EOF) or
                // attempt to skip past the bad record (wrapped IOException).
                corrupted = true;
                System.out.println("  - file pointer = " + input.tell());
                if (e.getCause() instanceof EOFException) {
                    System.out.println("  - EOF occurred so we're done : " + e.getMessage());
                    break;
                } else if (e.getCause() instanceof IOException) {
                    System.out.println("  - will try to 'next' past the error : " + e.getMessage());
                    try {
                        fileReader.next();
                        System.out.println("  - 'next' worked - didn't really expect it to, but great!");
                    } catch (Exception e2) {
                        System.out.println("  - 'next' did not work - will continue on and see what happens : "
                                + e2.getMessage());
                    }
                    continue;
                }
                break;
            } catch (Exception e) {
                // Any other failure: mark the file corrupt and stop copying.
                corrupted = true;
                System.out.println("  - file pointer = " + input.tell());
                e.printStackTrace();
                break;
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        System.out.println(("  - processed " + count + " records"));
        // NOTE(review): 'input' (AvroFSInput) is not closed explicitly here —
        // confirm whether closing 'fileReader' also releases it.
        if (null != fileReader) {
            try {
                fileReader.close();
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
        if (null != writer) {
            try {
                writer.close();
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }

    if (!corrupted) {
        // Original read cleanly — discard the redundant copy.
        outFile.getFileSystem(hdfsConfig).delete(outFile, false);
    } else {
        // Keep the salvaged records alongside the corrupt original.
        outFile.getFileSystem(hdfsConfig).rename(outFile, inFile.suffix(".fixed"));
    }
}

From source file:com.cloudera.hoop.fs.FSUtils.java

License:Open Source License

/**
 * Converts a Unix permission symbolic representation
 * (i.e. -rwxr--r--) into a Hadoop permission.
 *
 * @param str Unix permission symbolic representation.
 * @return the Hadoop permission. If the given string was
 * 'default', it returns <code>FsPermission.getDefault()</code>.
 */
public static FsPermission getPermission(String str) {
    // The sentinel value maps straight to Hadoop's default permission.
    if (str.equals(DEFAULT_PERMISSION)) {
        return FsPermission.getDefault();
    }
    //TODO: there is something funky here, it does not detect 'x'
    return FsPermission.valueOf(str);
}

From source file:com.ebay.erl.mobius.core.fs.MobiusLocalFileSystem.java

License:Apache License

/**
 * Creates the given directory with the default {@link FsPermission};
 * delegates to {@code mkdirs(Path, FsPermission)}.
 *
 * @param f path of the directory to create
 * @return the result of the delegated call (per the FileSystem mkdirs contract)
 * @throws IOException if the directory cannot be created
 */
public boolean mkdirs(Path f) throws IOException {
    return mkdirs(f, FsPermission.getDefault());
}

From source file:com.google.cloud.hadoop.fs.gcs.GoogleHadoopFileSystemTestBase.java

License:Open Source License

/**
 * We override certain methods in FileSystem simply to provide debug tracing. (Search for
 * "Overridden functions for debug tracing" in GoogleHadoopFileSystemBase.java).
 * We do not add or update any functionality for such methods. The following
 * tests simply exercise that path to ensure coverage. Consequently, they do not
 * really test any functionality.
 *
 * Having coverage for these methods lets us easily determine the amount of
 * coverage that is missing in the rest of the code.
 */
@Test
public void provideCoverageForUnmodifiedMethods() throws IOException {
    // -------------------------------------------------------
    // Create test data.

    // Temporary file in GHFS.
    URI tempFileUri = GoogleCloudStorageFileSystemIntegrationTest.getTempFilePath();
    Path tempFilePath = ghfsHelper.castAsHadoopPath(tempFileUri);
    Path tempDirPath = tempFilePath.getParent();
    String text = "Hello World!";
    ghfsHelper.writeFile(tempFilePath, text, 1, false);

    // Another temporary file in GHFS.
    URI tempFileUri2 = GoogleCloudStorageFileSystemIntegrationTest.getTempFilePath();
    Path tempFilePath2 = ghfsHelper.castAsHadoopPath(tempFileUri2);

    // Temporary file in local FS.
    File localTempFile = File.createTempFile("ghfs-test-", null);
    Path localTempFilePath = new Path(localTempFile.getPath());
    Path localTempDirPath = localTempFilePath.getParent();

    // -------------------------------------------------------
    // Call methods to provide coverage for. Note that we do not attempt to
    // test their functionality as we are not testing Hadoop engine here.
    try {
        ghfs.deleteOnExit(tempFilePath);
        ghfs.getContentSummary(tempFilePath);
        ghfs.getDelegationToken("foo");
        ghfs.copyFromLocalFile(false, true, localTempFilePath, tempDirPath);
        ghfs.copyFromLocalFile(false, true, new Path[] { localTempFilePath }, tempDirPath);
        localTempFile.delete();
        ghfs.copyToLocalFile(true, tempFilePath, localTempDirPath);
        File localCopiedFile = new File(localTempDirPath.toString(), tempFilePath.getName());
        localCopiedFile.delete();
        Path localOutputPath = ghfs.startLocalOutput(tempFilePath2, localTempFilePath);
        FileWriter writer = new FileWriter(localOutputPath.toString());
        writer.write(text);
        writer.close();
        ghfs.completeLocalOutput(tempFilePath2, localOutputPath);
        ghfs.getUsed();
        ghfs.setVerifyChecksum(false);
        ghfs.getFileChecksum(tempFilePath2);
        ghfs.setPermission(tempFilePath2, FsPermission.getDefault());
        try {
            ghfs.setOwner(tempFilePath2, "foo-user", "foo-group");
        } catch (IOException ioe) {
            // Some filesystems (like the LocalFileSystem) are strict about existence of owners.
            // TODO(user): Abstract out the behaviors around owners/permissions and properly test
            // the different behaviors between different filesystems.
        }
        ghfs.setTimes(tempFilePath2, 0, 0);
    } finally {
        // We do not need to separately delete the temp files created in GHFS because
        // we delete all test buckets recursively at the end of the tests.
        if (localTempFile.exists()) {
            localTempFile.delete();
        }
    }
}

From source file:com.google.cloud.hadoop.fs.gcs.HadoopFileSystemIntegrationHelper.java

License:Open Source License

/**
 * Writes a file with the given buffer repeated numWrites times.
 *
 * @param hadoopPath Path of the file to create.
 * @param buffer Data to write.
 * @param numWrites Number of times to repeat the data.
 * @param overwrite If true, overwrite any existing file.
 * @return Number of bytes written.
 * @throws IOException if the stream cannot be created, written, or closed.
 */
public int writeFile(Path hadoopPath, ByteBuffer buffer, int numWrites, boolean overwrite) throws IOException {
    int numBytesWritten = -1;
    int totalBytesWritten = 0;

    // Snapshot the byte-written counter before the write so the delta can be
    // compared against what we actually wrote.
    long fileSystemBytesWritten = 0;
    FileSystem.Statistics stats = FileSystem.getStatistics(ghfsFileSystemDescriptor.getScheme(),
            ghfs.getClass());
    if (stats != null) {
        // Let it be null in case no stats have been added for our scheme yet.
        fileSystemBytesWritten = stats.getBytesWritten();
    }
    FSDataOutputStream writeStream = null;
    boolean allWritesSucceeded = false;

    try {
        writeStream = ghfs.create(hadoopPath, FsPermission.getDefault(), overwrite,
                GoogleHadoopFileSystemBase.BUFFERSIZE_DEFAULT,
                GoogleHadoopFileSystemBase.REPLICATION_FACTOR_DEFAULT,
                GoogleHadoopFileSystemBase.BLOCK_SIZE_DEFAULT, null); // progressable

        for (int i = 0; i < numWrites; i++) {
            buffer.clear();
            writeStream.write(buffer.array(), 0, buffer.capacity());
            numBytesWritten = buffer.capacity();
            totalBytesWritten += numBytesWritten;
        }
        allWritesSucceeded = true;
    } finally {
        if (writeStream != null) {
            try {
                writeStream.close();
            } catch (IOException e) {
                // Ignore IO exceptions while closing if write failed otherwise the
                // exception that caused the write to fail gets superseded.
                // On the other hand, if all writes succeeded then we need to know about the exception
                // that was thrown during closing.
                if (allWritesSucceeded) {
                    throw e;
                }
            }
        }
    }

    // After the write, the stats better be non-null for our ghfs scheme.
    stats = FileSystem.getStatistics(ghfsFileSystemDescriptor.getScheme(), ghfs.getClass());
    Assert.assertNotNull(stats);
    long endFileSystemBytesWritten = stats.getBytesWritten();
    int bytesWrittenStats = (int) (endFileSystemBytesWritten - fileSystemBytesWritten);
    if (statistics == FileSystemStatistics.EXACT) {
        Assert.assertEquals(String.format("FS statistics mismatch fetched from class '%s'", ghfs.getClass()),
                totalBytesWritten, bytesWrittenStats);
    } else if (statistics == FileSystemStatistics.GREATER_OR_EQUAL) {
        Assert.assertTrue(String.format("Expected %d <= %d", totalBytesWritten, bytesWrittenStats),
                totalBytesWritten <= bytesWrittenStats);
    } else if (statistics == FileSystemStatistics.NONE) {
        // Do not perform any check because stats are either not maintained or are erratic.
    } else if (statistics == FileSystemStatistics.IGNORE) {
        // NO-OP
    }

    return totalBytesWritten;
}

From source file:com.ikanow.aleph2.storage_service_hdfs.services.TestMockHdfsStorageSystem.java

License:Apache License

/**
 * Deletes the bucket's root path and recreates the standard managed-bucket
 * directory layout underneath it, plus any extra directories requested.
 *
 * @param storage_service service providing the HDFS FileContext and bucket root
 * @param bucket bucket whose directory tree is being (re)built
 * @param extra_suffixes - start with $ to indicate a new secondary buffer, else is a normal suffix
 */
protected void setup_bucket(MockHdfsStorageService storage_service, final DataBucketBean bucket,
        List<String> extra_suffixes) {
    final FileContext dfs = storage_service.getUnderlyingPlatformDriver(FileContext.class, Optional.empty())
            .get();

    final String bucket_root = storage_service.getBucketRootPath() + "/" + bucket.full_name();

    //(first delete root path)
    try {
        dfs.delete(new Path(bucket_root), true);
    } catch (Exception e) {
    }

    // Build the fixed managed-bucket layout, then expand each extra suffix:
    // a "$name" suffix becomes a secondary buffer under each stored/transient
    // area, while any other suffix is used as-is.
    Stream.concat(
            Arrays.asList("/managed_bucket", "/managed_bucket/logs", "/managed_bucket/logs/harvest",
                    "/managed_bucket/logs/enrichment", "/managed_bucket/logs/storage", "/managed_bucket/assets",
                    "/managed_bucket/import", "/managed_bucket/import/stored",
                    "/managed_bucket/import/stored/raw/current", "/managed_bucket/import/stored/json/current",
                    "/managed_bucket/import/stored/processed/current",
                    "/managed_bucket/import/transient/current", "/managed_bucket/import/ready",
                    "/managed_bucket/import/temp").stream(),
            extra_suffixes.stream()
                    .flatMap(
                            s -> s.startsWith("$")
                                    ? Stream.of("/managed_bucket/import/stored/raw/" + s.substring(1),
                                            "/managed_bucket/import/stored/json/" + s.substring(1),
                                            "/managed_bucket/import/stored/processed/" + s.substring(1),
                                            "/managed_bucket/import/transient/" + s.substring(1))
                                    : Stream.of(s)))
            .map(s -> new Path(bucket_root + s))
            .forEach(Lambdas.wrap_consumer_u(p -> dfs.mkdir(p, FsPermission.getDefault(), true)));
}

From source file:com.inmobi.conduit.distcp.tools.mapred.TestCopyCommitter.java

License:Apache License

/**
 * Verifies that committing a distcp job with sync-folder and delete-missing
 * enabled brings the target folder in sync with the source (extra target
 * entries removed), and that the commit is idempotent.
 */
@Test
public void testDeleteMissing() {
    TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config);
    JobContext jobContext = Mockito.mock(JobContext.class);
    Mockito.when(jobContext.getConfiguration()).thenReturn(config);
    JobID jobID = new JobID();
    Mockito.when(jobContext.getJobID()).thenReturn(jobID);
    Configuration conf = jobContext.getConfiguration();

    String sourceBase;
    String targetBase;
    FileSystem fs = null;
    try {
        OutputCommitter committer = new CopyCommitter(null, taskAttemptContext);
        fs = FileSystem.get(conf);
        sourceBase = TestDistCpUtils.createTestSetup(fs, FsPermission.getDefault());
        targetBase = TestDistCpUtils.createTestSetup(fs, FsPermission.getDefault());
        // Extra content moved into the target so delete-missing has work to do.
        String targetBaseAdd = TestDistCpUtils.createTestSetup(fs, FsPermission.getDefault());
        fs.rename(new Path(targetBaseAdd), new Path(targetBase));

        DistCpOptions options = new DistCpOptions(Arrays.asList(new Path(sourceBase)), new Path("/out"));
        options.setSyncFolder(true);
        options.setDeleteMissing(true);
        options.appendToConf(conf);

        CopyListing listing = new GlobbedCopyListing(conf, CREDENTIALS);
        Path listingFile = new Path("/tmp1/" + String.valueOf(rand.nextLong()));
        listing.buildListing(listingFile, options);

        conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, targetBase);
        conf.set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH, targetBase);

        // Sync is checked in both directions: neither side may have extras.
        committer.commitJob(jobContext);
        if (!TestDistCpUtils.checkIfFoldersAreInSync(fs, targetBase, sourceBase)) {
            Assert.fail("Source and target folders are not in sync");
        }
        if (!TestDistCpUtils.checkIfFoldersAreInSync(fs, sourceBase, targetBase)) {
            Assert.fail("Source and target folders are not in sync");
        }

        //Test for idempotent commit
        committer.commitJob(jobContext);
        if (!TestDistCpUtils.checkIfFoldersAreInSync(fs, targetBase, sourceBase)) {
            Assert.fail("Source and target folders are not in sync");
        }
        if (!TestDistCpUtils.checkIfFoldersAreInSync(fs, sourceBase, targetBase)) {
            Assert.fail("Source and target folders are not in sync");
        }
    } catch (Throwable e) {
        LOG.error("Exception encountered while testing for delete missing", e);
        Assert.fail("Delete missing failure");
    } finally {
        TestDistCpUtils.delete(fs, "/tmp1");
    }

}

From source file:com.inmobi.conduit.distcp.tools.util.TestDistCpUtils.java

License:Apache License

/**
 * Creates the test directory setup under "/tmp1" using the default
 * permission; delegates to {@code createTestSetup(String, FileSystem, FsPermission)}.
 *
 * @param fs the filesystem in which to create the test setup
 * @return the base path of the created test setup
 * @throws IOException if the setup cannot be created
 */
public static String createTestSetup(FileSystem fs) throws IOException {
    return createTestSetup("/tmp1", fs, FsPermission.getDefault());
}