Example usage for org.apache.hadoop.fs FileContext getFileContext

List of usage examples for org.apache.hadoop.fs FileContext getFileContext

Introduction

On this page you can find example usage for org.apache.hadoop.fs FileContext getFileContext.

Prototype

public static FileContext getFileContext(final URI defaultFsUri, final Configuration aConf)
        throws UnsupportedFileSystemException 

Document

Create a FileContext for specified default URI using the specified config.
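
For a minimal, self-contained illustration of the prototype above, the sketch below obtains a FileContext for a default URI and qualifies a path against it. The hdfs://namenode:8020 URI and the /tmp/example path are placeholders, not values taken from the examples that follow.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

public class FileContextGetFileContextExample {
    public static void main(String[] args) throws Exception {
        // Placeholder default filesystem URI; substitute the URI of your cluster
        URI defaultFsUri = URI.create("hdfs://namenode:8020");
        Configuration conf = new Configuration();

        // Throws UnsupportedFileSystemException if no file system implementation
        // is available for the URI's scheme
        FileContext fc = FileContext.getFileContext(defaultFsUri, conf);

        // Qualify a scheme-less path against the context's default file system
        Path qualified = fc.makeQualified(new Path("/tmp/example"));
        System.out.println(qualified);
    }
}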

Usage

From source file: co.cask.cdap.internal.app.runtime.batch.distributed.MapReduceContainerHelper.java

License: Apache License

/**
 * Gets the MapReduce framework URI based on the {@code mapreduce.application.framework.path} setting.
 *
 * @param hConf the job configuration
 * @return the framework URI or {@code null} if not present or if the URI in the config is invalid.
 */
@Nullable
public static URI getFrameworkURI(Configuration hConf) {
    String framework = hConf.get(MRJobConfig.MAPREDUCE_APPLICATION_FRAMEWORK_PATH);
    if (framework == null) {
        return null;
    }

    try {
        // Parse the path. It can contain '#' to represent the localized file name
        URI uri = new URI(framework);
        String linkName = uri.getFragment();

        // The following resolution logic is copied from JobSubmitter in MR.
        FileSystem fs = FileSystem.get(hConf);
        Path frameworkPath = fs.makeQualified(new Path(uri.getScheme(), uri.getAuthority(), uri.getPath()));
        FileContext fc = FileContext.getFileContext(frameworkPath.toUri(), hConf);
        frameworkPath = fc.resolvePath(frameworkPath);
        uri = frameworkPath.toUri();

        // If there is no localized name (in the URI fragment), use the last part of the URI path as the name
        if (linkName == null) {
            linkName = uri.getPath();
            int idx = linkName.lastIndexOf('/');
            if (idx >= 0) {
                linkName = linkName.substring(idx + 1);
            }
        }
        return new URI(uri.getScheme(), uri.getAuthority(), uri.getPath(), null, linkName);
    } catch (URISyntaxException e) {
        LOG.warn("Failed to parse {} as a URI. MapReduce framework path is not used. Check the setting for {}.",
                framework, MRJobConfig.MAPREDUCE_APPLICATION_FRAMEWORK_PATH, e);
    } catch (IOException e) {
        LOG.warn("Failed to resolve {} URI. MapReduce framework path is not used. Check the setting for {}.",
                framework, MRJobConfig.MAPREDUCE_APPLICATION_FRAMEWORK_PATH, e);
    }
    return null;
}

From source file: com.datatorrent.common.util.FSStorageAgent.java

License: Apache License

public FSStorageAgent(String path, Configuration conf) {
    this.path = path;
    try {
        logger.debug("Initialize storage agent with {}.", path);
        Path lPath = new Path(path);
        URI pathUri = lPath.toUri();

        if (pathUri.getScheme() != null) {
            fileContext = FileContext.getFileContext(pathUri, conf == null ? new Configuration() : conf);
        } else {
            fileContext = FileContext.getFileContext(conf == null ? new Configuration() : conf);
        }
    } catch (IOException ex) {
        throw new RuntimeException(ex);
    }
}

From source file: com.ikanow.aleph2.storage_service_hdfs.services.HdfsStorageService.java

License: Apache License

/** Internal version of the getUnderlyingPlatformDriver call, i.e. the input to the cache
 * @param driver_class
 * @param driver_options
 * @return
 */
@SuppressWarnings("unchecked")
public <T> Optional<T> getUnderlyingPlatformDriver_internal(Class<T> driver_class,
        Optional<String> driver_options) {
    T driver = null;
    try {
        if (driver_class != null) {
            final Configuration config = getConfiguration();
            URI uri = driver_options.isPresent() ? new URI(driver_options.get()) : getUri(config);

            if (driver_class.isAssignableFrom(AbstractFileSystem.class)) {

                AbstractFileSystem fs = AbstractFileSystem.createFileSystem(uri, config);

                return (Optional<T>) Optional.of(fs);
            } else if (driver_class.isAssignableFrom(FileContext.class)) {
                FileContext fs = FileContext.getFileContext(AbstractFileSystem.createFileSystem(uri, config),
                        config);
                return (Optional<T>) Optional.of(fs);
            } else if (driver_class.isAssignableFrom(RawLocalFileSystem.class)) {
                return Optional.of(driver_class.newInstance());
            }
        } // !=null
    } catch (Exception e) {
        _logger.error("Caught Exception:", e);
    }
    return Optional.ofNullable(driver);
}

From source file: com.uber.hoodie.common.file.HoodieAppendLog.java

License: Apache License

/**
 * Construct the preferred type of SequenceFile Writer.
 * @param fs The configured filesystem.
 * @param conf The configuration.
 * @param name The name of the file.
 * @param keyClass The 'key' type.
 * @param valClass The 'value' type.
 * @param bufferSize buffer size for the underlying output stream.
 * @param replication replication factor for the file.
 * @param blockSize block size for the file.
 * @param createParent create parent directory if non-existent
 * @param compressionType The compression type.
 * @param codec The compression codec.
 * @param metadata The metadata of the file.
 * @return Returns the handle to the constructed SequenceFile Writer.
 * @throws IOException
 */
@Deprecated
public static Writer createWriter(FileSystem fs, Configuration conf, Path name, Class keyClass, Class valClass,
        int bufferSize, short replication, long blockSize, boolean createParent,
        CompressionType compressionType, CompressionCodec codec, Metadata metadata) throws IOException {
    return createWriter(FileContext.getFileContext(fs.getUri(), conf), conf, name, keyClass, valClass,
            compressionType, codec, metadata, EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
            CreateOpts.bufferSize(bufferSize),
            createParent ? CreateOpts.createParent() : CreateOpts.donotCreateParent(),
            CreateOpts.repFac(replication), CreateOpts.blockSize(blockSize));
}

From source file: de.tiqsolutions.hdfs.HadoopFileSystem.java

License: Apache License

HadoopFileSystem(HadoopFileSystemProvider provider, URI uri, Configuration configuration)
        throws UnsupportedFileSystemException {
    if (provider == null)
        throw new NullPointerException();
    this.provider = provider;
    fileContext = FileContext.getFileContext(uri, configuration);
}

From source file: org.apache.apex.malhar.lib.utils.FileContextUtils.java

License: Apache License

public static FileContext getFileContext(@NotNull Path path, @Nullable Configuration conf)
        throws UnsupportedFileSystemException {
    Preconditions.checkNotNull(path, "path");
    URI pathUri = path.toUri();

    if (pathUri.getScheme() != null) {
        return FileContext.getFileContext(pathUri, conf == null ? new Configuration() : conf);
    } else {
        return FileContext.getFileContext(conf == null ? new Configuration() : conf);
    }
}

From source file: org.apache.gobblin.data.management.copy.writer.FileAwareInputStreamDataWriter.java

License: Apache License

public FileAwareInputStreamDataWriter(State state, int numBranches, int branchId, String writerAttemptId)
        throws IOException {
    super(state);

    if (numBranches > 1) {
        throw new IOException("Distcp can only operate with one branch.");
    }

    if (!(state instanceof WorkUnitState)) {
        throw new RuntimeException(
                String.format("Distcp requires a %s on construction.", WorkUnitState.class.getSimpleName()));
    }
    this.state = (WorkUnitState) state;

    this.taskBroker = this.state.getTaskBroker();

    this.writerAttemptIdOptional = Optional.fromNullable(writerAttemptId);

    String uriStr = this.state.getProp(ForkOperatorUtils.getPropertyNameForBranch(
            ConfigurationKeys.WRITER_FILE_SYSTEM_URI, numBranches, branchId), ConfigurationKeys.LOCAL_FS_URI);

    Configuration conf = WriterUtils.getFsConfiguration(state);
    URI uri = URI.create(uriStr);
    this.fs = FileSystem.get(uri, conf);
    this.fileContext = FileContext.getFileContext(uri, conf);

    this.stagingDir = this.writerAttemptIdOptional.isPresent()
            ? WriterUtils.getWriterStagingDir(state, numBranches, branchId, this.writerAttemptIdOptional.get())
            : WriterUtils.getWriterStagingDir(state, numBranches, branchId);
    this.outputDir = getOutputDir(state);
    this.copyableDatasetMetadata = CopyableDatasetMetadata
            .deserialize(state.getProp(CopySource.SERIALIZED_COPYABLE_DATASET));
    this.recoveryHelper = new RecoveryHelper(this.fs, state);
    this.actualProcessedCopyableFile = Optional.absent();

    this.copySpeedMeter = getMetricContext().meter(GOBBLIN_COPY_BYTES_COPIED_METER);

    this.bufferSize = state.getPropAsInt(CopyConfiguration.BUFFER_SIZE, StreamCopier.DEFAULT_BUFFER_SIZE);
    this.encryptionConfig = EncryptionConfigParser.getConfigForBranch(EncryptionConfigParser.EntityType.WRITER,
            this.state, numBranches, branchId);

    this.checkFileSize = state.getPropAsBoolean(GOBBLIN_COPY_CHECK_FILESIZE,
            DEFAULT_GOBBLIN_COPY_CHECK_FILESIZE);
    boolean taskOverwriteOnCommit = state.getPropAsBoolean(GOBBLIN_COPY_TASK_OVERWRITE_ON_COMMIT,
            DEFAULT_GOBBLIN_COPY_TASK_OVERWRITE_ON_COMMIT);
    if (taskOverwriteOnCommit) {
        this.renameOptions = Options.Rename.OVERWRITE;
    } else {
        this.renameOptions = Options.Rename.NONE;
    }
}

From source file: org.elasticsearch.repositories.hdfs.HdfsBlobStoreContainerTests.java

License: Apache License

@SuppressForbidden(reason = "lesser of two evils (the other being a bunch of JNI/classloader nightmares)")
private FileContext createContext(URI uri) {
    // mirrors HdfsRepository.java behaviour
    Configuration cfg = new Configuration(true);
    cfg.setClassLoader(HdfsRepository.class.getClassLoader());
    cfg.reloadConfiguration();

    Constructor<?> ctor;
    Subject subject;

    try {
        Class<?> clazz = Class.forName("org.apache.hadoop.security.User");
        ctor = clazz.getConstructor(String.class);
        ctor.setAccessible(true);
    } catch (ClassNotFoundException | NoSuchMethodException e) {
        throw new RuntimeException(e);
    }

    try {
        Principal principal = (Principal) ctor.newInstance(System.getProperty("user.name"));
        subject = new Subject(false, Collections.singleton(principal), Collections.emptySet(),
                Collections.emptySet());
    } catch (InstantiationException | IllegalAccessException | InvocationTargetException e) {
        throw new RuntimeException(e);
    }

    // disable file system cache
    cfg.setBoolean("fs.hdfs.impl.disable.cache", true);

    // set file system to TestingFs to avoid a bunch of security
    // checks, similar to what is done in HdfsTests.java
    cfg.set("fs.AbstractFileSystem." + uri.getScheme() + ".impl", TestingFs.class.getName());

    // create the FileContext with our user
    return Subject.doAs(subject, new PrivilegedAction<FileContext>() {
        @Override
        public FileContext run() {
            try {
                TestingFs fs = (TestingFs) AbstractFileSystem.get(uri, cfg);
                return FileContext.getFileContext(fs, cfg);
            } catch (UnsupportedFileSystemException e) {
                throw new RuntimeException(e);
            }
        }
    });
}