Example usage for org.apache.hadoop.fs FileSystem getConf

List of usage examples for org.apache.hadoop.fs FileSystem getConf

Introduction

On this page you can find example usages of org.apache.hadoop.fs FileSystem.getConf.

Prototype

@Override
public Configuration getConf()

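Before the source-file examples, here is a minimal, self-contained sketch of calling getConf(). The property name example.property is an illustrative assumption, not taken from the examples below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class GetConfExample {
    public static void main(String[] args) throws Exception {
        // Obtain a FileSystem instance from a fresh Configuration
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // getConf() returns the Configuration this FileSystem was initialized with
        Configuration fsConf = fs.getConf();
        System.out.println("fs.defaultFS = " + fsConf.get("fs.defaultFS"));

        // As several examples below do, copy the configuration before mutating it,
        // so the shared (and possibly cached) FileSystem instance is not affected
        Configuration copy = new Configuration(fs.getConf());
        copy.set("example.property", "value"); // hypothetical property name
    }
}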
Usage

From source file: org.apache.spark.tez.utils.HadoopUtils.java

License: Apache License

/**
 * Provisions the current classpath to YARN and returns an array of
 * {@link Path}s representing the provisioned resources.
 * If the 'generate-jar' system property is set, it will also generate a JAR for the
 * current working directory (mainly used when executing from an IDE).
 *
 * @return an array of {@link Path}s representing the provisioned resources
 */
private static Path[] provisionClassPath(FileSystem fs, String applicationName, String[] classPathExclusions) {
    String genJarProperty = System.getProperty(TezConstants.GENERATE_JAR);
    boolean generateJar = genJarProperty != null && Boolean.parseBoolean(genJarProperty);
    List<Path> provisionedPaths = new ArrayList<Path>();
    List<File> generatedJars = new ArrayList<File>();

    boolean confFromHadoopConfDir = generateConfigJarFromHadoopConfDir(fs, applicationName, provisionedPaths,
            generatedJars);

    TezConfiguration tezConf = new TezConfiguration(fs.getConf());
    boolean provisionTez = true;
    if (tezConf.get("tez.lib.uris") != null) {
        provisionTez = false;
    }
    URL[] classpath = ((URLClassLoader) ClassLoader.getSystemClassLoader()).getURLs();
    for (URL classpathUrl : classpath) {
        File f = new File(classpathUrl.getFile());
        if (f.isDirectory()) {
            if (generateJar) {
                String jarFileName = ClassPathUtils.generateJarFileName("application");
                f = doGenerateJar(f, jarFileName, generatedJars, "application");
            } else if (f.getName().equals("conf") && !confFromHadoopConfDir) {
                String jarFileName = ClassPathUtils.generateJarFileName("conf_application");
                f = doGenerateJar(f, jarFileName, generatedJars, "configuration");
            } else {
                f = null;
            }
        }
        if (f != null) {
            if (f.getName().startsWith("tez-") && !provisionTez) {
                logger.info("Skipping provisioning of " + f.getName()
                        + " since Tez libraries are already provisioned");
                continue;
            }
            String destinationFilePath = applicationName + "/" + f.getName();
            Path provisionedPath = new Path(fs.getHomeDirectory(), destinationFilePath);
            if (shouldProvision(provisionedPath.getName(), classPathExclusions)) {
                provisioinResourceToFs(fs, new Path(f.getAbsolutePath()), provisionedPath);
                provisionedPaths.add(provisionedPath);
            }
        }

    }

    for (File generatedJar : generatedJars) {
        try {
            generatedJar.delete();
        } catch (Exception e) {
            logger.warn("Failed to delete generated jars", e);
        }
    }
    return provisionedPaths.toArray(new Path[] {});
}

From source file: org.apache.tajo.engine.planner.physical.BSTIndexScanExec.java

License: Apache License

public BSTIndexScanExec(TaskAttemptContext context, ScanNode scanNode, FileFragment fragment, Path fileName,
        Schema keySchema, TupleComparator comparator, Datum[] datum) throws IOException {
    super(context, scanNode.getInSchema(), scanNode.getOutSchema());
    this.scanNode = scanNode;
    this.qual = scanNode.getQual();
    this.datum = datum;

    this.fileScanner = StorageManager.getSeekableScanner(context.getConf(), scanNode.getTableDesc().getMeta(),
            scanNode.getInSchema(), fragment, outSchema);
    this.fileScanner.init();
    this.projector = new Projector(context, inSchema, outSchema, scanNode.getTargets());

    FileSystem fs = fileName.getFileSystem(context.getConf());
    this.reader = new BSTIndex(fs.getConf()).getIndexReader(fileName, keySchema, comparator);
    this.reader.open();
}

From source file: org.apache.tajo.storage.raw.TestDirectRawFile.java

License: Apache License

public TestDirectRawFile(boolean isLocal) throws IOException {
    FileSystem fs;
    if (isLocal) {
        fs = localFs;
    } else {
        fs = dfs;
    }

    this.tajoConf = new TajoConf(fs.getConf());
    this.testDir = getTestDir(fs, TEST_PATH);
}

From source file: org.apache.tajo.storage.TestFileSystems.java

License: Apache License

public TestFileSystems(FileSystem fs) throws IOException {
    this.fs = fs;
    this.conf = new TajoConf(fs.getConf());
    sm = (FileStorageManager) StorageManager.getFileStorageManager(conf);
    testDir = getTestDir(this.fs, TEST_PATH);
}

From source file: org.godhuli.rhipe.FileUtils.java

License: Apache License

public void copyToLocal(FileSystem srcFS, Path src, File dst) throws IOException {
    if (!srcFS.getFileStatus(src).isDir()) {
        File tmp = FileUtil.createLocalTempFile(dst.getAbsoluteFile(), COPYTOLOCAL_PREFIX, true);
        if (!FileUtil.copy(srcFS, src, tmp, false, srcFS.getConf())) {
            throw new IOException("Failed to copy " + src + " to " + dst);
        }

        if (!tmp.renameTo(dst)) {
            throw new IOException(
                    "Failed to rename tmp file " + tmp + " to local destination \"" + dst + "\".");
        }
    } else {
        dst.mkdirs();
        for (FileStatus path : srcFS.listStatus(src)) {
            copyToLocal(srcFS, path.getPath(), new File(dst, path.getPath().getName()));
        }
    }
}

From source file: org.icgc.dcc.release.core.hadoop.FileGlobInputStream.java

License: Open Source License

private static InputStream createInputStream(FileSystem fileSystem, Path pathPattern, boolean compressed) {
    val inputStreams = Lists.<InputStream>newArrayList();

    try {
        val factory = new CompressionCodecFactory(fileSystem.getConf());

        val paths = getPaths(fileSystem, pathPattern);
        for (val path : paths) {
            log.info("Creating input stream for '{}'", path);
            val inputStream = compressed ? createCompressedInputStream(fileSystem.getConf(), path)
                    : createDecodedInputStream(fileSystem, path, factory);

            inputStreams.add(inputStream);
        }
    } catch (IOException e) {
        throw new UncheckedIOException("Error reading: '" + pathPattern.toString() + "'", e);
    }

    return combineInputStreams(inputStreams);
}

From source file: org.icgc.dcc.submission.validation.key.KeyValidator.java

License: Open Source License

private static InputStream createInputStream(FileSystem fileSystem, Path path) {
    val factory = new CompressionCodecFactory(fileSystem.getConf());

    try {
        val codec = factory.getCodec(path);
        val baseInputStream = fileSystem.open(path);
        // wrap in a decompressing stream when the file is compressed; reuse the
        // already-opened stream rather than opening the file a second time
        return codec == null ? baseInputStream : codec.createInputStream(baseInputStream);
    } catch (IOException e) {
        throw new RuntimeException("Error reading: '" + path.toString() + "'", e);
    }
}

From source file: org.kitesdk.data.spi.filesystem.FileSystemMetadataProvider.java

License: Apache License

/**
 * Writes the contents of a {@code Descriptor} to files.
 *
 * @param fs                The {@link FileSystem} where data will be stored
 * @param metadataLocation  The directory {@link Path} where metadata files
 *                          will be located
 * @param name              The {@link Dataset} name
 * @param descriptor        The {@code Descriptor} contents to write
 *
 * @throws org.kitesdk.data.DatasetIOException
 *                          If the {@code metadataLocation} does not exist or
 *                          if any IOExceptions need to be propagated.
 */
@VisibleForTesting
static void writeDescriptor(FileSystem fs, Path metadataLocation, String name, DatasetDescriptor descriptor) {

    checkExists(fs, metadataLocation);

    // write the schema to the previous file location so
    // it can be read by earlier versions of Kite
    FSDataOutputStream outputStream = null;
    final Path schemaPath = new Path(metadataLocation, SCHEMA_FILE_NAME);
    boolean threw = true;
    try {
        outputStream = fs.create(schemaPath, true /* overwrite */ );
        outputStream.write(descriptor.getSchema().toString(true).getBytes(Charsets.UTF_8));
        outputStream.flush();
        threw = false;
    } catch (IOException e) {
        throw new DatasetIOException("Unable to save schema file: " + schemaPath + " for dataset: " + name, e);
    } finally {
        try {
            Closeables.close(outputStream, threw);
        } catch (IOException e) {
            throw new DatasetIOException("Cannot close", e);
        }
    }

    // use the SchemaManager for schema operations moving forward
    SchemaManager manager = SchemaManager.create(fs.getConf(),
            new Path(metadataLocation, SCHEMA_DIRECTORY_NAME));

    manager.writeSchema(descriptor.getSchema());

    Properties properties = new Properties();
    properties.setProperty(VERSION_FIELD_NAME, METADATA_VERSION);
    properties.setProperty(FORMAT_FIELD_NAME, descriptor.getFormat().getName());
    properties.setProperty(COMPRESSION_TYPE_FIELD_NAME, descriptor.getCompressionType().getName());

    final URI dataLocation = descriptor.getLocation();
    if (dataLocation != null) {
        properties.setProperty(LOCATION_FIELD_NAME, dataLocation.toString());
    }

    if (descriptor.isPartitioned()) {
        properties.setProperty(PARTITION_EXPRESSION_FIELD_NAME,
                Accessor.getDefault().toExpression(descriptor.getPartitionStrategy()));
    }

    // copy custom properties to the table
    for (String property : descriptor.listProperties()) {
        // no need to check the reserved list, those are not set on descriptors
        properties.setProperty(property, descriptor.getProperty(property));
    }

    final Path descriptorPath = new Path(metadataLocation, DESCRIPTOR_FILE_NAME);
    threw = true;
    try {
        outputStream = fs.create(descriptorPath, true /* overwrite */ );
        properties.store(outputStream, "Dataset descriptor for " + name);
        outputStream.flush();
        threw = false;
    } catch (IOException e) {
        throw new DatasetIOException(
                "Unable to save descriptor file: " + descriptorPath + " for dataset: " + name, e);
    } finally {
        try {
            Closeables.close(outputStream, threw);
        } catch (IOException e) {
            throw new DatasetIOException("Cannot close", e);
        }
    }
}

From source file: org.kitesdk.data.spi.filesystem.FileSystemWriter.java

License: Apache License

private FileSystemWriter(FileSystem fs, Path path, DatasetDescriptor descriptor) {
    Preconditions.checkNotNull(fs, "File system is not defined");
    Preconditions.checkNotNull(path, "Destination directory is not defined");
    Preconditions.checkNotNull(descriptor, "Descriptor is not defined");
    this.fs = fs;
    this.directory = path;
    this.descriptor = descriptor;
    this.conf = new Configuration(fs.getConf());
    this.state = ReaderWriterState.NEW;

    // copy file format settings from custom properties to the Configuration
    for (String prop : descriptor.listProperties()) {
        conf.set(prop, descriptor.getProperty(prop));
    }
}
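Note the defensive copy above: new Configuration(fs.getConf()) gives the writer its own Configuration instance, so the descriptor properties copied into it do not leak into the configuration of the shared (and possibly cached) FileSystem.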

From source file: org.kitesdk.data.spi.filesystem.InputFormatReader.java

License: Apache License

public InputFormatReader(FileSystem fs, Path path, DatasetDescriptor descriptor) {
    this.fs = fs;
    this.path = path;
    this.descriptor = descriptor;
    this.state = ReaderWriterState.NEW;

    // set up the configuration from the descriptor properties
    this.conf = new Configuration(fs.getConf());
    for (String prop : descriptor.listProperties()) {
        conf.set(prop, descriptor.getProperty(prop));
    }

    this.attemptContext = Hadoop.TaskAttemptContext.ctor.newInstance(conf, FAKE_ID);
}