Example usage for org.apache.hadoop.fs Path makeQualified

Introduction

This page collects usage examples for the org.apache.hadoop.fs.Path method makeQualified.

Prototype

@Deprecated
public Path makeQualified(FileSystem fs) 

Document

Returns a qualified path object, resolved against the FileSystem's working directory. This overload is deprecated; makeQualified(URI, Path) (or FileSystem#makeQualified(Path)) is the non-deprecated replacement.
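A minimal, self-contained sketch of the call (the path name and the example output are illustrative assumptions, not taken from the examples below):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MakeQualifiedDemo {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // A relative path carries no scheme or authority until it is qualified.
        Path relative = new Path("data/input");

        // Deprecated overload: qualifies against fs's URI and working directory.
        Path qualified = relative.makeQualified(fs);

        // Equivalent non-deprecated forms.
        Path viaUri = relative.makeQualified(fs.getUri(), fs.getWorkingDirectory());
        Path viaFs = fs.makeQualified(relative);

        // All three print the same fully qualified URI, e.g.
        // file:/home/user/data/input on the default local file system.
        System.out.println(qualified);
        System.out.println(viaUri);
        System.out.println(viaFs);
    }
}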

Usage

From source file: org.apache.nutch.admin.GuiConfigUtil.java

License: Apache License

/**
 * Pushes nutch-default.xml and nutch-site.xml from a given
 * folder's conf subdirectory into a configuration.
 *
 * @param configuration the configuration to add the resources to
 * @param folder the folder whose conf subdirectory is scanned
 */
private static void configure(Configuration configuration, Path folder) throws IOException {
    FileSystem fs = FileSystem.get(NutchConfiguration.create());
    Path confFolder = new Path(folder, "conf");

    if (fs.exists(confFolder)) {
        Path defaultConf = new Path(confFolder, "nutch-default.xml");
        if (fs.exists(defaultConf)) {
            configuration.addResource(defaultConf.makeQualified(fs));
        }

        Path siteConf = new Path(confFolder, "nutch-site.xml");
        if (fs.exists(siteConf)) {
            configuration.addResource(siteConf.makeQualified(fs));
        }
    }
}
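Qualifying each resource path before handing it to Configuration.addResource pins the resource to an explicit scheme and authority, so the configuration resolves it against the intended file system rather than against whatever default file system is active when the resource is read.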

From source file: org.apache.nutch.searcher.DistributedSearchBean.java

License: Apache License

public DistributedSearchBean(Configuration conf, Path luceneConfig, Path solrConfig) throws IOException {
    FileSystem fs = FileSystem.get(conf);

    this.timeout = conf.getLong("ipc.client.timeout", 60000);

    List<SearchBean> beanList = new ArrayList<SearchBean>();

    if (fs.exists(luceneConfig)) {
        LOG.info("Adding Nutch searchers in " + luceneConfig.makeQualified(fs).toUri());
        addLuceneBeans(beanList, luceneConfig, conf);
    }

    if (fs.exists(solrConfig)) {
        LOG.info("Adding Solr searchers in " + solrConfig.makeQualified(fs).toUri());
        addSolrBeans(beanList, solrConfig, conf);
    }
    LOG.info("Added " + beanList.size() + " remote searchers.");

    beans = beanList.toArray(new SearchBean[beanList.size()]);

    liveServers = new boolean[beans.length];
    for (int i = 0; i < liveServers.length; i++) {
        liveServers[i] = true;
    }

    searchTasks = new ArrayList<Callable<Hits>>();
    detailTasks = new ArrayList<Callable<HitDetails[]>>();
    pingWorkers = new ArrayList<PingWorker>();

    for (int i = 0; i < beans.length; i++) {
        searchTasks.add(new SearchTask(i));
        detailTasks.add(new DetailTask(i));
        pingWorkers.add(new PingWorker(i));
    }

    pingService = Executors.newScheduledThreadPool(beans.length);
    for (PingWorker worker : pingWorkers) {
        pingService.scheduleAtFixedRate(worker, 0, 10, TimeUnit.SECONDS);
    }

}

From source file: org.apache.nutch.searcher.LuceneSearchBean.java

License: Apache License

private void init(Path indexDir, Path indexesDir) throws IOException {
    Path absIndexDir = indexDir.makeQualified(indexDir.getFileSystem(conf));
    Path absIndexesDir = indexesDir.makeQualified(indexesDir.getFileSystem(conf));
    if (this.fs.exists(indexDir)) {
        LOG.info("opening merged index in " + absIndexDir.toUri());
        this.searcher = new IndexSearcher(indexDir, this.conf);
    } else {
        if (!this.fs.exists(indexesDir)) {
            // should throw exception ?
            LOG.warn("Neither " + absIndexDir.toUri() + " nor " + absIndexesDir.toUri() + " found!");
        } else {
            LOG.info("opening indexes in " + absIndexesDir.toUri());
        }
        List<Path> vDirs = new ArrayList<Path>();
        FileStatus[] fstats = fs.listStatus(indexesDir, HadoopFSUtil.getPassDirectoriesFilter(fs));
        Path[] directories = HadoopFSUtil.getPaths(fstats);
        for (int i = 0; i < directories.length; i++) {
            Path indexdone = new Path(directories[i], Indexer.DONE_NAME);
            if (fs.isFile(indexdone)) {
                vDirs.add(directories[i]);
            }
        }

        directories = vDirs.toArray(new Path[vDirs.size()]);

        this.searcher = new IndexSearcher(directories, this.conf);
    }
}

From source file: org.apache.parquet.hadoop.ParquetFileWriter.java

License: Apache License

/**
 * Writes the _common_metadata file, and optionally a _metadata file,
 * depending on the {@link JobSummaryLevel} provided.
 */
public static void writeMetadataFile(Configuration configuration, Path outputPath, List<Footer> footers,
        JobSummaryLevel level) throws IOException {
    Preconditions.checkArgument(level == JobSummaryLevel.ALL || level == JobSummaryLevel.COMMON_ONLY,
            "Unsupported level: " + level);

    FileSystem fs = outputPath.getFileSystem(configuration);
    outputPath = outputPath.makeQualified(fs);
    ParquetMetadata metadataFooter = mergeFooters(outputPath, footers);

    if (level == JobSummaryLevel.ALL) {
        writeMetadataFile(outputPath, metadataFooter, fs, PARQUET_METADATA_FILE);
    }

    metadataFooter.getBlocks().clear();
    writeMetadataFile(outputPath, metadataFooter, fs, PARQUET_COMMON_METADATA_FILE);
}
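Note that outputPath is reassigned to its qualified form up front, so mergeFooters and both writeMetadataFile calls all operate on the same fully qualified location.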

From source file: org.apache.sqoop.accumulo.AccumuloUtil.java

License: Apache License

/**
 * Add the .jar elements of a directory to the DCache classpath, optionally
 * recursively.
 */
private static void addDirToCache(File dir, FileSystem fs, Set<String> localUrls, boolean recursive) {
    if (dir != null) {
        File[] fileList = dir.listFiles();

        if (fileList != null) {
            // Iterate the listing captured above rather than calling listFiles() again.
            for (File libFile : fileList) {
                if (libFile.exists() && !libFile.isDirectory() && libFile.getName().endsWith("jar")) {
                    Path p = new Path(libFile.toString());
                    if (libFile.canRead()) {
                        String qualified = p.makeQualified(fs).toString();
                        LOG.info("Adding to job classpath: " + qualified);
                        localUrls.add(qualified);
                    } else {
                        LOG.warn("Ignoring unreadable file " + libFile);
                    }
                }
                if (recursive && libFile.isDirectory()) {
                    addDirToCache(libFile, fs, localUrls, recursive);
                }
            }
        } else {
            LOG.warn("No files under " + dir + " to add to distributed cache for Accumulo job");
        }
    }
}

From source file: org.apache.sqoop.io.LobReaderCache.java

License: Apache License

/**
 * Creates a fully-qualified path object.
 * @param path the path to fully-qualify with its fs URI.
 * @param conf the current Hadoop FS configuration.
 * @return a new path representing the same location as the input 'path',
 * but with a fully-qualified URI.
 */
public static Path qualify(Path path, Configuration conf) throws IOException {
    if (null == path) {
        return null;
    }

    FileSystem fs = path.getFileSystem(conf);
    if (null == fs) {
        fs = FileSystem.get(conf);
    }
    return path.makeQualified(fs);
}
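A brief usage sketch for this helper; the file name is a hypothetical illustration:

Configuration conf = new Configuration();
// Returns null for null input; otherwise the same location with an explicit scheme and authority.
Path qualified = LobReaderCache.qualify(new Path("lobs/large_obj.lob"), conf);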

From source file: org.apache.sqoop.io.SplittingOutputStream.java

License: Apache License

/** Initialize the OutputStream to the next file to write to.
 */
private void openNextFile() throws IOException {
    StringBuffer sb = new StringBuffer();
    Formatter fmt = new Formatter(sb);
    fmt.format("%05d", this.fileNum++);
    String filename = filePrefix + fmt.toString();
    if (codec != null) {
        filename = filename + codec.getDefaultExtension();
    }
    Path destFile = new Path(destDir, filename);
    FileSystem fs = destFile.getFileSystem(conf);
    LOG.debug("Opening next output file: " + destFile);
    if (fs.exists(destFile)) {
        Path canonicalDest = destFile.makeQualified(fs);
        throw new IOException("Destination file " + canonicalDest + " already exists");
    }

    OutputStream fsOut = fs.create(destFile);

    // Count how many actual bytes hit HDFS.
    this.countingFilterStream = new CountingOutputStream(fsOut);

    if (codec != null) {
        // Wrap that in a compressing stream.
        this.writeStream = codec.createOutputStream(this.countingFilterStream);
    } else {
        // Write to the counting stream directly.
        this.writeStream = this.countingFilterStream;
    }
}

From source file: org.apache.sqoop.mapreduce.ExportJobBase.java

License: Apache License

/**
 * @return the Path to the files we are going to export to the db.
 */
protected Path getInputPath() throws IOException {
    if (isHCatJob) {
        return null;
    }
    Path inputPath = new Path(context.getOptions().getExportDir());
    Configuration conf = options.getConf();
    inputPath = inputPath.makeQualified(FileSystem.get(conf));
    return inputPath;
}
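Because this variant qualifies against FileSystem.get(conf), a path without an explicit scheme resolves to the default file system; the other examples on this page call path.getFileSystem(conf) instead, which qualifies the path against the file system it actually belongs to.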

From source file: org.apache.sqoop.mapreduce.hcat.SqoopHCatUtilities.java

License: Apache License

/**
 * Add the .jar elements of a directory to the DCache classpath, optionally
 * recursively.
 */
private static void addDirToCache(File dir, FileSystem fs, Set<String> localUrls, boolean recursive) {
    if (dir == null) {
        return;
    }

    File[] fileList = dir.listFiles();

    if (fileList == null) {
        LOG.warn("No files under " + dir + " to add to distributed cache for hcatalog job");
        return;
    }

    // Iterate the listing captured above rather than calling listFiles() again.
    for (File libFile : fileList) {
        if (libFile.exists() && !libFile.isDirectory() && libFile.getName().endsWith("jar")) {
            Path p = new Path(libFile.toString());
            if (libFile.canRead()) {
                String qualified = p.makeQualified(fs).toString();
                LOG.info("Adding to job classpath: " + qualified);
                localUrls.add(qualified);
            } else {
                LOG.warn("Ignoring unreadable file " + libFile);
            }
        }
        if (recursive && libFile.isDirectory()) {
            addDirToCache(libFile, fs, localUrls, recursive);
        }
    }
}

From source file: org.apache.sqoop.mapreduce.JobBase.java

License: Apache License

private void addToCache(String file, FileSystem fs, Set<String> localUrls) {
    if (null == file) {
        return;
    }

    Path p = new Path(file);
    String qualified = p.makeQualified(fs).toString();
    LOG.debug("Adding to job classpath: " + qualified);
    localUrls.add(qualified);
}