Example usage for org.apache.hadoop.fs Path SEPARATOR

List of usage examples for org.apache.hadoop.fs Path SEPARATOR

Introduction

This page presents example usages of org.apache.hadoop.fs Path.SEPARATOR.

Prototype

public static final String SEPARATOR

To view the source code for org.apache.hadoop.fs Path SEPARATOR, click the Source Link.

Document

The directory separator, a slash.
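
A minimal, self-contained sketch of the constant in use (directory and file names are hypothetical):

import org.apache.hadoop.fs.Path;

public class PathSeparatorExample {
    public static void main(String[] args) {
        // Path.SEPARATOR is the string "/", used when joining path components by hand.
        String dir = "/user/hadoop";   // hypothetical base directory
        String file = "part-00000";    // hypothetical file name
        Path p = new Path(dir + Path.SEPARATOR + file);
        System.out.println(p);         // prints /user/hadoop/part-00000
    }
}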

Usage

From source file:com.jaeksoft.searchlib.crawler.cache.HadoopCrawlCache.java

License:Open Source License

private Path uriToPath(URI uri, String extension) throws UnsupportedEncodingException {
    String path = super.uriToPath(uri, PATH_HTTP_DOWNLOAD_CACHE, 10, Path.SEPARATOR, extension, 32);
    return new Path(path);
}

From source file:com.kadwa.hadoop.DistExec.java

License:Open Source License

/**
 * Make a path relative with respect to a root path.
 * absPath is always assumed to descend from root.
 * Otherwise the returned path is null.
 */
static String makeRelative(Path root, Path absPath) {
    if (!absPath.isAbsolute()) {
        throw new IllegalArgumentException("!absPath.isAbsolute(), absPath=" + absPath);
    }
    String p = absPath.toUri().getPath();

    StringTokenizer pathTokens = new StringTokenizer(p, "/");
    StringTokenizer rootTokens = new StringTokenizer(root.toUri().getPath(), "/");
    while (rootTokens.hasMoreTokens()) {
        if (!rootTokens.nextToken().equals(pathTokens.nextToken())) {
            return null;
        }
    }
    StringBuilder sb = new StringBuilder();
    while (pathTokens.hasMoreTokens()) {
        sb.append(pathTokens.nextToken());
        if (pathTokens.hasMoreTokens()) {
            sb.append(Path.SEPARATOR);
        }
    }
    return sb.length() == 0 ? "." : sb.toString();
}
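
For illustration, assuming hypothetical input paths, the method behaves as follows:

// Hypothetical usage of makeRelative (paths invented for illustration only):
// makeRelative(new Path("/data/input"), new Path("/data/input/2015/01/part-00000")) returns "2015/01/part-00000"
// makeRelative(new Path("/data/input"), new Path("/data/input"))                    returns "."
// makeRelative(new Path("/data/input"), new Path("/data/other/file"))               returns null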

From source file:com.mozilla.bagheera.hazelcast.persistence.HdfsMapStore.java

License:Apache License

public void init(HazelcastInstance hazelcastInstance, Properties properties, String mapName) {
    Configuration conf = new Configuration();
    for (String name : properties.stringPropertyNames()) {
        if (name.startsWith("hadoop.")) {
            conf.set(name, properties.getProperty(name));
        }
    }

    String hdfsBaseDir = properties.getProperty("hazelcast.hdfs.basedir", "/bagheera");
    String dateFormat = properties.getProperty("hazelcast.hdfs.dateformat", "yyyy-MM-dd");
    sdf = new SimpleDateFormat(dateFormat);
    Calendar cal = Calendar.getInstance();
    if (!hdfsBaseDir.endsWith(Path.SEPARATOR)) {
        baseDir = new Path(hdfsBaseDir + Path.SEPARATOR + mapName + Path.SEPARATOR + sdf.format(cal.getTime()));
    } else {
        baseDir = new Path(hdfsBaseDir + mapName + Path.SEPARATOR + sdf.format(cal.getTime()));
    }

    maxFileSize = Integer.parseInt(properties.getProperty("hazelcast.hdfs.max.filesize", "0"));
    LOG.info("Using HDFS max file size: " + maxFileSize);
    previousRolloverMillis = System.currentTimeMillis();

    try {
        hdfs = FileSystem.get(conf);
        initWriter();
    } catch (IOException e) {
        LOG.error("Error initializing SequenceFile.Writer", e);
        throw new RuntimeException(e);
    }
}

From source file:com.mozilla.bagheera.sink.SequenceFileSink.java

License:Apache License

public SequenceFileSink(String namespace, String baseDirPath, String dateFormat, long maxFileSize,
        boolean useBytesValue, boolean addTimestamp) throws IOException {
    LOG.info("Initializing writer for namespace: " + namespace);
    conf = new Configuration();
    conf.setBoolean("fs.automatic.close", false);
    hdfs = FileSystem.newInstance(conf);
    this.useBytesValue = useBytesValue;
    this.maxFileSize = maxFileSize;
    this.addTimestamp = addTimestamp;
    sdf = new SimpleDateFormat(dateFormat);
    if (!baseDirPath.endsWith(Path.SEPARATOR)) {
        baseDir = new Path(baseDirPath + Path.SEPARATOR + namespace + Path.SEPARATOR
                + sdf.format(new Date(System.currentTimeMillis())));
    } else {
        baseDir = new Path(
                baseDirPath + namespace + Path.SEPARATOR + sdf.format(new Date(System.currentTimeMillis())));
    }
    initWriter();
    stored = Metrics.newMeter(new MetricName("bagheera", "sink.hdfs.", namespace + ".stored"), "messages",
            TimeUnit.SECONDS);
}
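
Both this constructor and the HdfsMapStore example above guard against a missing trailing separator before appending further components. A minimal sketch of that guard, using a hypothetical helper name:

// Hypothetical helper capturing the trailing-separator guard used in the two examples above.
static String withTrailingSeparator(String dir) {
    return dir.endsWith(Path.SEPARATOR) ? dir : dir + Path.SEPARATOR;
}

// e.g. baseDir = new Path(withTrailingSeparator(baseDirPath) + namespace + Path.SEPARATOR + sdf.format(new Date()));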

From source file:com.mozilla.hadoop.UnknownPathFinder.java

License:Apache License

/**
 * Get all of the filesystem paths that HBase .META. knows about
 * @param hbaseRootDir
 * @param tableName
 * @return the set of region directory paths that .META. knows about
 * @throws IOException
 */
public static Set<String> getRegionPaths(Path hbaseRootDir, byte[] tableName) throws IOException {
    Set<String> pathSet = new HashSet<String>();

    Scan s = new Scan();
    int i = 0;
    HTable t = null;
    ResultScanner scanner = null;
    try {
        t = new HTable(tableName);
        scanner = t.getScanner(s);
        Result result = null;
        while ((result = scanner.next()) != null) {
            byte[] familyQualifierBytes = result.getValue(HConstants.CATALOG_FAMILY,
                    HConstants.REGIONINFO_QUALIFIER);
            HRegionInfo hri = Writables.getHRegionInfo(familyQualifierBytes);
            HTableDescriptor htd = hri.getTableDesc();
            Path p = HTableDescriptor.getTableDir(hbaseRootDir, htd.getName());
            pathSet.add(p.toString() + Path.SEPARATOR + hri.getEncodedName());
            i++;
        }
        LOG.info("# of Known Directories in .META.: " + i);
    } finally {
        if (scanner != null) {
            scanner.close();
        }
        if (t != null) {
            try {
                t.close();
            } catch (IOException e) {
                LOG.error("Failed to close table!", e);
            }
        }
    }

    return pathSet;
}

From source file:com.netflix.bdp.inviso.history.impl.BucketedHistoryLocator.java

License:Apache License

/**
 * Returns the config and history locations.
 * @param jobId
 * @return a pair of (config path, history path) for the given job id
 */
@Override
public Pair<Path, Path> locate(String jobId) {
    String bucket = jobId.substring(jobId.length() - conf.getInt("inviso.history.bucket.depth", 3));

    Path originConfig = new Path(bucketedPath + Path.SEPARATOR + bucket, jobId + configPostfix);
    Path originHistory = new Path(bucketedPath + Path.SEPARATOR + bucket, jobId + historyPostfix);

    return new ImmutablePair<>(originConfig, originHistory);
}
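
For illustration (values hypothetical): with the default bucket depth of 3 configured above, the last three characters of the job id select the bucket directory:

// Hypothetical walk-through of locate():
//   jobId = "job_1406251924096_000412", inviso.history.bucket.depth = 3 -> bucket = "412"
//   config  path: <bucketedPath>/412/job_1406251924096_000412<configPostfix>
//   history path: <bucketedPath>/412/job_1406251924096_000412<historyPostfix>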

From source file:com.practicalHadoop.outputformat.MultpleDirectories.FileOutputCommitter.java

License:Apache License

/**
 * Create a file output committer
 * @param outputPath the job's output path
 * @param context the task's context
 * @throws IOException
 */
public FileOutputCommitter(Path outputPath, TaskAttemptContext context) throws IOException {
    super(outputPath, context);
    Job job = new Job(context.getConfiguration());
    String outputDirectories = job.getConfiguration().get(MULTIPLE_OUTPUTS, "");
    if (outputDirectories != null) {
        StringTokenizer st = new StringTokenizer(outputDirectories, " ");
        while (st.hasMoreTokens()) {
            pathNames.add(st.nextToken());
        }
    }
    if (outputPath != null) {
        this.outputPath = outputPath;
        outputFileSystem = outputPath.getFileSystem(context.getConfiguration());
        workPath = new Path(outputPath, (FileOutputCommitter.TEMP_DIR_NAME + Path.SEPARATOR + "_"
                + context.getTaskAttemptID().toString())).makeQualified(outputFileSystem);
        for (String p : pathNames) {
            if (outputPath.toString().endsWith(p)) {
                committers.put(p, this);
                fake = false;
                break;
            }
        }
    }
}

From source file:com.scaleoutsoftware.soss.hserver.hadoop.DistributedCacheManager.java

License:Apache License

/**
 * Utility method for creating a symlink and warning on errors.
 *
 * If link is null, this method does nothing.
 */
private void symlink(File workDir, String target, String link) throws IOException {
    if (link != null) {
        link = workDir.toString() + Path.SEPARATOR + link;
        File flink = new File(link);

        //CODE CHANGE FROM ORIGINAL FILE, BUG FIX:
        //
        //If the cleanup of the previous job failed for some reason, we can have a lingering symlink
        //pointing either to an obsolete file (in that case flink.exists() == true) or to a non-existent
        //file (flink.exists() == false). In the second case, the original code tried to create the symlink
        //anyway, causing an "already exists" error. In the first case, this method used to do nothing
        //without logging it, which effectively left the old symlink in place, leading to elusive bugs.
        //
        //Changes:
        //1. Try to delete the symlink, and log if there was one to delete (it means something went wrong with cleanup).
        //2. Remove the if (!flink.exists()) check before creating the symlink.
        if (flink.delete()) {
            LOG.warn(String.format("Symlink already existed, deleting: %s <- %s", target, link));
        }

        LOG.info(String.format("Creating symlink: %s <- %s", target, link));
        if (0 != FileUtil.symLink(target, link)) {
            LOG.warn(String.format("Failed to create symlink: %s <- %s", target, link));
        } else {
            symlinksCreated.add(new File(link));
        }

    }
}

From source file:com.thinkbiganalytics.kylo.catalog.file.DefaultCatalogFileManager.java

License:Apache License

@Override
public void deleteUpload(@Nonnull final DataSet dataSet, @Nonnull final String fileName) throws IOException {
    final Path path = getUploadPath(dataSet, fileName);
    if (!isolatedFunction(dataSet, path, fs -> fs.delete(path, false))) {
        log.info("Delete unsuccessful for path: {}", path);
        throw new IOException("Failed to delete: " + dataSet.getId() + Path.SEPARATOR + fileName);
    }
}

From source file:com.thinkbiganalytics.kylo.catalog.file.PathValidator.java

License:Apache License

/**
 * Determines if the specified path is allowed for the specified data set and data source.
 */
private boolean isPathAllowed(@Nonnull final Path path, @Nullable final String dataSetId,
        @Nonnull final DataSource dataSource) {
    final Optional<List<String>> dataSourcePaths = DataSourceUtil.getPaths(dataSource);
    if (dataSourcePaths.isPresent()) {
        final Stream<String> allowedPaths = dataSourcePaths.get().stream();
        final String pluginId = dataSource.getConnector().getPluginId();
        final Optional<ConnectorPlugin> plugin = this.pluginManager.getPlugin(pluginId);

        if (plugin.isPresent()) {
            if (ConnectorUtil.hasAnyTabSref(plugin.get().getDescriptor(), fileSystemSrefs)) {
                return isPathAllowed(path.toUri(), toURIs(allowedPaths));
            }
            if (dataSetId != null && ConnectorUtil.hasAnyTabSref(plugin.get().getDescriptor(), uploadSrefs)) {
                final Stream<String> uploadPaths = allowedPaths
                        .map(allowedPath -> allowedPath.endsWith(Path.SEPARATOR) ? allowedPath
                                : allowedPath + Path.SEPARATOR)
                        .map(allowedPath -> allowedPath + dataSetId + Path.SEPARATOR);
                return isPathAllowed(path.toUri(), toURIs(uploadPaths));
            }
        } else {
            return false;
        }
    }
    return true;
}
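
For illustration (values hypothetical): when the connector supports uploads, each allowed data source path is normalized to end with Path.SEPARATOR and then scoped to the data set id before the URI comparison:

// Hypothetical walk-through of the upload-path normalization above:
//   allowedPath = "/var/dropzone", dataSetId = "ds1"
//   -> "/var/dropzone" + Path.SEPARATOR           = "/var/dropzone/"
//   -> "/var/dropzone/" + "ds1" + Path.SEPARATOR  = "/var/dropzone/ds1/"
// so /var/dropzone/ds1/upload.csv would be allowed, while /var/dropzone/other/upload.csv would not.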