Example usage for org.apache.hadoop.fs.Path.toUri()

List of usage examples for org.apache.hadoop.fs.Path.toUri()

Introduction

On this page you can find example usages of org.apache.hadoop.fs.Path.toUri().

Prototype

public URI toUri() 

Document

Convert this Path to a URI.
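As a quick illustration of what the returned URI contains, here is a minimal, self-contained sketch (the host name and path are hypothetical):

import java.net.URI;

import org.apache.hadoop.fs.Path;

public class PathToUriDemo {
    public static void main(String[] args) {
        // A fully qualified Path carries its scheme and authority into the URI.
        Path p = new Path("hdfs://namenode:8020/user/alice/data.txt");
        URI uri = p.toUri();
        System.out.println(uri.getScheme());    // hdfs
        System.out.println(uri.getAuthority()); // namenode:8020
        System.out.println(uri.getPath());      // /user/alice/data.txt
    }
}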

Usage

From source file: com.quantcast.qfs.hadoop.QuantcastFileSystem.java

License: Apache License

public boolean setReplication(Path path, short replication) throws IOException {

    Path absolute = makeAbsolute(path);
    String srep = absolute.toUri().getPath();

    int res = qfsImpl.setReplication(srep, replication);
    return res >= 0;
}

From source file: com.qubole.streamx.s3.S3Storage.java

License: Apache License

private void renameFile(String sourcePath, String targetPath) throws IOException {
    if (sourcePath.equals(targetPath)) {
        return;
    }
    final Path srcPath = new Path(sourcePath);
    final Path dstPath = new Path(targetPath);
    // Resolve the FileSystem that matches the source path's URI scheme,
    // then use it for the rename.
    FileSystem localFs = FileSystem.get(srcPath.toUri(), hadoopConf);
    localFs.rename(srcPath, dstPath);
}
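The example above resolves a FileSystem from the source path's URI; the URI's scheme decides which implementation FileSystem.get() returns. A minimal sketch of that lookup, using a local file URI so it runs without a cluster:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SchemeLookupSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // "file" resolves to the local filesystem; "hdfs" or "s3a"
        // would resolve to their registered implementations instead.
        Path src = new Path("file:///tmp/example.txt");
        FileSystem fs = FileSystem.get(src.toUri(), conf);
        System.out.println(fs.getClass().getName());
    }
}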

From source file: com.quest.orahive.Utilities.java

License: Apache License

public static String getOraHiveJarFile() {

    Path jarFilePath = new Path(Utilities.class.getProtectionDomain().getCodeSource().getLocation().getPath());
    String result = jarFilePath.toUri().getPath();
    if (!result.endsWith(".jar"))
        result = result + Path.SEPARATOR_CHAR + Constants.ORAHIVE_JAR_FILENAME;
    return result;
}

From source file: com.quixey.hadoop.fs.oss.CloudOSSFileSystemStoreTest.java

License: Apache License

@Test
public void testMultipartListing() throws Exception {
    final int count = 50;

    conf.setInt(OSS_MAX_LISTING_LENGTH_PROPERTY, 2);
    fs.initialize(URI.create(conf.get(TEST_URI_PROPERTY)), conf);

    Path path = fs.makeQualified(new Path("list"));
    String base = path.toUri().getPath().substring(1);

    for (int i = 0; i < count; i++) {
        store.storeEmptyFile(base + "/x-" + i);
    }

    // check count
    FileStatus[] files = fs.listStatus(path);
    assertEquals(count, files.length);

    // collect all names, check
    TreeSet<String> names = new TreeSet<>();
    for (FileStatus file : files)
        names.add(file.getPath().getName());
    for (int i = 0; i < count; i++)
        assertTrue(names.contains("x-" + i));
}

From source file: com.quixey.hadoop.fs.oss.FileSystemUtils.java

License: Apache License

static String pathToKey(Path path) {
    return path.toUri().getPath().substring(1);
}

From source file: com.quixey.hadoop.fs.oss.OSSFileSystem.java

License: Apache License

/**
 * Constructs an OSS key from the given {@code path}.
 *
 * If a path has a trailing slash, it is removed.
 *
 * @param path absolute HDFS path
 * @return OSS key
 */
private static String pathToKey(Path path) {
    if (null != path.toUri().getScheme() && path.toUri().getPath().isEmpty()) {
        // allow uris without trailing slash after bucket to refer to root,
        // like oss://mybucket
        return "";
    }

    checkArgument(path.isAbsolute(), "Path must be absolute: " + path);

    String ret = path.toUri().getPath().substring(1); // remove initial slash
    if (ret.endsWith("/") && ret.indexOf("/") != ret.length() - 1) {
        ret = ret.substring(0, ret.length() - 1);
    }

    return ret;
}
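As a usage note, a couple of sample inputs (the bucket and key names are made up for illustration) make the two branches concrete:

// Assuming the pathToKey() above:
// pathToKey(new Path("oss://mybucket"))          -> ""         (bucket root, empty URI path)
// pathToKey(new Path("oss://mybucket/dir/file")) -> "dir/file" (leading slash stripped)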

From source file: com.quixey.hadoop.fs.oss.OSSFileSystem.java

License: Apache License

@Override
@Nonnull
public FileStatus[] listStatus(Path path) throws IOException {
    path = checkNotNull(path);

    Path absolutePath = makeAbsolute(path);
    String key = pathToKey(absolutePath);

    if (key.length() > 0) {
        FileMetadata meta = store.retrieveMetadata(key);
        // if metadata exists, return it
        if (null != meta) {
            return new FileStatus[] { newFile(meta, absolutePath) };
        }
    }

    // treat path as a directory, and collect its children
    URI pathUri = absolutePath.toUri();
    Set<FileStatus> status = new TreeSet<>();
    String marker = null;
    do {
        PartialListing listing = store.list(key, maxListingLength, marker, false);

        for (FileMetadata fileMetadata : listing.getFiles()) {
            Path subpath = keyToPath(fileMetadata.getKey());
            String relativePath = pathUri.relativize(subpath.toUri()).getPath();

            if (fileMetadata.getKey().equals(key + "/")) {
                // this is just the directory we have been asked to list
                LOG.trace("..");
            } else if (relativePath.endsWith(FOLDER_SUFFIX)) {
                status.add(newDirectory(new Path(absolutePath,
                        relativePath.substring(0, relativePath.indexOf(FOLDER_SUFFIX)))));
            } else {
                status.add(newFile(fileMetadata, subpath));
            }
        }

        for (String commonPrefix : listing.getCommonPrefixes()) {
            Path subpath = keyToPath(commonPrefix);
            String relativePath = pathUri.relativize(subpath.toUri()).getPath();
            status.add(newDirectory(new Path(absolutePath, relativePath)));
        }

        // keep paging through the listing
        marker = listing.getMarker();
    } while (null != marker);

    if (status.isEmpty() && key.length() > 0 && null == store.retrieveMetadata(key + FOLDER_SUFFIX)) {
        throw new FileNotFoundException("File " + path + " does not exist.");
    }

    return status.toArray(new FileStatus[status.size()]);
}
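The child names in the listing above come from java.net.URI#relativize. A standalone sketch (bucket and paths are hypothetical) shows how relativizing a child URI against the directory URI yields the relative entry name:

import java.net.URI;

public class RelativizeDemo {
    public static void main(String[] args) {
        URI dir = URI.create("oss://mybucket/user/dir");
        URI child = URI.create("oss://mybucket/user/dir/sub/part-0000");
        // relativize() strips the directory prefix, leaving the child's
        // path relative to the directory.
        System.out.println(dir.relativize(child).getPath()); // sub/part-0000
    }
}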

From source file: com.rapleaf.ramhdfs.RamFileSystem.java

License: Apache License

private static Path makeAbsolute(Path f) {
    if (f.isAbsolute()) {
        return new Path("ram:" + f.toUri().getSchemeSpecificPart());
    } else {
        return new Path(workingDir, f);
    }
}
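getSchemeSpecificPart() returns everything after the scheme (authority plus path), so prefixing "ram:" swaps only the scheme and keeps the rest of the URI intact. A minimal sketch (host and path are hypothetical):

import java.net.URI;

public class SchemeSwapDemo {
    public static void main(String[] args) {
        URI u = URI.create("hdfs://namenode:8020/user/data");
        System.out.println(u.getSchemeSpecificPart());          // //namenode:8020/user/data
        System.out.println("ram:" + u.getSchemeSpecificPart()); // ram://namenode:8020/user/data
    }
}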

From source file: com.ricemap.spateDB.core.RTreeGridRecordWriter.java

License: Apache License

/**
 * Closes a cell by writing all outstanding objects and closing current file.
 * Then, the file is read again, an RTree is built on top of it and, finally,
 * the file is written again with the RTree built.
 */
@Override
protected Path flushAllEntries(Path intermediateCellPath, OutputStream intermediateCellStream,
        Path finalCellPath) throws IOException {
    // Close stream to current intermediate file.
    intermediateCellStream.close();

    // Read the entire intermediate file into memory.
    // readFully() keeps reading until the buffer is full, unlike a
    // single InputStream.read(), which may return fewer bytes.
    File intermediateFile = new File(intermediateCellPath.toUri().getPath());
    byte[] cellData = new byte[(int) intermediateFile.length()];
    DataInputStream cellIn = new DataInputStream(new FileInputStream(intermediateFile));
    cellIn.readFully(cellData);
    cellIn.close();

    // Build an RTree over the elements read from file
    RTree<S> rtree = new RTree<S>();
    rtree.setStockObject(stockObject);
    // It should create a new stream
    DataOutputStream cellStream = (DataOutputStream) createFinalCellStream(finalCellPath);
    cellStream.writeLong(SpatialSite.RTreeFileMarker);
    int degree = 4096 / RTree.NodeSize;
    rtree.bulkLoadWrite(cellData, 0, cellData.length, degree, cellStream, fastRTree, columnarStorage);
    cellStream.close();
    cellData = null; // To allow GC to collect it

    return finalCellPath;
}

From source file: com.ricemap.spateDB.core.SpatialSite.java

License: Apache License

public static void setCells(JobConf job, CellInfo[] cellsInfo) throws IOException {
    Path tempFile;
    FileSystem fs = FileSystem.get(job);
    do {
        tempFile = new Path(job.getJobName() + "_" + (int) (Math.random() * 1000000) + ".cells");
    } while (fs.exists(tempFile));
    FSDataOutputStream out = fs.create(tempFile);
    out.writeInt(cellsInfo.length);
    for (CellInfo cell : cellsInfo) {
        cell.write(out);
    }
    out.close();

    fs.deleteOnExit(tempFile);

    DistributedCache.addCacheFile(tempFile.toUri(), job);
    job.set(OUTPUT_CELLS, tempFile.getName());
    LOG.info("Partitioning file into " + cellsInfo.length + " cells");
}