Example usage for org.apache.hadoop.fs Path toUri

Introduction

This page collects usage examples for org.apache.hadoop.fs.Path#toUri, drawn from open-source projects.

Prototype

public URI toUri() 

Document

Convert this Path to a URI.
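
For orientation before the project examples, here is a minimal, self-contained sketch of the call; the hdfs:// authority and file names are hypothetical:

import java.net.URI;

import org.apache.hadoop.fs.Path;

public class PathToUriExample {
    public static void main(String[] args) {
        // A fully qualified path keeps its scheme and authority in the URI.
        Path qualified = new Path("hdfs://namenode:8020/user/alice/data.txt");
        URI uri = qualified.toUri();
        System.out.println(uri.getScheme()); // hdfs
        System.out.println(uri.getAuthority()); // namenode:8020
        System.out.println(uri.getPath()); // /user/alice/data.txt

        // A relative path yields a URI with no scheme or authority.
        Path relative = new Path("data/input.txt");
        System.out.println(relative.toUri()); // data/input.txt
    }
}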

Usage

From source file:com.knewton.mapreduce.SSTableRecordReaderTest.java

License:Apache License

/**
 * Test a valid configuration of the SSTableColumnRecordReader.
 */
@Test
public void testInitializeColumnReader() throws Exception {
    Path inputPath = inputSplit.getPath();
    FileSystem remoteFS = FileSystem.get(inputPath.toUri(), conf);
    FileSystem localFS = FileSystem.getLocal(conf);
    TaskAttemptContext context = getTaskAttemptContext(true, true, false);
    ssTableColumnRecordReader.initialize(inputSplit, context);
    verify(ssTableColumnRecordReader).copyTablesToLocal(remoteFS, localFS, inputPath, context);
    ssTableColumnRecordReader.close();
}
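
The record reader tests in this file all share one idiom: resolving the FileSystem that owns a Path from that Path's URI. A standalone sketch of the idiom, with a hypothetical cluster address:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemForPath {
    public static void main(String[] args) throws Exception {
        // FileSystem.get(URI, Configuration) selects the implementation from
        // the URI's scheme and authority; a schemeless URI falls back to the
        // configured default (fs.defaultFS).
        Configuration conf = new Configuration();
        Path inputPath = new Path("hdfs://namenode:8020/data/sstables"); // hypothetical
        FileSystem remoteFS = FileSystem.get(inputPath.toUri(), conf);
        FileSystem localFS = FileSystem.getLocal(conf);
        System.out.println(remoteFS.getUri() + " / " + localFS.getUri());
    }
}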

From source file:com.knewton.mapreduce.SSTableRecordReaderTest.java

License:Apache License

/**
 * Test a valid configuration of the SSTableRowRecordReader.
 */
@Test
public void testInitializeRowReader() throws Exception {
    Path inputPath = inputSplit.getPath();
    FileSystem remoteFS = FileSystem.get(inputPath.toUri(), conf);
    FileSystem localFS = FileSystem.getLocal(conf);
    TaskAttemptContext context = getTaskAttemptContext(true, true, true);
    ssTableRowRecordReader.initialize(inputSplit, context);
    verify(ssTableRowRecordReader).copyTablesToLocal(remoteFS, localFS, inputPath, context);
    ssTableRowRecordReader.close();
}

From source file:com.knewton.mapreduce.SSTableRecordReaderTest.java

License:Apache License

/**
 * Tests that initialization doesn't fail when the column family is explicitly marked as not sparse
 */
@Test
public void testInitialize() throws Exception {
    Path inputPath = inputSplit.getPath();
    FileSystem remoteFS = FileSystem.get(inputPath.toUri(), conf);
    FileSystem localFS = FileSystem.getLocal(conf);
    TaskAttemptContext context = getTaskAttemptContext(true, true, true);
    SSTableInputFormat.setIsSparse(false, job);
    ssTableColumnRecordReader.initialize(inputSplit, context);
    verify(ssTableColumnRecordReader).copyTablesToLocal(remoteFS, localFS, inputPath, context);
    ssTableColumnRecordReader.close();
}

From source file:com.knewton.mapreduce.SSTableRowRecordReaderTest.java

License:Apache License

/**
 * Make sure that the single row that was set can be read and returned
 */
@Test
public void testNextKeyValue() throws Exception {
    Path inputPath = inputSplit.getPath();
    FileSystem remoteFS = FileSystem.get(inputPath.toUri(), conf);
    FileSystem localFS = FileSystem.getLocal(conf);
    TaskAttemptContext context = getTaskAttemptContext();
    ssTableRowRecordReader.initialize(inputSplit, context);
    verify(ssTableRowRecordReader).copyTablesToLocal(remoteFS, localFS, inputPath, context);

    assertEquals(0, ssTableRowRecordReader.getProgress(), 0);
    assertTrue(ssTableRowRecordReader.nextKeyValue());
    assertEquals(key.getKey(), ssTableRowRecordReader.getCurrentKey());
    assertEquals(row, ssTableRowRecordReader.getCurrentValue());

    assertEquals(1, ssTableRowRecordReader.getProgress(), 0);
    assertFalse(ssTableRowRecordReader.nextKeyValue());
    assertNull(ssTableRowRecordReader.getCurrentKey());
    assertNull(ssTableRowRecordReader.getCurrentValue());
}

From source file:com.liferay.hadoop.store.HDFSStore.java

License:Open Source License

@Override
public InputStream getFileAsStream(long companyId, long repositoryId, String fileName, String versionLabel)
        throws PortalException, SystemException {

    Path fullPath = HadoopManager.getFullVersionFilePath(companyId, repositoryId, fileName, versionLabel);

    try {
        FileSystem fileSystem = HadoopManager.getFileSystem();

        if (!fileSystem.exists(fullPath)) {
            throw new PortalException("File " + fullPath.toUri().toString() + " does not exist");
        }

        return fileSystem.open(fullPath);
    } catch (IOException ioe) {
        throw new SystemException(ioe);
    }
}

From source file:com.liferay.hadoop.store.HDFSStore.java

License:Open Source License

@Override
public long getFileSize(long companyId, long repositoryId, String fileName)
        throws PortalException, SystemException {

    Path fullPath = HadoopManager.getFullVersionFilePath(companyId, repositoryId, fileName, VERSION_DEFAULT);

    try {
        FileSystem fileSystem = HadoopManager.getFileSystem();

        if (!fileSystem.exists(fullPath)) {
            throw new PortalException("File " + fullPath.toUri().toString() + " does not exist");
        }

        FileStatus fileStatus = fileSystem.getFileStatus(fullPath);

        return fileStatus.getLen();
    } catch (IOException ioe) {
        throw new SystemException(ioe);
    }
}

From source file:com.liferay.hadoop.store.HDFSStore.java

License:Open Source License

@Override
public void updateFile(long companyId, long repositoryId, long newRepositoryId, String fileName)
        throws PortalException, SystemException {

    Path sourcePath = HadoopManager.getFullVersionFilePath(companyId, repositoryId, fileName, VERSION_DEFAULT);
    Path targetPath = HadoopManager.getFullVersionFilePath(companyId, newRepositoryId, fileName,
            VERSION_DEFAULT);

    try {
        FileSystem fileSystem = HadoopManager.getFileSystem();

        if (fileSystem.exists(targetPath)) {
            throw new DuplicateFileException(fileName);
        }

        if (!fileSystem.exists(sourcePath)) {
            throw new PortalException("File " + sourcePath.toUri().toString() + " does not exist");
        }

        boolean renamed = fileSystem.rename(sourcePath, targetPath);

        if (!renamed) {
            throw new SystemException("File name directory was not renamed from "
                    + sourcePath.toUri().toString() + " to " + targetPath.toUri().toString());
        }
    } catch (IOException ioe) {
        throw new SystemException(ioe);
    }
}

From source file:com.liferay.hadoop.store.HDFSStore.java

License:Open Source License

public void updateFile(long companyId, long repositoryId, String fileName, String newFileName)
        throws PortalException, SystemException {

    Path sourcePath = HadoopManager.getFullVersionFilePath(companyId, repositoryId, fileName, VERSION_DEFAULT);
    Path targetPath = HadoopManager.getFullVersionFilePath(companyId, repositoryId, newFileName,
            VERSION_DEFAULT);

    try {
        FileSystem fileSystem = HadoopManager.getFileSystem();

        if (fileSystem.exists(targetPath)) {
            throw new DuplicateFileException(fileName);
        }

        if (!fileSystem.exists(sourcePath)) {
            throw new PortalException("File " + sourcePath.toUri().toString() + " does not exist");
        }

        boolean renamed = fileSystem.rename(sourcePath, targetPath);

        if (!renamed) {
            throw new SystemException("File name directory was not renamed from "
                    + sourcePath.toUri().toString() + " to " + targetPath.toUri().toString());
        }
    } catch (IOException ioe) {
        throw new SystemException(ioe);
    }
}

From source file:com.lightboxtechnologies.spectrum.ExtractData.java

License:Apache License

public int run(String[] args) throws Exception {
    if (args.length != 4) {
        System.err.println("Usage: ExtractData <imageID> <friendly_name> <extents_file> <evidence file>");
        return 2;
    }

    final String imageID = args[0];
    final String friendlyName = args[1];
    final String extentsPath = args[2];
    final String image = args[3];

    Configuration conf = getConf();

    final Job job = SKJobFactory.createJobFromConf(imageID, friendlyName, "ExtractData", conf);
    job.setJarByClass(ExtractData.class);
    job.setMapperClass(ExtractDataMapper.class);
    job.setReducerClass(KeyValueSortReducer.class);
    job.setNumReduceTasks(1);

    // The Job constructor copies the Configuration we pass in, so get the real one back
    conf = job.getConfiguration();

    conf.setLong("timestamp", System.currentTimeMillis());

    job.setInputFormatClass(RawFileInputFormat.class);
    RawFileInputFormat.addInputPath(job, new Path(image));

    job.setOutputFormatClass(HFileOutputFormat.class);
    job.setOutputKeyClass(ImmutableBytesWritable.class);
    job.setOutputValueClass(KeyValue.class);

    conf.setInt("mapreduce.job.jvm.numtasks", -1);

    final FileSystem fs = FileSystem.get(conf);
    Path hfileDir = new Path("/texaspete/ev/tmp", UUID.randomUUID().toString());
    hfileDir = hfileDir.makeQualified(fs);
    LOG.info("Hashes will be written temporarily to " + hfileDir);

    HFileOutputFormat.setOutputPath(job, hfileDir);

    final Path extp = new Path(extentsPath);
    final URI extents = extp.toUri();
    LOG.info("extents file is " + extents);

    DistributedCache.addCacheFile(extents, conf);
    conf.set("com.lbt.extentsname", extp.getName());
    // job.getConfiguration().setBoolean("mapred.task.profile", true);
    // job.getConfiguration().setBoolean("mapreduce.task.profile", true);

    HBaseTables.summon(conf, HBaseTables.HASH_TBL_B, HBaseTables.HASH_COLFAM_B);

    HBaseTables.summon(conf, HBaseTables.ENTRIES_TBL_B, HBaseTables.ENTRIES_COLFAM_B);

    final boolean result = job.waitForCompletion(true);
    if (result) {
        LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
        HBaseConfiguration.addHbaseResources(conf);
        loader.setConf(conf);
        LOG.info("Loading hashes into hbase");
        chmodR(fs, hfileDir);
        loader.doBulkLoad(hfileDir, new HTable(conf, HBaseTables.HASH_TBL_B));
        //      result = fs.delete(hfileDir, true);
    }
    return result ? 0 : 1;
}
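
DistributedCache.addCacheFile takes a java.net.URI, which is why the extents Path is converted with toUri() before it is registered. DistributedCache is deprecated in newer Hadoop releases; an equivalent sketch using the replacement API on Job (path hypothetical, placed inside run() after the Job is created):

// Modern equivalent of DistributedCache.addCacheFile(uri, conf):
// register the file on the Job itself, again via Path#toUri().
Path extentsFile = new Path("/texaspete/extents"); // hypothetical path
job.addCacheFile(extentsFile.toUri());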

From source file:com.lightboxtechnologies.spectrum.HDFSArchiver.java

License:Apache License

protected static String relativize(Path p) {
    // chop off HDFS scheme and relativize to /
    String relpath = p.toUri().getPath();
    if (relpath.startsWith("/")) {
        relpath = relpath.substring(1);
    }

    return relpath;
}
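
As a usage note, toUri().getPath() returns only the path component, so the scheme and authority are already gone before the leading slash is trimmed. A hypothetical call:

Path p = new Path("hdfs://namenode:8020/texaspete/ev/archive.zip");
String rel = relativize(p); // "texaspete/ev/archive.zip"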