Example usage for org.apache.hadoop.fs FileSystem getUri

List of usage examples for org.apache.hadoop.fs FileSystem getUri

Introduction

On this page you can find example usage for org.apache.hadoop.fs FileSystem getUri.

Prototype

public abstract URI getUri();

Document

Returns a URI which identifies this FileSystem.
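Before the project examples below, here is a minimal, self-contained sketch (not taken from any of the listed projects; the class name GetUriExample and the path "data/input" are made up for illustration) showing how getUri is typically paired with getWorkingDirectory to fully qualify a path:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class GetUriExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Resolve the default FileSystem from the configuration (e.g. HDFS or the local FS).
        FileSystem fs = FileSystem.get(conf);

        // getUri() returns a URI that identifies this FileSystem,
        // for example hdfs://namenode:8020 or file:///
        URI fsUri = fs.getUri();
        System.out.println("Default filesystem: " + fsUri);

        // A pattern that recurs in the examples below: qualify a relative path
        // with the filesystem's URI and its working directory.
        Path qualified = new Path("data/input").makeQualified(fsUri, fs.getWorkingDirectory());
        System.out.println("Qualified path: " + qualified);
    }
}

The getUri()/getWorkingDirectory() pair is what the Accumulo and Blur examples below pass to Path.makeQualified so that paths carry an explicit scheme and authority.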

Usage

From source file:org.apache.accumulo.server.util.TabletOperations.java

License:Apache License

public static String createTabletDirectory(VolumeManager fs, String tableId, Text endRow) {
    String lowDirectory;

    UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
    String volume = fs.choose(ServerConstants.getTablesDirs());

    while (true) {
        try {
            if (endRow == null) {
                lowDirectory = Constants.DEFAULT_TABLET_LOCATION;
                Path lowDirectoryPath = new Path(volume + "/" + tableId + "/" + lowDirectory);
                if (fs.exists(lowDirectoryPath) || fs.mkdirs(lowDirectoryPath)) {
                    FileSystem pathFs = fs.getVolumeByPath(lowDirectoryPath).getFileSystem();
                    return lowDirectoryPath.makeQualified(pathFs.getUri(), pathFs.getWorkingDirectory())
                            .toString();
                }
                log.warn("Failed to create " + lowDirectoryPath + " for unknown reason");
            } else {
                lowDirectory = "/" + Constants.GENERATED_TABLET_DIRECTORY_PREFIX + namer.getNextName();
                Path lowDirectoryPath = new Path(volume + "/" + tableId + "/" + lowDirectory);
                if (fs.exists(lowDirectoryPath))
                    throw new IllegalStateException("Dir exist when it should not " + lowDirectoryPath);
                if (fs.mkdirs(lowDirectoryPath)) {
                    FileSystem lowDirectoryFs = fs.getVolumeByPath(lowDirectoryPath).getFileSystem();
                    return lowDirectoryPath
                            .makeQualified(lowDirectoryFs.getUri(), lowDirectoryFs.getWorkingDirectory())
                            .toString();
                }
            }
        } catch (IOException e) {
            log.warn(e);
        }

        log.warn("Failed to create dir for tablet in table " + tableId + " in volume " + volume
                + " + will retry ...");
        UtilWaitThread.sleep(3000);

    }
}

From source file:org.apache.accumulo.tserver.tablet.Tablet.java

License:Apache License

private static String createTabletDirectory(VolumeManager fs, String tableId, Text endRow) {
    String lowDirectory;

    UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
    String volume = fs.choose(Optional.of(tableId), ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR
            + Path.SEPARATOR;

    while (true) {
        try {
            if (endRow == null) {
                lowDirectory = Constants.DEFAULT_TABLET_LOCATION;
                Path lowDirectoryPath = new Path(volume + "/" + tableId + "/" + lowDirectory);
                if (fs.exists(lowDirectoryPath) || fs.mkdirs(lowDirectoryPath)) {
                    FileSystem pathFs = fs.getVolumeByPath(lowDirectoryPath).getFileSystem();
                    return lowDirectoryPath.makeQualified(pathFs.getUri(), pathFs.getWorkingDirectory())
                            .toString();
                }
                log.warn("Failed to create " + lowDirectoryPath + " for unknown reason");
            } else {
                lowDirectory = "/" + Constants.GENERATED_TABLET_DIRECTORY_PREFIX + namer.getNextName();
                Path lowDirectoryPath = new Path(volume + "/" + tableId + "/" + lowDirectory);
                if (fs.exists(lowDirectoryPath))
                    throw new IllegalStateException("Dir exist when it should not " + lowDirectoryPath);
                if (fs.mkdirs(lowDirectoryPath)) {
                    FileSystem lowDirectoryFs = fs.getVolumeByPath(lowDirectoryPath).getFileSystem();
                    return lowDirectoryPath
                            .makeQualified(lowDirectoryFs.getUri(), lowDirectoryFs.getWorkingDirectory())
                            .toString();
                }
            }
        } catch (IOException e) {
            log.warn(e);
        }

        log.warn("Failed to create dir for tablet in table " + tableId + " in volume " + volume
                + " + will retry ...");
        sleepUninterruptibly(3, TimeUnit.SECONDS);

    }
}

From source file:org.apache.accumulo.utils.NamespaceRename.java

License:Apache License

private static void checkConfiguration(Opts opts, AccumuloConfiguration configuration) throws IOException {
    if (opts.oldName.endsWith("/"))
        throw new RuntimeException(opts.oldName + " ends with a slash, do not include it");
    if (opts.newName.endsWith("/"))
        throw new RuntimeException(opts.newName + " ends with a slash, do not include it");
    String volumes = configuration.get(Property.INSTANCE_VOLUMES);
    if (volumes != null && !volumes.isEmpty()) {
        Set<String> volumeSet = new HashSet<String>(Arrays.asList(volumes.split(",")));
        if (volumeSet.contains(opts.oldName))
            throw new RuntimeException(Property.INSTANCE_VOLUMES.getKey() + " is set to " + volumes
                    + " which still contains the old name " + opts.oldName);
        if (!volumeSet.contains(opts.newName))
            throw new RuntimeException(Property.INSTANCE_VOLUMES.getKey() + " is set to " + volumes
                    + " which does not contain the new name " + opts.oldName);
        return;
    } else {
        String uri = configuration.get(Property.INSTANCE_DFS_URI);
        if (uri != null && !uri.isEmpty()) {
            if (!uri.startsWith(opts.newName))
                throw new RuntimeException(Property.INSTANCE_DFS_DIR.getKey() + " is set to " + uri
                        + " which is not in " + opts.newName);
            return;
        }
    }
    FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
    if (!fs.getUri().toString().equals(opts.newName))
        throw new RuntimeException("Default filesystem is " + fs.getUri() + " and the new name is "
                + opts.newName + ". Update your hadoop dfs configuration.");
}

From source file:org.apache.ambari.servicemonitor.utils.DFSUtils.java

License:Apache License

/**
 * Create a DFS Instance that is not cached
 *
 * @param conf the configuration to work with
 * @return the DFS Instance
 * @throws IOException on any IO problem
 * @throws ExitMainException if the default FS isn't HDFS
 */
public static DistributedFileSystem createUncachedDFS(Configuration conf) throws IOException {
    conf.setBoolean(HadoopKeys.FS_HDFS_IMPL_DISABLE_CACHE, true);
    FileSystem filesys = FileSystem.get(conf);
    URI fsURI = filesys.getUri();
    if (!(filesys instanceof DistributedFileSystem)) {
        throw new ExitMainException(-1, "Filesystem is not HDFS " + fsURI);
    }
    return (DistributedFileSystem) filesys;
}

From source file:org.apache.blur.mapreduce.lib.BlurInputFormatTest.java

License:Apache License

private void runTest(String tableName, boolean disableFast, Path fileCache)
        throws IOException, BlurException, TException, InterruptedException, ClassNotFoundException {
    FileSystem fileSystem = miniCluster.getFileSystem();
    Path root = new Path(fileSystem.getUri() + "/");

    creatTable(tableName, new Path(root, "tables"), disableFast);
    loadTable(tableName, 100, 100);

    Iface client = getClient();

    TableDescriptor tableDescriptor = client.describe(tableName);

    Job job = Job.getInstance(conf, "Read Data");
    job.setJarByClass(BlurInputFormatTest.class);
    job.setMapperClass(TestMapper.class);
    job.setInputFormatClass(BlurInputFormat.class);
    job.setOutputFormatClass(SequenceFileOutputFormat.class);
    job.setNumReduceTasks(0);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(TableBlurRecord.class);

    Path output = new Path(new Path(root, "output"), tableName);

    String snapshot = UUID.randomUUID().toString();
    client.createSnapshot(tableName, snapshot);

    if (fileCache != null) {
        BlurInputFormat.setLocalCachePath(job, fileCache);
    }

    BlurInputFormat.setMaxNumberOfMaps(job, 1);
    BlurInputFormat.setZooKeeperConnectionStr(job, miniCluster.getZkConnectionString());
    BlurInputFormat.addTable(job, tableDescriptor, snapshot);
    FileOutputFormat.setOutputPath(job, output);

    try {
        assertTrue(job.waitForCompletion(true));
        Counters counters = job.getCounters();
        assertMapTask(1, counters);

    } finally {
        client.removeSnapshot(tableName, snapshot);
    }

    final Map<Text, TableBlurRecord> results = new TreeMap<Text, TableBlurRecord>();
    walkOutput(output, conf, new ResultReader() {
        @Override
        public void read(Text rowId, TableBlurRecord tableBlurRecord) {
            results.put(new Text(rowId), new TableBlurRecord(tableBlurRecord));
        }
    });
    int rowId = 100;
    for (Entry<Text, TableBlurRecord> e : results.entrySet()) {
        Text r = e.getKey();
        assertEquals(new Text("row-" + rowId), r);
        BlurRecord blurRecord = new BlurRecord();
        blurRecord.setRowId("row-" + rowId);
        blurRecord.setRecordId("record-" + rowId);
        blurRecord.setFamily("fam0");
        blurRecord.addColumn("col0", "value-" + rowId);
        TableBlurRecord tableBlurRecord = new TableBlurRecord(new Text(tableName), blurRecord);
        assertEquals(tableBlurRecord, e.getValue());

        rowId++;
    }
    assertEquals(200, rowId);
}

From source file:org.apache.blur.mapreduce.lib.BlurMapReduceUtil.java

License:Apache License

/**
 * Adds all the jars in the same path as the blur jar files.
 *
 * @param conf
 * @throws IOException
 */
public static void addAllJarsInBlurLib(Configuration conf) throws IOException {
    FileSystem localFs = FileSystem.getLocal(conf);
    Set<String> jars = new HashSet<String>();
    jars.addAll(conf.getStringCollection("tmpjars"));

    String property = System.getProperty("java.class.path");
    String[] files = property.split("\\:");

    String blurLibPath = getPath("blur-", files);
    if (blurLibPath == null) {
        return;
    }
    List<String> pathes = getPathes(blurLibPath, files);
    for (String pathStr : pathes) {
        Path path = new Path(pathStr);
        if (!localFs.exists(path)) {
            LOG.warn("Could not validate jar file " + path);
            continue;
        }
        jars.add(path.makeQualified(localFs.getUri(), localFs.getWorkingDirectory()).toString());
    }
    if (jars.isEmpty()) {
        return;
    }
    conf.set("tmpjars", StringUtils.arrayToString(jars.toArray(new String[0])));
}

From source file:org.apache.blur.mapreduce.lib.BlurMapReduceUtil.java

License:Apache License

/**
 * Add the jars containing the given classes to the job's configuration such
 * that JobClient will ship them to the cluster and add them to the
 * DistributedCache.
 */
public static void addDependencyJars(Configuration conf, Class<?>... classes) throws IOException {
    FileSystem localFs = FileSystem.getLocal(conf);
    Set<String> jars = new HashSet<String>();
    // Add jars that are already in the tmpjars variable
    jars.addAll(conf.getStringCollection("tmpjars"));

    // Add jars containing the specified classes
    for (Class<?> clazz : classes) {
        if (clazz == null) {
            continue;
        }

        String pathStr = findOrCreateJar(clazz);
        if (pathStr == null) {
            LOG.warn("Could not find jar for class " + clazz + " in order to ship it to the cluster.");
            continue;
        }
        Path path = new Path(pathStr);
        if (!localFs.exists(path)) {
            LOG.warn("Could not validate jar file " + path + " for class " + clazz);
            continue;
        }
        jars.add(path.makeQualified(localFs.getUri(), localFs.getWorkingDirectory()).toString());
    }
    if (jars.isEmpty()) {
        return;
    }

    conf.set("tmpjars", StringUtils.arrayToString(jars.toArray(new String[0])));
}

From source file:org.apache.blur.mapreduce.lib.CsvBlurMapper.java

License:Apache License

@Override
protected void setup(Context context) throws IOException, InterruptedException {
    super.setup(context);
    Configuration configuration = context.getConfiguration();
    _autoGenerateRecordIdAsHashOfData = isAutoGenerateRecordIdAsHashOfData(configuration);
    _autoGenerateRowIdAsHashOfData = isAutoGenerateRowIdAsHashOfData(configuration);
    if (_autoGenerateRecordIdAsHashOfData || _autoGenerateRowIdAsHashOfData) {
        try {
            _digest = MessageDigest.getInstance("MD5");
        } catch (NoSuchAlgorithmException e) {
            throw new IOException(e);
        }
    }
    _columnNameMap = getFamilyAndColumnNameMap(configuration);
    _separator = new String(Base64.decodeBase64(configuration.get(BLUR_CSV_SEPARATOR_BASE64, _separator)),
            UTF_8);
    _splitter = Splitter.on(_separator);
    Path fileCurrentlyProcessing = getCurrentFile(context);
    Collection<String> families = configuration.getStringCollection(BLUR_CSV_FAMILY_PATH_MAPPINGS_FAMILIES);
    OUTER: for (String family : families) {
        Collection<String> pathStrCollection = configuration
                .getStringCollection(BLUR_CSV_FAMILY_PATH_MAPPINGS_FAMILY_PREFIX + family);
        for (String pathStr : pathStrCollection) {
            Path path = new Path(pathStr);
            FileSystem fileSystem = path.getFileSystem(configuration);
            path = path.makeQualified(fileSystem.getUri(), fileSystem.getWorkingDirectory());
            if (isParent(path, fileCurrentlyProcessing)) {
                _familyFromPath = family;
                _familyNotInFile = true;
                break OUTER;
            }
        }
    }
}

From source file:org.apache.blur.mapreduce.lib.CsvBlurMapper.java

License:Apache License

protected Path getCurrentFile(Context context) throws IOException {
    InputSplit split = context.getInputSplit();
    if (split != null && split instanceof FileSplit) {
        FileSplit inputSplit = (FileSplit) split;
        Path path = inputSplit.getPath();
        FileSystem fileSystem = path.getFileSystem(context.getConfiguration());
        return path.makeQualified(fileSystem.getUri(), fileSystem.getWorkingDirectory());
    }
    return null;
}

From source file:org.apache.blur.mapreduce.lib.update.DriverTest.java

License:Apache License

@Test
public void testDriverAddSingleRowWithSingleRecord() throws Exception {
    FileSystem fileSystem = miniCluster.getFileSystem();
    Path root = new Path(fileSystem.getUri() + "/");

    String tableName = "testDriverAddSingleRowWithSingleRecord";
    creatTable(tableName, new Path(root, "tables"), true);

    Driver driver = new Driver();
    driver.setConf(conf);

    String mrIncWorkingPathStr = new Path(root, "working").toString();
    generateData(mrIncWorkingPathStr);
    String outputPathStr = new Path(root, "output").toString();
    String blurZkConnection = miniCluster.getZkConnectionString();

    assertEquals(0,
            driver.run(new String[] { tableName, mrIncWorkingPathStr, outputPathStr, blurZkConnection, "1" }));

    Iface client = getClient();
    client.loadData(tableName, outputPathStr);

    waitUntilAllImportsAreCompleted(client, tableName);

    TableStats tableStats = client.tableStats(tableName);
    assertEquals(1, tableStats.getRowCount());
    assertEquals(1, tableStats.getRecordCount());
}