Example usage for org.apache.hadoop.fs Path makeQualified

Introduction

This page collects example usages of org.apache.hadoop.fs.Path.makeQualified from open-source projects.

Prototype

@Deprecated
public Path makeQualified(FileSystem fs) 

Document

Returns a qualified path object for the FileSystem's working directory.
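
A minimal standalone sketch of calling this method, shown together with the non-deprecated makeQualified(URI, Path) overload; the relative path used here is made up for illustration:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MakeQualifiedExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // A relative path: qualification fills in the scheme and authority from
        // the file system's URI and resolves the path against its working directory.
        Path relative = new Path("tmp/example.txt");

        // Deprecated overload documented on this page.
        Path qualified = relative.makeQualified(fs);

        // Non-deprecated equivalent.
        Path alsoQualified = relative.makeQualified(fs.getUri(), fs.getWorkingDirectory());

        System.out.println(qualified);
        System.out.println(alsoQualified);
    }
}

With a default Configuration this qualifies against the local file system (file:///); on a cluster, the scheme and authority come from fs.defaultFS.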

Usage

From source file: org.apache.accumulo.server.tabletserver.Tablet.java

License: Apache License

private static SortedMap<FileRef, DataFileValue> lookupDatafiles(AccumuloConfiguration conf, VolumeManager fs,
        KeyExtent extent, SortedMap<Key, Value> tabletsKeyValues) throws IOException {

    TreeMap<FileRef, DataFileValue> datafiles = new TreeMap<FileRef, DataFileValue>();

    if (extent.isRootTablet()) { // the meta0 tablet
        Path location = new Path(ServerConstants.getRootTabletDir());
        location = location.makeQualified(fs.getDefaultVolume());
        // cleanUpFiles() has special handling for delete. files
        FileStatus[] files = fs.listStatus(location);
        Collection<String> goodPaths = cleanUpFiles(fs, files, true);
        for (String good : goodPaths) {
            Path path = new Path(good);
            String filename = path.getName();
            FileRef ref = new FileRef(location.toString() + "/" + filename, path);
            DataFileValue dfv = new DataFileValue(0, 0);
            datafiles.put(ref, dfv);
        }
    } else {

        Text rowName = extent.getMetadataEntry();

        String tableId = extent.isMeta() ? RootTable.ID : MetadataTable.ID;
        ScannerImpl mdScanner = new ScannerImpl(HdfsZooInstance.getInstance(), SystemCredentials.get(), tableId,
                Authorizations.EMPTY);

        // Commented out because when no data file is present, each tablet will scan through metadata table and return nothing
        // reduced batch size to improve performance
        // changed here after endKeys were implemented from 10 to 1000
        mdScanner.setBatchSize(1000);

        // leave these in, again, now using endKey for safety
        mdScanner.fetchColumnFamily(DataFileColumnFamily.NAME);

        mdScanner.setRange(new Range(rowName));

        for (Entry<Key, Value> entry : mdScanner) {

            if (entry.getKey().compareRow(rowName) != 0) {
                break;
            }

            FileRef ref = new FileRef(entry.getKey().getColumnQualifier().toString(),
                    fs.getFullPath(entry.getKey()));
            datafiles.put(ref, new DataFileValue(entry.getValue().get()));
        }
    }
    return datafiles;
}

From source file: org.apache.accumulo.server.tabletserver.Tablet.java

License: Apache License

/**
 * yet another constructor - this one allows us to avoid costly lookups into the Metadata table if we already know the files we need - as at split time
 */
private Tablet(final TabletServer tabletServer, final Text location, final KeyExtent extent,
        final TabletResourceManager trm, final Configuration conf, final VolumeManager fs,
        final List<LogEntry> logEntries, final SortedMap<FileRef, DataFileValue> datafiles, String time,
        final TServerInstance lastLocation, Set<FileRef> scanFiles, long initFlushID, long initCompactID)
        throws IOException {
    Path locationPath;
    if (location.find(":") >= 0) {
        locationPath = new Path(location.toString());
    } else {
        locationPath = fs.getFullPath(FileType.TABLE, extent.getTableId().toString() + location.toString());
    }
    this.location = locationPath.makeQualified(fs.getFileSystemByPath(locationPath));
    this.lastLocation = lastLocation;
    this.tabletDirectory = location.toString();
    this.conf = conf;
    this.acuTableConf = tabletServer.getTableConfiguration(extent);

    this.fs = fs;
    this.extent = extent;
    this.tabletResources = trm;

    this.lastFlushID = initFlushID;
    this.lastCompactID = initCompactID;

    if (extent.isRootTablet()) {

        long rtime = Long.MIN_VALUE;
        for (FileRef ref : datafiles.keySet()) {
            Path path = ref.path();
            FileSystem ns = fs.getFileSystemByPath(path);
            FileSKVIterator reader = FileOperations.getInstance().openReader(path.toString(), true, ns,
                    ns.getConf(), tabletServer.getTableConfiguration(extent));
            long maxTime = -1;
            try {

                while (reader.hasTop()) {
                    maxTime = Math.max(maxTime, reader.getTopKey().getTimestamp());
                    reader.next();
                }

            } finally {
                reader.close();
            }

            if (maxTime > rtime) {
                time = TabletTime.LOGICAL_TIME_ID + "" + maxTime;
                rtime = maxTime;
            }
        }
    }

    this.tabletServer = tabletServer;
    this.logId = tabletServer.createLogId(extent);

    this.timer = new TabletStatsKeeper();

    setupDefaultSecurityLabels(extent);

    tabletMemory = new TabletMemory();
    tabletTime = TabletTime.getInstance(time);
    persistedTime = tabletTime.getTime();

    acuTableConf.addObserver(configObserver = new ConfigurationObserver() {

        private void reloadConstraints() {
            constraintChecker.set(new ConstraintChecker(getTableConfiguration()));
        }

        @Override
        public void propertiesChanged() {
            reloadConstraints();

            try {
                setupDefaultSecurityLabels(extent);
            } catch (Exception e) {
                log.error("Failed to reload default security labels for extent: " + extent.toString());
            }
        }

        @Override
        public void propertyChanged(String prop) {
            if (prop.startsWith(Property.TABLE_CONSTRAINT_PREFIX.getKey()))
                reloadConstraints();
            else if (prop.equals(Property.TABLE_DEFAULT_SCANTIME_VISIBILITY.getKey())) {
                try {
                    log.info("Default security labels changed for extent: " + extent.toString());
                    setupDefaultSecurityLabels(extent);
                } catch (Exception e) {
                    log.error("Failed to reload default security labels for extent: " + extent.toString());
                }
            }

        }

        @Override
        public void sessionExpired() {
            log.debug("Session expired, no longer updating per table props...");
        }

    });
    // Force a load of any per-table properties
    configObserver.propertiesChanged();

    tabletResources.setTablet(this, acuTableConf);
    if (!logEntries.isEmpty()) {
        log.info("Starting Write-Ahead Log recovery for " + this.extent);
        final long[] count = new long[2];
        final CommitSession commitSession = tabletMemory.getCommitSession();
        count[1] = Long.MIN_VALUE;
        try {
            Set<String> absPaths = new HashSet<String>();
            for (FileRef ref : datafiles.keySet())
                absPaths.add(ref.path().toString());

            tabletServer.recover(this.tabletServer.getFileSystem(), this, logEntries, absPaths,
                    new MutationReceiver() {
                        @Override
                        public void receive(Mutation m) {
                            // LogReader.printMutation(m);
                            Collection<ColumnUpdate> muts = m.getUpdates();
                            for (ColumnUpdate columnUpdate : muts) {
                                if (!columnUpdate.hasTimestamp()) {
                                    // if it is not a user set timestamp, it must have been set
                                    // by the system
                                    count[1] = Math.max(count[1], columnUpdate.getTimestamp());
                                }
                            }
                            tabletMemory.mutate(commitSession, Collections.singletonList(m));
                            count[0]++;
                        }
                    });

            if (count[1] != Long.MIN_VALUE) {
                tabletTime.useMaxTimeFromWALog(count[1]);
            }
            commitSession.updateMaxCommittedTime(tabletTime.getTime());

            tabletMemory.updateMemoryUsageStats();

            if (count[0] == 0) {
                MetadataTableUtil.removeUnusedWALEntries(extent, logEntries, tabletServer.getLock());
                logEntries.clear();
            }

        } catch (Throwable t) {
            if (acuTableConf.getBoolean(Property.TABLE_FAILURES_IGNORE)) {
                log.warn("Error recovering from log files: ", t);
            } else {
                throw new RuntimeException(t);
            }
        }
        // make some closed references that represent the recovered logs
        currentLogs = new HashSet<DfsLogger>();
        for (LogEntry logEntry : logEntries) {
            for (String log : logEntry.logSet) {
                String[] parts = log.split("/", 2);
                Path file = fs.getFullPath(FileType.WAL, parts[1]);
                currentLogs.add(new DfsLogger(tabletServer.getServerConfig(), logEntry.server, file));
            }
        }

        log.info("Write-Ahead Log recovery complete for " + this.extent + " (" + count[0]
                + " mutations applied, " + tabletMemory.getNumEntries() + " entries created)");
    }

    String contextName = acuTableConf.get(Property.TABLE_CLASSPATH);
    if (contextName != null && !contextName.equals("")) {
        // initialize context classloader, instead of possibly waiting for it to initialize for a scan
        // TODO this could hang, causing other tablets to fail to load - ACCUMULO-1292
        AccumuloVFSClassLoader.getContextManager().getClassLoader(contextName);
    }

    // do this last after tablet is completely setup because it
    // could cause major compaction to start
    datafileManager = new DatafileManager(datafiles);

    computeNumEntries();

    datafileManager.removeFilesAfterScan(scanFiles);

    // look for hints of a failure on the previous tablet server
    if (!logEntries.isEmpty() || needsMajorCompaction(MajorCompactionReason.NORMAL)) {
        // look for any temp files hanging around
        removeOldTemporaryFiles();
    }

    log.log(TLevel.TABLET_HIST, extent + " opened ");
}

From source file: org.apache.bigtop.itest.hbase.util.HBaseTestUtil.java

License: Apache License

public static Path getMROutputDir(String testName) throws IOException {
    Path p = new Path(testName + "_" + getTestPrefix());
    return p.makeQualified(getClusterFileSystem());
}

From source file: org.apache.blur.trace.hdfs.HdfsTraceStorageTest.java

License: Apache License

@Before
public void setUp() throws IOException, InterruptedException {
    rmr(TMPDIR);
    LocalFileSystem localFS = FileSystem.getLocal(new Configuration());
    File testDirectory = new File(TMPDIR, "HdfsTraceStorageTest").getAbsoluteFile();
    testDirectory.mkdirs();

    Path directory = new Path(testDirectory.getPath());
    FsPermission dirPermissions = localFS.getFileStatus(directory).getPermission();
    FsAction userAction = dirPermissions.getUserAction();
    FsAction groupAction = dirPermissions.getGroupAction();
    FsAction otherAction = dirPermissions.getOtherAction();

    StringBuilder builder = new StringBuilder();
    builder.append(userAction.ordinal());
    builder.append(groupAction.ordinal());
    builder.append(otherAction.ordinal());
    String dirPermissionNum = builder.toString();
    System.setProperty("dfs.datanode.data.dir.perm", dirPermissionNum);

    configuration = new BlurConfiguration();
    configuration.set(BLUR_HDFS_TRACE_PATH, directory.makeQualified(localFS).toString());
    _storage = new HdfsTraceStorage(configuration);
    _storage.init(new Configuration());
}

From source file: org.apache.cassandra.hadoop.fs.CassandraFileSystem.java

License: Apache License

@Override
public FileStatus[] listStatus(Path f) throws IOException {
    Path absolutePath = makeAbsolute(f);
    INode inode = store.retrieveINode(absolutePath);
    if (inode == null) {
        return null;
    }
    if (inode.isFile()) {
        return new FileStatus[] { new CassandraFileStatus(f.makeQualified(this), inode) };
    }
    ArrayList<FileStatus> ret = new ArrayList<FileStatus>();
    for (Path p : store.listSubPaths(absolutePath)) {
        // we shouldn't list ourselves
        if (p.equals(f))
            continue;

        try {
            FileStatus stat = getFileStatus(p.makeQualified(this));

            ret.add(stat);
        } catch (FileNotFoundException e) {
            logger.warn("No file found for: " + p);
        }
    }
    return ret.toArray(new FileStatus[0]);
}

From source file: org.apache.cassandra.hadoop.fs.CassandraFileSystem.java

License: Apache License

/**
 * FileStatus for Cassandra file systems. {@inheritDoc}
 */
@Override
public FileStatus getFileStatus(Path f) throws IOException {
    INode inode = store.retrieveINode(makeAbsolute(f));
    if (inode == null) {
        throw new FileNotFoundException(f.toString());
    }
    return new CassandraFileStatus(f.makeQualified(this), inode);
}

From source file: org.apache.crunch.io.hbase.HFileTargetIT.java

License: Apache License

private static Path getTempPathOnHDFS(String fileName) throws IOException {
    Configuration conf = HBASE_TEST_UTILITY.getConfiguration();
    FileSystem fs = FileSystem.get(conf);
    Path result = new Path(TEMP_DIR, fileName);
    return result.makeQualified(fs);
}

From source file: org.apache.crunch.util.DistCache.java

License: Apache License

public static Path getPathToCacheFile(Path path, Configuration conf) {
    try {
        for (Path localPath : DistributedCache.getLocalCacheFiles(conf)) {
            if (localPath.toString().endsWith(path.getName())) {
                return localPath.makeQualified(FileSystem.getLocal(conf));
            }
        }
    } catch (IOException e) {
        throw new CrunchRuntimeException(e);
    }
    return null;
}

From source file: org.apache.crunch.util.DistCache.java

License: Apache License

/**
 * Adds the specified jar to the distributed cache of jobs using the provided
 * configuration. The jar will be placed on the classpath of tasks run by the
 * job.
 * 
 * @param conf
 *          The configuration used to add the jar to the distributed cache.
 * @param jarFile
 *          The jar file to add to the distributed cache.
 * @throws IOException
 *           If the jar file does not exist or there is a problem accessing
 *           the file.
 */
public static void addJarToDistributedCache(Configuration conf, File jarFile) throws IOException {
    if (!jarFile.exists()) {
        throw new IOException("Jar file: " + jarFile.getCanonicalPath() + " does not exist.");
    }
    if (!jarFile.getName().endsWith(".jar")) {
        throw new IllegalArgumentException("File: " + jarFile.getCanonicalPath() + " is not a .jar " + "file.");
    }
    // Get a qualified path for the jar.
    FileSystem fileSystem = FileSystem.getLocal(conf);
    Path jarPath = new Path(jarFile.getCanonicalPath());
    String qualifiedPath = jarPath.makeQualified(fileSystem).toString();
    // Add the jar to the configuration variable.
    String jarConfiguration = conf.get(TMPJARS_KEY, "");
    if (!jarConfiguration.isEmpty()) {
        jarConfiguration += ",";
    }
    jarConfiguration += qualifiedPath;
    conf.set(TMPJARS_KEY, jarConfiguration);
}
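
A short usage sketch for the helper above, assuming the caller has a locally built jar to ship; the jar path is hypothetical:

import java.io.File;
import java.io.IOException;

import org.apache.crunch.util.DistCache;
import org.apache.hadoop.conf.Configuration;

public class AddJarToDistCacheExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Hypothetical local jar containing code the job's tasks will need.
        File jarFile = new File("/tmp/my-udfs.jar");
        DistCache.addJarToDistributedCache(conf, jarFile);
        // conf now carries the jar's qualified local path under the TMPJARS_KEY
        // property, so jobs submitted with this configuration see the jar on
        // their tasks' classpath.
    }
}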

From source file: org.apache.hama.bsp.BSPJobClient.java

License: Apache License

protected RunningJob launchJob(BSPJobID jobId, BSPJob job, Path submitJobFile, FileSystem fs)
        throws IOException {
    //
    // Now, actually submit the job (using the submit name)
    //
    JobStatus status = jobSubmitClient.submitJob(jobId, submitJobFile.makeQualified(fs).toString());
    if (status != null) {
        return new NetworkedJob(status);
    } else {
        throw new IOException("Could not launch job");
    }
}