Example usage for org.apache.hadoop.fs FileSystem getConf

Introduction

This page lists example usages of org.apache.hadoop.fs.FileSystem.getConf(), collected from open source projects.

Prototype

@Override
public Configuration getConf()

Usage
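
Before the project examples, here is a minimal, self-contained sketch (not taken from the sources below; the class name and the printed property are only illustrative) showing what getConf() hands back: the Configuration the FileSystem instance was initialized with.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class GetConfExample {
    public static void main(String[] args) throws Exception {
        // Build a FileSystem from a fresh Configuration (loads core-site.xml and friends)
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // getConf() returns the Configuration this FileSystem was initialized with;
        // the examples below pass it on to MapFile, SequenceFile, and Avro readers/writers.
        Configuration fsConf = fs.getConf();
        System.out.println(fsConf.get("fs.defaultFS"));

        fs.close();
    }
}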

From source file: org.apache.accumulo.tserver.log.SortedLogRecoveryTest.java

License: Apache License

private static List<Mutation> recover(Map<String, KeyValue[]> logs, Set<String> files, KeyExtent extent)
        throws IOException {
    TemporaryFolder root = new TemporaryFolder(new File(System.getProperty("user.dir") + "/target"));
    root.create();
    final String workdir = root.getRoot().getAbsolutePath() + "/workdir";
    VolumeManager fs = VolumeManagerImpl.getLocal(workdir);
    final Path workdirPath = new Path("file://" + workdir);
    fs.deleteRecursively(workdirPath);
    ArrayList<Path> dirs = new ArrayList<>();
    try {
        for (Entry<String, KeyValue[]> entry : logs.entrySet()) {
            String path = workdir + "/" + entry.getKey();
            FileSystem ns = fs.getVolumeByPath(new Path(path)).getFileSystem();
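            // ns.getConf() exposes the Configuration backing this volume's FileSystem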
            @SuppressWarnings("deprecation")
            Writer map = new MapFile.Writer(ns.getConf(), ns, path + "/log1", LogFileKey.class,
                    LogFileValue.class);
            for (KeyValue lfe : entry.getValue()) {
                map.append(lfe.key, lfe.value);
            }
            map.close();
            ns.create(SortedLogState.getFinishedMarkerPath(path)).close();
            dirs.add(new Path(path));
        }
        // Recover
        SortedLogRecovery recovery = new SortedLogRecovery(fs);
        CaptureMutations capture = new CaptureMutations();
        recovery.recover(extent, dirs, files, capture);
        return capture.result;
    } finally {
        root.delete();
    }
}

From source file: org.apache.accumulo.tserver.tablet.Compactor.java

License: Apache License

@Override
public CompactionStats call() throws IOException, CompactionCanceledException {

    FileSKVWriter mfw = null;

    CompactionStats majCStats = new CompactionStats();

    boolean remove = runningCompactions.add(this);

    clearStats();

    final Path outputFilePath = outputFile.path();
    final String outputFilePathName = outputFilePath.toString();
    String oldThreadName = Thread.currentThread().getName();
    String newThreadName = "MajC compacting " + extent.toString() + " started "
            + dateFormatter.format(new Date()) + " file: " + outputFile;
    Thread.currentThread().setName(newThreadName);
    thread = Thread.currentThread();
    try {
        FileOperations fileFactory = FileOperations.getInstance();
        FileSystem ns = this.fs.getVolumeByPath(outputFilePath).getFileSystem();
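        // hand the writer builder the namespace FileSystem along with its own Configuration via ns.getConf()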
        mfw = fileFactory.newWriterBuilder().forFile(outputFilePathName, ns, ns.getConf())
                .withTableConfiguration(acuTableConf).withRateLimiter(env.getWriteLimiter()).build();

        Map<String, Set<ByteSequence>> lGroups;
        try {
            lGroups = LocalityGroupUtil.getLocalityGroups(acuTableConf);
        } catch (LocalityGroupConfigurationError e) {
            throw new IOException(e);
        }

        long t1 = System.currentTimeMillis();

        HashSet<ByteSequence> allColumnFamilies = new HashSet<>();

        if (mfw.supportsLocalityGroups()) {
            for (Entry<String, Set<ByteSequence>> entry : lGroups.entrySet()) {
                setLocalityGroup(entry.getKey());
                compactLocalityGroup(entry.getKey(), entry.getValue(), true, mfw, majCStats);
                allColumnFamilies.addAll(entry.getValue());
            }
        }

        setLocalityGroup("");
        compactLocalityGroup(null, allColumnFamilies, false, mfw, majCStats);

        long t2 = System.currentTimeMillis();

        FileSKVWriter mfwTmp = mfw;
        mfw = null; // set this to null so we do not try to close it again in finally if the close fails
        try {
            mfwTmp.close(); // if the close fails it will cause the compaction to fail
        } catch (IOException ex) {
            if (!fs.deleteRecursively(outputFile.path())) {
                if (fs.exists(outputFile.path())) {
                    log.error("Unable to delete " + outputFile);
                }
            }
            throw ex;
        }

        log.debug(String.format(
                "Compaction %s %,d read | %,d written | %,6d entries/sec | %,6.3f secs | %,12d bytes | %9.3f byte/sec",
                extent, majCStats.getEntriesRead(), majCStats.getEntriesWritten(),
                (int) (majCStats.getEntriesRead() / ((t2 - t1) / 1000.0)), (t2 - t1) / 1000.0,
                mfwTmp.getLength(), mfwTmp.getLength() / ((t2 - t1) / 1000.0)));

        majCStats.setFileSize(mfwTmp.getLength());
        return majCStats;
    } catch (IOException e) {
        log.error("{}", e.getMessage(), e);
        throw e;
    } catch (RuntimeException e) {
        log.error("{}", e.getMessage(), e);
        throw e;
    } finally {
        Thread.currentThread().setName(oldThreadName);
        if (remove) {
            thread = null;
            runningCompactions.remove(this);
        }

        try {
            if (mfw != null) {
                // compaction must not have finished successfully, so close its output file
                try {
                    mfw.close();
                } finally {
                    if (!fs.deleteRecursively(outputFile.path()))
                        if (fs.exists(outputFile.path()))
                            log.error("Unable to delete " + outputFile);
                }
            }
        } catch (IOException e) {
            log.warn("{}", e.getMessage(), e);
        } catch (RuntimeException exception) {
            log.warn("{}", exception.getMessage(), exception);
        }
    }
}

From source file: org.apache.accumulo.tserver.tablet.Compactor.java

License: Apache License

private List<SortedKeyValueIterator<Key, Value>> openMapDataFiles(String lgName,
        ArrayList<FileSKVIterator> readers) throws IOException {

    List<SortedKeyValueIterator<Key, Value>> iters = new ArrayList<>(filesToCompact.size());

    for (FileRef mapFile : filesToCompact.keySet()) {
        try {

            FileOperations fileFactory = FileOperations.getInstance();
            FileSystem fs = this.fs.getVolumeByPath(mapFile.path()).getFileSystem();
            FileSKVIterator reader;

            reader = fileFactory.newReaderBuilder().forFile(mapFile.path().toString(), fs, fs.getConf())
                    .withTableConfiguration(acuTableConf).withRateLimiter(env.getReadLimiter()).build();

            readers.add(reader);

            SortedKeyValueIterator<Key, Value> iter = new ProblemReportingIterator(context, extent.getTableId(),
                    mapFile.path().toString(), false, reader);

            if (filesToCompact.get(mapFile).isTimeSet()) {
                iter = new TimeSettingIterator(iter, filesToCompact.get(mapFile).getTime());
            }

            iters.add(iter);

        } catch (Throwable e) {

            ProblemReports.getInstance(context).report(new ProblemReport(extent.getTableId(),
                    ProblemType.FILE_READ, mapFile.path().toString(), e));

            log.warn("Some problem opening map file {} {}", mapFile, e.getMessage(), e);
            // failed to open some map file... close the ones that were opened
            for (FileSKVIterator reader : readers) {
                try {
                    reader.close();
                } catch (Throwable e2) {
                    log.warn("Failed to close map file", e2);
                }
            }

            readers.clear();

            if (e instanceof IOException)
                throw (IOException) e;
            throw new IOException("Failed to open map data files", e);
        }
    }

    return iters;
}

From source file: org.apache.accumulo.tserver.tablet.Tablet.java

License: Apache License

private Map<FileRef, Pair<Key, Key>> getFirstAndLastKeys(SortedMap<FileRef, DataFileValue> allFiles)
        throws IOException {
    Map<FileRef, Pair<Key, Key>> result = new HashMap<>();
    FileOperations fileFactory = FileOperations.getInstance();
    VolumeManager fs = getTabletServer().getFileSystem();
    for (Entry<FileRef, DataFileValue> entry : allFiles.entrySet()) {
        FileRef file = entry.getKey();
        FileSystem ns = fs.getVolumeByPath(file.path()).getFileSystem();
        FileSKVIterator openReader = fileFactory.newReaderBuilder()
                .forFile(file.path().toString(), ns, ns.getConf())
                .withTableConfiguration(this.getTableConfiguration()).seekToBeginning().build();
        try {
            Key first = openReader.getFirstKey();
            Key last = openReader.getLastKey();
            result.put(file, new Pair<>(first, last));
        } finally {
            openReader.close();
        }
    }
    return result;
}

From source file: org.apache.accumulo.tserver.tablet.TabletData.java

License: Apache License

public TabletData(VolumeManager fs, ZooReader rdr, AccumuloConfiguration conf) throws IOException {
    directory = VolumeUtil.switchRootTableVolume(MetadataTableUtil.getRootTabletDir());

    Path location = new Path(directory);

    // cleanupReplacement() has special handling for deleting files
    FileStatus[] files = fs.listStatus(location);
    Collection<String> goodPaths = RootFiles.cleanupReplacement(fs, files, true);
    long rtime = Long.MIN_VALUE;
    for (String good : goodPaths) {
        Path path = new Path(good);
        String filename = path.getName();
        FileRef ref = new FileRef(location.toString() + "/" + filename, path);
        DataFileValue dfv = new DataFileValue(0, 0);
        dataFiles.put(ref, dfv);

        FileSystem ns = fs.getVolumeByPath(path).getFileSystem();
        FileSKVIterator reader = FileOperations.getInstance().newReaderBuilder()
                .forFile(path.toString(), ns, ns.getConf()).withTableConfiguration(conf).seekToBeginning()
                .build();
        long maxTime = -1;
        try {
            while (reader.hasTop()) {
                maxTime = Math.max(maxTime, reader.getTopKey().getTimestamp());
                reader.next();
            }
        } finally {
            reader.close();
        }
        if (maxTime > rtime) {
            time = TabletTime.LOGICAL_TIME_ID + "" + maxTime;
            rtime = maxTime;
        }
    }

    try {
        logEntries = MetadataTableUtil.getLogEntries(null, RootTable.EXTENT);
    } catch (Exception ex) {
        throw new RuntimeException("Unable to read tablet log entries", ex);
    }
}

From source file: org.apache.accumulo.tserver.Tablet.java

License: Apache License

/**
 * yet another constructor - this one allows us to avoid costly lookups into the Metadata table if we already know the files we need - as at split time
 */
private Tablet(final TabletServer tabletServer, final Text location, final KeyExtent extent,
        final TabletResourceManager trm, final Configuration conf, final VolumeManager fs,
        final List<LogEntry> logEntries, final SortedMap<FileRef, DataFileValue> datafiles, String time,
        final TServerInstance lastLocation, Set<FileRef> scanFiles, long initFlushID, long initCompactID)
        throws IOException {
    Path locationPath;
    if (location.find(":") >= 0) {
        locationPath = new Path(location.toString());
    } else {
        locationPath = fs.getFullPath(FileType.TABLE, extent.getTableId().toString() + location.toString());
    }

    locationPath = DirectoryDecommissioner.checkTabletDirectory(tabletServer, fs, extent, locationPath);

    this.location = locationPath;
    this.lastLocation = lastLocation;
    this.tabletDirectory = location.toString();
    this.conf = conf;
    this.acuTableConf = tabletServer.getTableConfiguration(extent);

    this.fs = fs;
    this.extent = extent;
    this.tabletResources = trm;

    this.lastFlushID = initFlushID;
    this.lastCompactID = initCompactID;

    if (extent.isRootTablet()) {
        long rtime = Long.MIN_VALUE;
        for (FileRef ref : datafiles.keySet()) {
            Path path = ref.path();
            FileSystem ns = fs.getFileSystemByPath(path);
            FileSKVIterator reader = FileOperations.getInstance().openReader(path.toString(), true, ns,
                    ns.getConf(), tabletServer.getTableConfiguration(extent));
            long maxTime = -1;
            try {

                while (reader.hasTop()) {
                    maxTime = Math.max(maxTime, reader.getTopKey().getTimestamp());
                    reader.next();
                }

            } finally {
                reader.close();
            }

            if (maxTime > rtime) {
                time = TabletTime.LOGICAL_TIME_ID + "" + maxTime;
                rtime = maxTime;
            }
        }
    }
    if (time == null && datafiles.isEmpty() && extent.equals(RootTable.OLD_EXTENT)) {
        // recovery... old root tablet has no data, so time doesn't matter:
        time = TabletTime.LOGICAL_TIME_ID + "" + Long.MIN_VALUE;
    }

    this.tabletServer = tabletServer;
    this.logId = tabletServer.createLogId(extent);

    this.timer = new TabletStatsKeeper();

    setupDefaultSecurityLabels(extent);

    tabletMemory = new TabletMemory();
    tabletTime = TabletTime.getInstance(time);
    persistedTime = tabletTime.getTime();

    acuTableConf.addObserver(configObserver = new ConfigurationObserver() {

        private void reloadConstraints() {
            constraintChecker.set(new ConstraintChecker(acuTableConf));
        }

        @Override
        public void propertiesChanged() {
            reloadConstraints();

            try {
                setupDefaultSecurityLabels(extent);
            } catch (Exception e) {
                log.error("Failed to reload default security labels for extent: " + extent.toString());
            }
        }

        @Override
        public void propertyChanged(String prop) {
            if (prop.startsWith(Property.TABLE_CONSTRAINT_PREFIX.getKey()))
                reloadConstraints();
            else if (prop.equals(Property.TABLE_DEFAULT_SCANTIME_VISIBILITY.getKey())) {
                try {
                    log.info("Default security labels changed for extent: " + extent.toString());
                    setupDefaultSecurityLabels(extent);
                } catch (Exception e) {
                    log.error("Failed to reload default security labels for extent: " + extent.toString());
                }
            }

        }

        @Override
        public void sessionExpired() {
            log.debug("Session expired, no longer updating per table props...");
        }

    });

    acuTableConf.getNamespaceConfiguration().addObserver(configObserver);

    // Force a load of any per-table properties
    configObserver.propertiesChanged();

    if (!logEntries.isEmpty()) {
        log.info("Starting Write-Ahead Log recovery for " + this.extent);
        final long[] count = new long[2];
        final CommitSession commitSession = tabletMemory.getCommitSession();
        count[1] = Long.MIN_VALUE;
        try {
            Set<String> absPaths = new HashSet<String>();
            for (FileRef ref : datafiles.keySet())
                absPaths.add(ref.path().toString());

            tabletServer.recover(this.tabletServer.getFileSystem(), extent, acuTableConf, logEntries, absPaths,
                    new MutationReceiver() {
                        @Override
                        public void receive(Mutation m) {
                            // LogReader.printMutation(m);
                            Collection<ColumnUpdate> muts = m.getUpdates();
                            for (ColumnUpdate columnUpdate : muts) {
                                if (!columnUpdate.hasTimestamp()) {
                                    // if it is not a user set timestamp, it must have been set
                                    // by the system
                                    count[1] = Math.max(count[1], columnUpdate.getTimestamp());
                                }
                            }
                            tabletMemory.mutate(commitSession, Collections.singletonList(m));
                            count[0]++;
                        }
                    });

            if (count[1] != Long.MIN_VALUE) {
                tabletTime.useMaxTimeFromWALog(count[1]);
            }
            commitSession.updateMaxCommittedTime(tabletTime.getTime());

            if (count[0] == 0) {
                MetadataTableUtil.removeUnusedWALEntries(extent, logEntries, tabletServer.getLock());
                logEntries.clear();
            }

        } catch (Throwable t) {
            if (acuTableConf.getBoolean(Property.TABLE_FAILURES_IGNORE)) {
                log.warn("Error recovering from log files: ", t);
            } else {
                throw new RuntimeException(t);
            }
        }
        // make some closed references that represent the recovered logs
        currentLogs = new HashSet<DfsLogger>();
        for (LogEntry logEntry : logEntries) {
            for (String log : logEntry.logSet) {
                currentLogs.add(new DfsLogger(tabletServer.getServerConfig(), log));
            }
        }

        log.info("Write-Ahead Log recovery complete for " + this.extent + " (" + count[0]
                + " mutations applied, " + tabletMemory.getNumEntries() + " entries created)");
    }

    String contextName = acuTableConf.get(Property.TABLE_CLASSPATH);
    if (contextName != null && !contextName.equals("")) {
        // initialize context classloader, instead of possibly waiting for it to initialize for a scan
        // TODO this could hang, causing other tablets to fail to load - ACCUMULO-1292
        AccumuloVFSClassLoader.getContextManager().getClassLoader(contextName);
    }

    // do this last after tablet is completely setup because it
    // could cause major compaction to start
    datafileManager = new DatafileManager(datafiles);

    computeNumEntries();

    datafileManager.removeFilesAfterScan(scanFiles);

    // look for hints of a failure on the previous tablet server
    if (!logEntries.isEmpty() || needsMajorCompaction(MajorCompactionReason.NORMAL)) {
        // look for any temp files hanging around
        removeOldTemporaryFiles();
    }

    log.log(TLevel.TABLET_HIST, extent + " opened");
}

From source file: org.apache.accumulo.tserver.Tablet.java

License: Apache License

private Map<FileRef, Pair<Key, Key>> getFirstAndLastKeys(SortedMap<FileRef, DataFileValue> allFiles)
        throws IOException {
    Map<FileRef, Pair<Key, Key>> result = new HashMap<FileRef, Pair<Key, Key>>();
    FileOperations fileFactory = FileOperations.getInstance();
    for (Entry<FileRef, DataFileValue> entry : allFiles.entrySet()) {
        FileRef file = entry.getKey();
        FileSystem ns = fs.getFileSystemByPath(file.path());
        FileSKVIterator openReader = fileFactory.openReader(file.path().toString(), true, ns, ns.getConf(),
                this.getTableConfiguration());
        try {
            Key first = openReader.getFirstKey();
            Key last = openReader.getLastKey();
            result.put(file, new Pair<Key, Key>(first, last));
        } finally {
            openReader.close();
        }
    }
    return result;
}

From source file: org.apache.crunch.impl.mem.MemPipeline.java

License: Apache License

@SuppressWarnings({ "rawtypes", "unchecked" })
private void writeSequenceFileFromPTable(final FileSystem fs, final Path path, final PTable table)
        throws IOException {
    final PTableType pType = table.getPTableType();
    final Class<?> keyClass = pType.getConverter().getKeyClass();
    final Class<?> valueClass = pType.getConverter().getValueClass();

    // the SequenceFile.Writer is built with fs.getConf(), reusing the Configuration of the target FileSystem
    final SequenceFile.Writer writer = new SequenceFile.Writer(fs, fs.getConf(), path, keyClass, valueClass);

    for (final Object o : table.materialize()) {
        final Pair<?, ?> p = (Pair) o;
        final Object key = pType.getKeyType().getOutputMapFn().map(p.first());
        final Object value = pType.getValueType().getOutputMapFn().map(p.second());
        writer.append(key, value);
    }

    writer.close();
}

From source file: org.apache.crunch.impl.mem.MemPipeline.java

License: Apache License

private void writeSequenceFileFromPCollection(final FileSystem fs, final Path path,
        final PCollection collection) throws IOException {
    final PType pType = collection.getPType();
    final Converter converter = pType.getConverter();
    final Class valueClass = converter.getValueClass();

    final SequenceFile.Writer writer = new SequenceFile.Writer(fs, fs.getConf(), path, NullWritable.class,
            valueClass);

    for (final Object o : collection.materialize()) {
        final Object value = pType.getOutputMapFn().map(o);
        writer.append(NullWritable.get(), value);
    }

    writer.close();
}

From source file: org.apache.crunch.io.avro.AvroFileReaderFactory.java

License: Apache License

@Override
public Iterator<T> read(FileSystem fs, final Path path) {
    this.mapFn.initialize();
    try {
        FsInput fsi = new FsInput(path, fs.getConf()); // Avro's FsInput takes a Hadoop Configuration, obtained here via fs.getConf()
        final DataFileReader<T> reader = new DataFileReader<T>(fsi, recordReader);
        return new AutoClosingIterator<T>(reader, new UnmodifiableIterator<T>() {
            @Override
            public boolean hasNext() {
                return reader.hasNext();
            }

            @Override
            public T next() {
                return mapFn.map(reader.next());
            }
        });
    } catch (IOException e) {
        LOG.info("Could not read avro file at path: " + path, e);
        return Iterators.emptyIterator();
    }
}