Example usage for org.apache.hadoop.fs Path equals

List of usage examples for org.apache.hadoop.fs Path equals

Introduction

This page collects example usages of org.apache.hadoop.fs.Path.equals from open source projects.

Prototype

@Override
public boolean equals(Object o)
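
Hadoop's Path wraps a java.net.URI, and equals compares that URI, so two paths are equal only when their scheme, authority, and path components all match. Below is a minimal sketch of the behavior (expected results are noted in comments; normalization details may vary slightly between Hadoop versions):

import org.apache.hadoop.fs.Path;

public class PathEqualsDemo {
    public static void main(String[] args) {
        // Identical path strings compare equal.
        System.out.println(new Path("/user/data").equals(new Path("/user/data")));   // true

        // equals compares the full URI, so a scheme-qualified path is not equal
        // to the same path without scheme and authority.
        System.out.println(new Path("hdfs://nn:8020/user/data").equals(new Path("/user/data"))); // false

        // Path normalizes trailing slashes when parsing, so these compare equal.
        System.out.println(new Path("/user/data/").equals(new Path("/user/data")));  // true
    }
}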


Usage

From source file: org.apache.accumulo.server.init.Initialize.java

License: Apache License

private static void addVolumes(VolumeManager fs) throws IOException {

    String[] volumeURIs = VolumeConfiguration.getVolumeUris(SiteConfiguration.getInstance());

    HashSet<String> initializedDirs = new HashSet<>();
    initializedDirs.addAll(Arrays.asList(ServerConstants.checkBaseUris(volumeURIs, true)));

    HashSet<String> uninitializedDirs = new HashSet<>();
    uninitializedDirs.addAll(Arrays.asList(volumeURIs));
    uninitializedDirs.removeAll(initializedDirs);

    Path aBasePath = new Path(initializedDirs.iterator().next());
    Path iidPath = new Path(aBasePath, ServerConstants.INSTANCE_ID_DIR);
    Path versionPath = new Path(aBasePath, ServerConstants.VERSION_DIR);

    UUID uuid = UUID.fromString(ZooUtil.getInstanceIDFromHdfs(iidPath, SiteConfiguration.getInstance()));
    for (Pair<Path, Path> replacementVolume : ServerConstants.getVolumeReplacements()) {
        if (aBasePath.equals(replacementVolume.getFirst()))
            log.error(aBasePath + " is set to be replaced in " + Property.INSTANCE_VOLUMES_REPLACEMENTS
                    + " and should not appear in " + Property.INSTANCE_VOLUMES
                    + ". It is highly recommended that this property be removed as data could still be written to this volume.");
    }

    if (ServerConstants.DATA_VERSION != Accumulo.getAccumuloPersistentVersion(
            versionPath.getFileSystem(CachedConfiguration.getInstance()), versionPath)) {
        throw new IOException("Accumulo " + Constants.VERSION + " cannot initialize data version "
                + Accumulo.getAccumuloPersistentVersion(fs));
    }

    initDirs(fs, uuid, uninitializedDirs.toArray(new String[uninitializedDirs.size()]), true);
}

From source file: org.apache.accumulo.server.master.recovery.HadoopLogCloser.java

License: Apache License

@Override
public long close(AccumuloConfiguration conf, VolumeManager fs, Path source) throws IOException {
    FileSystem ns = fs.getVolumeByPath(source).getFileSystem();

    // if the path points into viewfs, resolve it to the underlying filesystem
    if (ViewFSUtils.isViewFS(ns)) {
        Path newSource = ns.resolvePath(source);
        if (!newSource.equals(source) && newSource.toUri().getScheme() != null) {
            ns = newSource.getFileSystem(CachedConfiguration.getInstance());
            source = newSource;
        }
    }

    if (ns instanceof DistributedFileSystem) {
        DistributedFileSystem dfs = (DistributedFileSystem) ns;
        try {
            if (!dfs.recoverLease(source)) {
                log.info("Waiting for file to be closed " + source.toString());
                return conf.getTimeInMillis(Property.MASTER_LEASE_RECOVERY_WAITING_PERIOD);
            }
            log.info("Recovered lease on " + source.toString());
        } catch (FileNotFoundException ex) {
            throw ex;
        } catch (Exception ex) {
            log.warn("Error recovering lease on " + source.toString(), ex);
            ns.append(source).close();
            log.info("Recovered lease on " + source.toString() + " using append");
        }
    } else if (ns instanceof LocalFileSystem || ns instanceof RawLocalFileSystem) {
        // ignore
    } else {
        throw new IllegalStateException("Don't know how to recover a lease for " + ns.getClass().getName());
    }
    return 0;
}
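
The resolvePath/equals pattern above generalizes beyond log closing: resolving a viewfs path returns a path on the backing filesystem, and an equals check detects whether the mount actually redirected anywhere. A minimal sketch under assumed names (conf here is an ordinary Hadoop Configuration):

FileSystem fs = source.getFileSystem(conf);
// resolvePath maps a viewfs:// path onto the scheme of its backing filesystem.
Path resolved = fs.resolvePath(source);
if (!resolved.equals(source)) {
    // The mount resolved to a different URI; reopen against the filesystem it lives on.
    fs = resolved.getFileSystem(conf);
    source = resolved;
}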

From source file: org.apache.accumulo.tserver.tablet.DatafileManager.java

License: Apache License

public void importMapFiles(long tid, Map<FileRef, DataFileValue> pathsString, boolean setTime)
        throws IOException {

    String bulkDir = null;

    Map<FileRef, DataFileValue> paths = new HashMap<>();
    for (Entry<FileRef, DataFileValue> entry : pathsString.entrySet())
        paths.put(entry.getKey(), entry.getValue());

    for (FileRef tpath : paths.keySet()) {

        boolean inTheRightDirectory = false;
        Path parent = tpath.path().getParent().getParent();
        for (String tablesDir : ServerConstants.getTablesDirs()) {
            if (parent.equals(new Path(tablesDir, tablet.getExtent().getTableId()))) {
                inTheRightDirectory = true;
                break;
            }
        }
        if (!inTheRightDirectory) {
            throw new IOException("Data file " + tpath + " not in table dirs");
        }

        if (bulkDir == null)
            bulkDir = tpath.path().getParent().toString();
        else if (!bulkDir.equals(tpath.path().getParent().toString()))
            throw new IllegalArgumentException("bulk files in different dirs " + bulkDir + " " + tpath);

    }

    if (tablet.getExtent().isMeta()) {
        throw new IllegalArgumentException("Can not import files to a metadata tablet");
    }

    synchronized (bulkFileImportLock) {

        if (paths.size() > 0) {
            long bulkTime = Long.MIN_VALUE;
            if (setTime) {
                for (DataFileValue dfv : paths.values()) {
                    long nextTime = tablet.getAndUpdateTime();
                    if (nextTime < bulkTime)
                        throw new IllegalStateException(
                                "Time went backwards unexpectedly " + nextTime + " " + bulkTime);
                    bulkTime = nextTime;
                    dfv.setTime(bulkTime);
                }
            }

            tablet.updatePersistedTime(bulkTime, paths, tid);
        }
    }

    synchronized (tablet) {
        for (Entry<FileRef, DataFileValue> tpath : paths.entrySet()) {
            if (datafileSizes.containsKey(tpath.getKey())) {
                log.error("Adding file that is already in set " + tpath.getKey());
            }
            datafileSizes.put(tpath.getKey(), tpath.getValue());

        }

        tablet.getTabletResources().importedMapFiles();

        tablet.computeNumEntries();
    }

    for (Entry<FileRef, DataFileValue> entry : paths.entrySet()) {
        log.log(TLevel.TABLET_HIST, tablet.getExtent() + " import " + entry.getKey() + " " + entry.getValue());
    }
}

From source file: org.apache.blur.kvs.HdfsKeyValueStore.java

License: Apache License

public void cleanupOldFiles() throws IOException {
    _writeLock.lock();
    try {
        if (!isOpenForWriting()) {
            return;
        }
        SortedSet<FileStatus> fileStatusSet = getSortedSet(_path);
        if (fileStatusSet == null || fileStatusSet.size() < 1) {
            return;
        }
        Path newestGen = fileStatusSet.last().getPath();
        if (!newestGen.equals(_outputPath)) {
            throw new IOException("No longer the owner of [" + _path + "]");
        }
        Set<Path> existingFiles = new HashSet<Path>();
        for (FileStatus fileStatus : fileStatusSet) {
            existingFiles.add(fileStatus.getPath());
        }
        Set<Entry<BytesRef, Value>> entrySet = _pointers.entrySet();
        existingFiles.remove(_outputPath);
        for (Entry<BytesRef, Value> e : entrySet) {
            Path p = e.getValue()._path;
            existingFiles.remove(p);
        }
        for (Path p : existingFiles) {
            LOG.info("Removing file no longer referenced [{0}]", p);
            _fileSystem.delete(p, false);
        }
    } finally {
        _writeLock.unlock();
    }
}

From source file: org.apache.blur.mapreduce.lib.BlurInputFormat.java

License: Apache License

public static Text getTableFromPath(Configuration configuration, Path path) throws IOException {
    for (Entry<String, String> e : configuration) {
        if (e.getKey().startsWith(BLUR_TABLE_PATH_MAPPING)) {
            String k = e.getKey();
            String table = k.substring(BLUR_TABLE_PATH_MAPPING.length());
            String pathStr = e.getValue();
            Path tablePath = new Path(pathStr);
            if (tablePath.equals(path)) {
                return new Text(table);
            }
        }
    }
    throw new IOException("Table name not found for path [" + path + "]");
}
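
For context, here is a hedged sketch of how such a mapping might be populated so that the equals lookup above succeeds. The key prefix BLUR_TABLE_PATH_MAPPING comes from the snippet; the table name and path are illustrative only:

Configuration configuration = new Configuration();
// One entry per table: key = prefix + table name, value = the table's location.
configuration.set(BLUR_TABLE_PATH_MAPPING + "events", "hdfs://nn:8020/blur/tables/events");
// new Path of the same string equals the configured path, so this returns the Text "events".
Text table = getTableFromPath(configuration, new Path("hdfs://nn:8020/blur/tables/events"));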

From source file: org.apache.blur.mapreduce.lib.CsvBlurMapper.java

License: Apache License

protected boolean isParent(Path possibleParent, Path child) {
    if (child == null) {
        return false;
    }
    if (possibleParent.equals(child.getParent())) {
        return true;
    }
    return isParent(possibleParent, child.getParent());
}
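
The recursion walks up the child's ancestry with getParent() until it either matches possibleParent or runs off the root (where getParent() returns null). A brief hypothetical usage:

Path parent = new Path("hdfs://nn:8020/data");
Path child = new Path("hdfs://nn:8020/data/2018/01/part-00000");
isParent(parent, child);   // true: /data is an ancestor of the file
isParent(child, parent);   // false: the recursion bottoms out when getParent() returns null
isParent(parent, parent);  // false: a path does not count as its own parent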

From source file: org.apache.carbondata.core.indexstore.blockletindex.BlockDataMap.java

License: Apache License

private boolean validatePartitionInfo(List<PartitionSpec> partitions) {
    // First get the partitions that are stored inside the datamap.
    String[] fileDetails = getFileDetails();
    // Check for an exact match of the partition information against the stored partitions.
    boolean found = false;
    Path folderPath = new Path(fileDetails[0]);
    for (PartitionSpec spec : partitions) {
        if (folderPath.equals(spec.getLocation()) && isCorrectUUID(fileDetails, spec)) {
            found = true;
            break;
        }
    }
    return found;
}

From source file: org.apache.carbondata.core.indexstore.blockletindex.BlockletDataMap.java

License: Apache License

@Override
public List<Blocklet> prune(FilterResolverIntf filterExp, SegmentProperties segmentProperties,
        List<PartitionSpec> partitions) {
    if (memoryDMStore.getRowCount() == 0) {
        return new ArrayList<>();
    }
    // If this datamap is partitioned but no partition information is stored, the
    // partitions were dropped, so return an empty list.
    if (partitions != null) {
        // First get the partitions that are stored inside the datamap.
        String[] fileDetails = getFileDetails();
        // Check for an exact match of the partition information against the stored partitions.
        boolean found = false;
        Path folderPath = new Path(fileDetails[0]);
        for (PartitionSpec spec : partitions) {
            if (folderPath.equals(spec.getLocation()) && isCorrectUUID(fileDetails, spec)) {
                found = true;
                break;
            }
        }
        if (!found) {
            return new ArrayList<>();
        }
    }
    // Prune with filters if the partitions exist in this datamap.
    // Changed segmentProperties to this.segmentProperties to make sure pruning uses this
    // datamap's own segmentProperties.
    // It's a temporary fix: the interface DataMap.prune(FilterResolverIntf filterExp,
    // SegmentProperties segmentProperties, List<PartitionSpec> partitions) should be corrected.
    return prune(filterExp, this.segmentProperties);
}

From source file: org.apache.carbondata.core.metadata.SegmentFileStore.java

License: Apache License

/**
 * Drops the partition-related files from the segment's segment file and writes the
 * result to a new file. First iterates over the segment file to check which paths
 * need to be dropped, then updates their status to delete when found.
 *
 * @param uniqueId
 * @throws IOException
 */
public void dropPartitions(Segment segment, List<PartitionSpec> partitionSpecs, String uniqueId,
        List<String> toBeDeletedSegments, List<String> toBeUpdatedSegments) throws IOException {
    readSegment(tablePath, segment.getSegmentFileName());
    boolean updateSegment = false;
    for (Map.Entry<String, FolderDetails> entry : segmentFile.getLocationMap().entrySet()) {
        String location = entry.getKey();
        if (entry.getValue().isRelative) {
            location = tablePath + CarbonCommonConstants.FILE_SEPARATOR + location;
        }
        Path path = new Path(location);
        // Update the status to delete if the path matches a partition spec
        if (null != partitionSpecs) {
            for (PartitionSpec spec : partitionSpecs) {
                if (path.equals(spec.getLocation())) {
                    entry.getValue().setStatus(SegmentStatus.MARKED_FOR_DELETE.getMessage());
                    updateSegment = true;
                    break;
                }
            }
        }
    }
    if (updateSegment) {
        String writePath = CarbonTablePath.getSegmentFilesLocation(tablePath);
        writePath = writePath + CarbonCommonConstants.FILE_SEPARATOR
                + SegmentFileStore.genSegmentFileName(segment.getSegmentNo(), String.valueOf(uniqueId))
                + CarbonTablePath.SEGMENT_EXT;
        writeSegmentFile(segmentFile, writePath);
    }
    // Check whether we can completely remove the segment.
    boolean deleteSegment = true;
    for (Map.Entry<String, FolderDetails> entry : segmentFile.getLocationMap().entrySet()) {
        if (entry.getValue().getStatus().equals(SegmentStatus.SUCCESS.getMessage())) {
            deleteSegment = false;
            break;
        }
    }
    if (deleteSegment) {
        toBeDeletedSegments.add(segment.getSegmentNo());
    }
    if (updateSegment) {
        toBeUpdatedSegments.add(segment.getSegmentNo());
    }
}

From source file: org.apache.cassandra.hadoop.fs.CassandraFileSystem.java

License: Apache License

@Override
public FileStatus[] listStatus(Path f) throws IOException {
    Path absolutePath = makeAbsolute(f);
    INode inode = store.retrieveINode(absolutePath);
    if (inode == null) {
        return null;
    }
    if (inode.isFile()) {
        return new FileStatus[] { new CassandraFileStatus(f.makeQualified(this), inode) };
    }
    ArrayList<FileStatus> ret = new ArrayList<FileStatus>();
    for (Path p : store.listSubPaths(absolutePath)) {
        // we shouldn't list ourselves
        if (p.equals(f))
            continue;

        try {
            FileStatus stat = getFileStatus(p.makeQualified(this));

            ret.add(stat);
        } catch (FileNotFoundException e) {
            logger.warn("No file found for: " + p);
        }
    }
    return ret.toArray(new FileStatus[0]);
}