Example usage for org.apache.hadoop.hdfs.server.namenode INode getId

List of usage examples for org.apache.hadoop.hdfs.server.namenode INode getId

Introduction

On this page you can find example usage for org.apache.hadoop.hdfs.server.namenode INode getId.

Prototype

public abstract long getId();

Document

Get the inode id.
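
A minimal sketch of reading the id from a resolved inode, assuming only the documented INode#getId() method; the helper class and method names below are illustrative and not part of the Hadoop/HopsFS API.

import org.apache.hadoop.hdfs.server.namenode.INode;

public class INodeIdExample {
    /** Returns the inode id, or -1 if the inode could not be resolved. */
    public static long idOrMissing(INode inode) {
        return inode == null ? -1L : inode.getId();
    }
}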

Usage

From source file:io.hops.erasure_coding.ErasureCodingManager.java

License:Apache License

private void finalizeEncoding(final String path) {
    LOG.info("Finilizing encoding for " + path);
    try {// w w w.  j  a v a  2 s  .com
        new HopsTransactionalRequestHandler(HDFSOperationType.GET_INODE) {
            private String parityPath;

            @Override
            public void setUp() throws StorageException, IOException {
                super.setUp();
                EncodingStatus status = namesystem.getEncodingStatus(path);
                // TODO How to handle the case that status was not found?
                parityPath = parityFolder + "/" + status.getParityFileName();
            }

            @Override
            public void acquireLock(TransactionLocks locks) throws IOException {
                LockFactory lf = LockFactory.getInstance();
                locks.add(lf.getINodeLock(namesystem.getNameNode(), TransactionLockTypes.INodeLockType.WRITE,
                        TransactionLockTypes.INodeResolveType.PATH, path, parityPath))
                        .add(lf.getEncodingStatusLock(TransactionLockTypes.LockType.WRITE, path));
            }

            @Override
            public Object performTask() throws StorageException, IOException {
                INode sourceInode = namesystem.getINode(path);
                INode parityInode = namesystem.getINode(parityPath);
                // TODO How to make sure that all blocks are available at this very moment?

                if (sourceInode == null) {
                    // TODO The source was deleted. Should probably delete the parity here if existing.
                    return null;
                }

                EncodingStatus encodingStatus = EntityManager.find(EncodingStatus.Finder.ByInodeId,
                        sourceInode.getId());

                if (parityInode == null) {
                    encodingStatus.setStatus(EncodingStatus.Status.ENCODING_FAILED);
                    encodingStatus.setStatusModificationTime(System.currentTimeMillis());
                } else {
                    encodingStatus.setStatus(EncodingStatus.Status.ENCODED);
                    encodingStatus.setStatusModificationTime(System.currentTimeMillis());
                    encodingStatus.setParityInodeId(parityInode.getId());
                    encodingStatus.setParityStatus(EncodingStatus.ParityStatus.HEALTHY);
                    encodingStatus.setParityStatusModificationTime(System.currentTimeMillis());
                }

                EntityManager.update(encodingStatus);
                return null;
            }
        }.handle(this);
    } catch (IOException e) {
        LOG.error(StringUtils.stringifyException(e));
    }
}

From source file:io.hops.erasure_coding.ErasureCodingManager.java

License:Apache License

private void scheduleEncodings() throws IOException {
    LOG.info("Scheuling encodings.");
    final int limit = activeEncodingLimit - activeEncodings;
    if (limit <= 0) {
        return;
    }

    LightWeightRequestHandler findHandler = new LightWeightRequestHandler(
            EncodingStatusOperationType.FIND_REQUESTED_ENCODINGS) {
        @Override
        public Object performTask() throws StorageException, IOException {
            EncodingStatusDataAccess<EncodingStatus> dataAccess = (EncodingStatusDataAccess) HdfsStorageFactory
                    .getDataAccess(EncodingStatusDataAccess.class);
            return dataAccess.findRequestedEncodings(limit);
        }
    };
    Collection<EncodingStatus> requestedEncodings = (Collection<EncodingStatus>) findHandler.handle();
    for (EncodingStatus encodingStatus : requestedEncodings) {
        try {
            LOG.info("Trying to schedule encoding for " + encodingStatus);
            INode iNode = namesystem.findInode(encodingStatus.getInodeId());
            if (iNode == null) {
                LOG.error("findInode returned null for id " + encodingStatus.getInodeId());
                // TODO Should it be marked as deleted?
                continue;
            }
            if (iNode.isUnderConstruction()) {
                // It might still be written to the file
                LOG.info("Still under construction. Encoding not scheduled for " + iNode.getId());
                continue;
            }

            String path = namesystem.getPath(iNode.getId());
            if (path == null) {
                continue;
            }

            LOG.info("Schedule encoding for " + path);
            UUID parityFileName = UUID.randomUUID();
            encodingManager.encodeFile(encodingStatus.getEncodingPolicy(), new Path(path),
                    new Path(parityFolder + "/" + parityFileName.toString()));
            namesystem.updateEncodingStatus(path, EncodingStatus.Status.ENCODING_ACTIVE,
                    parityFileName.toString());
            activeEncodings++;
        } catch (IOException e) {
            LOG.error(StringUtils.stringifyException(e));
        }
    }
}

From source file:io.hops.erasure_coding.ErasureCodingManager.java

License:Apache License

private void checkFixedSource(final String path) throws IOException {
    new HopsTransactionalRequestHandler(HDFSOperationType.CHECK_FIXED_SOURCE) {
        @Override
        public void acquireLock(TransactionLocks locks) throws IOException {
            LockFactory lf = LockFactory.getInstance();
            locks.add(lf.getINodeLock(namesystem.getNameNode(), TransactionLockTypes.INodeLockType.WRITE,
                    TransactionLockTypes.INodeResolveType.PATH, path))
                    .add(lf.getEncodingStatusLock(TransactionLockTypes.LockType.WRITE, path));
        }

        @Override
        public Object performTask() throws IOException {
            INode targetNode = namesystem.getINode(path);
            EncodingStatus status = EntityManager.find(EncodingStatus.Finder.ByInodeId, targetNode.getId());
            if (status.getLostBlocks() == 0) {
                status.setStatus(EncodingStatus.Status.ENCODED);
            } else {
                status.setStatus(EncodingStatus.Status.REPAIR_REQUESTED);
            }
            status.setStatusModificationTime(System.currentTimeMillis());
            EntityManager.update(status);
            return null;
        }
    }.handle();
}

From source file:io.hops.erasure_coding.ErasureCodingManager.java

License:Apache License

private void checkFixedParity(final String path) throws IOException {
    new HopsTransactionalRequestHandler(HDFSOperationType.CHECK_FIXED_PARITY) {
        @Override
        public void acquireLock(TransactionLocks locks) throws IOException {
            LockFactory lf = LockFactory.getInstance();
            locks.add(lf.getINodeLock(namesystem.getNameNode(), TransactionLockTypes.INodeLockType.WRITE,
                    TransactionLockTypes.INodeResolveType.PATH, path))
                    .add(lf.getEncodingStatusLock(TransactionLockTypes.LockType.WRITE, path));
        }

        @Override
        public Object performTask() throws IOException {
            INode targetNode = namesystem.getINode(path);
            EncodingStatus status = EntityManager.find(EncodingStatus.Finder.ByParityInodeId,
                    targetNode.getId());
            if (status.getLostParityBlocks() == 0) {
                status.setParityStatus(EncodingStatus.ParityStatus.HEALTHY);
            } else {
                status.setParityStatus(EncodingStatus.ParityStatus.REPAIR_REQUESTED);
            }
            status.setParityStatusModificationTime(System.currentTimeMillis());
            EntityManager.update(status);
            return null;
        }
    }.handle();
}

From source file:io.hops.resolvingcache.InMemoryCache.java

License:Apache License

@Override
protected void setInternal(String path, List<INode> inodes) {
    for (INode iNode : inodes) {
        cache.put(iNode.nameParentKey(), iNode.getId());
    }
}

From source file:io.hops.resolvingcache.INodeMemcache.java

License:Apache License

static void setInternal(MemcachedClient mc, String KEY_PREFIX, int KEY_EXPIRY, INode inode) {
    mc.set(getKey(KEY_PREFIX, inode), KEY_EXPIRY, inode.getId());
}

From source file:io.hops.TestUtil.java

License:Apache License

/**
 * Get the inodeId for a file.
 *
 * @param nameNode the NameNode
 * @param path the path to the file
 * @return the inodeId
 * @throws IOException
 */
public static int getINodeId(final NameNode nameNode, final Path path) throws IOException {
    final String filePath = path.toUri().getPath();
    return (Integer) new HopsTransactionalRequestHandler(HDFSOperationType.TEST) {
        @Override
        public void acquireLock(TransactionLocks locks) throws IOException {
            LockFactory lf = LockFactory.getInstance();
            locks.add(lf.getINodeLock(nameNode, TransactionLockTypes.INodeLockType.READ_COMMITTED,
                    TransactionLockTypes.INodeResolveType.PATH, filePath));
        }

        @Override
        public Object performTask() throws IOException {
            INode targetNode = nameNode.getNamesystem().getINode(filePath);
            return targetNode.getId();
        }
    }.handle();
}

From source file:io.hops.transaction.context.INodeContext.java

License:Apache License

@Override
public void remove(INode iNode) throws TransactionContextException {
    super.remove(iNode);
    inodesNameParentIndex.remove(iNode.nameParentKey());
    log("removed-inode", "id", iNode.getId(), "name", iNode.getLocalName());
}

From source file:io.hops.transaction.context.INodeContext.java

License:Apache License

@Override
public void update(INode iNode) throws TransactionContextException {
    super.update(iNode);
    inodesNameParentIndex.put(iNode.nameParentKey(), iNode);
    log("updated-inode", "id", iNode.getId(), "name", iNode.getLocalName());
}

From source file:io.hops.transaction.context.INodeContext.java

License:Apache License

@Override
public void prepare(TransactionLocks lks) throws TransactionContextException, StorageException {

    // If the lists are not empty, check the lock types. The lock type is
    // checked only after the list length, because sometimes the acquireLock
    // function in the tx handler is empty; in that case the transaction
    // lock manager would throw a NullPointerException.
    Collection<INode> removed = getRemoved();
    Collection<INode> added = new ArrayList<INode>(getAdded());
    added.addAll(renamedInodes);
    Collection<INode> modified = getModified();

    if (lks.containsLock(Lock.Type.INode)) {
        BaseINodeLock hlk = (BaseINodeLock) lks.getLock(Lock.Type.INode);
        if (!removed.isEmpty()) {
            for (INode inode : removed) {
                TransactionLockTypes.INodeLockType lock = hlk.getLockedINodeLockType(inode);
                if (lock != null && lock != TransactionLockTypes.INodeLockType.WRITE
                        && lock != TransactionLockTypes.INodeLockType.WRITE_ON_TARGET_AND_PARENT) {
                    throw new LockUpgradeException(
                            "Trying to remove inode id=" + inode.getId() + " acquired lock was " + lock);
                }
            }
        }

        if (!modified.isEmpty()) {
            for (INode inode : modified) {
                TransactionLockTypes.INodeLockType lock = hlk.getLockedINodeLockType(inode);
                if (lock != null && lock != TransactionLockTypes.INodeLockType.WRITE
                        && lock != TransactionLockTypes.INodeLockType.WRITE_ON_TARGET_AND_PARENT) {
                    throw new LockUpgradeException(
                            "Trying to update inode id=" + inode.getId() + " acquired lock was " + lock);
                }
            }
        }
    }

    dataAccess.prepare(removed, added, modified);
}