Example usage for org.apache.hadoop.ipc RemoteException getClassName


Introduction

On this page you can find example usage of org.apache.hadoop.ipc RemoteException getClassName.

Prototype

public String getClassName() 
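
The recurring pattern in the examples below is to catch a RemoteException, compare getClassName() against the fully qualified name of the server-side exception you expect, and rethrow (or fail) otherwise. A minimal sketch of that pattern (the FileSystem fs, Path path, and LOG are assumed to exist; SafeModeException merely stands in for whichever exception class your code needs to detect):

try {
    fs.delete(path, true);
} catch (RemoteException re) {
    // getClassName() reports the class of the exception raised on the server side
    if (re.getClassName().equals(SafeModeException.class.getName())) {
        LOG.warn("NameNode is in safe mode, will retry later", re);
    } else {
        throw re;
    }
}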


Usage

From source file:com.mellanox.r4h.TestReadWhileWriting.java

License:Apache License

/** Try opening a file for append. */
private static FSDataOutputStream append(FileSystem fs, Path p) throws Exception {
    for (int i = 0; i < 10; i++) {
        try {
            return fs.append(p);
        } catch (RemoteException re) {
            if (re.getClassName().equals(RecoveryInProgressException.class.getName())) {
                MiniDFSClusterBridge.getAppendTestUtilLOG().info("Will sleep and retry, i=" + i + ", p=" + p,
                        re);
                Thread.sleep(1000);
            } else
                throw re;
        }
    }
    throw new IOException("Cannot append to " + p);
}

From source file:com.renren.dp.xlog.util.RemoteExceptionHandler.java

License:Apache License

/**
 * Converts org.apache.hadoop.ipc.RemoteException into original exception,
 * if possible. If the original exception is an Error or a RuntimeException,
 * throws the original exception.
 *
 * @param re original exception
 * @return decoded RemoteException if it is an instance of or a subclass of
 *         IOException, or the original RemoteException if it cannot be decoded.
 *
 * @throws IOException indicating a server error occurred if the decoded
 *         exception is not an IOException. The decoded exception is set as
 *         the cause.
 * @deprecated Use {@link RemoteException#unwrapRemoteException()} instead.
 * In fact we should look into deprecating this whole class - St.Ack 2010929
 */
public static IOException decodeRemoteException(final RemoteException re) throws IOException {
    IOException i = re;

    try {
        Class<?> c = Class.forName(re.getClassName());

        Class<?>[] parameterTypes = { String.class };
        Constructor<?> ctor = c.getConstructor(parameterTypes);

        Object[] arguments = { re.getMessage() };
        Throwable t = (Throwable) ctor.newInstance(arguments);

        if (t instanceof IOException) {
            i = (IOException) t;

        } else {
            i = new IOException("server error");
            i.initCause(t);
            throw i;
        }

    } catch (ClassNotFoundException x) {
        // continue
    } catch (NoSuchMethodException x) {
        // continue
    } catch (IllegalAccessException x) {
        // continue
    } catch (InvocationTargetException x) {
        // continue
    } catch (InstantiationException x) {
        // continue
    }
    return i;
}
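
As the @deprecated tag above suggests, RemoteException#unwrapRemoteException() performs essentially the same reflective reconstruction. A rough sketch of the replacement (assuming an existing FileSystem fs and Path dir):

try {
    fs.mkdirs(dir);
} catch (RemoteException re) {
    // reconstructs the original server-side exception when possible,
    // otherwise returns the RemoteException itself
    throw re.unwrapRemoteException();
}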

From source file:com.uber.hoodie.common.table.log.avro.AvroLogAppender.java

License:Apache License

public AvroLogAppender(HoodieLogAppendConfig config) throws IOException, InterruptedException {
    FileSystem fs = config.getFs();
    this.config = config;
    this.autoFlush = config.isAutoFlush();
    GenericDatumWriter<IndexedRecord> datumWriter = new GenericDatumWriter<>(config.getSchema());
    this.writer = new DataFileWriter<>(datumWriter);
    Path path = config.getLogFile().getPath();

    if (fs.exists(path)) {
        //TODO - check for log corruption and roll over if needed
        log.info(config.getLogFile() + " exists. Appending to existing file");
        // this log path exists, we will append to it
        fs = FileSystem.get(fs.getConf());
        try {
            this.output = fs.append(path, config.getBufferSize());
        } catch (RemoteException e) {
            // this happens when either another task executor writing to this file died or data node is going down
            if (e.getClassName().equals(AlreadyBeingCreatedException.class.getName())
                    && fs instanceof DistributedFileSystem) {
                log.warn("Trying to recover log on path " + path);
                if (FSUtils.recoverDFSFileLease((DistributedFileSystem) fs, path)) {
                    log.warn("Recovered lease on path " + path);
                    // try again
                    this.output = fs.append(path, config.getBufferSize());
                } else {
                    log.warn("Failed to recover lease on path " + path);
                    throw new HoodieException(e);
                }
            }
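            // note: any other RemoteException is silently swallowed here and 'output' remains null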
        }
        this.writer.appendTo(new AvroFSInput(FileContext.getFileContext(fs.getConf()), path), output);
        // we always want to flush to disk every time an Avro block is written
        this.writer.setFlushOnEveryBlock(true);
    } else {
        log.info(config.getLogFile() + " does not exist. Create a new file");
        this.output = fs.create(path, false, config.getBufferSize(), config.getReplication(),
                config.getBlockSize(), null);
        this.writer.create(config.getSchema(), output);
        this.writer.setFlushOnEveryBlock(true);
        // We need to close the writer to be able to tell the name node that we created this file
        // this.writer.close();
    }
}

From source file:com.uber.hoodie.common.table.log.HoodieLogFormatWriter.java

License:Apache License

private void handleAppendExceptionOrRecoverLease(Path path, RemoteException e)
        throws IOException, InterruptedException {
    if (e.getMessage().contains(APPEND_UNAVAILABLE_EXCEPTION_MESSAGE)) {
        // This issue happens when all replicas for a file are down and/or being decommissioned.
        // The fs.append() API could append to the last block for a file. If the last block is full, a new block is
        // appended to. In a scenario when a lot of DN's are decommissioned, it can happen that DN's holding all
        // replicas for a block/file are decommissioned together. During this process, all these blocks will start to
        // get replicated to other active DataNodes but this process might take time (can be of the order of few
        // hours). During this time, if a fs.append() API is invoked for a file whose last block is eligible to be
        // appended to, then the NN will throw an exception saying that it couldn't find any active replica with the
        // last block. Find more information here : https://issues.apache.org/jira/browse/HDFS-6325
        log.warn("Failed to open an append stream to the log file. Opening a new log file..", e);
        // Rollover the current log file (since cannot get a stream handle) and create new one
        this.logFile = logFile.rollOver(fs, rolloverLogWriteToken);
        createNewFile();
    } else if (e.getClassName().contentEquals(AlreadyBeingCreatedException.class.getName())) {
        log.warn("Another task executor writing to the same log file(" + logFile + ". Rolling over");
        // Rollover the current log file (since cannot get a stream handle) and create new one
        this.logFile = logFile.rollOver(fs, rolloverLogWriteToken);
        createNewFile();
    } else if (e.getClassName().contentEquals(RecoveryInProgressException.class.getName())
            && (fs instanceof DistributedFileSystem)) {
        // this happens when either another task executor writing to this file died or
        // data node is going down. Note that we can only try to recover lease for a DistributedFileSystem.
        // ViewFileSystem unfortunately does not support this operation
        log.warn("Trying to recover log on path " + path);
        if (FSUtils.recoverDFSFileLease((DistributedFileSystem) fs, path)) {
            log.warn("Recovered lease on path " + path);
            // try again
            this.output = fs.append(path, bufferSize);
        } else {
            log.warn("Failed to recover lease on path " + path);
            throw new HoodieException(e);
        }
    } else {
        throw new HoodieIOException("Failed to open an append stream ", e);
    }
}

From source file:common.DataNode.java

License:Apache License

/**
 * Main loop for the DataNode.  Runs until shutdown,
 * forever calling remote NameNode functions.
 */
public void offerService() throws Exception {

    LOG.info("using BLOCKREPORT_INTERVAL of " + blockReportInterval + "msec" + " Initial delay: "
            + initialBlockReportDelay + "msec");

    //
    // Now loop for a long time....
    //
    while (shouldRun) {
        try {
            long startTime = now();

            //
            // Every so often, send heartbeat or block-report
            //

            if (startTime - lastHeartbeat > heartBeatInterval) {
                //
                // All heartbeat messages include following info:
                // -- Datanode name
                // -- data transfer port
                // -- Total capacity
                // -- Bytes remaining
                //
                lastHeartbeat = startTime;
                DatanodeCommand[] cmds = namenode.sendHeartbeat(dnRegistration, data.getCapacity(),
                        data.getDfsUsed(), data.getRemaining(), xmitsInProgress.get(), getXceiverCount());
                myMetrics.heartbeats.inc(now() - startTime);
                //LOG.info("Just sent heartbeat, with name " + localName);
                if (!processCommand(cmds))
                    continue;
            }

            reportReceivedBlocks();

            DatanodeCommand cmd = blockReport();
            processCommand(cmd);

            // start block scanner
            if (blockScanner != null && blockScannerThread == null && upgradeManager.isUpgradeCompleted()) {
                LOG.info("Starting Periodic block scanner.");
                blockScannerThread = new Daemon(blockScanner);
                blockScannerThread.start();
            }

            //
            // There is no work to do; sleep until heartbeat timer elapses,
            // or work arrives, and then iterate again.
            //
            long waitTime = heartBeatInterval - (System.currentTimeMillis() - lastHeartbeat);
            synchronized (receivedBlockList) {
                if (waitTime > 0 && receivedBlockList.size() == 0) {
                    try {
                        receivedBlockList.wait(waitTime);
                    } catch (InterruptedException ie) {
                    }
                }
            } // synchronized
        } catch (RemoteException re) {
            String reClass = re.getClassName();
            if (UnregisteredNodeException.class.getName().equals(reClass)
                    || DisallowedDatanodeException.class.getName().equals(reClass)
                    || IncorrectVersionException.class.getName().equals(reClass)) {
                LOG.warn("DataNode is shutting down: " + StringUtils.stringifyException(re));
                shutdown();
                return;
            }
            LOG.warn(StringUtils.stringifyException(re));
        } catch (IOException e) {
            LOG.warn(StringUtils.stringifyException(e));
        }
    } // while (shouldRun)
}

From source file:de.tiqsolutions.hdfs.HadoopFileSystemProvider.java

License:Apache License

static void rethrowRemoteException(RemoteException e, Path p1, Path p2) throws IOException {
    switch (e.getClassName()) {
    case "org.apache.hadoop.fs.PathIsNotEmptyDirectoryException":
        throw new DirectoryNotEmptyException(p1.toString());

    case "org.apache.hadoop.fs.PathExistsException":
    case "org.apache.hadoop.fs.FileAlreadyExistsException":
        throw new FileAlreadyExistsException(Objects.toString(p1), Objects.toString(p2),
                e.getLocalizedMessage());

    case "org.apache.hadoop.fs.PathPermissionException":
    case "org.apache.hadoop.fs.PathAccessDeniedException":
        throw new AccessDeniedException(Objects.toString(p1), Objects.toString(p2), e.getLocalizedMessage());

    case "org.apache.hadoop.fs.ParentNotDirectoryException":
    case "org.apache.hadoop.fs.DirectoryListingStartAfterNotFoundException":
    case "org.apache.hadoop.fs.PathIsNotDirectoryException":
        throw new NotDirectoryException(Objects.toString(p1));

    case "org.apache.hadoop.fs.PathIsDirectoryException":
    case "org.apache.hadoop.fs.InvalidPathException":
    case "org.apache.hadoop.fs.PathNotFoundException":
        throw new NoSuchFileException(Objects.toString(p1), Objects.toString(p2), e.getLocalizedMessage());

    case "org.apache.hadoop.fs.UnresolvedLinkException":
        throw new NotLinkException(Objects.toString(p1), Objects.toString(p2), e.getLocalizedMessage());

    case "org.apache.hadoop.fs.PathIOException":
    case "org.apache.hadoop.fs.ChecksumException":
    case "org.apache.hadoop.fs.InvalidRequestException":
    case "org.apache.hadoop.fs.UnsupportedFileSystemException":
    case "org.apache.hadoop.fs.ZeroCopyUnavailableException":

    }

    throw new IOException(e.getLocalizedMessage(), e);
}

From source file:io.confluent.connect.hdfs.wal.FSWAL.java

License:Apache License

public void acquireLease() throws ConnectException {
    long sleepIntervalMs = 1000L;
    long MAX_SLEEP_INTERVAL_MS = 16000L;
    while (sleepIntervalMs < MAX_SLEEP_INTERVAL_MS) {
        try {
            if (writer == null) {
                writer = WALFile.createWriter(conf, Writer.file(new Path(logFile)),
                        Writer.appendIfExists(true));
                log.info("Successfully acquired lease for {}", logFile);
            }
            break;
        } catch (RemoteException e) {
            if (e.getClassName().equals(leaseException)) {
                log.info("Cannot acquire lease on WAL {}", logFile);
                try {
                    Thread.sleep(sleepIntervalMs);
                } catch (InterruptedException ie) {
                    throw new ConnectException(ie);
                }
                sleepIntervalMs = sleepIntervalMs * 2;
            } else {
                throw new ConnectException(e);
            }
        } catch (IOException e) {
            throw new ConnectException("Error creating writer for log file " + logFile, e);
        }
    }
    if (sleepIntervalMs >= MAX_SLEEP_INTERVAL_MS) {
        throw new ConnectException("Cannot acquire lease after timeout, will retry.");
    }
}

From source file:mzb.NameNodeConnector.java

License:Apache License

private OutputStream checkAndMarkRunningBalancer() throws IOException {
    try {
        final DataOutputStream out = fs.create(BALANCER_ID_PATH);
        out.writeBytes(InetAddress.getLocalHost().getHostName());
        out.flush();
        return out;
    } catch (RemoteException e) {
        if (AlreadyBeingCreatedException.class.getName().equals(e.getClassName())) {
            return null;
        } else {
            throw e;
        }
    }
}

From source file:org.apache.coheigea.bigdata.hdfs.ranger.HDFSRangerTest.java

License:Apache License

@org.junit.Test
public void readTest() throws Exception {
    FileSystem fileSystem = hdfsCluster.getFileSystem();

    // Write a file - the AccessControlEnforcer won't be invoked as we are the "superuser"
    final Path file = new Path("/tmp/tmpdir/data-file2");
    FSDataOutputStream out = fileSystem.create(file);
    for (int i = 0; i < 1024; ++i) {
        out.write(("data" + i + "\n").getBytes("UTF-8"));
        out.flush();
    }
    out.close();

    // Change permissions to read-only
    fileSystem.setPermission(file, new FsPermission(FsAction.READ, FsAction.NONE, FsAction.NONE));

    // Now try to read the file as "bob" - this should be allowed (by the policy - user)
    UserGroupInformation ugi = UserGroupInformation.createUserForTesting("bob", new String[] {});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            // Read the file
            FSDataInputStream in = fs.open(file);
            ByteArrayOutputStream output = new ByteArrayOutputStream();
            IOUtils.copy(in, output);
            String content = new String(output.toByteArray());
            Assert.assertTrue(content.startsWith("data0"));

            fs.close();
            return null;
        }
    });

    // Now try to read the file as "alice" - this should be allowed (by the policy - group)
    ugi = UserGroupInformation.createUserForTesting("alice", new String[] { "IT" });
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            // Read the file
            FSDataInputStream in = fs.open(file);
            ByteArrayOutputStream output = new ByteArrayOutputStream();
            IOUtils.copy(in, output);
            String content = new String(output.toByteArray());
            Assert.assertTrue(content.startsWith("data0"));

            fs.close();
            return null;
        }
    });

    // Now try to read the file as unknown user "eve" - this should not be allowed
    ugi = UserGroupInformation.createUserForTesting("eve", new String[] {});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            // Read the file
            try {
                fs.open(file);
                Assert.fail("Failure expected on an incorrect permission");
            } catch (RemoteException ex) {
                // expected
                Assert.assertTrue(RangerAccessControlException.class.getName().equals(ex.getClassName()));
            }

            fs.close();
            return null;
        }
    });

    // Now try to read the file as known user "dave" - this should not be allowed, as he doesn't have the correct permissions
    ugi = UserGroupInformation.createUserForTesting("dave", new String[] {});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            // Read the file
            try {
                fs.open(file);
                Assert.fail("Failure expected on an incorrect permission");
            } catch (RemoteException ex) {
                // expected
                Assert.assertTrue(RangerAccessControlException.class.getName().equals(ex.getClassName()));
            }

            fs.close();
            return null;
        }
    });
}

From source file:org.apache.coheigea.bigdata.hdfs.ranger.HDFSRangerTest.java

License:Apache License

@org.junit.Test
public void writeTest() throws Exception {

    FileSystem fileSystem = hdfsCluster.getFileSystem();

    // Write a file - the AccessControlEnforcer won't be invoked as we are the "superuser"
    final Path file = new Path("/tmp/tmpdir2/data-file3");
    FSDataOutputStream out = fileSystem.create(file);
    for (int i = 0; i < 1024; ++i) {
        out.write(("data" + i + "\n").getBytes("UTF-8"));
        out.flush();
    }
    out.close();

    // Now try to write to the file as "bob" - this should be allowed (by the policy - user)
    UserGroupInformation ugi = UserGroupInformation.createUserForTesting("bob", new String[] {});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            // Write to the file
            fs.append(file);

            fs.close();
            return null;
        }
    });

    // Now try to write to the file as "alice" - this should be allowed (by the policy - group)
    ugi = UserGroupInformation.createUserForTesting("alice", new String[] { "IT" });
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            // Write to the file
            fs.append(file);

            fs.close();
            return null;
        }
    });

    // Now try to read the file as unknown user "eve" - this should not be allowed
    ugi = UserGroupInformation.createUserForTesting("eve", new String[] {});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            // Write to the file
            try {
                fs.append(file);
                Assert.fail("Failure expected on an incorrect permission");
            } catch (RemoteException ex) {
                // expected
                Assert.assertTrue(RangerAccessControlException.class.getName().equals(ex.getClassName()));
            }

            fs.close();
            return null;
        }
    });

    // Now try to read the file as known user "dave" - this should not be allowed, as he doesn't have the correct permissions
    ugi = UserGroupInformation.createUserForTesting("dave", new String[] {});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            // Write to the file
            try {
                fs.append(file);
                Assert.fail("Failure expected on an incorrect permission");
            } catch (RemoteException ex) {
                // expected
                Assert.assertTrue(RangerAccessControlException.class.getName().equals(ex.getClassName()));
            }

            fs.close();
            return null;
        }
    });
}