Example usage for org.apache.hadoop.util Daemon Daemon(Runnable)

List of usage examples for org.apache.hadoop.util Daemon Daemon(Runnable)

Introduction

On this page you can find example usages of the org.apache.hadoop.util Daemon(Runnable) constructor.

Prototype

public Daemon(Runnable runnable) 

Document

Construct a daemon thread.
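
Before the collected examples, here is a minimal, self-contained sketch of the basic pattern. The Runnable body below is a hypothetical illustration and is not taken from any of the sources on this page:

import org.apache.hadoop.util.Daemon;

public class DaemonExample {
    public static void main(String[] args) throws InterruptedException {
        // A hypothetical background task, used only for illustration.
        Runnable task = new Runnable() {
            @Override
            public void run() {
                System.out.println("background work running in " + Thread.currentThread().getName());
            }
        };

        // Daemon extends Thread and marks itself as a daemon thread,
        // so it will not keep the JVM alive on its own.
        Daemon daemon = new Daemon(task);
        daemon.start();
        daemon.join();
    }
}

All of the examples below follow the same shape: wrap a Runnable in a Daemon and call start().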

Usage

From source file: com.cloudera.impala.security.PersistedDelegationTokenSecretManager.java

License: Apache License

@Override
public synchronized void startThreads() throws IOException {
    try {
        // updateCurrentKey needs to be called to initialize the master key
        // (there should be a null check added in the future in rollMasterKey)
        // updateCurrentKey();
        Method m = AbstractDelegationTokenSecretManager.class.getDeclaredMethod("updateCurrentKey");
        m.setAccessible(true);
        m.invoke(this);
    } catch (Exception e) {
        throw new IOException("Failed to initialize master key", e);
    }
    running = true;
    tokenRemoverThread = new Daemon(new ExpiredTokenRemover());
    tokenRemoverThread.start();
}

From source file: com.mellanox.r4h.LeaseRenewer.java

License: Apache License

synchronized void put(final long inodeId, final DFSOutputStream out, final DFSClient dfsc) {
    if (dfsc.isClientRunning()) {
        if (!isRunning() || isRenewerExpired()) {
            // start a new daemon with a new id.
            final int id = ++currentId;
            daemon = new Daemon(new Runnable() {
                @Override
                public void run() {
                    try {
                        if (LOG.isDebugEnabled()) {
                            LOG.debug("Lease renewer daemon for " + clientsString() + " with renew id " + id
                                    + " started");
                        }
                        LeaseRenewer.this.run(id);
                    } catch (InterruptedException e) {
                        if (LOG.isDebugEnabled()) {
                            LOG.debug(LeaseRenewer.this.getClass().getSimpleName() + " is interrupted.", e);
                        }
                    } finally {
                        synchronized (LeaseRenewer.this) {
                            Factory.INSTANCE.remove(LeaseRenewer.this);
                        }
                        if (LOG.isDebugEnabled()) {
                            LOG.debug("Lease renewer daemon for " + clientsString() + " with renew id " + id
                                    + " exited");
                        }
                    }
                }

                @Override
                public String toString() {
                    return String.valueOf(LeaseRenewer.this);
                }
            });
            daemon.start();
        }
        dfsc.putFileBeingWritten(inodeId, out);
        emptyTime = Long.MAX_VALUE;
    }
}

From source file: com.mellanox.r4h.R4HDatanodePlugin.java

License: Apache License

public void start(Object service) {
    LOG.debug("Starting plugin");
    if (!(service instanceof DataNode)) {
        throw new IllegalArgumentException("Unexpected service object type");
    }
    LOG.debug("Service object is DataNode");
    dn = (DataNode) service;
    dnExposer = new DataNodeBridge(dn);

    try {
        DataNodeBridge dnBridge = new DataNodeBridge(dn);
        dxs = new DataXceiverServer(dnBridge);
    } catch (URISyntaxException e) {
        throw new IllegalArgumentException("Failed to create URI for DataXceriverServer", e);
    }
    daemon = new Daemon(dxs);
    daemon.setName(String.format("DataXceiverServer-JXIO-Listener-%d", daemon.getId()));
    daemon.start();

    LOG.info("Started");
    // JXIO version
    LOG.info("JXIO version :\t" + EventQueueHandler.class.getPackage().getImplementationVersion());
    // R4H version
    LOG.info("R4H verison :\t" + DataXceiver.class.getPackage().getImplementationVersion());
}

From source file: com.mellanox.r4h.R4HDatanodePlugin.java

License: Apache License

/**
 * Stop R4H server
 * 
 * @param waitForDaemon
 *            milliseconds to wait for resources to be closed or -1 for infinite wait
 */
public void stop(int waitForDaemon) {
    if (waitForDaemon < -1) {
        throw new IllegalArgumentException(
                "Illegal (begative) number of milliseconds argument to wait for deamon to stop");
    }

    LOG.debug("Stopping R4H Datanode plugin");
    Daemon dm = new Daemon(new Runnable() {

        @Override
        public void run() {
            dxs.stop();
        }
    });
    dm.start();

    try {
        if (waitForDaemon == -1) {
            daemon.join();
        } else {
            daemon.join(waitForDaemon);
        }
    } catch (InterruptedException e) {
        LOG.debug("daemon join interrupted. Exception: " + e.toString());
    }

    if (dm.isAlive()) {
        LOG.error("timeout waiting for R4H plugin to stop");
    } else {
        LOG.info("R4H Datanode plugin stopped");
    }
}

From source file: common.DataNode.java

License: Apache License

/**
 * Main loop for the DataNode.  Runs until shutdown,
 * forever calling remote NameNode functions.
 */
public void offerService() throws Exception {

    LOG.info("using BLOCKREPORT_INTERVAL of " + blockReportInterval + "msec" + " Initial delay: "
            + initialBlockReportDelay + "msec");

    //
    // Now loop for a long time....
    //
    while (shouldRun) {
        try {
            long startTime = now();

            //
            // Every so often, send heartbeat or block-report
            //

            if (startTime - lastHeartbeat > heartBeatInterval) {
                //
                // All heartbeat messages include following info:
                // -- Datanode name
                // -- data transfer port
                // -- Total capacity
                // -- Bytes remaining
                //
                lastHeartbeat = startTime;
                DatanodeCommand[] cmds = namenode.sendHeartbeat(dnRegistration, data.getCapacity(),
                        data.getDfsUsed(), data.getRemaining(), xmitsInProgress.get(), getXceiverCount());
                myMetrics.heartbeats.inc(now() - startTime);
                //LOG.info("Just sent heartbeat, with name " + localName);
                if (!processCommand(cmds))
                    continue;
            }

            reportReceivedBlocks();

            DatanodeCommand cmd = blockReport();
            processCommand(cmd);

            // start block scanner
            if (blockScanner != null && blockScannerThread == null && upgradeManager.isUpgradeCompleted()) {
                LOG.info("Starting Periodic block scanner.");
                blockScannerThread = new Daemon(blockScanner);
                blockScannerThread.start();
            }

            //
            // There is no work to do; sleep until heartbeat timer elapses, 
            // or work arrives, and then iterate again.
            //
            long waitTime = heartBeatInterval - (System.currentTimeMillis() - lastHeartbeat);
            synchronized (receivedBlockList) {
                if (waitTime > 0 && receivedBlockList.size() == 0) {
                    try {
                        receivedBlockList.wait(waitTime);
                    } catch (InterruptedException ie) {
                    }
                }
            } // synchronized
        } catch (RemoteException re) {
            String reClass = re.getClassName();
            if (UnregisteredNodeException.class.getName().equals(reClass)
                    || DisallowedDatanodeException.class.getName().equals(reClass)
                    || IncorrectVersionException.class.getName().equals(reClass)) {
                LOG.warn("DataNode is shutting down: " + StringUtils.stringifyException(re));
                shutdown();
                return;
            }
            LOG.warn(StringUtils.stringifyException(re));
        } catch (IOException e) {
            LOG.warn(StringUtils.stringifyException(e));
        }
    } // while (shouldRun)
}

From source file: common.DataNode.java

License: Apache License

private void transferBlock(Block block, DatanodeInfo xferTargets[]) throws IOException {
    if (!data.isValidBlock(block)) {
        // block does not exist or is under-construction
        String errStr = "Can't send invalid block " + block;
        LOG.info(errStr);
        namenode.errorReport(dnRegistration, DatanodeProtocol.INVALID_BLOCK, errStr);
        return;
    }

    // Check if NN recorded length matches on-disk length 
    long onDiskLength = data.getLength(block);
    if (block.getNumBytes() > onDiskLength) {
        // Shorter on-disk len indicates corruption so report NN the corrupt block
        namenode.reportBadBlocks(new LocatedBlock[] {
                new LocatedBlock(block, new DatanodeInfo[] { new DatanodeInfo(dnRegistration) }) });
        LOG.info("Can't replicate block " + block + " because on-disk length " + onDiskLength
                + " is shorter than NameNode recorded length " + block.getNumBytes());
        return;
    }

    int numTargets = xferTargets.length;
    if (numTargets > 0) {
        if (LOG.isInfoEnabled()) {
            StringBuilder xfersBuilder = new StringBuilder();
            for (int i = 0; i < numTargets; i++) {
                xfersBuilder.append(xferTargets[i].getName());
                xfersBuilder.append(" ");
            }
            LOG.info(dnRegistration + " Starting thread to transfer block " + block + " to " + xfersBuilder);
        }

        new Daemon(new DataTransfer(xferTargets, block, this)).start();
    }
}

From source file: io.fabric8.hadoop.hdfs.SecondaryNameNodeFactory.java

License: Apache License

@Override
protected SecondaryNameNode doCreate(Dictionary properties) throws Exception {
    Configuration conf = new Configuration();
    for (Enumeration e = properties.keys(); e.hasMoreElements();) {
        Object key = e.nextElement();
        Object val = properties.get(key);
        conf.set(key.toString(), val.toString());
    }
    SecondaryNameNode secondaryNameNode = new SecondaryNameNode(conf);
    new Daemon(secondaryNameNode).start();
    return secondaryNameNode;
}

From source file: io.fabric8.hadoop.mapred.TaskTrackerFactory.java

License: Apache License

@Override
protected TaskTracker doCreate(Dictionary properties) throws Exception {
    JobConf conf = new JobConf();
    for (Enumeration e = properties.keys(); e.hasMoreElements();) {
        Object key = e.nextElement();
        Object val = properties.get(key);
        conf.set(key.toString(), val.toString());
    }
    TaskTracker taskTracker = new TaskTracker(conf);
    new Daemon(taskTracker).start();
    return taskTracker;
}

From source file: mzb.NameNodeConnector.java

License: Apache License

NameNodeConnector(URI nameNodeUri, Configuration conf) throws IOException {
    this.nameNodeUri = nameNodeUri;

    this.namenode = NameNodeProxies.createProxy(conf, nameNodeUri, NamenodeProtocol.class).getProxy();
    this.client = NameNodeProxies.createProxy(conf, nameNodeUri, ClientProtocol.class).getProxy();
    this.fs = FileSystem.get(nameNodeUri, conf);

    final NamespaceInfo namespaceinfo = namenode.versionRequest();
    this.blockpoolID = namespaceinfo.getBlockPoolID();

    final ExportedBlockKeys keys = namenode.getBlockKeys();
    this.isBlockTokenEnabled = keys.isBlockTokenEnabled();
    if (isBlockTokenEnabled) {
        long blockKeyUpdateInterval = keys.getKeyUpdateInterval();
        long blockTokenLifetime = keys.getTokenLifetime();
        LOG.info(
                "Block token params received from NN: keyUpdateInterval=" + blockKeyUpdateInterval / (60 * 1000)
                        + " min(s), tokenLifetime=" + blockTokenLifetime / (60 * 1000) + " min(s)");
        String encryptionAlgorithm = conf.get(DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
        this.blockTokenSecretManager = new BlockTokenSecretManager(blockKeyUpdateInterval, blockTokenLifetime,
                blockpoolID, encryptionAlgorithm);
        this.blockTokenSecretManager.addKeys(keys);
        /*
         * Balancer should sync its block keys with NN more frequently than NN
         * updates its block keys
         */
        this.keyUpdaterInterval = blockKeyUpdateInterval / 4;
        LOG.info(
                "Balancer will update its block keys every " + keyUpdaterInterval / (60 * 1000) + " minute(s)");
        this.keyupdaterthread = new Daemon(new BlockKeyUpdater());
        this.shouldRun = true;
        this.keyupdaterthread.start();
    }
    this.encryptDataTransfer = fs.getServerDefaults(new Path("/")).getEncryptDataTransfer();
    // Check if there is another balancer running.
    // Exit if there is another one running.
    out = checkAndMarkRunningBalancer();
    if (out == null) {
        throw new IOException("Another balancer is running");
    }
}

From source file: org.apache.oozie.service.JvmPauseMonitorService.java

License: Apache License

@Override
public void init(Services services) throws ServiceException {
    warnThresholdMs = ConfigurationService.getLong(services.getConf(), WARN_THRESHOLD_KEY);
    infoThresholdMs = ConfigurationService.getLong(services.getConf(), INFO_THRESHOLD_KEY);

    instrumentation = services.get(InstrumentationService.class).get();

    Preconditions.checkState(monitorThread == null, "Already started");
    monitorThread = new Daemon(new Monitor());
    monitorThread.start();
}