Example usage for org.apache.hadoop.util ShutdownHookManager get

List of usage examples for org.apache.hadoop.util ShutdownHookManager get

Introduction

On this page you can find example usage for org.apache.hadoop.util ShutdownHookManager get.

Prototype

@InterfaceAudience.Public
public static ShutdownHookManager get() 

Document

Returns the ShutdownHookManager singleton.
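
Before the project-specific examples below, here is a minimal, self-contained sketch of the typical pattern: obtain the singleton, register a hook with a priority relative to FileSystem.SHUTDOWN_HOOK_PRIORITY (higher priorities run earlier), and check whether shutdown is already in progress. This sketch is not taken from any of the projects listed on this page; the class name and messages are illustrative.

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.util.ShutdownHookManager;

public class ShutdownHookExample {
    public static void main(String[] args) {
        ShutdownHookManager manager = ShutdownHookManager.get();

        // Hooks with a priority above FileSystem.SHUTDOWN_HOOK_PRIORITY run before
        // Hadoop closes its cached FileSystem instances, so work that still needs
        // HDFS access belongs at a higher priority.
        manager.addShutdownHook(new Runnable() {
            @Override
            public void run() {
                System.out.println("Flushing state before the FileSystem shuts down");
            }
        }, FileSystem.SHUTDOWN_HOOK_PRIORITY + 1);

        // Once shutdown has started, addShutdownHook() throws IllegalStateException,
        // so late-running code can consult this flag first.
        if (manager.isShutdownInProgress()) {
            System.out.println("JVM shutdown already in progress");
        }
    }
}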

Usage

From source file: co.cask.cdap.app.runtime.spark.SparkRuntimeService.java

License: Apache License

/**
 * Cleanup all shutdown hooks added by Spark and execute them directly.
 * This is needed so that for CDAP standalone, it won't leak memory through shutdown hooks.
 */
private void cleanupShutdownHooks() {
    // With Hadoop 2, Spark uses the Hadoop ShutdownHookManager
    ShutdownHookManager manager = ShutdownHookManager.get();
    try {
        // Use reflection to get the shutdown hooks
        Method getShutdownHooksInOrder = manager.getClass().getDeclaredMethod("getShutdownHooksInOrder");
        if (!Collection.class.isAssignableFrom(getShutdownHooksInOrder.getReturnType())) {
            LOG.warn("Unsupported method {}. Spark shutdown hooks cleanup skipped.", getShutdownHooksInOrder);
            return;
        }
        getShutdownHooksInOrder.setAccessible(true);

        // Filter out hooks that are defined in the same SparkRunnerClassLoader as this SparkProgramRunner class
        // This is for the case when there are concurrent Spark jobs running in the same VM
        List<Runnable> hooks = ImmutableList.copyOf(Iterables.filter(
                Iterables.filter((Collection<?>) getShutdownHooksInOrder.invoke(manager), Runnable.class),
                new Predicate<Runnable>() {
                    @Override
                    public boolean apply(Runnable runnable) {
                        return runnable.getClass().getClassLoader() == SparkRuntimeService.this.getClass()
                                .getClassLoader();
                    }
                }));

        for (Runnable hook : hooks) {
            LOG.debug("Running Spark shutdown hook {}", hook);
            hook.run();
            manager.removeShutdownHook(hook);
        }

    } catch (Exception e) {
        LOG.warn("Failed to cleanup Spark shutdown hooks.", e);
    }
}

From source file: co.cask.cdap.logging.framework.distributed.LogSaverTwillRunnable.java

License: Apache License

@Override
protected Injector doInit(TwillContext context) {
    name = context.getSpecification().getName();
    injector = createGuiceInjector(getCConfiguration(), getConfiguration(), context);

    // Register shutdown hook to stop Log Saver before Hadoop Filesystem shuts down
    ShutdownHookManager.get().addShutdownHook(new Runnable() {
        @Override
        public void run() {
            LOG.info("Shutdown hook triggered.");
            stop();
        }
    }, FileSystem.SHUTDOWN_HOOK_PRIORITY + 1);

    return injector;
}

From source file: co.cask.cdap.logging.run.LogSaverTwillRunnable.java

License: Apache License

@Override
public void run() {
    LOG.info("Starting runnable " + name);

    // Register shutdown hook to stop Log Saver before Hadoop Filesystem shuts down
    ShutdownHookManager.get().addShutdownHook(new Runnable() {
        @Override
        public void run() {
            LOG.info("Shutdown hook triggered.");
            stop();
        }
    }, FileSystem.SHUTDOWN_HOOK_PRIORITY + 1);

    Futures.getUnchecked(Services.chainStart(zkClientService, kafkaClientService, metricsCollectionService,
            logSaverService, logSaverStatusService));
    LOG.info("Runnable started " + name);

    try {
        completion.get();

        LOG.info("Runnable stopped " + name);
    } catch (InterruptedException e) {
        LOG.error("Waiting on completion interrupted", e);
        Thread.currentThread().interrupt();
    } catch (ExecutionException e) {
        // Propagating the execution exception will cause the TwillRunnable to terminate with an error,
        // and the AM will detect that and restart it.
        LOG.error("Completed with exception. Exception get propagated", e);
        throw Throwables.propagate(e);
    }
}

From source file: com.cloudera.oryx.lambda.HadoopUtils.java

License: Open Source License

/**
 * Adds a shutdown hook that tries to call {@link Closeable#close()} on the given argument
 * at JVM shutdown. This integrates with Hadoop's {@link ShutdownHookManager} in order to
 * better interact with Spark's usage of the same.
 *
 * @param closeable thing to close
 */
public static void closeAtShutdown(Closeable closeable) {
    if (SHUTDOWN_HOOK.addCloseable(closeable)) {
        try {
            // Spark uses SHUTDOWN_HOOK_PRIORITY + 30; this tries to execute earlier
            ShutdownHookManager.get().addShutdownHook(SHUTDOWN_HOOK, FileSystem.SHUTDOWN_HOOK_PRIORITY + 40);
        } catch (IllegalStateException ise) {
            log.warn("Can't close {} at shutdown since shutdown is in progress", closeable);
        }
    }
}
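
The comment in closeAtShutdown() relies on Hadoop executing hooks in order of decreasing priority, which is why a hook registered at SHUTDOWN_HOOK_PRIORITY + 40 runs before Spark's hooks at + 30. Below is a small standalone sketch of that ordering, plus unregistering a hook again as the Spark cleanup example above does. It is not part of Oryx; names and messages are illustrative.

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.util.ShutdownHookManager;

public class HookOrderingSketch {
    public static void main(String[] args) {
        Runnable early = () -> System.out.println("higher priority, runs first");
        Runnable late = () -> System.out.println("lower priority, runs afterwards");

        // Registration order does not matter; only the priority does.
        ShutdownHookManager.get().addShutdownHook(late, FileSystem.SHUTDOWN_HOOK_PRIORITY + 30);
        ShutdownHookManager.get().addShutdownHook(early, FileSystem.SHUTDOWN_HOOK_PRIORITY + 40);

        // A hook can be removed again before shutdown; removeShutdownHook() reports
        // whether the hook was actually registered.
        boolean removed = ShutdownHookManager.get().removeShutdownHook(late);
        System.out.println("late hook removed: " + removed);
    }
}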

From source file: com.gemstone.gemfire.cache.hdfs.internal.hoplog.HdfsSortedOplogOrganizer.java

License: Apache License

protected void checkForSafeError(Exception e) {
    boolean safeError = ShutdownHookManager.get().isShutdownInProgress();
    if (safeError) {
        // IOException because of closed file system. This happens when the member is
        // shutting down
        if (logger.isDebugEnabled())
            logger.debug("IO error caused by filesystem shutdown", e);
        throw new CacheClosedException("IO error caused by filesystem shutdown", e);
    }

    if (isClosed()) {
        //If the hoplog organizer is closed, throw an exception to indicate the 
        //caller should retry on the new primary.
        throw new PrimaryBucketException(e);
    }
}

From source file: com.gemstone.gemfire.cache.hdfs.internal.hoplog.HFileSortedOplog.java

License: Apache License

private void handleReadIOError(HFileReader hfileReader, IOException e, boolean skipFailIfSafe) {
    if (logger.isDebugEnabled())
        logger.debug("Read IO error", e);
    boolean safeError = ShutdownHookManager.get().isShutdownInProgress();
    if (safeError) {
        // IOException because of closed file system. This happens when the member is
        // shutting down
        if (logger.isDebugEnabled())
            logger.debug("IO error caused by filesystem shutdown", e);
        throw new CacheClosedException("IO error caused by filesystem shutdown", e);
    }

    // expose the error wrapped inside remote exception. Remote exceptions are
    // handled by file system client. So let the caller handle this error
    if (e instanceof RemoteException) {
        e = ((RemoteException) e).unwrapRemoteException();
        throw new HDFSIOException(LocalizedStrings.HOPLOG_FAILED_TO_READ_HDFS_FILE.toLocalizedString(path), e);
    }

    FileSystem currentFs = fsProvider.checkFileSystem();
    if (hfileReader != null && hfileReader.previousFS != currentFs) {
        if (logger.isDebugEnabled()) {
            logger.debug("{}Detected new FS client, closing old reader", logPrefix);
            if (currentFs != null) {
                if (logger.isDebugEnabled())
                    logger.debug("CurrentFs:" + currentFs.getUri() + "-" + currentFs.hashCode(), logPrefix);
            }
            if (hfileReader.previousFS != null) {
                if (logger.isDebugEnabled())
                    logger.debug("OldFs:" + hfileReader.previousFS.getUri() + "-"
                            + hfileReader.previousFS.hashCode() + ", closing old reader", logPrefix);
            }
        }
        try {
            HFileSortedOplog.this.compareAndClose(hfileReader, false);
        } catch (Exception ex) {
            if (logger.isDebugEnabled())
                logger.debug("Failed to close reader", ex);
        }
        if (skipFailIfSafe) {
            if (logger.isDebugEnabled())
                logger.debug("Not faling after io error since FS client changed");
            return;
        }
    }

    // it is not a safe error. let the caller handle it
    throw new HDFSIOException(LocalizedStrings.HOPLOG_FAILED_TO_READ_HDFS_FILE.toLocalizedString(path), e);
}

From source file: io.warp10.continuum.geo.GeoDirectory.java

License: Apache License

public GeoDirectory(KeyStore keystore, Properties properties) {

    //
    // Check required properties
    //

    for (String required : REQUIRED_PROPERTIES) {
        Preconditions.checkNotNull(properties.getProperty(required), "Missing configuration parameter '%s'.",
                required);
    }

    this.maxPlasmaZnodeSize = Integer
            .parseInt(properties.getProperty(Configuration.GEODIR_ZK_PLASMA_MAXZNODESIZE, "65536"));
    this.plasmaZnodeRoot = properties.getProperty(Configuration.GEODIR_ZK_PLASMA_ZNODE);
    this.plasmaTopic = properties.getProperty(Configuration.GEODIR_KAFKA_DATA_TOPIC);

    this.maxSubsZnodeSize = Integer
            .parseInt(properties.getProperty(Configuration.GEODIR_ZK_SUBS_MAXZNODESIZE, "65536"));
    this.subsZnodeRoot = properties.getProperty(Configuration.GEODIR_ZK_SUBS_ZNODE);
    this.subsTopic = properties.getProperty(Configuration.GEODIR_KAFKA_SUBS_TOPIC);

    this.name = properties.getProperty(Configuration.GEODIR_NAME);

    this.id = properties.getProperty(Configuration.GEODIR_ID);

    this.modulus = Long.parseLong(properties.getProperty(Configuration.GEODIR_MODULUS));
    this.remainder = Long.parseLong(properties.getProperty(Configuration.GEODIR_REMAINDER));
    this.period = Long.parseLong(properties.getProperty(Configuration.GEODIR_PERIOD));

    this.maxcells = Integer.parseInt(properties.getProperty(Configuration.GEODIR_MAXCELLS));

    this.fetchEndpoint = properties.getProperty(Configuration.GEODIR_FETCH_ENDPOINT);

    this.KAFKA_OUT_MAXSIZE = Integer.parseInt(properties.getProperty(Configuration.GEODIR_KAFKA_DATA_MAXSIZE));

    //
    // Create actual index
    //

    long depth = Long.parseLong(properties.getProperty(Configuration.GEODIR_CHUNK_DEPTH));
    int chunks = Integer.parseInt(properties.getProperty(Configuration.GEODIR_CHUNK_COUNT));
    final int resolution = Integer.parseInt(properties.getProperty(Configuration.GEODIR_RESOLUTION));

    this.index = new GeoIndex(resolution, chunks, depth);

    final String dumpPrefix = properties.getProperty(Configuration.GEODIR_DUMP_PREFIX);

    final GeoDirectory self = this;

    if (null != dumpPrefix) {
        File path = new File(dumpPrefix + "." + self.id);
        try {
            this.index.loadLKPIndex(path);
        } catch (IOException ioe) {
            LOG.error("Error while loading LKP '" + this.id + "' from " + path, ioe);
        }

        Runtime.getRuntime().addShutdownHook(new Thread() {
            @Override
            public void run() {
                File path = new File(dumpPrefix + "." + self.id);
                try {
                    self.index.dumpLKPIndex(path);
                } catch (IOException ioe) {
                    LOG.error("Error while dumping LKP '" + self.id + "' into " + path);
                }
            }
        });

        //
        // Make sure ShutdownHookManager is initialized, otherwise it will try to
        // register a shutdown hook during the shutdown hook we just registered...
        //

        ShutdownHookManager.get();
    }

    //
    // Create the outbound Kafka producer for subscriptions
    //

    Properties subsProps = new Properties();
    // @see http://kafka.apache.org/documentation.html#producerconfigs
    subsProps.setProperty("metadata.broker.list",
            properties.getProperty(Configuration.GEODIR_KAFKA_SUBS_BROKERLIST));
    if (null != properties.getProperty(Configuration.GEODIR_KAFKA_SUBS_PRODUCER_CLIENTID)) {
        subsProps.setProperty("client.id",
                properties.getProperty(Configuration.GEODIR_KAFKA_SUBS_PRODUCER_CLIENTID));
    }
    subsProps.setProperty("request.required.acks", "-1");
    subsProps.setProperty("producer.type", "sync");
    subsProps.setProperty("serializer.class", "kafka.serializer.DefaultEncoder");
    // We use the default partitioner
    //dataProps.setProperty("partitioner.class", ...);

    ProducerConfig subsConfig = new ProducerConfig(subsProps);
    this.subsProducer = new Producer<byte[], byte[]>(subsConfig);

    //
    // Create the outbound Kafka producer for data
    //

    Properties plasmaProps = new Properties();
    // @see http://kafka.apache.org/documentation.html#producerconfigs
    plasmaProps.setProperty("metadata.broker.list",
            properties.getProperty(Configuration.GEODIR_KAFKA_DATA_BROKERLIST));
    if (null != properties.getProperty(Configuration.GEODIR_KAFKA_DATA_PRODUCER_CLIENTID)) {
        plasmaProps.setProperty("client.id",
                properties.getProperty(Configuration.GEODIR_KAFKA_DATA_PRODUCER_CLIENTID));
    }
    plasmaProps.setProperty("request.required.acks", "-1");
    plasmaProps.setProperty("producer.type", "sync");
    plasmaProps.setProperty("request.required.acks", "-1");
    plasmaProps.setProperty("producer.type", "sync");
    plasmaProps.setProperty("serializer.class", "kafka.serializer.DefaultEncoder");
    plasmaProps.setProperty("partitioner.class", io.warp10.continuum.KafkaPartitioner.class.getName());

    ProducerConfig plasmaConfig = new ProducerConfig(plasmaProps);
    this.plasmaProducer = new Producer<byte[], byte[]>(plasmaConfig);

    //
    // Extract keys
    //

    this.AES_KAFKA_SUBS = keystore.decodeKey(properties.getProperty(Configuration.GEODIR_KAFKA_SUBS_AES));
    this.AES_KAFKA_DATA = keystore.decodeKey(properties.getProperty(Configuration.GEODIR_KAFKA_DATA_AES));
    this.AES_ZK_SUBS = keystore.decodeKey(properties.getProperty(Configuration.GEODIR_ZK_SUBS_AES));
    this.SIPHASH_KAFKA_SUBS = SipHashInline
            .getKey(keystore.decodeKey(properties.getProperty(Configuration.GEODIR_KAFKA_SUBS_MAC)));
    this.SIPHASH_KAFKA_DATA = SipHashInline
            .getKey(keystore.decodeKey(properties.getProperty(Configuration.GEODIR_KAFKA_DATA_MAC)));
    this.SIPHASH_FETCH_PSK = SipHashInline
            .getKey(keystore.decodeKey(properties.getProperty(Configuration.GEODIR_FETCH_PSK)));

    keystore.setKey(KeyStore.SIPHASH_DIRECTORY_PSK,
            keystore.decodeKey(properties.getProperty(Configuration.GEODIR_DIRECTORY_PSK)));

    //
    // Forget master key if it was set
    //

    keystore.forget();

    //
    // Start the various curator frameworks
    //

    this.plasmaCurator = CuratorFrameworkFactory.builder().connectionTimeoutMs(1000)
            .retryPolicy(new RetryNTimes(10, 500))
            .connectString(properties.getProperty(io.warp10.continuum.Configuration.GEODIR_ZK_PLASMA_QUORUM))
            .build();
    plasmaCurator.start();

    this.subsCurator = CuratorFrameworkFactory.builder().connectionTimeoutMs(1000)
            .retryPolicy(new RetryNTimes(10, 500))
            .connectString(properties.getProperty(io.warp10.continuum.Configuration.GEODIR_ZK_SUBS_QUORUM))
            .build();
    this.subsCurator.start();

    this.serviceCurator = CuratorFrameworkFactory.builder().connectionTimeoutMs(1000)
            .retryPolicy(new RetryNTimes(10, 500))
            .connectString(properties.getProperty(io.warp10.continuum.Configuration.GEODIR_ZK_SERVICE_QUORUM))
            .build();
    this.serviceCurator.start();

    //
    // Create ThreadDirectoryClient
    //

    try {
        //
        // Copy configuration from GeoDir to Directory.
        // We can do that as we were passed a clone of 'properties'
        //

        properties.setProperty(Configuration.DIRECTORY_PSK,
                properties.getProperty(Configuration.GEODIR_DIRECTORY_PSK));
        properties.setProperty(Configuration.DIRECTORY_ZK_ZNODE,
                properties.getProperty(Configuration.GEODIR_ZK_DIRECTORY_ZNODE));
        this.directoryClient = new ThriftDirectoryClient(keystore, properties);
    } catch (Exception e) {
        throw new RuntimeException("Unable to start GeoDirectory", e);
    }

    //
    // Load the known subscriptions for this GeoDir
    //

    zkLoad();

    //
    // Initialize Kafka Consumer Pools
    //

    //
    // GeoDir subscriptions
    //

    ConsumerFactory subsConsumerFactory = new SubsConsumerFactory(this);

    KafkaSynchronizedConsumerPool pool = new KafkaSynchronizedConsumerPool(
            properties.getProperty(Configuration.GEODIR_KAFKA_SUBS_ZKCONNECT),
            properties.getProperty(Configuration.GEODIR_KAFKA_SUBS_TOPIC),
            properties.getProperty(Configuration.GEODIR_KAFKA_SUBS_CONSUMER_CLIENTID),
            properties.getProperty(Configuration.GEODIR_KAFKA_SUBS_GROUPID),
            properties.getProperty(Configuration.GEODIR_KAFKA_SUBS_CONSUMER_PARTITION_ASSIGNMENT_STRATEGY),
            Integer.parseInt(properties.getProperty(Configuration.GEODIR_KAFKA_SUBS_NTHREADS)),
            Long.parseLong(properties.getProperty(Configuration.GEODIR_KAFKA_SUBS_COMMITPERIOD)),
            subsConsumerFactory);

    pool.setAbortHook(new Hook() {
        @Override
        public void call() {
            Sensision.update(SensisionConstants.SENSISION_CLASS_GEODIR_SUBS_ABORTS, Sensision.EMPTY_LABELS, 1);
        }
    });

    pool.setPreCommitOffsetHook(new Hook() {
        @Override
        public void call() {
            //
            // Store updated selectors to ZooKeeper
            //
            zkStore();
        }
    });

    pool.setCommitOffsetHook(new Hook() {
        @Override
        public void call() {
            Sensision.update(SensisionConstants.SENSISION_CLASS_GEODIR_SUBS_KAFKA_COMMITS,
                    Sensision.EMPTY_LABELS, 1);
        }
    });

    pool.setSyncHook(new Hook() {
        @Override
        public void call() {
            Sensision.update(SensisionConstants.SENSISION_CLASS_GEODIR_SUBS_SYNCS, Sensision.EMPTY_LABELS, 1);
        }
    });

    //
    // Plasma data feed
    //

    ConsumerFactory dataConsumerFactory = new DataConsumerFactory(this);

    KafkaSynchronizedConsumerPool datapool = new KafkaSynchronizedConsumerPool(
            properties.getProperty(Configuration.GEODIR_KAFKA_DATA_ZKCONNECT),
            properties.getProperty(Configuration.GEODIR_KAFKA_DATA_TOPIC),
            properties.getProperty(Configuration.GEODIR_KAFKA_DATA_CONSUMER_CLIENTID),
            properties.getProperty(Configuration.GEODIR_KAFKA_DATA_GROUPID),
            properties.getProperty(Configuration.GEODIR_KAFKA_DATA_CONSUMER_PARTITION_ASSIGNMENT_STRATEGY),
            Integer.parseInt(properties.getProperty(Configuration.GEODIR_KAFKA_DATA_NTHREADS)),
            Long.parseLong(properties.getProperty(Configuration.GEODIR_KAFKA_DATA_COMMITPERIOD)),
            dataConsumerFactory);

    datapool.setAbortHook(new Hook() {
        @Override
        public void call() {
            Sensision.update(SensisionConstants.SENSISION_CLASS_GEODIR_DATA_ABORTS, Sensision.EMPTY_LABELS, 1);
        }
    });

    datapool.setCommitOffsetHook(new Hook() {
        @Override
        public void call() {
            Sensision.update(SensisionConstants.SENSISION_CLASS_GEODIR_DATA_KAFKA_COMMITS,
                    Sensision.EMPTY_LABELS, 1);
        }
    });

    datapool.setSyncHook(new Hook() {
        @Override
        public void call() {
            Sensision.update(SensisionConstants.SENSISION_CLASS_GEODIR_DATA_SYNCS, Sensision.EMPTY_LABELS, 1);
        }
    });

    //
    // Start Thread, this will trigger an initial subscription update
    //

    Thread t = new Thread(this);
    t.setDaemon(true);
    t.setName("Warp GeoDirectory");
    t.start();

    startThrift(properties);

    //
    // Start Jetty server
    //

    Server server = new Server();

    ServerConnector connector = new ServerConnector(server,
            Integer.parseInt(properties.getProperty(Configuration.GEODIR_ACCEPTORS)),
            Integer.parseInt(properties.getProperty(Configuration.GEODIR_SELECTORS)));
    connector.setIdleTimeout(Long.parseLong(properties.getProperty(Configuration.GEODIR_IDLE_TIMEOUT)));
    connector.setPort(Integer.parseInt(properties.getProperty(Configuration.GEODIR_HTTP_PORT)));
    connector.setHost(properties.getProperty(Configuration.GEODIR_HTTP_HOST));
    connector.setName("Warp GeoDir");

    server.setConnectors(new Connector[] { connector });

    server.setHandler(this);

    JettyUtil.setSendServerVersion(server, false);

    try {
        server.start();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    //
    // Start Thrift Server
    //

    //
    // Register service in ZK
    //
}

From source file: io.warp10.continuum.ingress.Ingress.java

License: Apache License

public Ingress(KeyStore keystore, Properties props) {

    //
    // Enable the ThrottlingManager
    //

    ThrottlingManager.enable();

    this.keystore = keystore;
    this.properties = props;

    //
    // Make sure all required configuration is present
    //

    for (String required : REQUIRED_PROPERTIES) {
        Preconditions.checkNotNull(props.getProperty(required), "Missing configuration parameter '%s'.",
                required);
    }

    //
    // Extract parameters from 'props'
    //

    this.doShuffle = "true".equals(props.getProperty(Configuration.INGRESS_DELETE_SHUFFLE));

    this.rejectDelete = "true".equals(props.getProperty(Configuration.INGRESS_DELETE_REJECT));

    if (props.containsKey(Configuration.INGRESS_CACHE_DUMP_PATH)) {
        this.cacheDumpPath = props.getProperty(Configuration.INGRESS_CACHE_DUMP_PATH);
    } else {
        this.cacheDumpPath = null;
    }

    int port = Integer.valueOf(props.getProperty(Configuration.INGRESS_PORT));
    String host = props.getProperty(Configuration.INGRESS_HOST);
    int acceptors = Integer.valueOf(props.getProperty(Configuration.INGRESS_ACCEPTORS));
    int selectors = Integer.valueOf(props.getProperty(Configuration.INGRESS_SELECTORS));
    long idleTimeout = Long.parseLong(props.getProperty(Configuration.INGRESS_IDLE_TIMEOUT));

    if (null != props.getProperty(Configuration.INGRESS_METADATA_CACHE_SIZE)) {
        this.METADATA_CACHE_SIZE = Integer
                .valueOf(props.getProperty(Configuration.INGRESS_METADATA_CACHE_SIZE));
    }

    this.metaTopic = props.getProperty(Configuration.INGRESS_KAFKA_META_TOPIC);

    this.dataTopic = props.getProperty(Configuration.INGRESS_KAFKA_DATA_TOPIC);

    this.DATA_MESSAGES_THRESHOLD = Long.parseLong(props.getProperty(Configuration.INGRESS_KAFKA_DATA_MAXSIZE));
    this.METADATA_MESSAGES_THRESHOLD = Long
            .parseLong(props.getProperty(Configuration.INGRESS_KAFKA_METADATA_MAXSIZE));
    this.maxValueSize = Long.parseLong(props.getProperty(Configuration.INGRESS_VALUE_MAXSIZE));

    extractKeys(this.keystore, props);

    this.classKey = SipHashInline.getKey(this.keystore.getKey(KeyStore.SIPHASH_CLASS));
    this.labelsKey = SipHashInline.getKey(this.keystore.getKey(KeyStore.SIPHASH_LABELS));

    this.AES_KAFKA_META = this.keystore.getKey(KeyStore.AES_KAFKA_METADATA);
    this.SIPHASH_KAFKA_META = SipHashInline.getKey(this.keystore.getKey(KeyStore.SIPHASH_KAFKA_METADATA));

    this.aesDataKey = this.keystore.getKey(KeyStore.AES_KAFKA_DATA);
    this.siphashDataKey = SipHashInline.getKey(this.keystore.getKey(KeyStore.SIPHASH_KAFKA_DATA));

    this.sendMetadataOnDelete = Boolean
            .parseBoolean(props.getProperty(Configuration.INGRESS_DELETE_METADATA_INCLUDE, "false"));

    //
    // Prepare meta, data and delete producers
    //

    Properties metaProps = new Properties();
    // @see http://kafka.apache.org/documentation.html#producerconfigs
    metaProps.setProperty("metadata.broker.list",
            props.getProperty(Configuration.INGRESS_KAFKA_META_BROKERLIST));
    if (null != props.getProperty(Configuration.INGRESS_KAFKA_META_PRODUCER_CLIENTID)) {
        metaProps.setProperty("client.id",
                props.getProperty(Configuration.INGRESS_KAFKA_META_PRODUCER_CLIENTID));
    }
    metaProps.setProperty("request.required.acks", "-1");
    // TODO(hbs): when we move to the new KafkaProducer API
    //metaProps.setProperty(org.apache.kafka.clients.producer.ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "1");
    metaProps.setProperty("producer.type", "sync");
    metaProps.setProperty("serializer.class", "kafka.serializer.DefaultEncoder");
    metaProps.setProperty("partitioner.class", io.warp10.continuum.KafkaPartitioner.class.getName());
    //??? metaProps.setProperty("block.on.buffer.full", "true");

    // FIXME(hbs): compression does not work
    //metaProps.setProperty("compression.codec", "snappy");
    //metaProps.setProperty("client.id","");

    ProducerConfig metaConfig = new ProducerConfig(metaProps);

    this.metaProducerPool = new KafkaProducerPool(metaConfig,
            Integer.parseInt(props.getProperty(Configuration.INGRESS_KAFKA_METADATA_POOLSIZE)),
            SensisionConstants.SENSISION_CLASS_CONTINUUM_INGRESS_KAFKA_METADATA_PRODUCER_POOL_GET,
            SensisionConstants.SENSISION_CLASS_CONTINUUM_INGRESS_KAFKA_METADATA_PRODUCER_WAIT_NANO);

    Properties dataProps = new Properties();
    // @see http://kafka.apache.org/documentation.html#producerconfigs
    dataProps.setProperty("metadata.broker.list",
            props.getProperty(Configuration.INGRESS_KAFKA_DATA_BROKERLIST));
    if (null != props.getProperty(Configuration.INGRESS_KAFKA_DATA_PRODUCER_CLIENTID)) {
        dataProps.setProperty("client.id",
                props.getProperty(Configuration.INGRESS_KAFKA_DATA_PRODUCER_CLIENTID));
    }
    dataProps.setProperty("request.required.acks", "-1");
    dataProps.setProperty("producer.type", "sync");
    dataProps.setProperty("serializer.class", "kafka.serializer.DefaultEncoder");
    dataProps.setProperty("partitioner.class", io.warp10.continuum.KafkaPartitioner.class.getName());
    // TODO(hbs): when we move to the new KafkaProducer API
    //dataProps.setProperty(org.apache.kafka.clients.producer.ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "1");

    if (null != props.getProperty(Configuration.INGRESS_KAFKA_DATA_REQUEST_TIMEOUT_MS)) {
        dataProps.setProperty("request.timeout.ms",
                props.getProperty(Configuration.INGRESS_KAFKA_DATA_REQUEST_TIMEOUT_MS));
    }

    ///???? dataProps.setProperty("block.on.buffer.full", "true");

    // FIXME(hbs): compression does not work
    //dataProps.setProperty("compression.codec", "snappy");
    //dataProps.setProperty("client.id","");

    ProducerConfig dataConfig = new ProducerConfig(dataProps);

    //this.dataProducer = new Producer<byte[], byte[]>(dataConfig);

    //
    // Allocate producer pool
    //

    this.dataProducers = new Producer[Integer
            .parseInt(props.getProperty(Configuration.INGRESS_KAFKA_DATA_POOLSIZE))];

    for (int i = 0; i < dataProducers.length; i++) {
        this.dataProducers[i] = new Producer<byte[], byte[]>(dataConfig);
    }

    this.dataProducersCurrentPoolSize = this.dataProducers.length;

    //
    // Producer for the Delete topic
    //

    /*
    Properties deleteProps = new Properties();
    // @see http://kafka.apache.org/documentation.html#producerconfigs
    deleteProps.setProperty("metadata.broker.list", props.getProperty(INGRESS_KAFKA_DELETE_BROKERLIST));
    deleteProps.setProperty("request.required.acks", "-1");
    deleteProps.setProperty("producer.type","sync");
    deleteProps.setProperty("serializer.class", "kafka.serializer.DefaultEncoder");
    deleteProps.setProperty("partitioner.class", io.warp10.continuum.KafkaPartitioner.class.getName());
            
    ProducerConfig deleteConfig = new ProducerConfig(deleteProps);
    this.deleteProducer = new Producer<byte[], byte[]>(deleteConfig);
    */

    //
    // Attempt to load the cache file (we do that prior to starting the Kafka consumer)
    //

    loadCache();

    //
    // Create Kafka consumer to handle Metadata deletions
    //

    ConsumerFactory metadataConsumerFactory = new IngressMetadataConsumerFactory(this);

    if (props.containsKey(Configuration.INGRESS_KAFKA_META_GROUPID)) {
        pool = new KafkaSynchronizedConsumerPool(props.getProperty(Configuration.INGRESS_KAFKA_META_ZKCONNECT),
                props.getProperty(Configuration.INGRESS_KAFKA_META_TOPIC),
                props.getProperty(Configuration.INGRESS_KAFKA_META_CONSUMER_CLIENTID),
                props.getProperty(Configuration.INGRESS_KAFKA_META_GROUPID),
                props.getProperty(Configuration.INGRESS_KAFKA_META_CONSUMER_PARTITION_ASSIGNMENT_STRATEGY),
                props.getProperty(Configuration.INGRESS_KAFKA_META_CONSUMER_AUTO_OFFSET_RESET),
                Integer.parseInt(props.getProperty(Configuration.INGRESS_KAFKA_META_NTHREADS)),
                Long.parseLong(props.getProperty(Configuration.INGRESS_KAFKA_META_COMMITPERIOD)),
                metadataConsumerFactory);
    } else {
        pool = null;
    }

    //
    // Initialize ThriftDirectoryService
    //

    try {
        this.directoryClient = new ThriftDirectoryClient(this.keystore, props);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    //
    // Register shutdown hook
    //

    final Ingress self = this;

    Runtime.getRuntime().addShutdownHook(new Thread() {
        @Override
        public void run() {
            //
            // Make sure the Kafka consumers are stopped so we don't miss deletions
            // when restarting and using the cache we are about to store
            //

            if (null != self.pool) {
                self.pool.shutdown();

                LOG.info("Waiting for Ingress Kafka consumers to stop.");

                while (!self.pool.isStopped()) {
                    LockSupport.parkNanos(250000000L);
                }

                LOG.info("Kafka consumers stopped, dumping GTS cache");
            }

            self.dumpCache();
        }
    });

    //
    // Make sure ShutdownHookManager is initialized, otherwise it will try to
    // register a shutdown hook during the shutdown hook we just registered...
    //

    ShutdownHookManager.get();

    //
    // Start Jetty server
    //

    int maxThreads = Integer.parseInt(props.getProperty(Configuration.INGRESS_JETTY_THREADPOOL));

    boolean enableStreamUpdate = !("true".equals(props.getProperty(Configuration.WARP_STREAMUPDATE_DISABLE)));

    BlockingArrayQueue<Runnable> queue = null;

    if (props.containsKey(Configuration.INGRESS_JETTY_MAXQUEUESIZE)) {
        int queuesize = Integer.parseInt(props.getProperty(Configuration.INGRESS_JETTY_MAXQUEUESIZE));
        queue = new BlockingArrayQueue<Runnable>(queuesize);
    }

    Server server = new Server(new QueuedThreadPool(maxThreads, 8, (int) idleTimeout, queue));
    ServerConnector connector = new ServerConnector(server, acceptors, selectors);
    connector.setIdleTimeout(idleTimeout);
    connector.setPort(port);
    connector.setHost(host);
    connector.setName("Continuum Ingress");

    server.setConnectors(new Connector[] { connector });

    HandlerList handlers = new HandlerList();

    Handler cors = new CORSHandler();
    handlers.addHandler(cors);

    handlers.addHandler(this);

    if (enableStreamUpdate) {
        IngressStreamUpdateHandler suHandler = new IngressStreamUpdateHandler(this);
        handlers.addHandler(suHandler);
    }

    server.setHandler(handlers);

    JettyUtil.setSendServerVersion(server, false);

    try {
        server.start();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    Thread t = new Thread(this);
    t.setDaemon(true);
    t.setName("Continuum Ingress");
    t.start();
}

From source file: io.warp10.standalone.StandaloneChunkedMemoryStore.java

License: Apache License

public StandaloneChunkedMemoryStore(Properties properties, KeyStore keystore) {
    this.properties = properties;

    this.series = new MapMaker().concurrencyLevel(64).makeMap();

    this.chunkcount = Integer
            .parseInt(properties.getProperty(io.warp10.continuum.Configuration.IN_MEMORY_CHUNK_COUNT, "3"));
    this.chunkspan = Long.parseLong(properties.getProperty(
            io.warp10.continuum.Configuration.IN_MEMORY_CHUNK_LENGTH, Long.toString(Long.MAX_VALUE)));

    this.labelsKeyLongs = SipHashInline.getKey(keystore.getKey(KeyStore.SIPHASH_LABELS));
    this.classKeyLongs = SipHashInline.getKey(keystore.getKey(KeyStore.SIPHASH_CLASS));

    //
    // Add a shutdown hook to dump the memory store on exit
    //

    if (null != properties.getProperty(io.warp10.continuum.Configuration.STANDALONE_MEMORY_STORE_DUMP)) {

        final StandaloneChunkedMemoryStore self = this;
        final String path = properties
                .getProperty(io.warp10.continuum.Configuration.STANDALONE_MEMORY_STORE_DUMP);
        Thread dumphook = new Thread() {
            @Override
            public void run() {
                try {
                    self.dump(path);
                } catch (IOException ioe) {
                    ioe.printStackTrace();
                    throw new RuntimeException(ioe);
                }
            }
        };

        Runtime.getRuntime().addShutdownHook(dumphook);

        //
        // Make sure ShutdownHookManager is initialized, otherwise it will try to
        // register a shutdown hook during the shutdown hook we just registered...
        //

        ShutdownHookManager.get();
    }

    this.setDaemon(true);
    this.setName("[StandaloneChunkedMemoryStore Janitor]");
    this.setPriority(Thread.MIN_PRIORITY);
    this.start();
}

From source file: io.warp10.standalone.StandaloneMemoryStore.java

License: Apache License

public StandaloneMemoryStore(KeyStore keystore, long timespan, long highwatermark, long lowwatermark) {
    this.keystore = keystore;
    this.aesKey = this.keystore.getKey(KeyStore.AES_LEVELDB_DATA);
    //this.series = new ConcurrentHashMap<BigInteger,GTSEncoder>();
    this.series = new MapMaker().concurrencyLevel(64).makeMap();
    this.timespan = timespan;
    this.highwatermark = highwatermark;
    this.lowwatermark = lowwatermark;

    //
    // Add a shutdown hook to dump the memory store on exit
    //

    if (null != WarpConfig.getProperties()
            .getProperty(io.warp10.continuum.Configuration.STANDALONE_MEMORY_STORE_DUMP)) {

        final StandaloneMemoryStore self = this;
        final String path = WarpConfig.getProperties()
                .getProperty(io.warp10.continuum.Configuration.STANDALONE_MEMORY_STORE_DUMP);
        Thread dumphook = new Thread() {
            @Override
            public void run() {
                try {
                    self.dump(path);
                } catch (IOException ioe) {
                    ioe.printStackTrace();
                    throw new RuntimeException(ioe);
                }
            }
        };

        Runtime.getRuntime().addShutdownHook(dumphook);

        //
        // Make sure ShutdownHookManager is initialized, otherwise it will try to
        // register a shutdown hook during the shutdown hook we just registered...
        //

        ShutdownHookManager.get();
    }

    this.setDaemon(true);
    this.setName("[StandaloneMemoryStore Janitor]");
    this.setPriority(Thread.MIN_PRIORITY);
    this.start();
}