Example usage for org.apache.thrift TDeserializer deserialize

Introduction

This page presents usage examples for org.apache.thrift.TDeserializer#deserialize.

Prototype

public void deserialize(TBase base, byte[] bytes) throws TException 

Document

Deserialize the Thrift object from a byte array.
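
For context, a minimal round trip through TSerializer and TDeserializer might look like the sketch below; MyStruct is a hypothetical placeholder for any Thrift-generated class.

import org.apache.thrift.TDeserializer;
import org.apache.thrift.TException;
import org.apache.thrift.TSerializer;
import org.apache.thrift.protocol.TCompactProtocol;

public class RoundTripSketch {
    public static void main(String[] args) throws TException {
        // MyStruct stands in for a Thrift-generated struct
        MyStruct original = new MyStruct();

        // Serialize with the compact protocol...
        TSerializer serializer = new TSerializer(new TCompactProtocol.Factory());
        byte[] bytes = serializer.serialize(original);

        // ...then deserialize into a fresh instance; deserialize()
        // populates the supplied TBase object in place.
        MyStruct copy = new MyStruct();
        TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());
        deserializer.deserialize(copy, bytes);
    }
}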

Usage

From source file: io.warp10.continuum.ingress.IngressMetadataConsumerFactory.java

License: Apache License

@Override
public Runnable getConsumer(final KafkaSynchronizedConsumerPool pool,
        final KafkaStream<byte[], byte[]> stream) {

    return new Runnable() {
        @Override
        public void run() {
            ConsumerIterator<byte[], byte[]> iter = stream.iterator();

            // Iterate on the messages
            TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());

            KafkaOffsetCounters counters = pool.getCounters();

            try {
                while (iter.hasNext()) {
                    //
                    // Since the call to 'next' may block, we need to first
                    // check that there is a message available
                    //

                    boolean nonEmpty = iter.nonEmpty();

                    if (nonEmpty) {
                        MessageAndMetadata<byte[], byte[]> msg = iter.next();
                        counters.count(msg.partition(), msg.offset());

                        byte[] data = msg.message();

                        Sensision.update(SensisionConstants.SENSISION_CLASS_WARP_INGRESS_KAFKA_META_IN_MESSAGES,
                                Sensision.EMPTY_LABELS, 1);
                        Sensision.update(SensisionConstants.SENSISION_CLASS_WARP_INGRESS_KAFKA_META_IN_BYTES,
                                Sensision.EMPTY_LABELS, data.length);

                        if (null != ingress.SIPHASH_KAFKA_META) {
                            data = CryptoUtils.removeMAC(ingress.SIPHASH_KAFKA_META, data);
                        }

                        // Skip data whose MAC was not verified successfully
                        if (null == data) {
                            Sensision.update(
                                    SensisionConstants.SENSISION_CLASS_WARP_INGRESS_KAFKA_META_IN_INVALIDMACS,
                                    Sensision.EMPTY_LABELS, 1);
                            continue;
                        }

                        // Unwrap data if need be
                        if (null != ingress.AES_KAFKA_META) {
                            data = CryptoUtils.unwrap(ingress.AES_KAFKA_META, data);
                        }

                        // Skip data that was not unwrapped successfully
                        if (null == data) {
                            Sensision.update(
                                    SensisionConstants.SENSISION_CLASS_WARP_INGRESS_KAFKA_META_IN_INVALIDCIPHERS,
                                    Sensision.EMPTY_LABELS, 1);
                            continue;
                        }

                        //
                        // Extract Metadata
                        //

                        //
                        // TODO(hbs): We could check that metadata class/labels Id match those of the key, but
                        // since it was wrapped/authenticated, we suppose it's ok.
                        //

                        byte[] clslblsBytes = Arrays.copyOf(data, 16);
                        BigInteger clslblsId = new BigInteger(clslblsBytes);

                        byte[] metadataBytes = Arrays.copyOfRange(data, 16, data.length);

                        Metadata metadata = new Metadata();
                        deserializer.deserialize(metadata, metadataBytes);

                        //
                        // Only handle DELETE and METADATA sources
                        // We treat those two types of updates the same way, by removing the cache entry
                        // for the corresponding Metadata. By doing so we simplify handling
                        //
                        // TODO(hbs): update metadata cache when receiving Metadata from '/meta'?
                        //

                        if (Configuration.INGRESS_METADATA_DELETE_SOURCE.equals(metadata.getSource())) {
                            //
                            // Remove entry from Metadata cache
                            //

                            synchronized (ingress.metadataCache) {
                                ingress.metadataCache.remove(clslblsId);
                            }
                            continue;
                        } else if (Configuration.INGRESS_METADATA_UPDATE_ENDPOINT
                                .equals(metadata.getSource())) {
                            //
                            // Remove the entry from the Metadata cache rather than
                            // updating it with the new metadata
                            //

                            //ingress.metadataCache.put(clslblsId, metadata);
                            synchronized (ingress.metadataCache) {
                                ingress.metadataCache.remove(clslblsId);
                            }
                            continue;
                        } else {
                            continue;
                        }
                    }
                }
            } catch (Throwable t) {
                t.printStackTrace(System.err);
            } finally {
                // Set abort to true in case we exit the 'run' method
                pool.getAbort().set(true);
            }
        }
    };
}
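
The consumers in these examples all guard the call to deserialize with the same pipeline: verify and strip a SipHash MAC, unwrap an AES layer, then deserialize with the compact protocol. Below is a distilled sketch of that pattern; the key material and message bytes are hypothetical inputs, and the io.warp10 import paths are assumed to match the code above.

import org.apache.thrift.TDeserializer;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TCompactProtocol;

import io.warp10.continuum.store.thrift.data.Metadata;
import io.warp10.crypto.CryptoUtils;

public class GuardedDeserializeSketch {
    // Returns null when the MAC or the ciphertext is invalid
    public static Metadata decode(byte[] sipHashKey, byte[] aesKey, byte[] message) throws TException {
        byte[] data = message;

        // Verify and strip the MAC; removeMAC yields null on failure
        if (null != sipHashKey) {
            data = CryptoUtils.removeMAC(sipHashKey, data);
            if (null == data) {
                return null;
            }
        }

        // Unwrap the AES layer; unwrap yields null on failure
        if (null != aesKey) {
            data = CryptoUtils.unwrap(aesKey, data);
            if (null == data) {
                return null;
            }
        }

        Metadata metadata = new Metadata();
        new TDeserializer(new TCompactProtocol.Factory()).deserialize(metadata, data);
        return metadata;
    }
}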

From source file: io.warp10.continuum.LogUtil.java

License: Apache License

public static final LoggingEvent unwrapLog(byte[] key, String logmsg) {
    try {
        byte[] data = OrderPreservingBase64.decode(logmsg.getBytes(Charsets.US_ASCII));

        if (null == data) {
            return null;
        }

        data = CryptoUtils.unwrap(key, data);

        if (null == data) {
            return null;
        }

        TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());
        LoggingEvent event = new LoggingEvent();
        try {
            deserializer.deserialize(event, data);
        } catch (TException te) {
            return null;
        }

        return event;
    } catch (Exception e) {
        return null;
    }
}

From source file: io.warp10.continuum.plasma.PlasmaFrontEnd.java

License: Apache License

public PlasmaFrontEnd(KeyStore keystore, final Properties properties) throws Exception {

    super(keystore, properties, null, false);

    // Extract Directory PSK
    String keyspec = properties.getProperty(Configuration.DIRECTORY_PSK);

    if (null != keyspec) {
        byte[] key = this.keystore.decodeKey(keyspec);
        Preconditions.checkArgument(16 == key.length,
                "Key " + Configuration.DIRECTORY_PSK + " MUST be 128 bits long.");
        this.keystore.setKey(KeyStore.SIPHASH_DIRECTORY_PSK, key);
    }

    //
    // Make sure all required configuration is present
    //

    for (String required : REQUIRED_PROPERTIES) {
        Preconditions.checkNotNull(properties.getProperty(required), "Missing configuration parameter '%s'.",
                required);
    }

    this.znoderoot = properties.getProperty(Configuration.PLASMA_FRONTEND_ZNODE);

    this.maxZnodeSize = Integer.parseInt(properties.getProperty(Configuration.PLASMA_FRONTEND_MAXZNODESIZE));

    // Align maxZnodeSize on a 16-byte boundary
    this.maxZnodeSize = this.maxZnodeSize - (this.maxZnodeSize % 16);

    this.topic = properties.getProperty(Configuration.PLASMA_FRONTEND_KAFKA_TOPIC);

    this.subscribeDelay = Long.parseLong(properties.getProperty(Configuration.PLASMA_FRONTEND_SUBSCRIBE_DELAY));

    //
    // Extract keys
    //

    if (null != properties.getProperty(Configuration.PLASMA_FRONTEND_KAFKA_MAC)) {
        keystore.setKey(KeyStore.SIPHASH_KAFKA_PLASMA_FRONTEND_IN,
                keystore.decodeKey(properties.getProperty(Configuration.PLASMA_FRONTEND_KAFKA_MAC)));
    }

    if (null != properties.getProperty(Configuration.PLASMA_FRONTEND_KAFKA_AES)) {
        keystore.setKey(KeyStore.AES_KAFKA_PLASMA_FRONTEND_IN,
                keystore.decodeKey(properties.getProperty(Configuration.PLASMA_FRONTEND_KAFKA_AES)));
    }

    //
    // Start Curator Framework for subscriptions
    //

    subscriptionCuratorFramework = CuratorFrameworkFactory.builder().connectionTimeoutMs(5000)
            .retryPolicy(new RetryNTimes(10, 500))
            .connectString(properties.getProperty(Configuration.PLASMA_FRONTEND_ZKCONNECT)).build();
    subscriptionCuratorFramework.start();

    DirectoryClient directoryClient = new ThriftDirectoryClient(this.keystore, properties);

    setDirectoryClient(directoryClient);

    this.setSubscriptionListener(this);

    //
    // Create Kafka consumer pool
    //

    final PlasmaFrontEnd frontend = this;

    ConsumerFactory factory = new ConsumerFactory() {
        @Override
        public Runnable getConsumer(final KafkaSynchronizedConsumerPool pool,
                final KafkaStream<byte[], byte[]> stream) {
            return new Runnable() {
                @Override
                public void run() {
                    ConsumerIterator<byte[], byte[]> iter = stream.iterator();

                    byte[] sipHashKey = frontend.keystore.getKey(KeyStore.SIPHASH_KAFKA_PLASMA_FRONTEND_IN);
                    byte[] aesKey = frontend.keystore.getKey(KeyStore.AES_KAFKA_PLASMA_FRONTEND_IN);

                    // Iterate on the messages
                    TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());

                    KafkaOffsetCounters counters = pool.getCounters();

                    // TODO(hbs): allow setting of writeBufferSize

                    try {
                        while (iter.hasNext()) {
                            //
                            // Since the call to 'next' may block, we need to first
                            // check that there is a message available
                            //

                            boolean nonEmpty = iter.nonEmpty();

                            if (nonEmpty) {
                                MessageAndMetadata<byte[], byte[]> msg = iter.next();
                                counters.count(msg.partition(), msg.offset());

                                byte[] data = msg.message();

                                Sensision.update(
                                        SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_MESSAGES,
                                        Sensision.EMPTY_LABELS, 1);
                                Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_BYTES,
                                        Sensision.EMPTY_LABELS, data.length);

                                if (null != sipHashKey) {
                                    data = CryptoUtils.removeMAC(sipHashKey, data);
                                }

                                // Skip data whose MAC was not verified successfully
                                if (null == data) {
                                    Sensision.update(
                                            SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_INVALIDMACS,
                                            Sensision.EMPTY_LABELS, 1);
                                    continue;
                                }

                                // Unwrap data if need be
                                if (null != aesKey) {
                                    data = CryptoUtils.unwrap(aesKey, data);
                                }

                                // Skip data that was not unwrapped successfully
                                if (null == data) {
                                    Sensision.update(
                                            SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_INVALIDCIPHERS,
                                            Sensision.EMPTY_LABELS, 1);
                                    continue;
                                }

                                //
                                // Extract KafkaDataMessage
                                //

                                KafkaDataMessage tmsg = new KafkaDataMessage();
                                deserializer.deserialize(tmsg, data);

                                switch (tmsg.getType()) {
                                case STORE:
                                    GTSEncoder encoder = new GTSEncoder(0L, null, tmsg.getData());
                                    encoder.setClassId(tmsg.getClassId());
                                    encoder.setLabelsId(tmsg.getLabelsId());
                                    frontend.dispatch(encoder);
                                    break;
                                case DELETE:
                                case ARCHIVE:
                                    break;
                                default:
                                    throw new RuntimeException("Invalid message type.");
                                }
                            } else {
                                // Sleep for a short while
                                try {
                                    Thread.sleep(1L);
                                } catch (InterruptedException ie) {
                                }
                            }
                        }
                    } catch (Throwable t) {
                        t.printStackTrace(System.err);
                    } finally {
                        // Set abort to true in case we exit the 'run' method
                        pool.getAbort().set(true);
                    }

                }
            };
        }
    };

    KafkaSynchronizedConsumerPool pool = new KafkaSynchronizedConsumerPool(
            properties.getProperty(Configuration.PLASMA_FRONTEND_KAFKA_ZKCONNECT),
            properties.getProperty(Configuration.PLASMA_FRONTEND_KAFKA_TOPIC),
            properties.getProperty(Configuration.PLASMA_FRONTEND_KAFKA_CONSUMER_CLIENTID),
            properties.getProperty(Configuration.PLASMA_FRONTEND_KAFKA_GROUPID),
            properties.getProperty(Configuration.PLASMA_FRONTEND_KAFKA_CONSUMER_PARTITION_ASSIGNMENT_STRATEGY),
            Integer.parseInt(properties.getProperty(Configuration.PLASMA_FRONTEND_KAFKA_NTHREADS)),
            Long.parseLong(properties.getProperty(Configuration.PLASMA_FRONTEND_KAFKA_COMMITPERIOD)), factory);

    pool.setAbortHook(new Hook() {
        @Override
        public void call() {
            Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_ABORTS, Sensision.EMPTY_LABELS,
                    1);
        }
    });

    pool.setCommitOffsetHook(new Hook() {
        @Override
        public void call() {
            Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_COMMITS,
                    Sensision.EMPTY_LABELS, 1);
        }
    });
    pool.setSyncHook(new Hook() {
        @Override
        public void call() {
            Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_SYNCS, Sensision.EMPTY_LABELS,
                    1);
        }
    });

    //
    // Start Jetty server
    //

    Server server = new Server();

    ServerConnector connector = new ServerConnector(server,
            Integer.parseInt(properties.getProperty(Configuration.PLASMA_FRONTEND_ACCEPTORS)),
            Integer.parseInt(properties.getProperty(Configuration.PLASMA_FRONTEND_SELECTORS)));
    connector
            .setIdleTimeout(Long.parseLong(properties.getProperty(Configuration.PLASMA_FRONTEND_IDLE_TIMEOUT)));
    connector.setPort(Integer.parseInt(properties.getProperty(Configuration.PLASMA_FRONTEND_PORT)));
    connector.setHost(properties.getProperty(Configuration.PLASMA_FRONTEND_HOST));
    connector.setName("Continuum Plasma Front End");

    server.setConnectors(new Connector[] { connector });

    server.setHandler(this);

    JettyUtil.setSendServerVersion(server, false);

    try {
        server.start();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    Thread t = new Thread(this);
    t.setDaemon(true);
    t.setName("[Continuum Plasma Front End]");
    t.start();
}

From source file: io.warp10.continuum.store.Directory.java

License: Apache License

public Directory(KeyStore keystore, final Properties props) throws IOException {
    this.keystore = keystore;

    SIPHASH_CLASS_LONGS = SipHashInline.getKey(this.keystore.getKey(KeyStore.SIPHASH_CLASS));
    SIPHASH_LABELS_LONGS = SipHashInline.getKey(this.keystore.getKey(KeyStore.SIPHASH_LABELS));

    this.properties = (Properties) props.clone();

    //
    // Check mandatory parameters
    //

    for (String required : REQUIRED_PROPERTIES) {
        Preconditions.checkNotNull(properties.getProperty(required), "Missing configuration parameter '%s'.",
                required);
    }

    maxThriftFrameLength = Integer.parseInt(
            this.properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_FRAME_MAXLEN, "0"));

    maxFindResults = Integer.parseInt(
            this.properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_FIND_MAXRESULTS, "100000"));

    maxHardFindResults = Integer.parseInt(
            this.properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_FIND_MAXRESULTS_HARD));

    this.register = "true"
            .equals(this.properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_REGISTER));
    this.init = "true".equals(this.properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_INIT));
    this.store = "true".equals(this.properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_STORE));
    this.delete = "true"
            .equals(this.properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_DELETE));

    //
    // Extract parameters
    //

    if (null != props.getProperty(io.warp10.continuum.Configuration.DIRECTORY_METADATA_CACHE_SIZE)) {
        this.METADATA_CACHE_SIZE = Integer
                .valueOf(props.getProperty(io.warp10.continuum.Configuration.DIRECTORY_METADATA_CACHE_SIZE));
    }

    idleTimeout = Long.parseLong(
            this.properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_STREAMING_IDLE_TIMEOUT));

    if (properties.containsKey(io.warp10.continuum.Configuration.DIRECTORY_STATS_CLASS_MAXCARDINALITY)) {
        this.LIMIT_CLASS_CARDINALITY = Long.parseLong(
                properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_STATS_CLASS_MAXCARDINALITY));
    }

    if (properties.containsKey(io.warp10.continuum.Configuration.DIRECTORY_STATS_LABELS_MAXCARDINALITY)) {
        this.LIMIT_LABELS_CARDINALITY = Long.parseLong(properties
                .getProperty(io.warp10.continuum.Configuration.DIRECTORY_STATS_LABELS_MAXCARDINALITY));
    }

    this.initNThreads = Integer.parseInt(properties.getProperty(
            io.warp10.continuum.Configuration.DIRECTORY_INIT_NTHREADS, DIRECTORY_INIT_NTHREADS_DEFAULT));

    String partition = properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_PARTITION);
    String[] tokens = partition.split(":");
    this.modulus = Integer.parseInt(tokens[0]);
    this.remainder = Integer.parseInt(tokens[1]);

    this.maxage = Long.parseLong(properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_MAXAGE));

    final String topic = properties
            .getProperty(io.warp10.continuum.Configuration.DIRECTORY_KAFKA_METADATA_TOPIC);
    final int nthreads = Integer
            .valueOf(properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_KAFKA_NTHREADS));

    Configuration conf = new Configuration();
    conf.set("hbase.zookeeper.quorum",
            properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_HBASE_METADATA_ZKCONNECT));
    if (!"".equals(properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_HBASE_METADATA_ZNODE))) {
        conf.set("zookeeper.znode.parent",
                properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_HBASE_METADATA_ZNODE));
    }

    if (properties
            .containsKey(io.warp10.continuum.Configuration.DIRECTORY_HBASE_ZOOKEEPER_PROPERTY_CLIENTPORT)) {
        conf.set("hbase.zookeeper.property.clientPort", properties
                .getProperty(io.warp10.continuum.Configuration.DIRECTORY_HBASE_ZOOKEEPER_PROPERTY_CLIENTPORT));
    }

    //
    // Handle additional HBase configurations
    //

    if (properties.containsKey(io.warp10.continuum.Configuration.DIRECTORY_HBASE_CONFIG)) {
        String[] keys = properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_HBASE_CONFIG)
                .split(",");
        for (String key : keys) {
            if (!properties.containsKey("directory." + key.trim())) {
                throw new RuntimeException("Missing declared property 'directory." + key.trim() + "'.");
            }
            conf.set(key, properties.getProperty("directory." + key.trim()));
        }
    }

    this.conn = ConnectionFactory.createConnection(conf);

    this.hbaseTable = TableName
            .valueOf(properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_HBASE_METADATA_TABLE));
    this.colfam = properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_HBASE_METADATA_COLFAM)
            .getBytes(Charsets.UTF_8);

    this.serviceNThreads = Integer
            .valueOf(properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_SERVICE_NTHREADS));

    //
    // Extract keys
    //

    extractKeys(properties);

    SIPHASH_PSK_LONGS = SipHashInline.getKey(this.keystore.getKey(KeyStore.SIPHASH_DIRECTORY_PSK));

    //
    // Load Directory plugin
    //

    if (this.properties.containsKey(io.warp10.continuum.Configuration.DIRECTORY_PLUGIN_CLASS)) {
        try {
            // Create a new classloader with filtering so the caller cannot access the warp10 classes, except those needed
            ClassLoader filteringCL = new ClassLoader(this.getClass().getClassLoader()) {
                @Override
                protected Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
                    if (name.startsWith("io.warp10") && !name.startsWith("io.warp10.warp.sdk.")) {
                        throw new ClassNotFoundException();
                    } else {
                        return this.getParent().loadClass(name);
                    }
                }
            };

            Class pluginClass = Class.forName(
                    (String) properties.get(io.warp10.continuum.Configuration.DIRECTORY_PLUGIN_CLASS), true,
                    filteringCL);
            this.plugin = (DirectoryPlugin) pluginClass.newInstance();

            //
            // Now call the 'init' method of the plugin
            //

            this.plugin.init(new Properties(properties));
        } catch (Exception e) {
            throw new RuntimeException("Unable to instantiate plugin class", e);
        }
    } else {
        this.plugin = null;
    }

    //
    // Create Curator framework and service discovery
    //

    CuratorFramework curatorFramework = CuratorFrameworkFactory.builder().connectionTimeoutMs(1000)
            .retryPolicy(new RetryNTimes(10, 500))
            .connectString(properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_ZK_QUORUM))
            .build();
    curatorFramework.start();

    this.sd = ServiceDiscoveryBuilder.builder(Map.class)
            .basePath(properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_ZK_ZNODE))
            .client(curatorFramework).build();

    //
    // Launch a Thread which will populate the metadata cache
    // We don't do that synchronously in the constructor, otherwise it might take too long to return
    //

    final Directory self = this;

    if (this.init) {

        Thread[] initThreads = new Thread[this.initNThreads];
        final AtomicBoolean[] stopMarkers = new AtomicBoolean[this.initNThreads];

        final LinkedBlockingQueue<Result> resultQ = new LinkedBlockingQueue<Result>(initThreads.length * 8192);

        for (int i = 0; i < initThreads.length; i++) {
            stopMarkers[i] = new AtomicBoolean(false);
            final AtomicBoolean stopMe = stopMarkers[i];
            initThreads[i] = new Thread(new Runnable() {
                @Override
                public void run() {
                    AESWrapEngine engine = null;
                    if (null != self.keystore.getKey(KeyStore.AES_HBASE_METADATA)) {
                        engine = new AESWrapEngine();
                        CipherParameters params = new KeyParameter(
                                self.keystore.getKey(KeyStore.AES_HBASE_METADATA));
                        engine.init(false, params);
                    }

                    PKCS7Padding padding = new PKCS7Padding();

                    TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());

                    while (!stopMe.get()) {
                        try {

                            Result result = resultQ.poll(100, TimeUnit.MILLISECONDS);

                            if (null == result) {
                                continue;
                            }

                            byte[] value = result.getValue(self.colfam, Constants.EMPTY_COLQ);

                            if (null != engine) {
                                //
                                // Unwrap
                                //

                                byte[] unwrapped = engine.unwrap(value, 0, value.length);

                                //
                                // Unpad
                                //

                                int padcount = padding.padCount(unwrapped);
                                value = Arrays.copyOf(unwrapped, unwrapped.length - padcount);
                            }

                            //
                            // Deserialize
                            //

                            Metadata metadata = new Metadata();
                            deserializer.deserialize(metadata, value);

                            //
                            // Compute classId/labelsId and compare it to the values in the row key
                            //

                            long classId = GTSHelper.classId(self.SIPHASH_CLASS_LONGS, metadata.getName());
                            long labelsId = GTSHelper.labelsId(self.SIPHASH_LABELS_LONGS, metadata.getLabels());

                            //
                            // Recheck the labelsId so we don't retain GTS with an invalid labelsId
                            // in the row key (which may have happened due to bugs)
                            //

                            int rem = ((int) ((labelsId >>> 56) & 0xffL)) % self.modulus;

                            if (self.remainder != rem) {
                                continue;
                            }

                            ByteBuffer bb = ByteBuffer.wrap(result.getRow()).order(ByteOrder.BIG_ENDIAN);
                            bb.position(1);
                            long hbClassId = bb.getLong();
                            long hbLabelsId = bb.getLong();

                            // If classId/labelsId are incoherent, skip metadata
                            if (classId != hbClassId || labelsId != hbLabelsId) {
                                LOG.warn("Incoherent class/labels Id for " + metadata);
                                continue;
                            }

                            metadata.setClassId(classId);
                            metadata.setLabelsId(labelsId);

                            if (!metadata.isSetAttributes()) {
                                metadata.setAttributes(new HashMap<String, String>());
                            }

                            //
                            // Internalize Strings
                            //

                            GTSHelper.internalizeStrings(metadata);

                            //
                            // Let the DirectoryPlugin handle the Metadata
                            //

                            if (null != plugin) {

                                long nano = 0;

                                try {
                                    GTS gts = new GTS(new UUID(metadata.getClassId(), metadata.getLabelsId()),
                                            metadata.getName(), metadata.getLabels(), metadata.getAttributes());

                                    nano = System.nanoTime();

                                    if (!plugin.store(null, gts)) {
                                        throw new RuntimeException(
                                                "Error storing GTS " + gts + " using external plugin.");
                                    }
                                } finally {
                                    nano = System.nanoTime() - nano;
                                    Sensision.update(
                                            SensisionConstants.SENSISION_CLASS_CONTINUUM_DIRECTORY_PLUGIN_STORE_CALLS,
                                            Sensision.EMPTY_LABELS, 1);
                                    Sensision.update(
                                            SensisionConstants.SENSISION_CLASS_CONTINUUM_DIRECTORY_PLUGIN_STORE_TIME_NANOS,
                                            Sensision.EMPTY_LABELS, nano);
                                }
                                continue;
                            }

                            try {
                                metadatasLock.lockInterruptibly();
                                if (!metadatas.containsKey(metadata.getName())) {
                                    metadatas.put(metadata.getName(),
                                            new ConcurrentSkipListMap<Long, Metadata>(ID_COMPARATOR));
                                    classNames.put(classId, metadata.getName());
                                }
                            } finally {
                                if (metadatasLock.isHeldByCurrentThread()) {
                                    metadatasLock.unlock();
                                }
                            }

                            //
                            // Store per owner class name. We use the name since it has been internalized,
                            // therefore we only consume the HashNode and the HashSet overhead
                            //

                            String owner = metadata.getLabels().get(Constants.OWNER_LABEL);

                            synchronized (classesPerOwner) {
                                Set<String> classes = classesPerOwner.get(owner);

                                if (null == classes) {
                                    classes = new ConcurrentSkipListSet<String>();
                                    classesPerOwner.put(owner, classes);
                                }

                                classes.add(metadata.getName());
                            }

                            Sensision.set(SensisionConstants.SENSISION_CLASS_CONTINUUM_DIRECTORY_OWNERS,
                                    Sensision.EMPTY_LABELS, classesPerOwner.size());

                            synchronized (metadatas.get(metadata.getName())) {
                                if (!metadatas.get(metadata.getName()).containsKey(labelsId)) {
                                    metadatas.get(metadata.getName()).put(labelsId, metadata);
                                    continue;
                                } else if (!metadatas.get(metadata.getName()).get(labelsId).getLabels()
                                        .equals(metadata.getLabels())) {
                                    LOG.warn("LabelsId collision under class '" + metadata.getName() + "' "
                                            + metadata.getLabels() + " and "
                                            + metadatas.get(metadata.getName()).get(labelsId).getLabels());
                                    Sensision.update(SensisionConstants.CLASS_WARP_DIRECTORY_LABELS_COLLISIONS,
                                            Sensision.EMPTY_LABELS, 1);
                                }
                            }

                            continue;
                        } catch (InvalidCipherTextException icte) {
                            throw new RuntimeException(icte);
                        } catch (TException te) {
                            throw new RuntimeException(te);
                        } catch (InterruptedException ie) {
                        }
                    }
                }
            });

            initThreads[i].setDaemon(true);
            initThreads[i].setName("[Directory initializer #" + i + "]");
            initThreads[i].start();
        }

        Thread populator = new Thread(new Runnable() {

            @Override
            public void run() {

                long nano = System.nanoTime();

                Table htable = null;

                long count = 0L;

                boolean done = false;

                byte[] lastrow = HBASE_METADATA_KEY_PREFIX;

                while (!done) {
                    try {
                        //
                        // Populate the metadata cache with initial data from HBase
                        //

                        htable = self.conn.getTable(self.hbaseTable);

                        Scan scan = new Scan();
                        scan.setStartRow(lastrow);
                        // FIXME(hbs): we know the prefix is 'M', so we use 'N' as the stoprow
                        scan.setStopRow("N".getBytes(Charsets.UTF_8));
                        scan.addFamily(self.colfam);
                        scan.setCaching(10000);
                        scan.setBatch(10000);
                        scan.setMaxResultSize(1000000L);

                        ResultScanner scanner = htable.getScanner(scan);

                        do {
                            Result result = scanner.next();

                            if (null == result) {
                                done = true;
                                break;
                            }

                            //
                            // FIXME(hbs): this could be done in a filter on the RS side
                            //

                            int r = (((int) result.getRow()[HBASE_METADATA_KEY_PREFIX.length + 8]) & 0xff)
                                    % self.modulus;

                            //byte r = (byte) (result.getRow()[HBASE_METADATA_KEY_PREFIX.length + 8] % self.modulus);

                            // Skip metadata if its modulus is not the one we expect
                            if (self.remainder != r) {
                                continue;
                            }

                            //
                            // Store the current row so we can restart from there if an exception occurs
                            //

                            lastrow = result.getRow();

                            boolean interrupted = true;

                            while (interrupted) {
                                interrupted = false;
                                try {
                                    resultQ.put(result);
                                    count++;
                                    if (0 == count % 1000) {
                                        Sensision.set(
                                                SensisionConstants.SENSISION_CLASS_CONTINUUM_DIRECTORY_GTS,
                                                Sensision.EMPTY_LABELS, count);
                                    }
                                } catch (InterruptedException ie) {
                                    interrupted = true;
                                }
                            }

                        } while (true);

                    } catch (Exception e) {
                        LOG.error(
                                "Caught exception in scanning loop, will attempt to continue where we stopped",
                                e);
                    } finally {
                        if (null != htable) {
                            try {
                                htable.close();
                            } catch (Exception e) {
                            }
                        }
                        Sensision.set(SensisionConstants.SENSISION_CLASS_CONTINUUM_DIRECTORY_GTS,
                                Sensision.EMPTY_LABELS, count);
                    }
                }

                //
                // Wait until resultQ is empty
                //

                while (!resultQ.isEmpty()) {
                    LockSupport.parkNanos(100000000L);
                }

                //
                // Notify the init threads to stop
                //

                for (int i = 0; i < initNThreads; i++) {
                    stopMarkers[i].set(true);
                }

                self.cachePopulated.set(true);

                nano = System.nanoTime() - nano;

                LOG.info("Loaded " + count + " GTS in " + (nano / 1000000.0D) + " ms");
            }
        });

        populator.setName("Warp Directory Populator");
        populator.setDaemon(true);
        populator.start();
    } else {
        LOG.info("Skipped initialization");
        this.cachePopulated.set(true);
    }

    this.commitPeriod = Long.valueOf(
            properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_KAFKA_METADATA_COMMITPERIOD));

    this.maxPendingPutsSize = Long.parseLong(properties
            .getProperty(io.warp10.continuum.Configuration.DIRECTORY_HBASE_METADATA_MAXPENDINGPUTSSIZE));

    this.host = properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_HOST);
    this.port = Integer.parseInt(properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_PORT));
    this.streamingport = Integer
            .parseInt(properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_STREAMING_PORT));
    this.streamingacceptors = Integer
            .parseInt(properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_STREAMING_ACCEPTORS));
    this.streamingselectors = Integer
            .parseInt(properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_STREAMING_SELECTORS));

    int streamingMaxThreads = Integer
            .parseInt(props.getProperty(io.warp10.continuum.Configuration.DIRECTORY_STREAMING_THREADPOOL));

    final String groupid = properties
            .getProperty(io.warp10.continuum.Configuration.DIRECTORY_KAFKA_METADATA_GROUPID);

    final KafkaOffsetCounters counters = new KafkaOffsetCounters(topic, groupid, this.commitPeriod * 2);

    Thread t = new Thread(new Runnable() {
        @Override
        public void run() {

            //
            // Wait until directory is fully initialized
            //

            while (!self.fullyInitialized.get()) {
                LockSupport.parkNanos(1000000000L);
            }

            Sensision.set(SensisionConstants.SENSISION_CLASS_CONTINUUM_DIRECTORY_CLASSES,
                    Sensision.EMPTY_LABELS, classNames.size());

            //
            // Enter an endless loop which will spawn 'nthreads' threads
            // each time the Kafka consumer is shut down (which will happen if an error
            // happens while talking to HBase, to get a chance to re-read data from the
            // previous snapshot).
            //

            while (true) {
                try {
                    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();

                    topicCountMap.put(topic, nthreads);

                    Properties props = new Properties();
                    props.setProperty("zookeeper.connect", properties
                            .getProperty(io.warp10.continuum.Configuration.DIRECTORY_KAFKA_METADATA_ZKCONNECT));
                    props.setProperty("group.id", groupid);
                    if (null != properties.getProperty(
                            io.warp10.continuum.Configuration.DIRECTORY_KAFKA_METADATA_CONSUMER_CLIENTID)) {
                        props.setProperty("client.id", properties.getProperty(
                                io.warp10.continuum.Configuration.DIRECTORY_KAFKA_METADATA_CONSUMER_CLIENTID));
                    }
                    if (null != properties.getProperty(
                            io.warp10.continuum.Configuration.DIRECTORY_KAFKA_METADATA_CONSUMER_PARTITION_ASSIGNMENT_STRATEGY)) {
                        props.setProperty("partition.assignment.strategy", properties.getProperty(
                                io.warp10.continuum.Configuration.DIRECTORY_KAFKA_METADATA_CONSUMER_PARTITION_ASSIGNMENT_STRATEGY));
                    }
                    props.setProperty("auto.commit.enable", "false");

                    if (null != properties.getProperty(
                            io.warp10.continuum.Configuration.DIRECTORY_KAFKA_METADATA_CONSUMER_AUTO_OFFSET_RESET)) {
                        props.setProperty("auto.offset.reset", properties.getProperty(
                                io.warp10.continuum.Configuration.DIRECTORY_KAFKA_METADATA_CONSUMER_AUTO_OFFSET_RESET));
                    }

                    ConsumerConfig config = new ConsumerConfig(props);
                    ConsumerConnector connector = Consumer.createJavaConsumerConnector(config);

                    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = connector
                            .createMessageStreams(topicCountMap);

                    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);

                    self.barrier = new CyclicBarrier(streams.size() + 1);

                    ExecutorService executor = Executors.newFixedThreadPool(nthreads);

                    //
                    // now create runnables which will consume messages
                    //

                    // Reset counters
                    counters.reset();

                    for (final KafkaStream<byte[], byte[]> stream : streams) {
                        executor.submit(new DirectoryConsumer(self, stream, counters));
                    }

                    while (!abort.get() && !Thread.currentThread().isInterrupted()) {
                        try {
                            if (streams.size() == barrier.getNumberWaiting()) {
                                //
                                // Check if we should abort, which could happen when
                                // an exception was thrown when flushing the commits just before
                                // entering the barrier
                                //

                                if (abort.get()) {
                                    break;
                                }

                                //
                                // All processing threads are waiting on the barrier, this means we can flush the offsets because
                                // they have all processed data successfully for the given activity period
                                //

                                // Commit offsets
                                connector.commitOffsets(true);
                                counters.commit();

                                counters.sensisionPublish();

                                Sensision.update(
                                        SensisionConstants.SENSISION_CLASS_CONTINUUM_DIRECTORY_KAFKA_COMMITS,
                                        Sensision.EMPTY_LABELS, 1);

                                // Release the waiting threads
                                try {
                                    barrier.await();
                                } catch (Exception e) {
                                    break;
                                }
                            }
                        } catch (Throwable t) {
                            // We need to catch possible errors in commitOffsets
                            LOG.error("", t);
                            abort.set(true);
                        }

                        LockSupport.parkNanos(1000000L);
                    }

                    //
                    // We exited the loop, this means one of the threads triggered an abort,
                    // we will shut down the executor and shut down the connector to start over.
                    //

                    executor.shutdownNow();
                    connector.shutdown();
                    abort.set(false);
                } catch (Throwable t) {
                    LOG.error("", t);
                } finally {
                    LockSupport.parkNanos(1000000000L);
                }
            }
        }
    });

    t.setName("Warp Directory Spawner");
    t.setDaemon(true);
    t.start();

    t = new Thread(this);
    t.setName("Warp Directory");
    t.setDaemon(true);
    t.start();

    //
    // Start Jetty server for the streaming service
    //

    BlockingArrayQueue<Runnable> queue = null;

    if (props.containsKey(io.warp10.continuum.Configuration.DIRECTORY_STREAMING_MAXQUEUESIZE)) {
        int queuesize = Integer.parseInt(
                props.getProperty(io.warp10.continuum.Configuration.DIRECTORY_STREAMING_MAXQUEUESIZE));
        queue = new BlockingArrayQueue<Runnable>(queuesize);
    }

    Server server = new Server(new QueuedThreadPool(streamingMaxThreads, 8, (int) idleTimeout, queue));

    //ServerConnector connector = new ServerConnector(server, this.streamingacceptors, this.streamingselectors);
    HttpConfiguration config = new HttpConfiguration();
    config.setRequestHeaderSize(DIRECTORY_REQUEST_HEADER_SIZE);
    HttpConnectionFactory factory = new HttpConnectionFactory(config);
    ServerConnector connector = new ServerConnector(server, null, null, null, this.streamingacceptors,
            this.streamingselectors, factory);

    connector.setIdleTimeout(idleTimeout);
    connector.setPort(this.streamingport);
    connector.setHost(host);
    connector.setName("Directory Streaming Service");

    server.setConnectors(new Connector[] { connector });

    server.setHandler(this);

    JettyUtil.setSendServerVersion(server, false);

    //
    // Wait for initialization to be done
    //

    while (!this.fullyInitialized.get()) {
        LockSupport.parkNanos(1000000000L);
    }

    try {
        server.start();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}

From source file: io.warp10.continuum.store.Directory.java

License: Apache License

void handleStats(String target, Request baseRequest, HttpServletRequest request, HttpServletResponse response)
        throws IOException, ServletException {
    if (!Constants.API_ENDPOINT_DIRECTORY_STATS_INTERNAL.equals(target)) {
        return;
    }

    long nano = System.nanoTime();

    baseRequest.setHandled(true);

    //
    // Read DirectoryStatsRequests from the HTTP request body
    //

    BufferedReader br = new BufferedReader(request.getReader());

    while (true) {
        String line = br.readLine();

        if (null == line) {
            break;
        }

        byte[] raw = OrderPreservingBase64.decode(line.getBytes(Charsets.US_ASCII));

        // Extract DirectoryStatsRequest
        TDeserializer deser = new TDeserializer(new TCompactProtocol.Factory());
        DirectoryStatsRequest req = new DirectoryStatsRequest();

        try {
            deser.deserialize(req, raw);
            DirectoryStatsResponse resp = stats(req);

            response.setContentType("text/plain");
            OutputStream out = response.getOutputStream();

            TSerializer ser = new TSerializer(new TCompactProtocol.Factory());
            byte[] data = ser.serialize(resp);

            OrderPreservingBase64.encodeToStream(data, out);

            out.write('\r');
            out.write('\n');
        } catch (TException te) {
            throw new IOException(te);
        }
    }
}
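
The handler above also shows the companion encoding path: Thrift structs travel over a text transport by pairing TSerializer with OrderPreservingBase64. A minimal round-trip sketch, reusing the DirectoryStatsRequest and OrderPreservingBase64 classes from the example (their import paths are assumed):

import org.apache.thrift.TDeserializer;
import org.apache.thrift.TException;
import org.apache.thrift.TSerializer;
import org.apache.thrift.protocol.TCompactProtocol;

public class Base64RoundTripSketch {
    public static DirectoryStatsRequest roundTrip(DirectoryStatsRequest req) throws TException {
        // Serialize, then encode to the order-preserving base64 text form
        TSerializer ser = new TSerializer(new TCompactProtocol.Factory());
        byte[] encoded = OrderPreservingBase64.encode(ser.serialize(req));

        // Decode and deserialize back into a fresh instance
        TDeserializer deser = new TDeserializer(new TCompactProtocol.Factory());
        DirectoryStatsRequest decoded = new DirectoryStatsRequest();
        deser.deserialize(decoded, OrderPreservingBase64.decode(encoded));
        return decoded;
    }
}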

From source file: io.warp10.script.functions.ENCODERTO.java

License: Apache License

@Override
public Object apply(WarpScriptStack stack) throws WarpScriptException {
    Object top = stack.pop();

    if (!(top instanceof String) && !(top instanceof byte[]) && !(top instanceof GTSEncoder)) {
        throw new WarpScriptException(getName() + " operates on a string, byte array or encoder.");
    }

    List<Object> elements = new ArrayList<Object>();

    GTSDecoder decoder;

    if (top instanceof GTSEncoder) {
        decoder = ((GTSEncoder) top).getDecoder(true);
    } else {
        try {
            byte[] bytes = top instanceof String
                    ? OrderPreservingBase64.decode(top.toString().getBytes(Charsets.US_ASCII))
                    : (byte[]) top;

            TDeserializer deser = new TDeserializer(new TCompactProtocol.Factory());

            GTSWrapper wrapper = new GTSWrapper();

            deser.deserialize(wrapper, bytes);

            decoder = GTSWrapperHelper.fromGTSWrapperToGTSDecoder(wrapper);

        } catch (TException te) {
            throw new WarpScriptException(getName() + " failed to unwrap encoder.", te);
        }
    }

    while (decoder.next()) {
        List<Object> element = new ArrayList<Object>(5);
        element.add(decoder.getTimestamp());
        long location = decoder.getLocation();
        if (GeoTimeSerie.NO_LOCATION == location) {
            element.add(Double.NaN);
            element.add(Double.NaN);
        } else {
            double[] latlon = GeoXPLib.fromGeoXPPoint(location);
            element.add(latlon[0]);
            element.add(latlon[1]);
        }
        long elevation = decoder.getElevation();
        if (GeoTimeSerie.NO_ELEVATION == elevation) {
            element.add(Double.NaN);
        } else {
            element.add(elevation);
        }
        element.add(decoder.getValue());
        elements.add(element);
    }

    stack.push(decoder.getName());
    stack.push(decoder.getLabels());
    stack.push(decoder.getMetadata().getAttributes());
    stack.push(elements);

    return stack;
}

From source file: io.warp10.script.functions.FETCH.java

License: Apache License

private Map<String, Object> paramsFromMap(WarpScriptStack stack, Map<String, Object> map)
        throws WarpScriptException {
    Map<String, Object> params = new HashMap<String, Object>();

    //
    // Handle the case where a MetaSet was passed as this will
    // modify some other parameters
    //

    MetaSet metaset = null;

    if (map.containsKey(PARAM_METASET)) {

        if (null == AES_METASET) {
            throw new WarpScriptException(getName() + " MetaSet support not available.");
        }

        Object ms = map.get(PARAM_METASET);

        if (!(ms instanceof byte[])) {
            // Decode
            byte[] decoded = OrderPreservingBase64.decode(ms.toString().getBytes(Charsets.US_ASCII));

            // Decrypt
            byte[] decrypted = CryptoUtils.unwrap(AES_METASET, decoded);

            // Decompress

            try {
                ByteArrayOutputStream out = new ByteArrayOutputStream(decrypted.length);
                InputStream in = new GZIPInputStream(new ByteArrayInputStream(decrypted));

                byte[] buf = new byte[1024];

                while (true) {
                    int len = in.read(buf);
                    if (len < 0) {
                        break;
                    }
                    out.write(buf, 0, len);
                }

                in.close();
                out.close();

                ms = out.toByteArray();
            } catch (IOException e) {
                throw new WarpScriptException(getName() + " encountered an invalid MetaSet.");
            }
        }

        metaset = new MetaSet();
        TDeserializer deser = new TDeserializer(new TCompactProtocol.Factory());

        try {
            deser.deserialize(metaset, (byte[]) ms);
        } catch (TException te) {
            throw new WarpScriptException(getName() + " was unable to decode the provided MetaSet.");
        }

        //
        // Check if MetaSet has expired
        //

        if (metaset.getExpiry() < System.currentTimeMillis()) {
            throw new WarpScriptException(getName() + " MetaSet has expired.");
        }

        // Attempt to extract token, this will raise an exception if token has expired or was revoked
        ReadToken rtoken = Tokens.extractReadToken(metaset.getToken());

        params.put(PARAM_METASET, metaset);
        params.put(PARAM_TOKEN, metaset.getToken());
    }

    if (!params.containsKey(PARAM_TOKEN)) {
        if (!map.containsKey(PARAM_TOKEN)) {
            throw new WarpScriptException(getName() + " Missing '" + PARAM_TOKEN + "' parameter");
        }

        params.put(PARAM_TOKEN, map.get(PARAM_TOKEN));
    }

    if (map.containsKey(PARAM_SELECTORS)) {
        Object sels = map.get(PARAM_SELECTORS);
        if (!(sels instanceof List)) {
            throw new WarpScriptException(getName() + " Invalid parameter '" + PARAM_SELECTORS + "'");
        }
        List<Pair<Object, Object>> selectors = new ArrayList<Pair<Object, Object>>();

        for (Object sel : (List) sels) {
            Object[] clslbls = PARSESELECTOR.parse(sel.toString());
            selectors.add(Pair.of(clslbls[0], clslbls[1]));
        }
        params.put(PARAM_SELECTOR_PAIRS, selectors);
    } else if (map.containsKey(PARAM_SELECTOR)) {
        Object[] clslbls = PARSESELECTOR.parse(map.get(PARAM_SELECTOR).toString());
        params.put(PARAM_CLASS, clslbls[0]);
        params.put(PARAM_LABELS, clslbls[1]);
    } else if (map.containsKey(PARAM_CLASS) && map.containsKey(PARAM_LABELS)) {
        params.put(PARAM_CLASS, map.get(PARAM_CLASS));
        params.put(PARAM_LABELS, new HashMap<String, String>((Map<String, String>) map.get(PARAM_LABELS)));
    } else if (!params.containsKey(PARAM_METASET)) {
        throw new WarpScriptException(getName() + " Missing '" + PARAM_SELECTOR + "', '" + PARAM_SELECTORS
                + "' or '" + PARAM_CLASS + "' and '" + PARAM_LABELS + "' parameters.");
    }

    if (!map.containsKey(PARAM_END)) {
        throw new WarpScriptException(getName() + " Missing '" + PARAM_END + "' parameter.");
    }

    if (map.get(PARAM_END) instanceof Long) {
        params.put(PARAM_END, map.get(PARAM_END));
    } else if (map.get(PARAM_END) instanceof String) {
        params.put(PARAM_END,
                fmt.parseDateTime(map.get(PARAM_END).toString()).getMillis() * Constants.TIME_UNITS_PER_MS);
    } else {
        throw new WarpScriptException(getName() + " Invalid format for parameter '" + PARAM_END + "'.");
    }

    if (map.containsKey(PARAM_TIMESPAN)) {
        params.put(PARAM_TIMESPAN, (long) map.get(PARAM_TIMESPAN));
    } else if (map.containsKey(PARAM_COUNT)) {
        params.put(PARAM_COUNT, (long) map.get(PARAM_COUNT));
    } else if (map.containsKey(PARAM_START)) {
        long end = (long) params.get(PARAM_END);
        long start;

        if (map.get(PARAM_START) instanceof Long) {
            start = (long) map.get(PARAM_START);
        } else {
            start = fmt.parseDateTime(map.get(PARAM_START).toString()).getMillis() * Constants.TIME_UNITS_PER_MS;
        }

        long timespan;

        if (start < end) {
            timespan = end - start;
        } else {
            timespan = start - end;
            end = start;
        }

        params.put(PARAM_END, end);
        params.put(PARAM_TIMESPAN, timespan);
    } else {
        throw new WarpScriptException(getName() + " Missing parameter '" + PARAM_TIMESPAN + "' or '"
                + PARAM_COUNT + "' or '" + PARAM_START + "'");
    }

    //
    // Check end/timespan against MetaSet, adjust limits accordingly
    //

    if (null != metaset) {

        long end = (long) params.get(PARAM_END);
        long timespan = params.containsKey(PARAM_TIMESPAN) ? (long) params.get(PARAM_TIMESPAN) : -1;
        long count = params.containsKey(PARAM_COUNT) ? (long) params.get(PARAM_COUNT) : -1;

        if (metaset.isSetMaxduration()) {
            // Force 'end' to 'now'
            params.put(PARAM_END, TimeSource.getTime());
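
            // Note the sign convention enforced below: a strictly positive
            // 'maxduration' caps duration (timespan) based requests, while a
            // strictly negative one caps count based requests at -maxduration.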

            if (-1 != count && metaset.getMaxduration() >= 0) {
                throw new WarpScriptException(getName() + " MetaSet forbids count based requests.");
            }

            if (-1 != timespan && metaset.getMaxduration() <= 0) {
                throw new WarpScriptException(getName() + " MetaSet forbids duration based requests.");
            }

            if (-1 != count && count > -metaset.getMaxduration()) {
                count = -metaset.getMaxduration();
                params.put(PARAM_COUNT, count);
            }

            if (-1 != timespan && timespan > metaset.getMaxduration()) {
                timespan = metaset.getMaxduration();
                params.put(PARAM_TIMESPAN, timespan);
            }
        }

        if (metaset.isSetNotbefore()) {
            // forbid count based requests
            if (-1 != count) {
                throw new WarpScriptException(getName() + " MetaSet forbids count based requests.");
            }

            if (end < metaset.getNotbefore()) {
                throw new WarpScriptException(
                        getName() + " MetaSet forbids time ranges before " + metaset.getNotbefore());
            }

            // Adjust timespan so 'maxduration' is respected
            if (timespan > metaset.getMaxduration()) {
                timespan = metaset.getMaxduration();
                params.put(PARAM_TIMESPAN, timespan);
            }
        }

        if (metaset.isSetNotafter() && end >= metaset.getNotafter()) {
            end = metaset.getNotafter();
            params.put(PARAM_END, end);
        }
    }

    if (map.containsKey(PARAM_GEO)) {
        if (!(map.get(PARAM_GEO) instanceof GeoXPShape)) {
            throw new WarpScriptException(getName() + " Invalid '" + PARAM_GEO + "' type.");
        }

        if (!map.containsKey(PARAM_GEODIR)) {
            throw new WarpScriptException(getName() + " Missing '" + PARAM_GEODIR + "' parameter.");
        }

        if (!stack.getGeoDirectoryClient().knowsDirectory(map.get(PARAM_GEODIR).toString())) {
            throw new WarpScriptException(getName() + " Unknown directory '" + map.get(PARAM_GEODIR)
                    + "' for parameter '" + PARAM_GEODIR + "'.");
        }

        params.put(PARAM_GEODIR, map.get(PARAM_GEODIR));
        params.put(PARAM_GEO, map.get(PARAM_GEO));

        if (map.containsKey(PARAM_GEOOP)) {
            if (PARAM_GEOOP_IN.equals(map.get(PARAM_GEOOP))) {
                params.put(PARAM_GEOOP, PARAM_GEOOP_IN);
            } else if (PARAM_GEOOP_OUT.equals(map.get(PARAM_GEOOP))) {
                params.put(PARAM_GEOOP, PARAM_GEOOP_OUT);
            } else {
                throw new WarpScriptException(getName() + " Invalid value for parameter '" + PARAM_GEOOP + "'");
            }
        } else {
            params.put(PARAM_GEOOP, PARAM_GEOOP_IN);
        }
    }

    if (map.containsKey(PARAM_TYPE)) {
        String type = map.get(PARAM_TYPE).toString();

        if (TYPE.LONG.name().equalsIgnoreCase(type)) {
            params.put(PARAM_TYPE, TYPE.LONG);
        } else if (TYPE.DOUBLE.name().equalsIgnoreCase(type)) {
            params.put(PARAM_TYPE, TYPE.DOUBLE);
        } else if (TYPE.STRING.name().equalsIgnoreCase(type)) {
            params.put(PARAM_TYPE, TYPE.STRING);
        } else if (TYPE.BOOLEAN.name().equalsIgnoreCase(type)) {
            params.put(PARAM_TYPE, TYPE.BOOLEAN);
        } else {
            throw new WarpScriptException(getName() + " Invalid value for parameter '" + PARAM_TYPE + "'.");
        }
    }

    if (map.containsKey(PARAM_TYPEATTR)) {
        if (map.containsKey(PARAM_TYPE)) {
            throw new WarpScriptException(
                    getName() + " Incompatible parameters '" + PARAM_TYPE + "' and '" + PARAM_TYPEATTR + "'.");
        }

        params.put(PARAM_TYPEATTR, map.get(PARAM_TYPEATTR).toString());
    }

    if (map.containsKey(PARAM_EXTRA)) {
        // Check that we are not using a MetaSet
        if (params.containsKey(PARAM_METASET)) {
            throw new WarpScriptException(
                    getName() + " Cannot specify '" + PARAM_EXTRA + "' when '" + PARAM_METASET + "' is used.");
        }

        if (!(map.get(PARAM_EXTRA) instanceof List)) {
            throw new WarpScriptException(getName() + " Invalid type for parameter '" + PARAM_EXTRA + "'.");
        }

        Set<String> extra = new HashSet<String>();

        for (Object o : (List) map.get(PARAM_EXTRA)) {
            if (!(o instanceof String)) {
                throw new WarpScriptException(getName() + " Invalid type for parameter '" + PARAM_EXTRA + "'.");
            }
            extra.add(o.toString());
        }

        params.put(PARAM_EXTRA, extra);
    }

    if (map.containsKey(PARAM_WRITE_TIMESTAMP)) {
        params.put(PARAM_WRITE_TIMESTAMP, Boolean.TRUE.equals(map.get(PARAM_WRITE_TIMESTAMP)));
    }

    return params;
}
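
For reference, the start/end handling above can be reduced to a small standalone helper. The sketch below is illustrative only (the class and method names are not part of the Warp 10 source); it normalizes two instants into the (end, timespan) pair the method stores in the params map:

    public class TimeRangeNormalizer {

        /** Normalize two instants into an (end, timespan) pair, mirroring the logic above. */
        public static long[] normalize(long start, long end) {
            if (start < end) {
                return new long[] { end, end - start };
            } else {
                return new long[] { start, start - end };
            }
        }

        public static void main(String[] args) {
            // Bounds are swapped when 'start' is after 'end': prints end=2000 timespan=1000
            long[] range = normalize(2000L, 1000L);
            System.out.println("end=" + range[0] + " timespan=" + range[1]);
        }
    }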

From source file:io.warp10.script.functions.GEOUNPACK.java

License:Apache License

@Override
public Object apply(WarpScriptStack stack) throws WarpScriptException {

    Object o = stack.pop();

    if (!(o instanceof String)) {
        throw new WarpScriptException(getName() + " expects a packed shape on top of the stack.");
    }

    byte[] serialized = OrderPreservingBase64.decode(o.toString().getBytes(Charsets.US_ASCII));

    TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());

    GTSWrapper wrapper = new GTSWrapper();

    try {
        deserializer.deserialize(wrapper, serialized);
    } catch (TException te) {
        throw new WarpScriptException(te);
    }

    GTSDecoder decoder = GTSWrapperHelper.fromGTSWrapperToGTSDecoder(wrapper);

    long[] cells = new long[(int) wrapper.getCount()];

    int idx = 0;
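
    // Each cell of the packed shape was encoded as a timestamp whose value must
    // be the boolean 'true'; any other value means the input is not a valid shape.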

    while (idx < cells.length && decoder.next()) {
        long cell = decoder.getTimestamp();
        Object value = decoder.getValue();

        if (!Boolean.TRUE.equals(value)) {
            throw new WarpScriptException(getName() + " invalid GeoXPShape.");
        }

        cells[idx++] = cell;
    }

    if (idx != cells.length) {
        throw new WarpScriptException(getName() + " invalid GeoXPShape.");
    }

    GeoXPShape shape = GeoXPLib.fromCells(cells, false);

    stack.push(shape);

    return stack;
}
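
The deserialization above is the mirror of a TSerializer based packing step. Below is a minimal round-trip sketch using the same TCompactProtocol; it is illustrative only, and the GTSWrapper package path is assumed from Warp 10's generated Thrift classes:

    import org.apache.thrift.TDeserializer;
    import org.apache.thrift.TException;
    import org.apache.thrift.TSerializer;
    import org.apache.thrift.protocol.TCompactProtocol;

    import io.warp10.continuum.store.thrift.data.GTSWrapper;

    public class WrapperRoundTrip {
        public static void main(String[] args) throws TException {
            GTSWrapper original = new GTSWrapper();
            original.setCount(42L);

            // Pack the wrapper with the compact protocol...
            TSerializer serializer = new TSerializer(new TCompactProtocol.Factory());
            byte[] serialized = serializer.serialize(original);

            // ...and unpack it into a fresh instance, as GEOUNPACK does above.
            GTSWrapper copy = new GTSWrapper();
            TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());
            deserializer.deserialize(copy, serialized);

            System.out.println(copy.getCount()); // prints 42
        }
    }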

From source file:io.warp10.script.functions.TOGTS.java

License:Apache License

@Override
public Object apply(WarpScriptStack stack) throws WarpScriptException {
    Object top = stack.pop();

    if (!(top instanceof String) && !(top instanceof byte[]) && !(top instanceof GTSEncoder)) {
        throw new WarpScriptException(getName() + " operates on a string, byte array or encoder.");
    }

    Map<String, GeoTimeSerie> series = new HashMap<String, GeoTimeSerie>();

    GTSDecoder decoder;

    if (top instanceof GTSEncoder) {
        decoder = ((GTSEncoder) top).getUnsafeDecoder(false);
    } else {
        try {
            byte[] bytes = top instanceof String
                    ? OrderPreservingBase64.decode(top.toString().getBytes(Charsets.US_ASCII))
                    : (byte[]) top;

            TDeserializer deser = new TDeserializer(new TCompactProtocol.Factory());

            GTSWrapper wrapper = new GTSWrapper();

            deser.deserialize(wrapper, bytes);

            decoder = GTSWrapperHelper.fromGTSWrapperToGTSDecoder(wrapper);
        } catch (TException te) {
            throw new WarpScriptException(getName() + " failed to unwrap encoder.", te);
        }
    }

    GeoTimeSerie gts;

    while (decoder.next()) {
        Object value = decoder.getValue();

        String type = "DOUBLE";

        if (value instanceof String) {
            type = "STRING";
        } else if (value instanceof Boolean) {
            type = "BOOLEAN";
        } else if (value instanceof Long) {
            type = "LONG";
        }

        gts = series.get(type);
        if (null == gts) {
            gts = new GeoTimeSerie();
            gts.setMetadata(decoder.getMetadata());
            series.put(type, gts);
        }

        GTSHelper.setValue(gts, decoder.getTimestamp(), decoder.getLocation(), decoder.getElevation(), value,
                false);
    }

    stack.push(series);

    return stack;
}
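
To see the per-type bucketing in action, the sketch below builds an encoder holding one LONG, one DOUBLE and one STRING value, the kind of input TOGTS splits into one GTS per type. It is illustrative only and assumes the io.warp10.continuum.gts package for GTSEncoder, GTSDecoder and GeoTimeSerie:

    import java.io.IOException;

    import io.warp10.continuum.gts.GTSDecoder;
    import io.warp10.continuum.gts.GTSEncoder;
    import io.warp10.continuum.gts.GeoTimeSerie;

    public class MixedTypeEncoderSketch {
        public static void main(String[] args) throws IOException {
            GTSEncoder encoder = new GTSEncoder(0L);
            encoder.addValue(1000L, GeoTimeSerie.NO_LOCATION, GeoTimeSerie.NO_ELEVATION, 42L);     // LONG
            encoder.addValue(2000L, GeoTimeSerie.NO_LOCATION, GeoTimeSerie.NO_ELEVATION, 3.14D);   // DOUBLE
            encoder.addValue(3000L, GeoTimeSerie.NO_LOCATION, GeoTimeSerie.NO_ELEVATION, "hello"); // STRING

            // Applying TOGTS to this encoder should yield a map with the keys
            // "LONG", "DOUBLE" and "STRING", each holding a single datapoint GTS.
            GTSDecoder decoder = encoder.getUnsafeDecoder(false);
            while (decoder.next()) {
                System.out.println(decoder.getTimestamp() + " -> " + decoder.getValue());
            }
        }
    }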

From source file:io.warp10.script.functions.UNSECURE.java

License:Apache License

@Override
public Object apply(WarpScriptStack stack) throws WarpScriptException {

    Object o = stack.pop();

    if (this.checkkey && null == stack.getAttribute(WarpScriptStack.ATTRIBUTE_SECURE_KEY)) {
        throw new WarpScriptException("You need to set the secure key first.");
    }

    if (!(o instanceof String)) {
        throw new WarpScriptException(getName() + " operates on a string.");
    }

    // Retrieve raw bytes
    byte[] raw = OrderPreservingBase64.decode(o.toString().getBytes(Charsets.US_ASCII));

    // Unwrap
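
    // Lazily resolve the AES key used to wrap secure scripts; the lookup is
    // synchronized so the keystore is only consulted once.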

    synchronized (SECURE.class) {
        if (null == aesKey) {
            try {
                aesKey = WarpDist.getKeyStore().getKey(KeyStore.AES_SECURESCRIPTS);
            } catch (Throwable t) {
                // Catch NoClassDefFoundError
            }
        }
    }

    if (null == aesKey) {
        throw new WarpScriptException("Missing secure script encryption key.");
    }

    byte[] unwrapped = CryptoUtils.unwrap(aesKey, raw);

    // Deserialize
    TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());

    SecureScript sscript = new SecureScript();

    try {
        deserializer.deserialize(sscript, unwrapped);
    } catch (TException te) {
        throw new WarpScriptException("Unable to unsecure script.", te);
    }

    if (this.checkkey) {
        if (!stack.getAttribute(WarpScriptStack.ATTRIBUTE_SECURE_KEY).toString().equals(sscript.getKey())) {
            throw new WarpScriptException("Invalid secure key.");
        }
    }

    // Decompress script content if needed

    if (sscript.isCompressed()) {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        ByteArrayInputStream bais = new ByteArrayInputStream(sscript.getScript());

        try {
            GZIPInputStream gzipin = new GZIPInputStream(bais);

            byte[] buf = new byte[128];

            while (true) {
                int len = gzipin.read(buf);
                if (len < 0) {
                    break;
                }
                baos.write(buf, 0, len);
            }

            sscript.setCompressed(false);
            sscript.setScript(baos.toByteArray());
        } catch (IOException ioe) {
            throw new WarpScriptException("Unable to unsecure script.", ioe);
        }
    }

    // Convert bytes to String
    String script = new String(sscript.getScript(), Charsets.UTF_8);

    stack.push(script);

    return stack;
}
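
The decompression step above uses only the JDK and can be isolated into a small helper. The sketch below is illustrative and not part of the Warp 10 source:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.util.zip.GZIPInputStream;

    public class GzipUtil {

        /** Inflate a GZIP compressed byte array, as UNSECURE does for compressed scripts. */
        public static byte[] gunzip(byte[] compressed) throws IOException {
            ByteArrayOutputStream baos = new ByteArrayOutputStream();

            try (GZIPInputStream gzipin = new GZIPInputStream(new ByteArrayInputStream(compressed))) {
                byte[] buf = new byte[128];
                int len;

                while ((len = gzipin.read(buf)) >= 0) {
                    baos.write(buf, 0, len);
                }
            }

            return baos.toByteArray();
        }
    }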