Example usage for org.apache.commons.collections4 MapUtils getString

Introduction

On this page you can find example usages of org.apache.commons.collections4 MapUtils.getString.

Prototype

public static <K> String getString(final Map<? super K, ?> map, final K key) 

Document

Gets a String from a Map in a null-safe manner. The value is converted via toString(); if the map is null or the key has no mapping, null is returned instead of an exception being thrown.
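
A minimal, self-contained sketch of that contract (the map contents below are made up for illustration; only MapUtils itself comes from the library):

import java.util.HashMap;
import java.util.Map;

import org.apache.commons.collections4.MapUtils;

public class MapUtilsGetStringDemo {
    public static void main(String[] args) {
        Map<String, Object> map = new HashMap<>();
        map.put("name", "alice");
        map.put("port", 8080); // non-String values are converted via toString()

        System.out.println(MapUtils.getString(map, "name"));    // alice
        System.out.println(MapUtils.getString(map, "port"));    // 8080
        System.out.println(MapUtils.getString(map, "missing")); // null, no exception
        System.out.println(MapUtils.getString(null, "name"));   // null, no NullPointerException

        // The three-argument overload substitutes a default when the
        // lookup would otherwise yield null:
        System.out.println(MapUtils.getString(map, "missing", "n/a")); // n/a
    }
}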

Usage

From source file:com.crosstreelabs.jaxrs.api.versioned.fixtures.vo.UserV2.java

@Override
public ValueObject consume(final Map data) {
    name = MapUtils.getString(data, "name");
    username = MapUtils.getString(data, "username");
    email = MapUtils.getString(data, "email");
    age = MapUtils.getIntValue(data, "age", 0);
    return this;
}
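
For context, a sketch of how such a consumer might be driven (the map literal and the default-constructed UserV2 are assumptions for illustration):

Map<String, Object> data = new HashMap<>();
data.put("name", "Jane Doe");
data.put("username", "jdoe");
data.put("age", 42);
// "email" is deliberately absent: MapUtils.getString(data, "email")
// returns null rather than throwing, so that field is simply left null.
ValueObject user = new UserV2().consume(data);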

From source file:io.hakbot.publishers.kennasecurity.KennaSecurityPublisher.java

@Override
public boolean initialize(Job job) {
    super.initialize(job);

    final JsonObject payload = JsonUtil.toJsonObject(getPublisherPayload(job).getContents());
    remoteInstance = instanceMap.get(MapUtils.getString(payload, "instance"));
    if (remoteInstance == null) {
        addProcessingMessage(job, "KennaSecurity instance cannot be found or is not defined.");
        return false;
    }
    return true;
}

From source file:com.jkoolcloud.tnt4j.streams.custom.kafka.interceptors.reporters.trace.MsgTraceReporter.java

@Override
public void send(TNTKafkaPInterceptor interceptor, ProducerRecord<Object, Object> producerRecord) {
    if (producerRecord == null) {
        return;
    }
    if (shouldSendTrace(producerRecord.topic(), true)) {
        try {
            ActivityInfo ai = new ActivityInfo();
            ai.setFieldValue(new ActivityField(StreamFieldType.EventType.name()), OpType.EVENT);
            ai.setFieldValue(new ActivityField(StreamFieldType.EventName.name()), "Kafka_Producer_Send"); // NON-NLS
            ai.setFieldValue(new ActivityField("Partition"), producerRecord.partition()); // NON-NLS
            ai.setFieldValue(new ActivityField("Topic"), producerRecord.topic()); // NON-NLS
            ai.setFieldValue(new ActivityField("Key"), producerRecord.key()); // NON-NLS
            ai.setFieldValue(new ActivityField(StreamFieldType.Message.name()), producerRecord.value());
            ai.setFieldValue(new ActivityField(StreamFieldType.StartTime.name()), producerRecord.timestamp());
            // ai.addCorrelator(producerRecord.topic());

            appendResourceFields(ai, producerRecord.topic(),
                    MapUtils.getString(interceptor.getConfig(), ProducerConfig.CLIENT_ID_CONFIG));

            stream.addInputToBuffer(ai);
        } catch (Exception exc) {
            Utils.logThrowable(LOGGER, OpLevel.ERROR,
                    StreamsResources.getBundle(KafkaStreamConstants.RESOURCE_BUNDLE_NAME),
                    "MsgTraceReporter.send.failed", exc);
        }
    }
}

From source file:com.jkoolcloud.tnt4j.streams.custom.kafka.interceptors.reporters.metrics.MetricsReporter.java

@Override
public void send(TNTKafkaPInterceptor interceptor, ProducerRecord<Object, Object> producerRecord) {
    String topic = producerRecord.topic();
    String clientId = MapUtils.getString(interceptor.getConfig(), ProducerConfig.CLIENT_ID_CONFIG);
    ProducerTopicMetrics topicMetrics = getProducerTopicMetrics(topic, producerRecord.partition(), clientId,
            "send"); // NON-NLS
    long now = System.currentTimeMillis();
    long jitter = now - topicMetrics.lastSend;
    topicMetrics.jitter.update(jitter);
    topicMetrics.lastSend = now;
    topicMetrics.sendM.mark();
    topicMetrics.sendC.inc();

    topicMetrics.offset.update(-1,
            producerRecord.timestamp() == null ? System.currentTimeMillis() : producerRecord.timestamp());
}

From source file:com.jkoolcloud.tnt4j.streams.custom.kafka.interceptors.reporters.trace.MsgTraceReporter.java

@Override
public void acknowledge(TNTKafkaPInterceptor interceptor, RecordMetadata recordMetadata, Exception e,
        ClusterResource clusterResource) {
    if (recordMetadata == null) {
        return;
    }
    if (shouldSendTrace(recordMetadata.topic(), false)) {
        try {
            ActivityInfo ai = new ActivityInfo();
            ai.setFieldValue(new ActivityField(StreamFieldType.EventType.name()), OpType.SEND);
            ai.setFieldValue(new ActivityField(StreamFieldType.EventName.name()), "Kafka_Producer_Acknowledge"); // NON-NLS
            ai.setFieldValue(new ActivityField("Offset"), recordMetadata.offset()); // NON-NLS
            ai.setFieldValue(new ActivityField(StreamFieldType.StartTime.name()),
                    TimeUnit.MILLISECONDS.toMicros(recordMetadata.timestamp()));
            ai.setFieldValue(new ActivityField("Checksum"), recordMetadata.checksum()); // NON-NLS
            ai.setFieldValue(new ActivityField("Topic"), recordMetadata.topic()); // NON-NLS
            ai.setFieldValue(new ActivityField("Partition"), recordMetadata.partition()); // NON-NLS
            if (e != null) {
                ai.setFieldValue(new ActivityField(StreamFieldType.Exception.name()),
                        Utils.getExceptionMessages(e));
            }
            if (clusterResource != null) {
                ai.setFieldValue(new ActivityField("ClusterId"), clusterResource.clusterId()); // NON-NLS
            }

            int size = Math.max(recordMetadata.serializedKeySize(), 0)
                    + Math.max(recordMetadata.serializedValueSize(), 0);

            ai.setFieldValue(new ActivityField(StreamFieldType.MsgLength.name()), size);
            ai.setFieldValue(new ActivityField(StreamFieldType.TrackingId.name()),
                    calcSignature(recordMetadata.topic(), recordMetadata.partition(), recordMetadata.offset()));
            ai.addCorrelator(createCorrelator(recordMetadata.topic(), recordMetadata.offset()));

            appendResourceFields(ai, recordMetadata.topic(),
                    MapUtils.getString(interceptor.getConfig(), ProducerConfig.CLIENT_ID_CONFIG));

            stream.addInputToBuffer(ai);
        } catch (Exception exc) {
            Utils.logThrowable(LOGGER, OpLevel.ERROR,
                    StreamsResources.getBundle(KafkaStreamConstants.RESOURCE_BUNDLE_NAME),
                    "MsgTraceReporter.acknowledge.failed", exc);
        }
    }
}

From source file:com.jkoolcloud.tnt4j.streams.custom.kafka.interceptors.reporters.metrics.MetricsReporter.java

@Override
public void acknowledge(TNTKafkaPInterceptor interceptor, RecordMetadata recordMetadata, Exception e,
        ClusterResource clusterResource) {
    String clientId = MapUtils.getString(interceptor.getConfig(), ProducerConfig.CLIENT_ID_CONFIG);
    ProducerTopicMetrics topicMetrics = getProducerTopicMetrics(recordMetadata.topic(),
            recordMetadata.partition(), clientId, "acknowledge"); // NON-NLS
    if (e != null) {
        topicMetrics.errorMeter.mark();
    }
    topicMetrics.ackM.mark();
    topicMetrics.ackC.inc();

    if (recordMetadata.offset() != -1L) {
        topicMetrics.offset.update(recordMetadata.offset(), recordMetadata.timestamp());
    }
}

From source file:com.jkoolcloud.tnt4j.streams.custom.kafka.interceptors.reporters.metrics.MetricsReporter.java

@Override
public void consume(TNTKafkaCInterceptor interceptor, ConsumerRecords<Object, Object> consumerRecords,
        ClusterResource clusterResource) {
    String clientId = MapUtils.getString(interceptor.getConfig(), ConsumerConfig.CLIENT_ID_CONFIG);
    for (ConsumerRecord<?, ?> record : consumerRecords) {
        ConsumerTopicMetrics topicMetrics = getConsumerTopicMetrics(record.topic(), record.partition(),
                clientId, "consume"); // NON-NLS
        long duration = System.currentTimeMillis() - record.timestamp();
        topicMetrics.latency.update(duration, TimeUnit.MILLISECONDS);
        topicMetrics.keySize.update(record.serializedKeySize());
        topicMetrics.valueSize.update(record.serializedValueSize());
        topicMetrics.consumeMessagesC.inc();
        topicMetrics.consumeM.mark();
        topicMetrics.consumeC.inc();

        topicMetrics.offset.update(record.offset(), record.timestamp(), System.currentTimeMillis());
    }
}

From source file:com.jkoolcloud.tnt4j.streams.custom.kafka.interceptors.reporters.metrics.MetricsReporter.java

@Override
public void commit(TNTKafkaCInterceptor interceptor, Map<TopicPartition, OffsetAndMetadata> tpomMap) {
    String clientId = MapUtils.getString(interceptor.getConfig(), ConsumerConfig.CLIENT_ID_CONFIG);
    for (Map.Entry<TopicPartition, OffsetAndMetadata> tpom : tpomMap.entrySet()) {
        TopicPartition partition = tpom.getKey();
        ConsumerTopicMetrics topicMetrics = getConsumerTopicMetrics(partition.topic(), partition.partition(),
                clientId, "commit"); // NON-NLS
        topicMetrics.commitC.inc();

        topicMetrics.offset.update(tpom.getValue().offset(), -1, System.currentTimeMillis());
    }
}

From source file:com.jkoolcloud.tnt4j.streams.custom.kafka.interceptors.reporters.trace.MsgTraceReporter.java

@Override
public void consume(TNTKafkaCInterceptor interceptor, ConsumerRecords<Object, Object> consumerRecords,
        ClusterResource clusterResource) {
    if (consumerRecords == null) {
        return;
    }
    String tid = null;
    ActivityInfo ai;
    try {
        ai = new ActivityInfo();
        ai.setFieldValue(new ActivityField(StreamFieldType.EventType.name()), OpType.ACTIVITY);
        ai.setFieldValue(new ActivityField(StreamFieldType.EventName.name()), "Kafka_Consumer_Consume"); // NON-NLS
        ai.setFieldValue(new ActivityField(StreamFieldType.TrackingId.name()),
                DefaultUUIDFactory.getInstance().newUUID());
        stream.addInputToBuffer(ai);

        tid = ai.getTrackingId();
    } catch (Exception exc) {
        Utils.logThrowable(LOGGER, OpLevel.ERROR,
                StreamsResources.getBundle(KafkaStreamConstants.RESOURCE_BUNDLE_NAME),
                "MsgTraceReporter.consume.failed", exc);
    }
    for (ConsumerRecord<Object, Object> cr : consumerRecords) {
        if (cr == null) {
            continue;
        }
        if (shouldSendTrace(cr.topic(), true)) {
            try {
                ai = new ActivityInfo();
                if (tid != null) {
                    ai.setFieldValue(new ActivityField(StreamFieldType.ParentId.name()), tid);
                }
                ai.setFieldValue(new ActivityField(StreamFieldType.EventType.name()), OpType.RECEIVE);
                ai.setFieldValue(new ActivityField(StreamFieldType.EventName.name()),
                        "Kafka_Consumer_Consume_Record"); // NON-NLS
                ai.setFieldValue(new ActivityField("Topic"), cr.topic()); // NON-NLS
                ai.setFieldValue(new ActivityField("Partition"), cr.partition()); // NON-NLS
                ai.setFieldValue(new ActivityField("Offset"), cr.offset()); // NON-NLS
                ai.setFieldValue(new ActivityField(StreamFieldType.StartTime.name()),
                        TimeUnit.MILLISECONDS.toMicros(cr.timestamp()));
                ai.setFieldValue(new ActivityField("TimestampType"), cr.timestampType()); // NON-NLS
                ai.setFieldValue(new ActivityField("Key"), cr.key()); // NON-NLS
                ai.setFieldValue(new ActivityField(StreamFieldType.Message.name()), cr.value());
                ai.setFieldValue(new ActivityField("Checksum"), cr.checksum()); // NON-NLS

                int size = Math.max(cr.serializedKeySize(), 0) + Math.max(cr.serializedValueSize(), 0);
                long latency = System.currentTimeMillis() - cr.timestamp();

                ai.setFieldValue(new ActivityField(StreamFieldType.MsgLength.name()), size);
                ai.setFieldValue(new ActivityField("Latency"), latency); // NON-NLS

                if (clusterResource != null) {
                    ai.setFieldValue(new ActivityField("ClusterId"), clusterResource.clusterId()); // NON-NLS
                }

                ai.setFieldValue(new ActivityField(StreamFieldType.TrackingId.name()),
                        calcSignature(cr.topic(), cr.partition(), cr.offset()));
                ai.addCorrelator(createCorrelator(cr.topic(), cr.offset()));

                appendResourceFields(ai, cr.topic(),
                        MapUtils.getString(interceptor.getConfig(), ConsumerConfig.CLIENT_ID_CONFIG));

                stream.addInputToBuffer(ai);
            } catch (Exception exc) {
                Utils.logThrowable(LOGGER, OpLevel.ERROR,
                        StreamsResources.getBundle(KafkaStreamConstants.RESOURCE_BUNDLE_NAME),
                        "MsgTraceReporter.consume.failed", exc);
            }
        }
    }
}

From source file:com.jkoolcloud.tnt4j.streams.custom.kafka.interceptors.reporters.trace.MsgTraceReporter.java

@Override
public void commit(TNTKafkaCInterceptor interceptor, Map<TopicPartition, OffsetAndMetadata> map) {
    if (map == null) {
        return;
    }
    String tid = null;
    ActivityInfo ai;
    try {
        ai = new ActivityInfo();
        ai.setFieldValue(new ActivityField(StreamFieldType.EventType.name()), OpType.ACTIVITY);
        ai.setFieldValue(new ActivityField(StreamFieldType.EventName.name()), "Kafka_Consumer_Commit"); // NON-NLS
        ai.setFieldValue(new ActivityField(StreamFieldType.TrackingId.name()),
                DefaultUUIDFactory.getInstance().newUUID());
        stream.addInputToBuffer(ai);

        tid = ai.getTrackingId();
    } catch (Exception exc) {
        Utils.logThrowable(LOGGER, OpLevel.ERROR,
                StreamsResources.getBundle(KafkaStreamConstants.RESOURCE_BUNDLE_NAME),
                "MsgTraceReporter.commit.failed", exc);
    }
    for (Map.Entry<TopicPartition, OffsetAndMetadata> me : map.entrySet()) {
        if (me == null) {
            continue;
        }
        if (shouldSendTrace(me.getKey().topic(), false)) {
            try {
                ai = new ActivityInfo();
                if (tid != null) {
                    ai.setFieldValue(new ActivityField(StreamFieldType.ParentId.name()), tid);
                }
                ai.setFieldValue(new ActivityField(StreamFieldType.EventType.name()), OpType.EVENT);
                ai.setFieldValue(new ActivityField(StreamFieldType.EventName.name()),
                        "Kafka_Consumer_Commit_Entry"); // NON-NLS
                ai.setFieldValue(new ActivityField("Partition"), me.getKey().partition()); // NON-NLS
                ai.setFieldValue(new ActivityField("Topic"), me.getKey().topic()); // NON-NLS
                ai.setFieldValue(new ActivityField("Offset"), me.getValue().offset()); // NON-NLS
                ai.setFieldValue(new ActivityField("Metadata"), me.getValue().metadata()); // NON-NLS
                ai.setFieldValue(new ActivityField(StreamFieldType.TrackingId.name()),
                        calcSignature(me.getKey().topic(), me.getKey().partition(), me.getValue().offset()));
                ai.addCorrelator(createCorrelator(me.getKey().topic(), me.getValue().offset()));

                appendResourceFields(ai, me.getKey().topic(),
                        MapUtils.getString(interceptor.getConfig(), ConsumerConfig.CLIENT_ID_CONFIG));

                stream.addInputToBuffer(ai);
            } catch (Exception exc) {
                Utils.logThrowable(LOGGER, OpLevel.ERROR,
                        StreamsResources.getBundle(KafkaStreamConstants.RESOURCE_BUNDLE_NAME),
                        "MsgTraceReporter.commit.failed", exc);
            }
        }
    }
}