Example usage for java.util.concurrent.atomic AtomicLong incrementAndGet

Introduction

On this page you can find usage examples for java.util.concurrent.atomic.AtomicLong#incrementAndGet.

Prototype

public final long incrementAndGet() 

Document

Atomically increments the current value, with memory effects as specified by VarHandle#getAndAdd.
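
As a quick orientation before the full examples below, here is a minimal sketch of the basic pattern (the class and method names are illustrative, not taken from any of the sources that follow): each call to incrementAndGet() atomically bumps the counter and returns the new value, so concurrent threads never lose an update.

import java.util.concurrent.atomic.AtomicLong;

public class RequestCounter {

    // Shared counter; incrementAndGet() is atomic, so no external locking is needed.
    private final AtomicLong requests = new AtomicLong(0);

    // Records one request and returns the running total.
    public long record() {
        return requests.incrementAndGet();
    }

    public static void main(String[] args) throws InterruptedException {
        final RequestCounter counter = new RequestCounter();
        Runnable task = new Runnable() {
            @Override
            public void run() {
                for (int i = 0; i < 1000; i++) {
                    counter.record();
                }
            }
        };
        Thread t1 = new Thread(task);
        Thread t2 = new Thread(task);
        t1.start();
        t2.start();
        t1.join();
        t2.join();
        // Always prints 2000: no increments are lost to interleaving.
        System.out.println(counter.requests.get());
    }
}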

Usage

From source file:org.apache.distributedlog.auditor.DLAuditor.java
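
This DistributedLog auditor walks every ledger in a BookKeeper cluster; numLedgers.incrementAndGet() counts each ledger as it is dispatched to the worker pool, while companion AtomicLongs accumulate byte and entry totals inside the asynchronous open callbacks.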

private long calculateLedgerSpaceUsage(BookKeeperClient bkc, final ExecutorService executorService)
        throws IOException {
    final AtomicLong totalBytes = new AtomicLong(0);
    final AtomicLong totalEntries = new AtomicLong(0);
    final AtomicLong numLedgers = new AtomicLong(0);

    LedgerManager lm = BookKeeperAccessor.getLedgerManager(bkc.get());

    final CompletableFuture<Void> doneFuture = FutureUtils.createFuture();
    final BookKeeper bk = bkc.get();

    BookkeeperInternalCallbacks.Processor<Long> collector = new BookkeeperInternalCallbacks.Processor<Long>() {
        @Override
        public void process(final Long lid, final AsyncCallback.VoidCallback cb) {
            numLedgers.incrementAndGet();
            executorService.submit(new Runnable() {
                @Override
                public void run() {
                    bk.asyncOpenLedgerNoRecovery(lid, BookKeeper.DigestType.CRC32,
                            conf.getBKDigestPW().getBytes(UTF_8),
                            new org.apache.bookkeeper.client.AsyncCallback.OpenCallback() {
                                @Override
                                public void openComplete(int rc, LedgerHandle lh, Object ctx) {
                                    final int cbRc;
                                    if (BKException.Code.OK == rc) {
                                        totalBytes.addAndGet(lh.getLength());
                                        totalEntries.addAndGet(lh.getLastAddConfirmed() + 1);
                                        cbRc = rc;
                                    } else {
                                        cbRc = BKException.Code.ZKException;
                                    }
                                    executorService.submit(new Runnable() {
                                        @Override
                                        public void run() {
                                            cb.processResult(cbRc, null, null);
                                        }
                                    });
                                }
                            }, null);
                }
            });
        }
    };
    AsyncCallback.VoidCallback finalCb = new AsyncCallback.VoidCallback() {
        @Override
        public void processResult(int rc, String path, Object ctx) {
            if (BKException.Code.OK == rc) {
                doneFuture.complete(null);
            } else {
                doneFuture.completeExceptionally(BKException.create(rc));
            }
        }
    };
    lm.asyncProcessLedgers(collector, finalCb, null, BKException.Code.OK, BKException.Code.ZKException);
    try {
        doneFuture.get();
        logger.info("calculated {} ledgers\n\ttotal bytes = {}\n\ttotal entries = {}",
                new Object[] { numLedgers.get(), totalBytes.get(), totalEntries.get() });
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new DLInterruptedException("Interrupted on calculating ledger space : ", e);
    } catch (ExecutionException e) {
        if (e.getCause() instanceof IOException) {
            throw (IOException) (e.getCause());
        } else {
            throw new IOException("Failed to calculate ledger space : ", e.getCause());
        }
    }
    return totalBytes.get();
}

From source file:org.apache.flume.channel.recoverable.memory.wal.TestWAL.java
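
This Flume WAL test appends from ten threads at once, using seqid.incrementAndGet() to give every WALEntry a unique, monotonically increasing sequence number no matter which thread writes it.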

@Test
public void testThreadedAppend() throws IOException, InterruptedException {
    int numThreads = 10;
    final CountDownLatch startLatch = new CountDownLatch(numThreads);
    final CountDownLatch stopLatch = new CountDownLatch(numThreads);
    final AtomicLong seqid = new AtomicLong(0);
    final List<String> globalExpected = Collections.synchronizedList(new ArrayList<String>());
    final List<Exception> errors = Collections.synchronizedList(new ArrayList<Exception>());
    for (int i = 0; i < numThreads; i++) {
        final int id = i;
        Thread t = new Thread() {
            @Override
            public void run() {
                try {
                    List<String> expected = strings(100);
                    globalExpected.addAll(expected);
                    startLatch.countDown();
                    startLatch.await();
                    // half the threads write entries one at a time, the other half write a batch
                    if (id % 2 == 0) {
                        for (String s : expected) {
                            wal.writeEntry(new WALEntry<Text>(new Text(s), seqid.incrementAndGet()));
                        }
                    } else {
                        List<WALEntry<Text>> batch = Lists.newArrayList();
                        for (String s : expected) {
                            batch.add(new WALEntry<Text>(new Text(s), seqid.incrementAndGet()));
                        }
                        wal.writeEntries(batch);
                    }
                } catch (Exception e) {
                    logger.warn("Error doing appends", e);
                    errors.add(e);
                } finally {
                    stopLatch.countDown();
                }
            }
        };
        t.setDaemon(true);
        t.start();
    }
    Assert.assertTrue(stopLatch.await(30, TimeUnit.SECONDS));
    Assert.assertEquals(Collections.EMPTY_LIST, errors);
    wal.close();
    wal = new WAL<Text>(dataDir, Text.class);
    WALReplayResult<Text> result = wal.replay();
    Assert.assertEquals(1000, result.getSequenceID());
    List<String> actual = toStringList(result.getResults());
    // we don't know in what order the threads will manage to append
    // to the wal, so sort both lists to make them comparable
    Collections.sort(actual);
    Collections.sort(globalExpected);
    Assert.assertEquals(globalExpected, actual);
}

From source file:io.druid.server.namespace.cache.NamespaceExtractionCacheManagerExecutorsTest.java
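
This Druid test counts scheduled-task executions with runs.incrementAndGet(), first asserting that the count keeps advancing while the service is alive, then that it stops once the executor has shut down.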

@Test(timeout = 50_000)
public void testShutdown()
        throws NoSuchFieldException, IllegalAccessException, InterruptedException, ExecutionException {
    final CountDownLatch latch = new CountDownLatch(1);
    final ListenableFuture future;
    final AtomicLong runs = new AtomicLong(0);
    long prior = 0;
    try {

        final URIExtractionNamespace namespace = new URIExtractionNamespace("ns", tmpFile.toURI(),
                new URIExtractionNamespace.ObjectMapperFlatDataParser(
                        URIExtractionNamespaceTest.registerTypes(new ObjectMapper())),
                new Period(1l), null);
        final String cacheId = UUID.randomUUID().toString();
        final Runnable runnable = manager.getPostRunnable(namespace, factory, cacheId);
        future = manager.schedule(namespace, factory, new Runnable() {
            @Override
            public void run() {
                runnable.run();
                latch.countDown();
                runs.incrementAndGet();
            }
        }, cacheId);

        latch.await();
        Assert.assertFalse(future.isCancelled());
        Assert.assertFalse(future.isDone());
        prior = runs.get();
        while (runs.get() <= prior) {
            Thread.sleep(50);
        }
        Assert.assertTrue(runs.get() > prior);
    } finally {
        lifecycle.stop();
    }
    manager.waitForServiceToEnd(1_000, TimeUnit.MILLISECONDS);

    prior = runs.get();
    Thread.sleep(50);
    Assert.assertEquals(prior, runs.get());

    Field execField = NamespaceExtractionCacheManager.class
            .getDeclaredField("listeningScheduledExecutorService");
    execField.setAccessible(true);
    Assert.assertTrue(((ListeningScheduledExecutorService) execField.get(manager)).isShutdown());
    Assert.assertTrue(((ListeningScheduledExecutorService) execField.get(manager)).isTerminated());
}

From source file:org.apache.activemq.leveldb.test.ReplicatedLevelDBBrokerTest.java
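
This ActiveMQ replication test increments sentCounter and receivedCounter from producer and consumer threads so the assertions can check that both clients keep making progress across a quorum loss and recovery.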

@Test
@Ignore
public void testReplicationQuorumLoss() throws Throwable {

    System.out.println("======================================");
    System.out.println(" Start 2 ActiveMQ nodes.");
    System.out.println("======================================");
    startBrokerAsync(createBrokerNode("node-1", port));
    startBrokerAsync(createBrokerNode("node-2", port));
    BrokerService master = waitForNextMaster();
    System.out.println("======================================");
    System.out.println(" Start the producer and consumer");
    System.out.println("======================================");

    final AtomicBoolean stopClients = new AtomicBoolean(false);
    final ArrayBlockingQueue<String> errors = new ArrayBlockingQueue<String>(100);
    final AtomicLong receivedCounter = new AtomicLong();
    final AtomicLong sentCounter = new AtomicLong();
    Thread producer = startFailoverClient("producer", new Client() {
        @Override
        public void execute(Connection connection) throws Exception {
            Session session = connection.createSession(false, Session.CLIENT_ACKNOWLEDGE);
            MessageProducer producer = session.createProducer(session.createQueue("test"));
            long actual = 0;
            while (!stopClients.get()) {
                TextMessage msg = session.createTextMessage("Hello World");
                msg.setLongProperty("id", actual++);
                producer.send(msg);
                sentCounter.incrementAndGet();
            }
        }
    });

    Thread consumer = startFailoverClient("consumer", new Client() {
        @Override
        public void execute(Connection connection) throws Exception {
            connection.start();
            Session session = connection.createSession(false, Session.CLIENT_ACKNOWLEDGE);
            MessageConsumer consumer = session.createConsumer(session.createQueue("test"));
            long expected = 0;
            while (!stopClients.get()) {
                Message msg = consumer.receive(200);
                if (msg != null) {
                    long actual = msg.getLongProperty("id");
                    if (actual != expected) {
                        errors.offer("Received got unexpected msg id: " + actual + ", expected: " + expected);
                    }
                    msg.acknowledge();
                    expected = actual + 1;
                    receivedCounter.incrementAndGet();
                }
            }
        }
    });

    try {
        assertCounterMakesProgress(sentCounter, 10, TimeUnit.SECONDS);
        assertCounterMakesProgress(receivedCounter, 5, TimeUnit.SECONDS);
        assertNull(errors.poll());

        System.out.println("======================================");
        System.out.println(" Master should stop once the quorum is lost.");
        System.out.println("======================================");
        ArrayList<BrokerService> stopped = stopSlaves();// stopping the slaves should kill the quorum.
        assertStopsWithin(master, 10, TimeUnit.SECONDS);
        assertNull(errors.poll()); // clients should not see an error since they are failover clients.
        stopped.add(master);

        System.out.println("======================================");
        System.out.println(" Restart the slave. Clients should make progress again..");
        System.out.println("======================================");
        startBrokersAsync(createBrokerNodes(stopped));
        assertCounterMakesProgress(sentCounter, 10, TimeUnit.SECONDS);
        assertCounterMakesProgress(receivedCounter, 5, TimeUnit.SECONDS);
        assertNull(errors.poll());
    } catch (Throwable e) {
        e.printStackTrace();
        throw e;
    } finally {
        // Wait for the clients to stop..
        stopClients.set(true);
        producer.join();
        consumer.join();
    }
}

From source file:org.apache.hadoop.hbase.client.TestClientNoCluster.java
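
In this HBase test helper, sequenceids.incrementAndGet() mints a fresh scanner id for a simulated meta scan whenever the incoming request does not already carry one.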

static ScanResponse doMetaScanResponse(final SortedMap<byte[], Pair<HRegionInfo, ServerName>> meta,
        final AtomicLong sequenceids, final ScanRequest request) {
    ScanResponse.Builder builder = ScanResponse.newBuilder();
    int max = request.getNumberOfRows();
    int count = 0;
    Map<byte[], Pair<HRegionInfo, ServerName>> tail = request.hasScan()
            ? meta.tailMap(request.getScan().getStartRow().toByteArray())
            : meta;
    ClientProtos.Result.Builder resultBuilder = ClientProtos.Result.newBuilder();
    for (Map.Entry<byte[], Pair<HRegionInfo, ServerName>> e : tail.entrySet()) {
        // Can be 0 on open of a scanner -- i.e. rpc to setup scannerid only.
        if (max <= 0)
            break;
        if (++count > max)
            break;
        HRegionInfo hri = e.getValue().getFirst();
        ByteString row = HBaseZeroCopyByteString.wrap(hri.getRegionName());
        resultBuilder.clear();
        resultBuilder.addCell(getRegionInfo(row, hri));
        resultBuilder.addCell(getServer(row, e.getValue().getSecond()));
        resultBuilder.addCell(getStartCode(row));
        builder.addResults(resultBuilder.build());
        // Set more to false if we are on the last region in table.
        if (hri.getEndKey().length <= 0)
            builder.setMoreResults(false);
        else
            builder.setMoreResults(true);
    }
    // If no scannerid, set one.
    builder.setScannerId(request.hasScannerId() ? request.getScannerId() : sequenceids.incrementAndGet());
    return builder.build();
}

From source file:org.apache.nifi.processors.kite.ConvertCSVToAvro.java
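
This NiFi processor counts successfully converted records with written.incrementAndGet() inside the stream callback, then uses the total to decide how to route the outgoing Avro and bad-record FlowFiles.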

@Override
public void onTrigger(ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile incomingCSV = session.get();
    if (incomingCSV == null) {
        return;
    }

    CSVProperties props = new CSVProperties.Builder()
            .charset(context.getProperty(CHARSET).evaluateAttributeExpressions(incomingCSV).getValue())
            .delimiter(context.getProperty(DELIMITER).evaluateAttributeExpressions(incomingCSV).getValue())
            .quote(context.getProperty(QUOTE).evaluateAttributeExpressions(incomingCSV).getValue())
            .escape(context.getProperty(ESCAPE).evaluateAttributeExpressions(incomingCSV).getValue())
            .hasHeader(context.getProperty(HAS_HEADER).evaluateAttributeExpressions(incomingCSV).asBoolean())
            .linesToSkip(
                    context.getProperty(LINES_TO_SKIP).evaluateAttributeExpressions(incomingCSV).asInteger())
            .build();

    String schemaProperty = context.getProperty(SCHEMA).evaluateAttributeExpressions(incomingCSV).getValue();
    final Schema schema;
    try {
        schema = getSchema(schemaProperty, DefaultConfiguration.get());
    } catch (SchemaNotFoundException e) {
        getLogger().error("Cannot find schema: " + schemaProperty);
        session.transfer(incomingCSV, FAILURE);
        return;
    }

    try (final DataFileWriter<Record> writer = new DataFileWriter<>(
            AvroUtil.newDatumWriter(schema, Record.class))) {
        writer.setCodec(getCodecFactory(context.getProperty(COMPRESSION_TYPE).getValue()));

        try {
            final AtomicLong written = new AtomicLong(0L);
            final FailureTracker failures = new FailureTracker();

            FlowFile badRecords = session.clone(incomingCSV);
            FlowFile outgoingAvro = session.write(incomingCSV, new StreamCallback() {
                @Override
                public void process(InputStream in, OutputStream out) throws IOException {
                    try (CSVFileReader<Record> reader = new CSVFileReader<>(in, props, schema, Record.class)) {
                        reader.initialize();
                        try (DataFileWriter<Record> w = writer.create(schema, out)) {
                            while (reader.hasNext()) {
                                try {
                                    Record record = reader.next();
                                    w.append(record);
                                    written.incrementAndGet();
                                } catch (DatasetRecordException e) {
                                    failures.add(e);
                                }
                            }
                        }
                    }
                }
            });

            long errors = failures.count();

            session.adjustCounter("Converted records", written.get(),
                    false /* update only if file transfer is successful */);
            session.adjustCounter("Conversion errors", errors,
                    false /* update only if file transfer is successful */);

            if (written.get() > 0L) {
                session.transfer(outgoingAvro, SUCCESS);

                if (errors > 0L) {
                    getLogger().warn("Failed to convert {}/{} records from CSV to Avro",
                            new Object[] { errors, errors + written.get() });
                    badRecords = session.putAttribute(badRecords, "errors", failures.summary());
                    session.transfer(badRecords, INCOMPATIBLE);
                } else {
                    session.remove(badRecords);
                }

            } else {
                session.remove(outgoingAvro);

                if (errors > 0L) {
                    getLogger().warn("Failed to convert {}/{} records from CSV to Avro",
                            new Object[] { errors, errors });
                    badRecords = session.putAttribute(badRecords, "errors", failures.summary());
                } else {
                    badRecords = session.putAttribute(badRecords, "errors", "No incoming records");
                }

                session.transfer(badRecords, FAILURE);
            }

        } catch (ProcessException | DatasetIOException e) {
            getLogger().error("Failed reading or writing", e);
            session.transfer(incomingCSV, FAILURE);
        } catch (DatasetException e) {
            getLogger().error("Failed to read FlowFile", e);
            session.transfer(incomingCSV, FAILURE);
        }
    } catch (final IOException ioe) {
        throw new RuntimeException("Unable to close Avro Writer", ioe);
    }
}

From source file:hello.MetricsActivator.java
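
Here an AtomicLong is kept per output channel in a shared map; count.incrementAndGet() yields a running total that is turned into a "#n" suffix to keep otherwise identical monitor names unique.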

private MessageSourceMetrics enhanceSourceMonitor(MessageSourceMetrics monitor) {

    MessageSourceMetrics result = monitor;

    if (monitor.getManagedName() != null) {
        return monitor;
    }

    // Assignment algorithm and bean id, with bean id pulled reflectively out of enclosing endpoint if possible
    String[] names = this.applicationContext.getBeanNamesForType(AbstractEndpoint.class);

    String name = null;
    String endpointName = null;
    String source = "endpoint";
    Object endpoint = null;

    for (String beanName : names) {
        endpoint = this.applicationContext.getBean(beanName);
        Object field = null;
        try {
            field = extractTarget(getField(endpoint, "source"));
        } catch (Exception e) {
            logger.trace("Could not get source from bean = " + beanName);
        }
        if (field == monitor) {
            name = beanName;
            endpointName = beanName;
            break;
        }
    }
    if (name != null && endpoint != null && name.startsWith("_org.springframework.integration")) {
        name = getInternalComponentName(name);
        source = "internal";
    }
    if (name != null && endpoint != null && name.startsWith("org.springframework.integration")) {
        Object target = endpoint;
        if (endpoint instanceof Advised) {
            TargetSource targetSource = ((Advised) endpoint).getTargetSource();
            if (targetSource != null) {
                try {
                    target = targetSource.getTarget();
                } catch (Exception e) {
                    logger.debug("Could not get handler from bean = " + name);
                }
            }
        }
        Object field = getField(target, "outputChannel");
        if (field != null) {
            if (!anonymousSourceCounters.containsKey(field)) {
                anonymousSourceCounters.put(field, new AtomicLong());
            }
            AtomicLong count = anonymousSourceCounters.get(field);
            long total = count.incrementAndGet();
            String suffix = "";
            /*
             * Short hack to make sure object names are unique if more than one endpoint has the same output channel
             */
            if (total > 1) {
                suffix = "#" + total;
            }
            name = field + suffix;
            source = "anonymous";
        }
    }

    if (endpoint instanceof Lifecycle) {
        // Wrap the monitor in a lifecycle so it exposes the start/stop operations
        if (endpoint instanceof TrackableComponent) {
            result = new LifecycleTrackableMessageSourceMetrics((Lifecycle) endpoint, monitor);
        } else {
            result = new LifecycleMessageSourceMetrics((Lifecycle) endpoint, monitor);
        }
    }

    if (name == null) {
        name = monitor.toString();
        source = "handler";
    }

    if (endpointName != null) {
        beansByEndpointName.put(name, endpointName);
    }

    monitor.setManagedType(source);
    monitor.setManagedName(name);

    return result;
}

From source file:hello.MetricsActivator.java
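
The handler-monitor variant of the same pattern: a per-input-channel AtomicLong is incremented to derive a unique suffix when several endpoints share one channel.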

private MessageHandlerMetrics enhanceHandlerMonitor(MessageHandlerMetrics monitor) {

    MessageHandlerMetrics result = monitor;

    if (monitor.getManagedName() != null && monitor.getManagedType() != null) {
        return monitor;
    }

    // Assignment algorithm and bean id, with bean id pulled reflectively out of enclosing endpoint if possible
    String[] names = this.applicationContext.getBeanNamesForType(AbstractEndpoint.class);

    String name = null;
    String endpointName = null;
    String source = "endpoint";
    Object endpoint = null;

    for (String beanName : names) {
        endpoint = this.applicationContext.getBean(beanName);
        try {
            Object field = extractTarget(getField(endpoint, "handler"));
            if (field == monitor || this.extractTarget(this.handlerInAnonymousWrapper(field)) == monitor) {
                name = beanName;
                endpointName = beanName;
                break;
            }
        } catch (Exception e) {
            logger.trace("Could not get handler from bean = " + beanName);
        }
    }
    if (name != null && endpoint != null && name.startsWith("_org.springframework.integration")) {
        name = getInternalComponentName(name);
        source = "internal";
    }
    if (name != null && endpoint != null && name.startsWith("org.springframework.integration")) {
        Object target = endpoint;
        if (endpoint instanceof Advised) {
            TargetSource targetSource = ((Advised) endpoint).getTargetSource();
            if (targetSource != null) {
                try {
                    target = targetSource.getTarget();
                } catch (Exception e) {
                    logger.debug("Could not get handler from bean = " + name);
                }
            }
        }
        Object field = getField(target, "inputChannel");
        if (field != null) {
            if (!anonymousHandlerCounters.containsKey(field)) {
                anonymousHandlerCounters.put(field, new AtomicLong());
            }
            AtomicLong count = anonymousHandlerCounters.get(field);
            long total = count.incrementAndGet();
            String suffix = "";
            /*
             * Short hack to make sure object names are unique if more than one endpoint has the same input channel
             */
            if (total > 1) {
                suffix = "#" + total;
            }
            name = field + suffix;
            source = "anonymous";
        }
    }

    if (endpoint instanceof Lifecycle) {
        // Wrap the monitor in a lifecycle so it exposes the start/stop operations
        if (monitor instanceof MappingMessageRouterManagement) {
            if (monitor instanceof TrackableComponent) {
                result = new TrackableRouterMetrics((Lifecycle) endpoint,
                        (MappingMessageRouterManagement) monitor);
            } else {
                result = new RouterMetrics((Lifecycle) endpoint, (MappingMessageRouterManagement) monitor);
            }
        } else {
            if (monitor instanceof TrackableComponent) {
                result = new LifecycleTrackableMessageHandlerMetrics((Lifecycle) endpoint, monitor);
            } else {
                result = new LifecycleMessageHandlerMetrics((Lifecycle) endpoint, monitor);
            }
        }
    }

    if (name == null) {
        if (monitor instanceof NamedComponent) {
            name = ((NamedComponent) monitor).getComponentName();
        }
        if (name == null) {
            name = monitor.toString();
        }
        source = "handler";
    }

    if (endpointName != null) {
        beansByEndpointName.put(name, endpointName);
    }

    monitor.setManagedType(source);
    monitor.setManagedName(name);

    return result;

}

From source file:org.springframework.integration.monitor.IntegrationMBeanExporter.java
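
Spring Integration's MBean exporter applies the same counter-per-channel technique when naming message-source metrics.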

private MessageSourceMetrics enhanceSourceMonitor(SimpleMessageSourceMetrics monitor) {

    MessageSourceMetrics result = monitor;

    if (monitor.getName() != null && monitor.getSource() != null) {
        return monitor;
    }

    // Assignment algorithm and bean id, with bean id pulled reflectively out of enclosing endpoint if possible
    String[] names = beanFactory.getBeanNamesForType(AbstractEndpoint.class);

    String name = null;
    String endpointName = null;
    String source = "endpoint";
    Object endpoint = null;

    for (String beanName : names) {
        endpoint = beanFactory.getBean(beanName);
        Object field = null;
        try {
            field = extractTarget(getField(endpoint, "source"));
        } catch (Exception e) {
            logger.trace("Could not get source from bean = " + beanName);
        }
        if (field == monitor.getMessageSource()) {
            name = beanName;
            endpointName = beanName;
            break;
        }
    }
    if (name != null && endpoint != null && name.startsWith("_org.springframework.integration")) {
        name = getInternalComponentName(name);
        source = "internal";
    }
    if (name != null && endpoint != null && name.startsWith("org.springframework.integration")) {
        Object target = endpoint;
        if (endpoint instanceof Advised) {
            TargetSource targetSource = ((Advised) endpoint).getTargetSource();
            if (targetSource != null) {
                try {
                    target = targetSource.getTarget();
                } catch (Exception e) {
                    logger.debug("Could not get handler from bean = " + name);
                }
            }
        }
        Object field = getField(target, "outputChannel");
        if (field != null) {
            if (!anonymousSourceCounters.containsKey(field)) {
                anonymousSourceCounters.put(field, new AtomicLong());
            }
            AtomicLong count = anonymousSourceCounters.get(field);
            long total = count.incrementAndGet();
            String suffix = "";
            /*
             * Short hack to make sure object names are unique if more than one endpoint has the same output channel
             */
            if (total > 1) {
                suffix = "#" + total;
            }
            name = field + suffix;
            source = "anonymous";
        }
    }

    if (endpoint instanceof Lifecycle) {
        // Wrap the monitor in a lifecycle so it exposes the start/stop operations
        result = new LifecycleMessageSourceMetrics((Lifecycle) endpoint, monitor);
    }

    if (name == null) {
        name = monitor.getMessageSource().toString();
        source = "handler";
    }

    if (endpointName != null) {
        beansByEndpointName.put(name, endpointName);
    }

    monitor.setSource(source);
    monitor.setName(name);

    return result;
}

From source file:org.springframework.integration.monitor.IntegrationMBeanExporter.java
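
And again for handler metrics: incrementAndGet() on the channel's counter produces the running total that becomes the suffix in the managed name.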

private MessageHandlerMetrics enhanceHandlerMonitor(SimpleMessageHandlerMetrics monitor) {

    MessageHandlerMetrics result = monitor;

    if (monitor.getName() != null && monitor.getSource() != null) {
        return monitor;
    }

    // Assignment algorithm and bean id, with bean id pulled reflectively out of enclosing endpoint if possible
    String[] names = beanFactory.getBeanNamesForType(AbstractEndpoint.class);

    String name = null;
    String endpointName = null;
    String source = "endpoint";
    Object endpoint = null;

    for (String beanName : names) {
        endpoint = beanFactory.getBean(beanName);
        Object field = null;
        try {
            field = extractTarget(getField(endpoint, "handler"));
        } catch (Exception e) {
            logger.trace("Could not get handler from bean = " + beanName);
        }
        if (field == monitor.getMessageHandler()) {
            name = beanName;
            endpointName = beanName;
            break;
        }
    }
    if (name != null && endpoint != null && name.startsWith("_org.springframework.integration")) {
        name = getInternalComponentName(name);
        source = "internal";
    }
    if (name != null && endpoint != null && name.startsWith("org.springframework.integration")) {
        Object target = endpoint;
        if (endpoint instanceof Advised) {
            TargetSource targetSource = ((Advised) endpoint).getTargetSource();
            if (targetSource != null) {
                try {
                    target = targetSource.getTarget();
                } catch (Exception e) {
                    logger.debug("Could not get handler from bean = " + name);
                }
            }
        }
        Object field = getField(target, "inputChannel");
        if (field != null) {
            if (!anonymousHandlerCounters.containsKey(field)) {
                anonymousHandlerCounters.put(field, new AtomicLong());
            }
            AtomicLong count = anonymousHandlerCounters.get(field);
            long total = count.incrementAndGet();
            String suffix = "";
            /*
             * Short hack to make sure object names are unique if more than one endpoint has the same input channel
             */
            if (total > 1) {
                suffix = "#" + total;
            }
            name = field + suffix;
            source = "anonymous";
        }
    }

    if (endpoint instanceof Lifecycle) {
        // Wrap the monitor in a lifecycle so it exposes the start/stop operations
        result = new LifecycleMessageHandlerMetrics((Lifecycle) endpoint, monitor);
    }

    if (name == null) {
        name = monitor.getMessageHandler().toString();
        source = "handler";
    }

    if (endpointName != null) {
        beansByEndpointName.put(name, endpointName);
    }

    monitor.setSource(source);
    monitor.setName(name);

    return result;

}
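
The last four examples all rely on the same idiom. Here is a minimal distillation of it (UniqueNamer and nameFor are illustrative names, not from the sources above), using ConcurrentHashMap.computeIfAbsent so that the get-or-create step is atomic as well:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

public class UniqueNamer {

    // One counter per channel; the map itself must be safe for concurrent use.
    private final Map<Object, AtomicLong> countersByChannel = new ConcurrentHashMap<>();

    // Returns the channel's name, suffixed with "#n" from the second caller onwards.
    public String nameFor(Object channel) {
        // computeIfAbsent makes the get-or-create step atomic, unlike the
        // containsKey/put sequence in the examples above.
        long total = countersByChannel.computeIfAbsent(channel, c -> new AtomicLong()).incrementAndGet();
        return total > 1 ? channel + "#" + total : channel.toString();
    }
}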