Example usage for java.util.concurrent LinkedTransferQueue LinkedTransferQueue()

Introduction

On this page you can find example usage of the java.util.concurrent.LinkedTransferQueue constructor, LinkedTransferQueue().

Prototype

public LinkedTransferQueue() 

Document

Creates an initially empty LinkedTransferQueue.
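
Before the project examples below, here is a minimal self-contained sketch of the constructor in action; the class and variable names are illustrative and do not come from any of the sources cited below:

import java.util.concurrent.LinkedTransferQueue;
import java.util.concurrent.TransferQueue;

public class BasicTransferQueueDemo {
    public static void main(String[] args) throws InterruptedException {
        // Creates an initially empty, unbounded LinkedTransferQueue
        TransferQueue<String> queue = new LinkedTransferQueue<>();

        // Consumer: take() blocks until an element becomes available
        Thread consumer = new Thread(() -> {
            try {
                System.out.println("Received: " + queue.take());
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        consumer.start();

        // transfer() blocks the producer until a consumer receives the element
        queue.transfer("hello");
        consumer.join();
    }
}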

Usage

From source file: TQProducer.java

public static void main(String[] args) {
    final TransferQueue<Integer> tQueue = new LinkedTransferQueue<>();
    final AtomicInteger sequence = new AtomicInteger();

    for (int i = 0; i < 5; i++) {
        try {
            // put() never blocks here: LinkedTransferQueue is unbounded, but the
            // TransferQueue interface still declares InterruptedException
            tQueue.put(sequence.incrementAndGet());
            System.out.println("Initial queue: " + tQueue);

            // Start a producer/consumer pair on each iteration
            new TQProducer("Producer-1", tQueue, sequence).start();
            new TQConsumer("Consumer-1", tQueue).start();

        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }

}
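
A note on this example: because LinkedTransferQueue is unbounded, put() enqueues immediately and never waits. When the producer should block until a consumer is ready, the TransferQueue methods transfer() and tryTransfer() apply instead. A minimal sketch of the difference (names illustrative):

import java.util.concurrent.LinkedTransferQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TransferQueue;

public class TransferVsPutDemo {
    public static void main(String[] args) throws InterruptedException {
        TransferQueue<Integer> queue = new LinkedTransferQueue<>();

        queue.put(1); // returns immediately: the queue is unbounded

        // tryTransfer() hands the element to a waiting consumer, or gives up
        // after the timeout; no consumer is waiting here, so it returns false
        // and the element is NOT left in the queue
        boolean handedOff = queue.tryTransfer(2, 100, TimeUnit.MILLISECONDS);
        System.out.println("Handed off: " + handedOff); // false

        System.out.println(queue.take()); // 1 (2 was never enqueued)
    }
}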

From source file: com.streamsets.pipeline.stage.origin.kinesis.TestKinesisSource.java

@SuppressWarnings("unchecked")
@Test
public void testDefaultConsume() throws Exception {
    KinesisConsumerConfigBean config = getKinesisConsumerConfig();

    KinesisSource source = PowerMockito.spy(new KinesisSource(config));
    SourceRunner sourceRunner = new SourceRunner.Builder(KinesisDSource.class, source).addOutputLane("lane")
            .build();

    KinesisTestUtil.mockKinesisUtil(1);

    PowerMockito.doReturn(null).when(source, "createKinesisWorker", any(IRecordProcessorFactory.class));

    sourceRunner.runInit();

    // Set this flag to avoid actually launching a KCL worker
    Whitebox.setInternalState(source, "isStarted", true);

    // Generate test records
    List<Record> testRecords = KinesisTestUtil.getConsumerTestRecords(3);

    // Drop them into the work queue
    LinkedTransferQueue<RecordsAndCheckpointer> queue = new LinkedTransferQueue<>();

    IRecordProcessorCheckpointer checkpointer = mock(IRecordProcessorCheckpointer.class);

    List<Record> batch1 = ImmutableList.of(testRecords.get(0));
    List<Record> batch2 = ImmutableList.of(testRecords.get(1), testRecords.get(2));
    queue.add(new RecordsAndCheckpointer(batch1, checkpointer));
    queue.add(new RecordsAndCheckpointer(batch2, checkpointer));

    Whitebox.setInternalState(source, "batchQueue", queue);

    StageRunner.Output output = sourceRunner.runProduce("", 1);
    assertEquals("sequenceNumber=0::subSequenceNumber=0", output.getNewOffset());
    List<com.streamsets.pipeline.api.Record> records = output.getRecords().get("lane");
    assertEquals(1, records.size());

    output = sourceRunner.runProduce("", 10);
    assertEquals("sequenceNumber=2::subSequenceNumber=0", output.getNewOffset());
    records = output.getRecords().get("lane");
    assertEquals(2, records.size());
}

From source file: com.hurence.logisland.connect.opc.CommonOpcSourceTask.java

@Override
public void start(Map<String, String> props) {
    setConfigurationProperties(props);

    transferQueue = new LinkedTransferQueue<>();
    opcOperations = new SmartOpcOperations<>(createOpcOperations());
    ConnectionProfile connectionProfile = createConnectionProfile();
    host = connectionProfile.getConnectionUri().getHost();
    tagInfoMap = CommonUtils.parseTagsFromProperties(props).stream()
            .collect(Collectors.toMap(TagInfo::getTagId, Function.identity()));
    minWaitTime = Math.min(10, tagInfoMap.values().stream().map(TagInfo::getSamplingInterval)
            .mapToLong(Duration::toMillis).min().getAsLong());
    opcOperations.connect(connectionProfile);
    if (!opcOperations.awaitConnected()) {
        throw new ConnectException("Unable to connect");
    }

    //set up polling source emission
    pollingScheduler = Executors.newSingleThreadScheduledExecutor();
    streamingThread = Executors.newSingleThreadExecutor();
    Map<Duration, List<TagInfo>> pollingMap = tagInfoMap.values().stream()
            .filter(tagInfo -> StreamingMode.POLL.equals(tagInfo.getStreamingMode()))
            .collect(Collectors.groupingBy(TagInfo::getSamplingInterval));
    final Map<String, OpcData> lastValues = Collections.synchronizedMap(new HashMap<>());
    pollingMap.forEach((k, v) -> pollingScheduler.scheduleAtFixedRate(() -> {
        final Instant now = Instant.now();
        v.stream().map(TagInfo::getTagId).map(lastValues::get).filter(Functions.not(Objects::isNull))
                .map(data -> Pair.of(now, data)).forEach(transferQueue::add);

    }, 0, k.toNanos(), TimeUnit.NANOSECONDS));
    //then subscribe for all
    final SubscriptionConfiguration subscriptionConfiguration = new SubscriptionConfiguration()
            .withDefaultSamplingInterval(Duration.ofMillis(10_000));
    tagInfoMap.values().forEach(tagInfo -> subscriptionConfiguration
            .withTagSamplingIntervalForTag(tagInfo.getTagId(), tagInfo.getSamplingInterval()));
    running.set(true);
    streamingThread.submit(() -> {
        while (running.get()) {
            try {
                createSessionIfNeeded();
                if (session == null) {
                    return;
                }

                session.stream(subscriptionConfiguration,
                        tagInfoMap.keySet().toArray(new String[tagInfoMap.size()])).forEach(opcData -> {
                            if (tagInfoMap.get(opcData.getTag()).getStreamingMode()
                                    .equals(StreamingMode.SUBSCRIBE)) {
                                transferQueue.add(Pair.of(
                                        hasServerSideSampling() ? opcData.getTimestamp() : Instant.now(),
                                        opcData));
                            } else {
                                lastValues.put(opcData.getTag(), opcData);
                            }
                        });
            } catch (Exception e) {
                if (running.get()) {
                    logger.warn("Stream interrupted while reading from " + host, e);
                    safeCloseSession();
                    lastValues.clear();

                }
            }
        }
    });

}
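
The structure above fans two sources (a fixed-rate poller and a subscription stream) into a single transferQueue, which a poll()-based consumer presumably drains elsewhere in the task. A reduced, self-contained sketch of that fan-in half (names illustrative, not the actual CommonOpcSourceTask code):

import java.time.Instant;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedTransferQueue;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class FanInSketch {
    public static void main(String[] args) throws InterruptedException {
        LinkedTransferQueue<String> transferQueue = new LinkedTransferQueue<>();
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

        // Fixed-rate poller: enqueue a timestamped sample every 100 ms
        scheduler.scheduleAtFixedRate(
                () -> transferQueue.add(Instant.now() + " sample"),
                0, 100, TimeUnit.MILLISECONDS);

        // Consumer side: a timed poll keeps the loop responsive to shutdown
        for (int i = 0; i < 3; i++) {
            System.out.println(transferQueue.poll(1, TimeUnit.SECONDS));
        }
        scheduler.shutdownNow();
    }
}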

From source file: com.streamsets.pipeline.stage.origin.kinesis.KinesisSource.java

@Override
protected List<ConfigIssue> init() {
    List<ConfigIssue> issues = super.init();

    checkStreamExists(issues);

    if (issues.isEmpty()) {
        batchQueue = new LinkedTransferQueue<>();

        DataParserFactoryBuilder builder = new DataParserFactoryBuilder(getContext(),
                dataFormat.getParserFormat()).setMaxDataLen(50 * 1024); // Max Message for Kinesis is 50KiB

        switch (dataFormat) {
        case SDC_JSON:
            break;
        case JSON:
            builder.setMode(JsonMode.MULTIPLE_OBJECTS);
            break;
        }

        parserFactory = builder.build();

        executorService = Executors.newFixedThreadPool(1);

        IRecordProcessorFactory recordProcessorFactory = new StreamSetsRecordProcessorFactory(batchQueue);

        // Create the KCL worker with the StreamSets record processor factory
        KinesisClientLibConfiguration kclConfig = new KinesisClientLibConfiguration(applicationName, streamName,
                new DefaultAWSCredentialsProviderChain(), UUID.randomUUID().toString());
        kclConfig.withRegionName(region.getName()).withMaxRecords(maxBatchSize)
                .withIdleTimeBetweenReadsInMillis(idleTimeBetweenReads)
                .withInitialPositionInStream(InitialPositionInStream.TRIM_HORIZON); // Configurable?

        worker = new Worker.Builder().recordProcessorFactory(recordProcessorFactory).config(kclConfig).build();

        executorService.execute(worker);
        LOG.info("Launched KCL Worker");
    }
    return issues;
}
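
A note on this pattern: batchQueue is the hand-off point between the KCL worker threads launched on executorService (which produce RecordsAndCheckpointer batches via StreamSetsRecordProcessorFactory) and the origin's produce loop, which drains it. That is presumably why the TestKinesisSource example above can drive runProduce() simply by injecting a pre-filled LinkedTransferQueue into the same batchQueue field.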

From source file: org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore.java

@Override
public void start(int numSlots) throws IOException {
    if (!setRunning(true)) {
        return;
    }

    // Init buffer slots
    slots = new ByteSlot[numSlots];
    slotsCache = new LinkedTransferQueue<>();
    while (slotsCache.size() < numSlots) {
        slotsCache.offer(new ByteSlot());
    }

    // Tunings
    maxRetriesBeforeRoll = conf.getInt(MAX_RETRIES_BEFORE_ROLL_CONF_KEY, DEFAULT_MAX_RETRIES_BEFORE_ROLL);
    maxSyncFailureRoll = conf.getInt(MAX_SYNC_FAILURE_ROLL_CONF_KEY, DEFAULT_MAX_SYNC_FAILURE_ROLL);
    waitBeforeRoll = conf.getInt(WAIT_BEFORE_ROLL_CONF_KEY, DEFAULT_WAIT_BEFORE_ROLL);
    rollRetries = conf.getInt(ROLL_RETRIES_CONF_KEY, DEFAULT_ROLL_RETRIES);
    rollThreshold = conf.getLong(ROLL_THRESHOLD_CONF_KEY, DEFAULT_ROLL_THRESHOLD);
    periodicRollMsec = conf.getInt(PERIODIC_ROLL_CONF_KEY, DEFAULT_PERIODIC_ROLL);
    syncWaitMsec = conf.getInt(SYNC_WAIT_MSEC_CONF_KEY, DEFAULT_SYNC_WAIT_MSEC);
    useHsync = conf.getBoolean(USE_HSYNC_CONF_KEY, DEFAULT_USE_HSYNC);

    // Init sync thread
    syncThread = new Thread("WALProcedureStoreSyncThread") {
        @Override
        public void run() {
            try {
                syncLoop();
            } catch (Throwable e) {
                LOG.error("Got an exception from the sync-loop", e);
                if (!isSyncAborted()) {
                    sendAbortProcessSignal();
                }
            }
        }
    };
    syncThread.start();
}
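
Here the queue serves as a lock-free free-list: ByteSlot buffers are pre-allocated with offer(), then presumably borrowed with poll() and returned with offer() as the store reuses them. A self-contained sketch of that pooling pattern (with StringBuilder standing in for ByteSlot purely for illustration):

import java.util.concurrent.LinkedTransferQueue;

public class BufferPoolSketch {
    public static void main(String[] args) {
        // Pre-fill the pool, mirroring the slotsCache initialization above
        LinkedTransferQueue<StringBuilder> pool = new LinkedTransferQueue<>();
        for (int i = 0; i < 4; i++) {
            pool.offer(new StringBuilder());
        }

        // Borrow a buffer (non-blocking; null means the pool is exhausted)
        StringBuilder slot = pool.poll();
        if (slot != null) {
            slot.append("payload");
            // ... use the buffer, then reset it before returning to the pool
            slot.setLength(0);
            pool.offer(slot);
        }
        System.out.println("Pool size after return: " + pool.size());
    }
}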