Example usage for java.util.concurrent.ArrayBlockingQueue: ArrayBlockingQueue(int capacity)

Introduction

This page collects example usage of the java.util.concurrent.ArrayBlockingQueue(int capacity) constructor.

Prototype

public ArrayBlockingQueue(int capacity) 

Document

Creates an ArrayBlockingQueue with the given (fixed) capacity and default access policy.
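
A minimal, self-contained sketch of the constructor in isolation (class and variable names here are illustrative; only the JDK is assumed): a queue created with capacity 2 blocks put() once it is full and blocks take() while it is empty.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class ArrayBlockingQueueExample {
    public static void main(String[] args) throws InterruptedException {
        // Fixed capacity of 2, default (non-fair) access policy.
        BlockingQueue<String> queue = new ArrayBlockingQueue<>(2);

        Thread producer = new Thread(() -> {
            try {
                for (int i = 0; i < 5; i++) {
                    queue.put("item-" + i); // blocks while the queue is full
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        producer.start();

        for (int i = 0; i < 5; i++) {
            System.out.println(queue.take()); // blocks while the queue is empty
        }
        producer.join();
    }
}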

Usage

From source file:org.codice.ddf.commands.catalog.ReplicationCommand.java

@Override
protected Object executeWithSubject() throws Exception {
    final CatalogFacade catalog = getCatalog();

    final CatalogFacade framework = new Framework(getService(CatalogFramework.class));
    Set<String> sourceIds = framework.getSourceIds();

    while (true) {
        if (StringUtils.isBlank(sourceId) || !sourceIds.contains(sourceId)) {
            console.println("Please enter the Source ID you would like to replicate:");
            for (String id : sourceIds) {
                console.println("\t" + id);
            }
        } else {
            break;
        }
        sourceId = getInput("ID:  ");
    }

    if (batchSize > MAX_BATCH_SIZE || batchSize < 1) {
        console.println("Batch Size must be between 1 and 1000.");
        return null;
    }

    start = System.currentTimeMillis();

    final Filter filter = (cqlFilter != null) ? CQL.toFilter(cqlFilter)
            : getFilter(getFilterStartTime(start), start, Metacard.EFFECTIVE);

    QueryImpl query = new QueryImpl(filter);
    query.setRequestsTotalResultsCount(true);
    query.setPageSize(batchSize);
    query.setSortBy(new SortByImpl(Metacard.EFFECTIVE, SortOrder.DESCENDING));
    QueryRequest queryRequest = new QueryRequestImpl(query, Arrays.asList(sourceId));
    SourceResponse response;
    try {
        response = framework.query(queryRequest);
    } catch (Exception e) {
        printErrorMessage("Error occurred while querying the Federated Source.\n" + e.getMessage());
        return null;
    }

    final long totalHits = response.getHits();
    final long totalPossible;
    if (totalHits == 0) {
        console.println("No records were found to replicate.");
        return null;
    }

    // If maxMetacards is set, cap totalPossible at maxMetacards
    if (maxMetacards > 0 && maxMetacards <= totalHits) {
        totalPossible = maxMetacards;
    } else {
        totalPossible = totalHits;
    }

    console.println("Starting replication for " + totalPossible + " Records");

    if (multithreaded > 1 && totalPossible > batchSize) {
        BlockingQueue<Runnable> blockingQueue = new ArrayBlockingQueue<>(multithreaded);
        RejectedExecutionHandler rejectedExecutionHandler = new ThreadPoolExecutor.CallerRunsPolicy();
        final ExecutorService executorService = new ThreadPoolExecutor(multithreaded, multithreaded, 0L,
                TimeUnit.MILLISECONDS, blockingQueue, rejectedExecutionHandler);
        console.printf("Running %d threads during replication.%n", multithreaded);

        do {
            LOGGER.debug("In loop at iteration {}", queryIndex.get());
            final int startIndex = queryIndex.get();
            executorService.submit(new Runnable() {
                @Override
                public void run() {
                    int count = queryAndIngest(framework, catalog, startIndex, filter);
                    printProgressAndFlush(start, totalPossible, ingestCount.addAndGet(count));
                }
            });
        } while (queryIndex.addAndGet(batchSize) <= totalPossible);
        executorService.shutdown();

        while (!executorService.isTerminated()) {
            try {
                TimeUnit.SECONDS.sleep(1);
            } catch (InterruptedException e) {
                // ignore
            }
        }
    } else {
        do {
            int count = queryAndIngest(framework, catalog, queryIndex.get(), filter);
            printProgressAndFlush(start, totalPossible, ingestCount.addAndGet(count));
        } while (queryIndex.addAndGet(batchSize) <= totalPossible);
    }

    console.println();
    long end = System.currentTimeMillis();
    String completed = String.format(
            " %d record(s) replicated; %d record(s) failed; completed in %3.3f seconds.", ingestCount.get(),
            failedCount.get(), (end - start) / MS_PER_SECOND);
    LOGGER.info("Replication Complete: {}", completed);
    console.println(completed);

    if (StringUtils.isNotBlank(failedDir)) {
        writeFailedMetacards(failedMetacards);
    }

    return null;
}
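
Note the queue sizing in this example: the ArrayBlockingQueue capacity matches the thread count, and the CallerRunsPolicy handler makes the submitting thread run a batch itself whenever the queue is full, so query paging is throttled to ingest speed instead of buffering an unbounded backlog.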

From source file:org.apache.activemq.leveldb.test.ReplicatedLevelDBBrokerTest.java

@Test
@Ignore
public void testReplicationQuorumLoss() throws Throwable {

    System.out.println("======================================");
    System.out.println(" Start 2 ActiveMQ nodes.");
    System.out.println("======================================");
    startBrokerAsync(createBrokerNode("node-1", port));
    startBrokerAsync(createBrokerNode("node-2", port));
    BrokerService master = waitForNextMaster();
    System.out.println("======================================");
    System.out.println(" Start the producer and consumer");
    System.out.println("======================================");

    final AtomicBoolean stopClients = new AtomicBoolean(false);
    final ArrayBlockingQueue<String> errors = new ArrayBlockingQueue<String>(100);
    final AtomicLong receivedCounter = new AtomicLong();
    final AtomicLong sentCounter = new AtomicLong();
    Thread producer = startFailoverClient("producer", new Client() {
        @Override
        public void execute(Connection connection) throws Exception {
            Session session = connection.createSession(false, Session.CLIENT_ACKNOWLEDGE);
            MessageProducer producer = session.createProducer(session.createQueue("test"));
            long actual = 0;
            while (!stopClients.get()) {
                TextMessage msg = session.createTextMessage("Hello World");
                msg.setLongProperty("id", actual++);
                producer.send(msg);
                sentCounter.incrementAndGet();
            }
        }
    });

    Thread consumer = startFailoverClient("consumer", new Client() {
        @Override
        public void execute(Connection connection) throws Exception {
            connection.start();
            Session session = connection.createSession(false, Session.CLIENT_ACKNOWLEDGE);
            MessageConsumer consumer = session.createConsumer(session.createQueue("test"));
            long expected = 0;
            while (!stopClients.get()) {
                Message msg = consumer.receive(200);
                if (msg != null) {
                    long actual = msg.getLongProperty("id");
                    if (actual != expected) {
                        errors.offer("Receiver got unexpected msg id: " + actual + ", expected: " + expected);
                    }
                    msg.acknowledge();
                    expected = actual + 1;
                    receivedCounter.incrementAndGet();
                }
            }
        }
    });

    try {
        assertCounterMakesProgress(sentCounter, 10, TimeUnit.SECONDS);
        assertCounterMakesProgress(receivedCounter, 5, TimeUnit.SECONDS);
        assertNull(errors.poll());

        System.out.println("======================================");
        System.out.println(" Master should stop once the quorum is lost.");
        System.out.println("======================================");
        ArrayList<BrokerService> stopped = stopSlaves(); // stopping the slaves should kill the quorum.
        assertStopsWithin(master, 10, TimeUnit.SECONDS);
        assertNull(errors.poll()); // clients should not see an error since they are failover clients.
        stopped.add(master);

        System.out.println("======================================");
        System.out.println(" Restart the slave. Clients should make progress again..");
        System.out.println("======================================");
        startBrokersAsync(createBrokerNodes(stopped));
        assertCounterMakesProgress(sentCounter, 10, TimeUnit.SECONDS);
        assertCounterMakesProgress(receivedCounter, 5, TimeUnit.SECONDS);
        assertNull(errors.poll());
    } catch (Throwable e) {
        e.printStackTrace();
        throw e;
    } finally {
        // Wait for the clients to stop.
        stopClients.set(true);
        producer.join();
        consumer.join();
    }
}

From source file:org.apache.htrace.impl.HBaseSpanReceiver.java

public HBaseSpanReceiver(HTraceConfiguration conf) {
    this.queue = new ArrayBlockingQueue<Span>(1000);
    this.hconf = HBaseConfiguration.create();
    this.table = Bytes.toBytes(conf.get(TABLE_KEY, DEFAULT_TABLE));
    this.cf = Bytes.toBytes(conf.get(COLUMNFAMILY_KEY, DEFAULT_COLUMNFAMILY));
    this.icf = Bytes.toBytes(conf.get(INDEXFAMILY_KEY, DEFAULT_INDEXFAMILY));
    this.maxSpanBatchSize = conf.getInt(MAX_SPAN_BATCH_SIZE_KEY, DEFAULT_MAX_SPAN_BATCH_SIZE);
    String quorum = conf.get(COLLECTOR_QUORUM_KEY, DEFAULT_COLLECTOR_QUORUM);
    hconf.set(HConstants.ZOOKEEPER_QUORUM, quorum);
    String znodeParent = conf.get(ZOOKEEPER_ZNODE_PARENT_KEY, DEFAULT_ZOOKEEPER_ZNODE_PARENT);
    hconf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, znodeParent);
    int clientPort = conf.getInt(ZOOKEEPER_CLIENT_PORT_KEY, DEFAULT_ZOOKEEPER_CLIENT_PORT);
    hconf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, clientPort);

    // If there are already threads running, tear them down.
    if (this.service != null) {
        this.service.shutdownNow();
        this.service = null;
    }
    int numThreads = conf.getInt(NUM_THREADS_KEY, DEFAULT_NUM_THREADS);
    this.service = Executors.newFixedThreadPool(numThreads, tf);
    for (int i = 0; i < numThreads; i++) {
        this.service.submit(new WriteSpanRunnable());
    }
}
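
Here the fixed capacity of 1000 bounds how many spans can sit in memory ahead of the WriteSpanRunnable worker threads that drain the queue into HBase.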

From source file:com.streamsets.pipeline.stage.origin.websocket.WebSocketClientSource.java

@Override
public List<ConfigIssue> init(Info info, Context context) {
    List<ConfigIssue> issues = new ArrayList<>();
    this.context = context;

    String resourceUrl = conf.resourceUrl.toLowerCase();

    if (!resourceUrl.startsWith("ws://") && !resourceUrl.startsWith("wss://")) {
        LOG.error("Invalid URL: " + conf.resourceUrl);
        issues.add(context.createConfigIssue(Groups.WEB_SOCKET.name(), RESOURCE_URL_CONFIG,
                Errors.WEB_SOCKET_02, conf.resourceUrl));
    }

    try {
        new URI(conf.resourceUrl);
    } catch (URISyntaxException e) {
        LOG.error("Invalid URL: " + conf.resourceUrl, e);
        issues.add(context.createConfigIssue(Groups.WEB_SOCKET.name(), RESOURCE_URL_CONFIG,
                Errors.WEB_SOCKET_02, e.toString()));
    }

    webSocketClient = WebSocketCommon.createWebSocketClient(conf.resourceUrl, conf.tlsConfig);
    webSocketClient.setMaxIdleTimeout(0);

    conf.dataFormatConfig.stringBuilderPoolSize = getNumberOfThreads();
    conf.dataFormatConfig.init(context, conf.dataFormat, Groups.DATA_FORMAT.name(), "dataFormatConfig",
            DataFormatConstants.MAX_OVERRUN_LIMIT, issues);
    dataParserFactory = conf.dataFormatConfig.getParserFactory();

    responseConfig.dataGeneratorFormatConfig.init(context, responseConfig.dataFormat,
            WebSocketOriginGroups.WEB_SOCKET_RESPONSE.name(), "responseConfig.dataGeneratorFormatConfig",
            issues);
    dataGeneratorFactory = responseConfig.dataGeneratorFormatConfig.getDataGeneratorFactory();

    errorQueue = new ArrayBlockingQueue<>(100);
    errorList = new ArrayList<>(100);
    return issues;
}

From source file:org.apache.hadoop.hbase.client.TestAsyncTable.java

@Test
public void testSimpleMultiple() throws Exception {
    AsyncTableBase table = getTable.get();
    int count = 100;
    CountDownLatch putLatch = new CountDownLatch(count);
    IntStream.range(0, count)
            .forEach(i -> table.put(new Put(concat(row, i)).addColumn(FAMILY, QUALIFIER, concat(VALUE, i)))
                    .thenAccept(x -> putLatch.countDown()));
    putLatch.await();
    BlockingQueue<Boolean> existsResp = new ArrayBlockingQueue<>(count);
    IntStream.range(0, count).forEach(i -> table.exists(new Get(concat(row, i)).addColumn(FAMILY, QUALIFIER))
            .thenAccept(x -> existsResp.add(x)));
    for (int i = 0; i < count; i++) {
        assertTrue(existsResp.take());
    }
    BlockingQueue<Pair<Integer, Result>> getResp = new ArrayBlockingQueue<>(count);
    IntStream.range(0, count).forEach(i -> table.get(new Get(concat(row, i)).addColumn(FAMILY, QUALIFIER))
            .thenAccept(x -> getResp.add(Pair.newPair(i, x))));
    for (int i = 0; i < count; i++) {
        Pair<Integer, Result> pair = getResp.take();
        assertArrayEquals(concat(VALUE, pair.getFirst()), pair.getSecond().getValue(FAMILY, QUALIFIER));
    }
    CountDownLatch deleteLatch = new CountDownLatch(count);
    IntStream.range(0, count)
            .forEach(i -> table.delete(new Delete(concat(row, i))).thenAccept(x -> deleteLatch.countDown()));
    deleteLatch.await();
    IntStream.range(0, count).forEach(i -> table.exists(new Get(concat(row, i)).addColumn(FAMILY, QUALIFIER))
            .thenAccept(x -> existsResp.add(x)));
    for (int i = 0; i < count; i++) {
        assertFalse(existsResp.take());
    }
    IntStream.range(0, count).forEach(i -> table.get(new Get(concat(row, i)).addColumn(FAMILY, QUALIFIER))
            .thenAccept(x -> getResp.add(Pair.newPair(i, x))));
    for (int i = 0; i < count; i++) {
        Pair<Integer, Result> pair = getResp.take();
        assertTrue(pair.getSecond().isEmpty());
    }
}
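
This test uses ArrayBlockingQueue as a rendezvous for asynchronous results: the capacity equals the number of outstanding requests, so the non-blocking add() in each callback can never fail, while the blocking take() lets the test thread wait for each response in turn.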

From source file:org.flowable.job.service.impl.asyncexecutor.DefaultAsyncJobExecutor.java

protected void initAsyncJobExecutionThreadPool() {
    if (threadPoolQueue == null) {
        LOGGER.info("Creating thread pool queue of size {}", queueSize);
        threadPoolQueue = new ArrayBlockingQueue<>(queueSize);
    }

    if (executorService == null) {
        LOGGER.info("Creating executor service with corePoolSize {}, maxPoolSize {} and keepAliveTime {}",
                corePoolSize, maxPoolSize, keepAliveTime);

        BasicThreadFactory threadFactory = new BasicThreadFactory.Builder()
                .namingPattern("flowable-async-job-executor-thread-%d").build();
        executorService = new ThreadPoolExecutor(corePoolSize, maxPoolSize, keepAliveTime,
                TimeUnit.MILLISECONDS, threadPoolQueue, threadFactory);
    }
}
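
Pairing a bounded ArrayBlockingQueue with a ThreadPoolExecutor, as this example does, caps the backlog: once the core threads are busy and the queue is full, the pool grows toward maxPoolSize, and past that new work is rejected. A small sketch of that behavior (the pool and queue sizes are illustrative assumptions, not Flowable's defaults):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BoundedQueueRejectionDemo {
    public static void main(String[] args) {
        // corePoolSize 1, maxPoolSize 2, and room for only 2 queued tasks.
        ThreadPoolExecutor executor = new ThreadPoolExecutor(1, 2, 0L,
                TimeUnit.MILLISECONDS, new ArrayBlockingQueue<>(2));
        Runnable slow = () -> {
            try {
                TimeUnit.SECONDS.sleep(1);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        };
        // 2 running threads + 2 queued tasks = 4 accepted submissions.
        for (int i = 0; i < 4; i++) {
            executor.execute(slow);
        }
        try {
            executor.execute(slow); // 5th task: queue and pool are both full
        } catch (RejectedExecutionException e) {
            System.out.println("Rejected once the bounded queue filled up");
        }
        executor.shutdown();
    }
}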

From source file:skewtune.mapreduce.lib.input.MapOutputInputStream.java

MapOutputInputStream(Configuration conf, TaskID reduceId, Counter inputCounter, SecretKey jobTokenSecret,
        List<MapOutputSplit> splits) throws IOException {
    if (conf.getBoolean(JobContext.MAP_OUTPUT_COMPRESS, false)) {
        Class<? extends CompressionCodec> codecClass = getMapOutputCompressorClass(conf, DefaultCodec.class);
        codec = ReflectionUtils.newInstance(codecClass, conf);
        decompressor = CodecPool.getDecompressor(codec);
    } else {
        codec = null;
        decompressor = null;
    }

    this.inputCounter = inputCounter;
    this.jobTokenSecret = jobTokenSecret;
    this.reduceTaskId = reduceId;

    int totalBufSz = conf.getInt("skewtune.map.io.inputbuf", 4 * 1024 * 1024); // 4 MB
    PACKET_SIZE = conf.getInt("skewtune.map.io.packetsize", 128 * 1024); // 128KB

    final int numBuf = totalBufSz / PACKET_SIZE;
    buffers = new ByteBuffer[numBuf];
    for (int i = 0; i < numBuf; ++i) {
        buffers[i] = ByteBuffer.allocate(PACKET_SIZE);
    }
    this.splits = splits;

    this.q = new ArrayBlockingQueue<ByteBuffer>(numBuf - 2); // the producer and consumer may each hold one buffer
    this.fetcher = new Fetcher(conf, reduceId);
    this.fetcher.start();

    progress = new Progress();
    progress.addPhases(splits.size());
}

From source file:org.apache.cayenne.datasource.UnmanagedPoolingDataSource.java

public UnmanagedPoolingDataSource(DataSource nonPoolingDataSource, PoolingDataSourceParameters parameters) {

    int minConnections = parameters.getMinConnections();
    int maxConnections = parameters.getMaxConnections();

    // sanity check
    if (minConnections < 0) {
        throw new IllegalArgumentException("Negative min connections: " + minConnections);
    }

    if (maxConnections < 0) {
        throw new IllegalArgumentException("Negative max connections: " + maxConnections);
    }

    if (minConnections > maxConnections) {
        throw new IllegalArgumentException("Min connections (" + minConnections
                + ") is greater than max connections (" + maxConnections + ")");
    }

    this.nonPoolingDataSource = nonPoolingDataSource;
    this.maxQueueWaitTime = parameters.getMaxQueueWaitTime();
    this.validationQuery = parameters.getValidationQuery();
    this.minConnections = minConnections;
    this.maxConnections = maxConnections;
    this.pool = new ConcurrentHashMap<PoolAwareConnection, Object>((int) (maxConnections / 0.75));
    this.available = new ArrayBlockingQueue<PoolAwareConnection>(maxConnections);
    this.poolCap = new Semaphore(maxConnections);
    this.maxIdleConnections = maxIdleConnections(minConnections, maxConnections);

    // grow pool to min connections
    try {
        for (int i = 0; i < minConnections; i++) {
            PoolAwareConnection c = createUnchecked();
            reclaim(c);
        }
    } catch (BadValidationQueryException e) {
        throw new CayenneRuntimeException("Bad validation query: " + validationQuery, e);
    } catch (SQLException e) {
        LOGGER.info("Error creating new connection when starting connection pool, ignoring", e);
    }
}
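
In this pool implementation the ArrayBlockingQueue holds the idle connections while the Semaphore separately caps how many connections may exist at once; because the queue's fixed capacity equals maxConnections, a returned connection always has room in the queue.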

From source file:org.attribyte.api.pubsub.impl.server.BroadcastServlet.java

/**
 * Creates a servlet with a specified maximum body size.
 * @param endpoint The hub endpoint.
 * @param maxBodyBytes The maximum size of accepted for a notification body.
 * @param autocreateTopics If <code>true</code>, topics will be automatically created if they do not exist.
 * @param logger The logger.
 * @param filters A list of filters to be applied.
 * @param topicCache A topic cache.
 * @param replicationTopic A system topic to which all notifications are replicated. May be <code>null</code>.
 * @param maxSavedNotifications The maximum number of notifications saved in-memory for debugging purposes.
 * @param jsonEnabled If <code>true</code> a JSON body will be sent with the notification response.
 */
public BroadcastServlet(final HubEndpoint endpoint, final int maxBodyBytes, final boolean autocreateTopics,
        final Logger logger, final List<BasicAuthFilter> filters, final Cache<String, Topic> topicCache,
        final Topic replicationTopic, final int maxSavedNotifications, final boolean jsonEnabled) {
    this.endpoint = endpoint;
    this.datastore = endpoint.getDatastore();
    this.maxBodyBytes = maxBodyBytes;
    this.autocreateTopics = autocreateTopics;
    this.logger = logger;
    this.filters = filters != null ? ImmutableList.copyOf(filters) : ImmutableList.<BasicAuthFilter>of();
    this.topicCache = topicCache;
    this.replicationTopic = replicationTopic;
    this.maxSavedNotifications = maxSavedNotifications;
    this.jsonEnabled = jsonEnabled;

    final int queueLimit = maxSavedNotifications * 2;
    final int drainTriggerLimit = queueLimit - maxSavedNotifications / 2;

    this.recentNotifications = maxSavedNotifications > 0 ? new ArrayBlockingQueue<>(queueLimit) : null;
    this.recentNotificationsSize = new AtomicInteger();
    if (recentNotifications != null) {
        this.recentNotificationsMonitor = new Thread(new Runnable() {
            @Override
            public void run() {
                while (true) {
                    try {
                        int currSize = recentNotificationsSize.get();
                        if (currSize >= drainTriggerLimit) {
                            int maxDrained = currSize - maxSavedNotifications;
                            List<NotificationRecord> drain = Lists.newArrayListWithCapacity(maxDrained);
                            int numDrained = recentNotifications.drainTo(drain, maxSavedNotifications);
                            recentNotificationsSize.addAndGet(-1 * numDrained);
                        } else {
                            Thread.sleep(100L);
                        }
                    } catch (InterruptedException ie) {
                        return;
                    }
                }
            }
        });
        this.recentNotificationsMonitor.setName("recent-notifications-monitor");
        this.recentNotificationsMonitor.setDaemon(true);
        this.recentNotificationsMonitor.start();
    } else {
        this.recentNotificationsMonitor = null;
    }
}

From source file:eagle.jobrunning.crawler.RunningJobCrawlerImpl.java

public RunningJobCrawlerImpl(RunningJobCrawlConfig config, JobRunningZKStateManager zkStateManager,
        RunningJobCallback callback, JobFilter jobFilter, ReadWriteLock readWriteLock) {
    this.endpointConfig = config.endPointConfig;
    this.controlConfig = config.controlConfig;
    this.callback = callback;
    this.fetcher = new RMResourceFetcher(endpointConfig);
    this.jobFilter = jobFilter;
    this.readWriteLock = readWriteLock;
    if (config.controlConfig.jobInfoEnabled) {
        jobCompleteInfoProcessThread = new Thread() {
            @Override
            public void run() {
                startCompleteJobInfoProcessThread();
            }
        };
        jobCompleteInfoProcessThread.setName("JobCompleteInfo-process-thread");
        jobCompleteInfoProcessThread.setDaemon(true);

        jobCompleteStatusCheckerThread = new Thread() {
            @Override
            public void run() {
                startCompleteStatusCheckerThread();
            }
        };
        jobCompleteStatusCheckerThread.setName("JobComplete-statusChecker-thread");
        jobCompleteStatusCheckerThread.setDaemon(true);
    }

    if (config.controlConfig.jobConfigEnabled) {
        jobConfigProcessThread = new Thread() {
            @Override
            public void run() {
                startJobConfigProcessThread();
            }
        };
        jobConfigProcessThread.setName("JobConfig-process-thread");
        jobConfigProcessThread.setDaemon(true);
    }

    zkCleanupThread = new Thread() {
        @Override
        public void run() {
            startzkCleanupThread();
        }
    };
    zkCleanupThread.setName("zk-cleanup-thread");
    zkCleanupThread.setDaemon(true);

    this.zkStateManager = zkStateManager;
    this.processingJobMap.put(ResourceType.JOB_CONFIGURATION, new ConcurrentHashMap<String, JobContext>());
    this.processingJobMap.put(ResourceType.JOB_COMPLETE_INFO, new ConcurrentHashMap<String, JobContext>());
    this.queueOfConfig = new ArrayBlockingQueue<JobContext>(controlConfig.sizeOfJobConfigQueue);
    this.queueOfCompleteJobInfo = new ArrayBlockingQueue<JobContext>(controlConfig.sizeOfJobCompletedInfoQueue);
}