Example usage for java.util.concurrent.LinkedBlockingDeque: the LinkedBlockingDeque constructor

Introduction

This page collects example usages of the java.util.concurrent.LinkedBlockingDeque constructor, drawn from open-source projects.

Prototype

public LinkedBlockingDeque(Collection<? extends E> c) 

Document

Creates a LinkedBlockingDeque with a capacity of Integer.MAX_VALUE, initially containing the elements of the given collection, added in the traversal order of the collection's iterator. Note that many of the examples below actually use the bounded overload, LinkedBlockingDeque(int capacity), which creates a deque with the given fixed capacity, or the no-argument constructor, which is equivalent to LinkedBlockingDeque(Integer.MAX_VALUE).
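
As a quick orientation before the real-world excerpts, here is a minimal, self-contained sketch of the documented constructor; the class and variable names are illustrative, not from any of the projects below.

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.LinkedBlockingDeque;

public class LinkedBlockingDequeExample {
    public static void main(String[] args) throws InterruptedException {
        // Seed the deque from an existing collection; elements are added in the
        // traversal order of the collection's iterator.
        List<String> seed = Arrays.asList("first", "second", "third");
        LinkedBlockingDeque<String> deque = new LinkedBlockingDeque<>(seed);

        System.out.println(deque.takeFirst()); // prints "first"
        System.out.println(deque.takeLast());  // prints "third"
    }
}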

Usage

From source file:nu.nethome.home.impl.HomeServer.java

public HomeServer() {
    eventQueue = new LinkedBlockingQueue<>(MAX_QUEUE_SIZE);
    logRecords = new LinkedBlockingDeque<>(LOG_RECORD_CAPACITY);
    setupLogger();
    eventCountlogger.activate(this);
    commandLineExecutor = new CommandLineExecutor(this, true);
    python = new Python();
}

From source file:org.commoncrawl.service.listcrawler.DataTransferAgent.java

static int uploadSingeFile(CCBridgeServerMapping mapping, FileSystem fs, Configuration conf, Path hdfsFilePath,
        String uploadName, EventLoop eventLoop) throws IOException {

    final FileStatus fileStatus = fs.getFileStatus(hdfsFilePath);
    LOG.info("Uploading:" + uploadName + " size:" + fileStatus.getLen() + " to:" + mapping._internalName);

    {
        // construct url 
        URL fePostURL = new URL("http://" + mapping._externalName + ":8090/");
        LOG.info("POST URL IS:" + fePostURL.toString());

        // open input stream 
        final FSDataInputStream is = fs.open(hdfsFilePath);
        final Semaphore blockingSemaphore = new Semaphore(0);
        NIOHttpConnection connection = null;
        try {
            // create connection 
            connection = new NIOHttpConnection(fePostURL, eventLoop.getSelector(), eventLoop.getResolver(),
                    null);
            // set listener 
            connection.setListener(new Listener() {

                @Override
                public void HttpConnectionStateChanged(NIOHttpConnection theConnection, State oldState,
                        State state) {
                    LOG.info("Connection State Changed to:" + state.toString());
                    if (state == State.DONE || state == State.ERROR) {
                        //LOG.info("Connection Transition to Done or Error");
                        //LOG.info("Response Headers:" + theConnection.getResponseHeaders().toString());
                        blockingSemaphore.release();
                    }
                }

                @Override
                public void HttpContentAvailable(NIOHttpConnection theConnection, NIOBufferList contentBuffer) {
                    // response content is not consumed in this upload scenario
                }
            });
            // set headers 
            connection.getRequestHeaders().reset();
            connection.getRequestHeaders().prepend("PUT /put?src=" + uploadName + " HTTP/1.1", null);
            connection.getRequestHeaders().set("Host", mapping._internalName + ":8090");
            connection.getRequestHeaders().set("Content-Length", Long.toString(fileStatus.getLen()));
            connection.getRequestHeaders().set("Connection", "keep-alive");
            connection.setPopulateDefaultHeaderItems(false);

            final LinkedBlockingDeque<BufferStruct> _loaderQueue = new LinkedBlockingDeque<BufferStruct>(20);
            final AtomicBoolean eof = new AtomicBoolean();
            final ByteBuffer sentinel = ByteBuffer.allocate(4096);
            // the sentinel carries no data; its position stays at 0 to mark zero length
            final Thread loaderThread = new Thread(new Runnable() {

                int _id = 0;

                @Override
                public void run() {
                    int bytesRead;
                    byte incomingBuffer[] = new byte[4096 * 10];
                    try {
                        while ((bytesRead = is.read(incomingBuffer)) != -1) {
                            ByteBuffer buffer = ByteBuffer.wrap(incomingBuffer, 0, bytesRead);
                            buffer.position(bytesRead);

                            //LOG.info("Loader Thread Read:"+ bytesRead + " Buffer:" + ++_id);
                            try {
                                _loaderQueue.put(new BufferStruct(buffer, _id));
                            } catch (InterruptedException e) {
                                LOG.error(CCStringUtils.stringifyException(e));
                                break;
                            }
                            incomingBuffer = new byte[4096 * 10];
                        }
                        try {
                            _loaderQueue.put(new BufferStruct(sentinel, ++_id));
                        } catch (InterruptedException e) {
                            // interrupted during shutdown; nothing left to enqueue
                        }
                    } catch (IOException e) {
                        LOG.error(CCStringUtils.stringifyException(e));
                        return;
                    }
                }

            });

            loaderThread.start();

            // set data source ... 
            connection.setDataSource(new DataSource() {

                int bytesTransferred = 0;

                @Override
                public boolean read(NIOBufferList dataBuffer) throws IOException {
                    if (eof.get())
                        return true;
                    //LOG.info("Connect read callback triggered");
                    BufferStruct buffer = _loaderQueue.poll();
                    if (buffer != null) {
                        if (buffer._buffer != sentinel) {
                            //LOG.info("Got Buffer:"+ buffer._id);
                            if (buffer._id == 1) {
                                //LOG.info("Inital Buffer Bytes:" + new String(buffer._buffer.array(),0,10).toString());
                            }
                            dataBuffer.write(buffer._buffer);
                            bytesTransferred += buffer._buffer.limit();
                            //LOG.info("Read:" + buffer._buffer.limit() + " Transfered:" + bytesTransferred);
                            return false;
                        } else {
                            //LOG.info("EOF Condition");
                            dataBuffer.write(sentinel);
                            eof.set(true);
                            return true;
                        }
                    }
                    return false;
                }
            });

            // open connection 
            connection.open();
            // wait for connection to complete ... 
            blockingSemaphore.acquireUninterruptibly();
            // kill loader thread 
            loaderThread.interrupt();
            try {
                LOG.info("Waiting for Loader Thread");
                loaderThread.join();
                LOG.info("Done Waiting for Loader Thread");
            } catch (InterruptedException e) {
                // proceed to cleanup even if interrupted while joining
            }
        } finally {
            is.close();
            if (connection != null) {
                connection.close();
                LOG.info("Response Code for File:" + uploadName + "to Host: " + mapping._internalName + " is:"
                        + connection.getResponseHeaders().getHttpResponseCode());
                return connection.getResponseHeaders().getHttpResponseCode();
                /*
                if (connection.getResponseHeaders().getHttpResponseCode() != 200) { 
                  throw new IOException("Failed to upload file:" + dataFile.getName() + " responseCode:" + connection.getResponseHeaders().getHttpResponseCode());
                }
                */
            }
        }
    }
    // something went wrong ??? 
    LOG.error("Failed to upload file:" + uploadName + " unknown response code");
    return 500;
}
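
Distilled to its essentials, the example above is a bounded handoff: a producer thread fills a LinkedBlockingDeque (blocking once 20 buffers are outstanding) while a consumer drains it, and a dedicated sentinel object, compared by identity, marks end-of-stream. A minimal sketch of just that pattern, with all names illustrative:

import java.util.concurrent.LinkedBlockingDeque;

public class BoundedHandoffSketch {
    // sentinel instance the consumer recognizes by identity, not by content
    private static final byte[] SENTINEL = new byte[0];

    public static void main(String[] args) throws InterruptedException {
        final LinkedBlockingDeque<byte[]> queue = new LinkedBlockingDeque<>(20);

        Thread producer = new Thread(() -> {
            try {
                for (int i = 0; i < 5; i++) {
                    queue.put(new byte[4096]); // blocks while 20 chunks are outstanding
                }
                queue.put(SENTINEL); // signal end-of-stream
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        producer.start();

        byte[] chunk;
        while ((chunk = queue.take()) != SENTINEL) {
            System.out.println("consumed " + chunk.length + " bytes");
        }
        producer.join();
    }
}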

From source file:org.apache.drill.sql.client.ref.DrillRefImpl.java

public Enumerator<E> enumerator() {
    // TODO: use a completion service from the container
    final ExecutorCompletionService<Collection<RunOutcome>> service = new ExecutorCompletionService<Collection<RunOutcome>>(
            new ThreadPoolExecutor(1, 1, 1, TimeUnit.SECONDS, new LinkedBlockingDeque<Runnable>(10)));

    // Run the plan using an executor. It runs in a different thread, writing
    // results to our queue.
    // TODO: use the result of task, and check for exceptions
    final Future<Collection<RunOutcome>> task = runRefInterpreterPlan(service);

    return new JsonEnumerator(task, queue, fields);

}

From source file:com.splout.db.qnode.QNodeHandlerContext.java

/**
 * This method can be called to initialize a pool of connections to a dnode. It may be called
 * from multiple threads, so it must be safe to call concurrently.
 */
public void initializeThriftClientCacheFor(String dnode) throws TTransportException, InterruptedException {
    // this lock is on the whole cache but we would actually be interested in a per-DNode lock...
    // there's only one lock for simplicity.
    thriftClientCacheLock.lock();
    try {
        // initialize queue for this DNode
        BlockingQueue<DNodeService.Client> dnodeQueue = thriftClientCache.get(dnode);
        if (dnodeQueue == null) {
            // this assures that the per-DNode queue is only created once and then reused.
            dnodeQueue = new LinkedBlockingDeque<DNodeService.Client>(thriftClientPoolSize);
        }
        if (dnodeQueue.isEmpty()) {
            try {
                for (int i = dnodeQueue.size(); i < thriftClientPoolSize; i++) {
                    dnodeQueue.put(DNodeClient.get(dnode));
                }
                // we only put the queue if all connections have been populated
                thriftClientCache.put(dnode, dnodeQueue);
            } catch (TTransportException e) {
                log.error("Error while trying to populate queue for " + dnode
                        + ", will discard created connections.", e);
                while (!dnodeQueue.isEmpty()) {
                    dnodeQueue.poll().getOutputProtocol().getTransport().close();
                }
                throw e;
            }
        } else {
            // it should be safe to call this method from different places concurrently
            // so we contemplate the case where another Thread already populated the queue
            // and only populate it if it's really empty.
            log.warn(Thread.currentThread().getName() + " : queue for [" + dnode
                    + "] is not empty - it was populated before.");
        }
    } finally {
        thriftClientCacheLock.unlock();
    }
}
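
Here the deque is used purely through the BlockingQueue interface as a fixed-size pool of Thrift clients: a caller would typically check a connection out with take() and hand it back with put(). The following fragment sketches that checkout/return pattern against the fields of the class above; the method name and the ping() RPC are assumptions for illustration, not part of the original code.

// Illustrative only: borrows one pooled client, runs an RPC, and returns it.
public String withClient(String dnode) throws Exception {
    BlockingQueue<DNodeService.Client> pool = thriftClientCache.get(dnode);
    DNodeService.Client client = pool.take(); // blocks while every pooled connection is in use
    try {
        return client.ping(); // any RPC would do; ping() is assumed here
    } finally {
        pool.put(client); // always hand the connection back, even on failure
    }
}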

From source file:com.emc.ecs.sync.EcsSync.java

public void run() {
    try {
        assert syncConfig != null : "syncConfig is null";
        assert syncConfig.getOptions() != null : "syncConfig.options is null";
        final SyncOptions options = syncConfig.getOptions();

        // Some validation (must have source and target)
        assert source != null || syncConfig.getSource() != null : "source must be specified";
        assert target != null || syncConfig.getTarget() != null : "target plugin must be specified";

        if (source == null)
            source = PluginUtil.newStorageFromConfig(syncConfig.getSource(), options);
        else
            syncConfig.setSource(source.getConfig());

        if (target == null)
            target = PluginUtil.newStorageFromConfig(syncConfig.getTarget(), options);
        else
            syncConfig.setTarget(target.getConfig());

        if (filters == null) {
            if (syncConfig.getFilters() != null)
                filters = PluginUtil.newFiltersFromConfigList(syncConfig.getFilters(), options);
            else
                filters = new ArrayList<>();
        } else {
            List<Object> filterConfigs = new ArrayList<>();
            for (SyncFilter filter : filters) {
                filterConfigs.add(filter.getConfig());
            }
            syncConfig.setFilters(filterConfigs);
        }

        // Summarize config for reference
        if (log.isInfoEnabled())
            log.info(summarizeConfig());

        // Ask each plugin to configure itself and validate the chain (resolves incompatible plugins)
        String currentPlugin = "source storage";
        try {
            source.configure(source, filters.iterator(), target);
            currentPlugin = "target storage";
            target.configure(source, filters.iterator(), target);
            for (SyncFilter filter : filters) {
                currentPlugin = filter.getClass().getSimpleName() + " filter";
                filter.configure(source, filters.iterator(), target);
            }
        } catch (Exception e) {
            log.error("Error configuring " + currentPlugin);
            throw e;
        }

        // Build the plugin chain
        Iterator<SyncFilter> i = filters.iterator();
        SyncFilter next, previous = null;
        while (i.hasNext()) {
            next = i.next();
            if (previous != null)
                previous.setNext(next);
            previous = next;
        }

        // add target to chain
        SyncFilter targetFilter = new TargetFilter(target, options);
        if (previous != null)
            previous.setNext(targetFilter);

        firstFilter = filters.isEmpty() ? targetFilter : filters.get(0);

        // register for timings
        if (options.isTimingsEnabled())
            TimingUtil.register(options);
        else
            TimingUtil.unregister(options); // in case of subsequent runs with same options instance

        log.info("Sync started at " + new Date());
        // make sure any old stats are closed to terminate the counter threads
        try (SyncStats oldStats = stats) {
            stats = new SyncStats();
        }
        stats.setStartTime(System.currentTimeMillis());
        stats.setCpuStartTime(
                ((OperatingSystemMXBean) ManagementFactory.getOperatingSystemMXBean()).getProcessCpuTime()
                        / 1000000);

        // initialize DB Service if necessary
        if (dbService == null) {
            if (options.getDbFile() != null) {
                dbService = new SqliteDbService(options.getDbFile());
            } else if (options.getDbConnectString() != null) {
                dbService = new MySQLDbService(options.getDbConnectString(), null, null);
            } else {
                dbService = new NoDbService();
            }
            if (options.getDbTable() != null)
                dbService.setObjectsTableName(options.getDbTable());
        }

        // create thread pools
        listExecutor = new EnhancedThreadPoolExecutor(options.getThreadCount(),
                new LinkedBlockingDeque<Runnable>(options.getThreadCount() * 20), "list-pool");
        estimateExecutor = new EnhancedThreadPoolExecutor(options.getThreadCount(),
                new LinkedBlockingDeque<Runnable>(options.getThreadCount() * 20), "estimate-pool");
        queryExecutor = new EnhancedThreadPoolExecutor(options.getThreadCount() * 2,
                new LinkedBlockingDeque<Runnable>(), "query-pool");
        syncExecutor = new EnhancedThreadPoolExecutor(options.getThreadCount(),
                new LinkedBlockingDeque<Runnable>(options.getThreadCount() * 20), "sync-pool");
        retrySubmitter = new EnhancedThreadPoolExecutor(options.getThreadCount(),
                new LinkedBlockingDeque<Runnable>(), "retry-submitter");

        // initialize verifier
        verifier = new Md5Verifier(options);

        // setup performance reporting
        startPerformanceReporting();

        // set status to running
        syncControl.setRunning(true);
        stats.reset();
        log.info("syncing from {} to {}", ConfigUtil.generateUri(syncConfig.getSource()),
                ConfigUtil.generateUri(syncConfig.getTarget()));

        // start estimating
        syncEstimate = new SyncEstimate();
        estimateExecutor.submit(new Runnable() {
            @Override
            public void run() {
                // do we have a list-file?
                if (options.getSourceListFile() != null) {
                    FileLineIterator lineIterator = new FileLineIterator(options.getSourceListFile());
                    while (lineIterator.hasNext()) {
                        estimateExecutor
                                .blockingSubmit(new EstimateTask(lineIterator.next(), source, syncEstimate));
                    }
                } else {
                    for (ObjectSummary summary : source.allObjects()) {
                        estimateExecutor.blockingSubmit(new EstimateTask(summary, source, syncEstimate));
                    }
                }
            }
        });

        // iterate through root objects and submit tasks for syncing and crawling (querying).
        if (options.getSourceListFile() != null) { // do we have a list-file?
            FileLineIterator lineIterator = new FileLineIterator(options.getSourceListFile());
            while (lineIterator.hasNext()) {
                if (!syncControl.isRunning())
                    break;
                final String listLine = lineIterator.next();
                listExecutor.blockingSubmit(new Runnable() {
                    @Override
                    public void run() {
                        ObjectSummary summary = source.parseListLine(listLine);
                        submitForSync(source, summary);
                        if (summary.isDirectory())
                            submitForQuery(source, summary);
                    }
                });
            }
        } else {
            for (ObjectSummary summary : source.allObjects()) {
                if (!syncControl.isRunning())
                    break;
                submitForSync(source, summary);
                if (summary.isDirectory())
                    submitForQuery(source, summary);
            }
        }

        // now we must wait until all submitted tasks are complete
        while (syncControl.isRunning()) {
            if (listExecutor.getUnfinishedTasks() <= 0 && queryExecutor.getUnfinishedTasks() <= 0
                    && syncExecutor.getUnfinishedTasks() <= 0) {
                // done
                log.info("all tasks complete");
                break;
            } else {
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                    log.warn("interrupted while sleeping", e);
                }
            }
        }

        // run a final timing log
        TimingUtil.logTimings(options);
    } catch (Throwable t) {
        log.error("unexpected exception", t);
        runError = t;
        throw t;
    } finally {
        if (!syncControl.isRunning())
            log.warn("terminated early!");
        syncControl.setRunning(false);
        if (paused) {
            paused = false;
            // must interrupt the threads that are blocked
            if (listExecutor != null)
                listExecutor.shutdownNow();
            if (estimateExecutor != null)
                estimateExecutor.shutdownNow();
            if (queryExecutor != null)
                queryExecutor.shutdownNow();
            if (retrySubmitter != null)
                retrySubmitter.shutdownNow();
            if (syncExecutor != null)
                syncExecutor.shutdownNow();
        } else {
            if (listExecutor != null)
                listExecutor.shutdown();
            if (estimateExecutor != null)
                estimateExecutor.shutdown();
            if (queryExecutor != null)
                queryExecutor.shutdown();
            if (retrySubmitter != null)
                retrySubmitter.shutdown();
            if (syncExecutor != null)
                syncExecutor.shutdown();
        }
        if (stats != null)
            stats.setStopTime(System.currentTimeMillis());

        // clean up any resources in the plugins
        cleanup();
    }
}

From source file:no.sesat.search.http.filters.SiteLocatorFilter.java

private static Deque<ServletRequest> getUsersDeque(final HttpSession session) {

    @SuppressWarnings("unchecked")
    Deque<ServletRequest> deque = (BlockingDeque<ServletRequest>) session.getAttribute(USER_REQUEST_QUEUE);

    // construct deque if necessary
    if (null == deque) {
        // it may be possible for duplicates across threads to be constructed here
        deque = new LinkedBlockingDeque<ServletRequest>(REQUEST_QUEUE_SIZE);
        session.setAttribute(USER_REQUEST_QUEUE, deque);
        session.setAttribute(USER_REQUEST_LOCK, new ReentrantLock());
    }

    return deque;
}
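
The comment above flags a benign race: two threads serving the same session can each construct a deque, and one overwrites the other. If that ever mattered, one option is to guard the check-then-set with a shared monitor, as in this sketch (the lock choice is deliberately coarse and illustrative; the attribute keys mirror the ones above):

private static Deque<ServletRequest> getUsersDequeAtomically(final HttpSession session) {
    synchronized (SiteLocatorFilter.class) {
        @SuppressWarnings("unchecked")
        Deque<ServletRequest> deque = (BlockingDeque<ServletRequest>) session
                .getAttribute(USER_REQUEST_QUEUE);
        if (null == deque) {
            deque = new LinkedBlockingDeque<ServletRequest>(REQUEST_QUEUE_SIZE);
            session.setAttribute(USER_REQUEST_QUEUE, deque);
            session.setAttribute(USER_REQUEST_LOCK, new ReentrantLock());
        }
        return deque;
    }
}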

From source file:gobblin.couchbase.writer.CouchbaseWriterTest.java

private List<Pair<AbstractDocument, Future>> writeRecords(Iterator<AbstractDocument> recordIterator,
        CouchbaseWriter writer, int outstandingRequests, long kvTimeout, TimeUnit kvTimeoutUnit)
        throws DataConversionException, UnsupportedEncodingException {
    final BlockingQueue<Pair<AbstractDocument, Future>> outstandingCallQueue = new LinkedBlockingDeque<>(
            outstandingRequests);
    final List<Pair<AbstractDocument, Future>> failedFutures = new ArrayList<>(outstandingRequests);

    int index = 0;
    long runTime = 0;
    final AtomicInteger callbackSuccesses = new AtomicInteger(0);
    final AtomicInteger callbackFailures = new AtomicInteger(0);
    final ConcurrentLinkedDeque<Throwable> callbackExceptions = new ConcurrentLinkedDeque<>();
    Verifier verifier = new Verifier();
    while (recordIterator.hasNext()) {
        AbstractDocument doc = recordIterator.next();
        index++;
        verifier.onWrite(doc);
        final long startTime = System.nanoTime();
        Future callFuture = writer.write(doc, new WriteCallback<TupleDocument>() {
            @Override
            public void onSuccess(WriteResponse<TupleDocument> writeResponse) {
                callbackSuccesses.incrementAndGet();
            }

            @Override
            public void onFailure(Throwable throwable) {
                callbackFailures.incrementAndGet();
                callbackExceptions.add(throwable);
            }
        });
        drainQueue(outstandingCallQueue, 1, kvTimeout, kvTimeoutUnit, failedFutures);
        outstandingCallQueue.add(new Pair<>(doc, callFuture));
        runTime += System.nanoTime() - startTime;
    }
    int failedWrites = 0;
    long responseStartTime = System.nanoTime();
    drainQueue(outstandingCallQueue, outstandingRequests, kvTimeout, kvTimeoutUnit, failedFutures);
    runTime += System.nanoTime() - responseStartTime;

    for (Throwable failure : callbackExceptions) {
        System.out.println(failure.getClass() + " : " + failure.getMessage());
    }
    failedWrites += failedFutures.size();

    System.out.println("Total time to send " + index + " records = " + runTime / 1000000.0 + "ms, "
            + "Failed writes = " + failedWrites + " Callback Successes = " + callbackSuccesses.get()
            + "Callback Failures = " + callbackFailures.get());

    verifier.verify(writer.getBucket());
    return failedFutures;
}

From source file:org.codice.ddf.spatial.ogc.csw.catalog.transformer.CswQueryResponseTransformer.java

public void init() {
    int numThreads = Runtime.getRuntime().availableProcessors();
    LOGGER.debug(QUERY_POOL_NAME + " size: {}", numThreads);

    /*
    - when first two args the same, get fixed size thread pool.
    - 3rd arg, keepAliveTime, ignored when !allowsCoreThreadTimeOut (the default); thus pass zero.
    - fixed (and arbitrarily) size blocking queue.
    - CswThreadFactory gives pool threads a name to ease debug.
    - tried arbitrarily large numThreads/queue-size, but did not see performance gain.
    - big queue + small pool minimizes CPU usage, OS resources, and context-switching overhead,
      but *can* lead to artificially low throughput.
    - todo: externalize config to support runtime tuning.
    */
    queryExecutor = new ThreadPoolExecutor(numThreads, numThreads, 0L, TimeUnit.MILLISECONDS,
            new LinkedBlockingDeque<Runnable>(BLOCKING_Q_INITIAL_SIZE), new CswThreadFactory(),
            new ThreadPoolExecutor.CallerRunsPolicy());

    queryExecutor.prestartAllCoreThreads();
}