Example usage for java.util.concurrent ArrayBlockingQueue ArrayBlockingQueue(int capacity, boolean fair)

Introduction

This page collects example usages of the java.util.concurrent ArrayBlockingQueue(int capacity, boolean fair) constructor, drawn from open-source projects.

Prototype

public ArrayBlockingQueue(int capacity, boolean fair) 

Document

Creates an ArrayBlockingQueue with the given (fixed) capacity and the specified access policy.
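
The following is a minimal, self-contained sketch (not taken from any of the projects below) illustrating what the two arguments mean: the capacity is fixed for the lifetime of the queue, and fair = true serves blocked producer and consumer threads in FIFO order, at some cost in throughput.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;

public class FairQueueDemo {
    public static void main(String[] args) throws InterruptedException {
        // Capacity 2, fair access policy: blocked threads are served FIFO.
        BlockingQueue<String> queue = new ArrayBlockingQueue<>(2, true);

        queue.put("a"); // put() blocks indefinitely while the queue is full
        queue.put("b");

        // offer() with a timeout is the bounded-wait alternative to put():
        // it returns false if no space frees up within the timeout.
        boolean accepted = queue.offer("c", 100, TimeUnit.MILLISECONDS);
        System.out.println("third element accepted: " + accepted); // false

        System.out.println(queue.take()); // prints "a": elements leave in FIFO order
    }
}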

Usage

From source file:org.compass.core.lucene.engine.transaction.async.AsyncTransactionProcessorFactory.java

public void configure(CompassSettings settings) throws CompassException {
    this.settings = settings;
    jobsToProcess = new ArrayBlockingQueue<TransactionJobs>(
            settings.getSettingAsInt(LuceneEnvironment.Transaction.Processor.Async.BACKLOG, 10), true);

    addTimeout = settings.getSettingAsTimeInMillis(LuceneEnvironment.Transaction.Processor.Async.ADD_TIMEOUT,
            10 * 1000);
    if (logger.isDebugEnabled()) {
        logger.debug("Async Transaction Processor will wait for [" + addTimeout + "ms] if backlog is full");
    }

    batchJobsSize = settings.getSettingAsInt(LuceneEnvironment.Transaction.Processor.Async.BATCH_JOBS_SIZE, 5);
    // Note: as published, this line read BATCH_JOBS_SIZE again, which looks like a
    // copy-paste slip; the timeout setting is clearly what is intended here.
    batchJobTimeout = settings
            .getSettingAsTimeInMillis(LuceneEnvironment.Transaction.Processor.Async.BATCH_JOBS_TIMEOUT, 100);
    if (logger.isDebugEnabled()) {
        logger.debug("Async Transaction Processor blocking batch size is [" + batchJobsSize
                + "] with timeout of [" + batchJobTimeout + "ms]");
    }

    nonBlockingBatchSize = settings
            .getSettingAsInt(LuceneEnvironment.Transaction.Processor.Async.NON_BLOCKING_BATCH_JOBS_SIZE, 5);
    if (logger.isDebugEnabled()) {
        logger.debug("Async Transaction Processor non blocking batch size is [" + nonBlockingBatchSize + "]");
    }

    processBeforeClose = settings
            .getSettingAsBoolean(LuceneEnvironment.Transaction.Processor.Async.PROCESS_BEFORE_CLOSE, true);
    if (logger.isDebugEnabled()) {
        logger.debug("Async Transaction Processor process before close is set to [" + processBeforeClose + "]");
    }

    this.concurrencyLevel = settings
            .getSettingAsInt(LuceneEnvironment.Transaction.Processor.Async.CONCURRENCY_LEVEL, 5);
    if (logger.isDebugEnabled()) {
        logger.debug("Async Transaction Processor will use [" + concurrencyLevel
                + "] concrrent threads to process transactions");
    }

    hashing = ResourceHashing
            .fromName(settings.getSetting(LuceneEnvironment.Transaction.Processor.Async.HASHING, "uid"));
    if (logger.isDebugEnabled()) {
        logger.debug(
                "Async Transaction Processor uses [" + hashing + "] based hashing for concurrent processing");
    }

    if (logger.isDebugEnabled()) {
        logger.debug("Starting Async polling transaction processor");
    }
}
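
The backlog above is a bounded fair queue, and addTimeout bounds how long a producer waits when it is full. The add path itself lives elsewhere in the processor; the following is only a sketch of the pattern these two settings imply, using the timed offer variant (the jobs variable is hypothetical):

// Sketch only: how a producer would add to the fair, bounded backlog.
// offer() waits up to addTimeout ms for space instead of blocking indefinitely.
boolean added = jobsToProcess.offer(jobs, addTimeout, TimeUnit.MILLISECONDS);
if (!added) {
    // The backlog stayed full for the whole timeout; the caller must fall back
    // (e.g. process the jobs synchronously or report the failure).
}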

From source file:de.fhg.fokus.diameter.DiameterPeer.DiameterPeer.java

/**
 * Configures a DiameterPeer from a configuration file or string. Calling this with a
 * path and isFile == true is equivalent to the DiameterPeer(path) constructor.
 *
 * @param xml
 *            XML configuration payload, or a path/URI pointing to one.
 * @param isFile
 *            true if xml is a path/URI, false if it is raw XML.
 */
public void configure(String xml, boolean isFile) {
    Acceptor acc;
    NodeList nl;
    Node n, nv;
    int port, app_id, vendor_id;
    InetAddress addr;

    Application app;

    this.eventListeners = new Vector<EventListener>();

    java.util.Random rand = new java.util.Random();
    this.hopbyhop_id = rand.nextInt();
    this.endtoend_id = ((int) (System.currentTimeMillis() & 0xFFF)) << 20;
    this.endtoend_id |= rand.nextInt() & 0xFFFFF;

    if (isFile) {
        /* parse the config */
        if (!readConfigFile(xml)) {
            log.error("DiameterPeer: Error parsing config file");
            return;
        }
    } else {
        /* parse the config */
        if (!readConfigString(xml)) {
            log.error("DiameterPeer: Error parsing config String");
            return;
        }
    }

    this.FQDN = this.config.getDocumentElement().getAttribute("FQDN");
    this.Realm = this.config.getDocumentElement().getAttribute("Realm");
    this.Vendor_Id = Integer.parseInt(this.config.getDocumentElement().getAttribute("Vendor_Id"));
    this.Product_Name = this.config.getDocumentElement().getAttribute("Product_Name");
    this.AcceptUnknownPeers = Integer
            .parseInt(this.config.getDocumentElement().getAttribute("AcceptUnknownPeers")) != 0;
    this.DropUnknownOnDisconnect = Integer
            .parseInt(this.config.getDocumentElement().getAttribute("DropUnknownOnDisconnect")) != 0;
    this.Tc = Integer.parseInt(this.config.getDocumentElement().getAttribute("Tc"));
    this.workerCount = Integer.parseInt(this.config.getDocumentElement().getAttribute("Workers"));
    this.queueLength = Integer.parseInt(this.config.getDocumentElement().getAttribute("QueueLength"));
    this.queueTasks = new ArrayBlockingQueue<DiameterTask>(this.queueLength, true);
    this.reqTimeout = Long.parseLong(this.config.getDocumentElement().getAttribute("ReqTimeout"));

    log.info("FQDN: " + this.FQDN);
    log.info("Realm: " + this.Realm);
    log.info("Vendor_ID : " + this.Vendor_Id);
    log.info("Product Name: " + this.Product_Name);
    log.info("AcceptUnknwonPeers: " + this.AcceptUnknownPeers);
    log.info("DropUnknownOnDisconnect: " + this.DropUnknownOnDisconnect);
    log.info("ReqTimeout: " + this.reqTimeout);

    startWorkers();

    /* Read Supported Application ids */
    this.AuthApp = new Vector<Application>();
    this.AcctApp = new Vector<Application>();
    this.hostIpAdds = new Vector<String>();
    // get auth data from DiameterPeerHSS.xml.env-vm
    nl = this.config.getDocumentElement().getElementsByTagName("Auth");
    for (int i = 0; i < nl.getLength(); i++) {
        n = nl.item(i);
        app_id = Integer.parseInt(n.getAttributes().getNamedItem("id").getNodeValue());
        vendor_id = 0;
        if (n.getAttributes().getNamedItem("vendor") != null)
            vendor_id = Integer.parseInt(n.getAttributes().getNamedItem("vendor").getNodeValue());

        app = new Application(app_id, vendor_id, Application.Auth);
        this.AuthApp.add(app);
    }
    // get acct data from DiameterPeerHSS.xml.env-vm
    nl = this.config.getDocumentElement().getElementsByTagName("Acct");
    for (int i = 0; i < nl.getLength(); i++) {
        n = nl.item(i);
        app_id = Integer.parseInt(n.getAttributes().getNamedItem("id").getNodeValue());
        vendor_id = 0;
        if (n.getAttributes().getNamedItem("vendor") != null)
            vendor_id = Integer.parseInt(n.getAttributes().getNamedItem("vendor").getNodeValue());

        app = new Application(app_id, vendor_id, Application.Acct);
        this.AcctApp.add(app);
    }

    /* Initialize the Peer Manager */
    this.peerManager = new PeerManager(this);

    /* Read the peers from the configuration file */
    nl = this.config.getDocumentElement().getElementsByTagName("Peer");
    for (int i = 0; i < nl.getLength(); i++) {
        n = nl.item(i);

        String localIp = n.getAttributes().getNamedItem("LocalIp").getNodeValue();

        int localPort = Integer.parseInt(n.getAttributes().getNamedItem("LocalPort").getNodeValue());

        this.peerFQDN = n.getAttributes().getNamedItem("FQDN").getNodeValue();

        this.peerRealm = n.getAttributes().getNamedItem("Realm").getNodeValue();

        nv = n.getAttributes().getNamedItem("port");
        port = (nv == null) ? 3868 : Integer.parseInt(nv.getNodeValue());

        this.peerManager.configurePeer(localIp, localPort, this.peerFQDN, this.peerRealm, "", port);
    }

    /* Create & start connection acceptors */
    this.acceptors = new Vector<Acceptor>();
    nl = this.config.getDocumentElement().getElementsByTagName("Acceptor");
    for (int i = 0; i < nl.getLength(); i++) {
        n = nl.item(i);

        nv = n.getAttributes().getNamedItem("port");
        if (nv != null) {
            this.bindPort = Integer.parseInt(nv.getNodeValue());
        }

        addr = null;
        nv = n.getAttributes().getNamedItem("bind");
        if (nv != null && nv.getNodeValue().length() != 0)
            try {
                this.bindIp = nv.getNodeValue();
                this.hostIpAdds.add(this.bindIp);
                addr = InetAddress.getByName(this.bindIp);
            } catch (UnknownHostException e) {
                log.error("DiameterPeer: Can not resolve " + this.bindIp);
                e.printStackTrace();
                continue;
            }
        acc = new Acceptor(bindPort, addr, this);
        acc.startAccepting();
        this.acceptors.add(acc);
    }

    // get host ip address data from DiameterPeerHSS.xml.env-vm
    nl = this.config.getDocumentElement().getElementsByTagName("HostIpAdd");
    for (int i = 0; i < nl.getLength(); i++) {
        n = nl.item(i);
        String hostIpAddress = n.getAttributes().getNamedItem("ip").getNodeValue();
        this.hostIpAdds.add(hostIpAddress);
    }

    initRoutingTable(this.config);

    this.peerManager.start();

}
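
Here the fair queue, sized from the configured QueueLength, is the hand-off point between the peer and the worker threads started by startWorkers(): fairness means blocked workers receive DiameterTask instances in the order in which they began waiting.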

From source file:org.apache.solr.handler.component.HttpShardHandlerFactory.java

@Override
public void init(PluginInfo info) {
    StringBuilder sb = new StringBuilder();
    NamedList args = info.initArgs;
    this.soTimeout = getParameter(args, HttpClientUtil.PROP_SO_TIMEOUT, soTimeout, sb);
    this.scheme = getParameter(args, INIT_URL_SCHEME, null, sb);
    if (StringUtils.endsWith(this.scheme, "://")) {
        this.scheme = StringUtils.removeEnd(this.scheme, "://");
    }

    String strategy = getParameter(args, "metricNameStrategy",
            UpdateShardHandlerConfig.DEFAULT_METRICNAMESTRATEGY, sb);
    this.metricNameStrategy = KNOWN_METRIC_NAME_STRATEGIES.get(strategy);
    if (this.metricNameStrategy == null) {
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown metricNameStrategy: " + strategy
                + " found. Must be one of: " + KNOWN_METRIC_NAME_STRATEGIES.keySet());
    }

    this.connectionTimeout = getParameter(args, HttpClientUtil.PROP_CONNECTION_TIMEOUT, connectionTimeout, sb);
    this.maxConnectionsPerHost = getParameter(args, HttpClientUtil.PROP_MAX_CONNECTIONS_PER_HOST,
            maxConnectionsPerHost, sb);
    this.maxConnections = getParameter(args, HttpClientUtil.PROP_MAX_CONNECTIONS, maxConnections, sb);
    this.corePoolSize = getParameter(args, INIT_CORE_POOL_SIZE, corePoolSize, sb);
    this.maximumPoolSize = getParameter(args, INIT_MAX_POOL_SIZE, maximumPoolSize, sb);
    this.keepAliveTime = getParameter(args, MAX_THREAD_IDLE_TIME, keepAliveTime, sb);
    this.queueSize = getParameter(args, INIT_SIZE_OF_QUEUE, queueSize, sb);
    this.accessPolicy = getParameter(args, INIT_FAIRNESS_POLICY, accessPolicy, sb);
    log.debug("created with {}", sb);

    // magic sysprop to make tests reproducible: set by SolrTestCaseJ4.
    String v = System.getProperty("tests.shardhandler.randomSeed");
    if (v != null) {
        r.setSeed(Long.parseLong(v));
    }

    BlockingQueue<Runnable> blockingQueue = (this.queueSize == -1)
            ? new SynchronousQueue<Runnable>(this.accessPolicy)
            : new ArrayBlockingQueue<Runnable>(this.queueSize, this.accessPolicy);

    this.commExecutor = new ExecutorUtil.MDCAwareThreadPoolExecutor(this.corePoolSize, this.maximumPoolSize,
            this.keepAliveTime, TimeUnit.SECONDS, blockingQueue,
            new DefaultSolrThreadFactory("httpShardExecutor"));

    ModifiableSolrParams clientParams = getClientParams();
    httpRequestExecutor = new InstrumentedHttpRequestExecutor(this.metricNameStrategy);
    clientConnectionManager = new InstrumentedPoolingHttpClientConnectionManager(
            HttpClientUtil.getSchemaRegisteryProvider().getSchemaRegistry());
    this.defaultClient = HttpClientUtil.createClient(clientParams, clientConnectionManager, false,
            httpRequestExecutor);
    this.loadbalancer = createLoadbalancer(defaultClient);
}
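
Note the queueSize == -1 convention: it selects a SynchronousQueue (a direct hand-off with no capacity, so each submission needs an idle or newly created thread) rather than a bounded ArrayBlockingQueue. The fairness setting read from INIT_FAIRNESS_POLICY applies either way, since both queue types accept a fairness argument in their constructors.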

From source file:com.jkoolcloud.tnt4j.streams.inputs.RedirectTNT4JStream.java

@Override
protected void initialize() throws Exception {
    super.initialize();

    inputBuffer = new ArrayBlockingQueue<>(bufferSize, true);

    initializeStreamInternals();
}

From source file:com.yobidrive.diskmap.needles.NeedleManager.java

private void initializeBuffersAndCache() throws NeedleManagerException {
    if (logNumber < 0)
        createNewLog();
    // Now prepare read threads
    threadBufferQ = new ArrayBlockingQueue<ByteBuffer>(readThreads, true);
    try {
        for (int i = 0; i < readThreads; i++) {
            ByteBuffer needleBuffer = ByteBuffer
                    .allocateDirect(MAXKEYSIZE + MAXVERSIONSIZE + MAXDATASIZE + Needle.NEEDLEOVERHEAD);
            threadBufferQ.put(needleBuffer);
        }
    } catch (Throwable th) {
        logger.error("Error building needle reader buffers", th);
    }
    // Finally create readCaches
    NeedleWeighter needleWeighter = new NeedleWeighter();
    needleReadCache = CacheBuilder.newBuilder().weigher(needleWeighter).maximumWeight(this.maxCachedBytes)
            .build(new NeedleCacheLoader(this));
    NeedleHeaderWeighter needleHeaderWeighter = new NeedleHeaderWeighter();
    needleHeaderReadCache = (CacheBuilder.newBuilder()).weigher(needleHeaderWeighter)
            .maximumWeight(this.maxCachedHeaderBytes).build(new NeedleHeaderCacheLoader(this));
    logger.info(needleHeaderReadCache.stats().toString());
    // Create compacting buffer
    compactBuffer = ByteBuffer
            .allocateDirect(MAXKEYSIZE + MAXVERSIONSIZE + MAXDATASIZE + Needle.NEEDLEOVERHEAD);
}
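
Unusually, the fair queue here is not a work queue but a fixed pool of direct ByteBuffers: it is pre-filled with one buffer per reader thread, and readers are expected to check a buffer out and return it when done. A minimal sketch of that checkout pattern (the surrounding code is hypothetical, not from NeedleManager):

// Sketch only: checking a pooled buffer out of the pre-filled threadBufferQ.
ByteBuffer needleBuffer = threadBufferQ.take(); // blocks until a buffer is free
try {
    // ... read a needle from disk into needleBuffer ...
} finally {
    threadBufferQ.put(needleBuffer); // always return the buffer to the pool
}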

From source file:de.tudarmstadt.lt.seg.app.Segmenter.java

private void run_parallel() throws Exception {

    InputStream in = System.in;
    if (!"-".equals(_filename_in))
        in = new FileInputStream(_filename_in);
    Stream<String> liter = new BufferedReader(new InputStreamReader(in, Charset.defaultCharset())).lines();

    ThreadLocal<ISentenceSplitter> sentenceSplitter = ThreadLocal.withInitial(() -> {
        try {
            return newSentenceSplitter();
        } catch (ClassNotFoundException | InstantiationException | IllegalAccessException e) {
            throw new RuntimeException(e);
        }
    });
    ThreadLocal<ITokenizer> tokenizer = ThreadLocal.withInitial(() -> {
        try {
            return newTokenizer();
        } catch (ClassNotFoundException | InstantiationException | IllegalAccessException e) {
            throw new RuntimeException(e);
        }
    });

    final PrintWriter[] w = new PrintWriter[_parallelism];
    // init writers
    for (int i = 0; i < _parallelism; i++) {
        OutputStream out = System.out;
        if (!"-".equals(_filename_out)) {
            out = new FileOutputStream(String.format("%s_%d", _filename_out, i));
        }
        w[i] = new PrintWriter(new OutputStreamWriter(out, Charset.defaultCharset()));
    }

    BlockingQueue<Runnable> queue = new ArrayBlockingQueue<Runnable>(_parallelism * 2, true);
    ExecutorService es = new ThreadPoolExecutor(_parallelism, _parallelism, 0L, TimeUnit.MILLISECONDS, queue);

    AtomicLong lc = new AtomicLong(0);
    liter.forEach((line) -> {
        // Don't submit new tasks while the queue is full; wait for capacity to free up.
        while (queue.remainingCapacity() == 0)
            try {
                Thread.sleep(10);
            } catch (InterruptedException e) {
                /* ignore and re-check the queue */
            }
        es.submit(() -> {
            final long docid = lc.incrementAndGet();
            if (docid % 1000 == 0)
                System.err.format("Processing line %d ('%s')%n", docid, _filename_in);
            final int w_i = (int) (docid % _parallelism);
            split_and_tokenize(new StringReader(line.trim()), String.format("%s:%d", _filename_in, docid),
                    sentenceSplitter.get(), tokenizer.get(), _level_filter, _level_normalize, _merge_types,
                    _merge_tokens, _separator_sentence, _separator_token, _separator_desc, w[w_i]);

        });
    });
    es.shutdown();
    es.awaitTermination(Integer.MAX_VALUE, TimeUnit.DAYS);

    // TODO: the stream parallelism version does not work because it submits too many threads at once
    //      AtomicLong lc = new AtomicLong(0);
    //      ForkJoinPool forkJoinPool = new ForkJoinPool(_parallelism);
    //      forkJoinPool.submit(() -> 
    //         liter.parallel().forEach((line) -> {
    //            final long docid = lc.incrementAndGet();
    //            if(docid % 1000 == 0)
    //               System.err.format("Processing line %d ('%s')%n", docid, _filename_in);
    //   
    //            String l = line.replace("\\t", "\t").replace("\\n", "\n");
    //            split_and_tokenize(
    //                  new StringReader(l),
    //                  String.format("%s:%d", _filename_in, docid),
    //                  sentenceSplitter.get(), 
    //                  tokenizer.get(), 
    //                  _level_filter,
    //                  _level_normalize,
    //                  _merge_types,
    //                  _merge_tokens,
    //                  _separator_sentence,
    //                  _separator_token,
    //                  _separator_desc,
    //                  w);
    //      })).get();

}
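
The sleep-poll loop above works around ThreadPoolExecutor's default behavior of rejecting submissions once the bounded queue is full. An alternative (not what Segmenter does) is the built-in CallerRunsPolicy, which achieves the same backpressure without polling by running rejected tasks on the submitting thread:

// Alternative sketch: caller-runs backpressure instead of the sleep-poll loop.
ExecutorService es = new ThreadPoolExecutor(_parallelism, _parallelism, 0L, TimeUnit.MILLISECONDS,
        new ArrayBlockingQueue<>(_parallelism * 2, true),
        new ThreadPoolExecutor.CallerRunsPolicy());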

From source file:com.streamsets.datacollector.execution.runner.common.TestProductionPipeline.java

private ProductionPipeline createProductionPipeline(DeliveryGuarantee deliveryGuarantee,
        boolean captureNextBatch, long rateLimit, boolean sourceOffsetCommitter) throws Exception {
    SourceOffsetTracker tracker = new TestUtil.SourceOffsetTrackerImpl("1");
    SnapshotStore snapshotStore = Mockito.mock(FileSnapshotStore.class);

    Mockito.when(snapshotStore.getInfo(PIPELINE_NAME, REVISION, SNAPSHOT_NAME))
            .thenReturn(new SnapshotInfoImpl("user", "SNAPSHOT_NAME", "SNAPSHOT LABEL", PIPELINE_NAME, REVISION,
                    System.currentTimeMillis(), false));
    BlockingQueue<Object> productionObserveRequests = new ArrayBlockingQueue<>(100, true /* FIFO */);
    Configuration config = new Configuration();
    config.set("monitor.memory", true);
    ProductionPipelineRunner runner = new ProductionPipelineRunner(PIPELINE_NAME, REVISION, config, runtimeInfo,
            new MetricRegistry(), snapshotStore, null);
    runner.setObserveRequests(productionObserveRequests);
    runner.setMemoryLimitConfiguration(memoryLimit);
    runner.setDeliveryGuarantee(deliveryGuarantee);
    if (rateLimit > 0) {
        runner.setRateLimit(rateLimit);
    }
    PipelineConfiguration pConf = (sourceOffsetCommitter)
            ? MockStages.createPipelineConfigurationSourceOffsetCommitterProcessorTarget()
            : MockStages.createPipelineConfigurationSourceProcessorTarget();

    ProductionPipeline pipeline = new ProductionPipelineBuilder(PIPELINE_NAME, REVISION, config, runtimeInfo,
            MockStages.createStageLibrary(), runner, null).build(pConf);
    runner.setOffsetTracker(tracker);

    if (captureNextBatch) {
        runner.capture("snapshot", 1, 1);
    }

    return pipeline;
}
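
The true /* FIFO */ flag on the observe-request queue presumably keeps the order in which blocked threads are served deterministic, which suits a test fixture; functionally, an unfair queue would work as well.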