Example usage for java.util.concurrent Semaphore Semaphore

Introduction

On this page you can find example usage for the java.util.concurrent Semaphore(int) constructor.

Prototype

public Semaphore(int permits) 

Document

Creates a Semaphore with the given number of permits and nonfair fairness setting.
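
Before the real-world examples below, here is a minimal, self-contained sketch of the one-argument constructor (class and thread names are illustrative). Because this constructor uses the nonfair setting, threads blocked in acquire() are not guaranteed to obtain permits in FIFO order.

import java.util.concurrent.Semaphore;

public class SemaphoreSketch {
    public static void main(String[] args) {
        final Semaphore permits = new Semaphore(2); // nonfair by default

        Runnable task = () -> {
            try {
                permits.acquire(); // blocks until a permit is free
                try {
                    System.out.println(Thread.currentThread().getName() + " holds a permit");
                    Thread.sleep(100); // simulate work
                } finally {
                    permits.release(); // always return the permit
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        };

        for (int i = 0; i < 4; i++) {
            new Thread(task, "worker-" + i).start();
        }
    }
}

With four workers and two permits, at most two of the workers can be inside the guarded section at any moment.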

Usage

From source file: org.commoncrawl.service.listcrawler.DataTransferAgent.java

public static void main(String[] args) {

    Logger logger = Logger.getLogger("org.commoncrawl");
    logger.setLevel(Level.INFO);
    BasicConfigurator.configure();

    Configuration conf = new Configuration();

    conf.addResource("core-site.xml");
    conf.addResource("hdfs-site.xml");

    // set a big io buffer size ... 
    conf.setInt("io.file.buffer.size", 4096 * 1024);

    final File transferLogDir = new File("/home/rana/ccprod/data/proxy_xfr_log");
    final Path hdfsCacheDataPath = new Path("crawl/proxy/cache/");
    final File shutdownFile = new File("/home/rana/ccprod/data/shutdown_xfr");

    // create a deque .. 
    final LinkedBlockingDeque<ProxyTransferItem> itemQueue = new LinkedBlockingDeque<ProxyTransferItem>();

    final EventLoop eventLoop = new EventLoop();
    eventLoop.start();

    try {

        final DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(conf);
        Thread transferThreads[] = new Thread[TRANSFER_THREADS_PER_HOST * mappingsTable.size()];
        Semaphore shutdownSemaphore = new Semaphore(0);
        int threadIndex = 0;
        for (int i = 0; i < TRANSFER_THREADS_PER_HOST; ++i) {
            int serverIdx = 0;
            for (CCBridgeServerMapping mapping : mappingsTable) {
                transferThreads[(i * mappingsTable.size()) + serverIdx++] = startTransferThread(threadIndex++,
                        mapping, shutdownFile, fs, conf, itemQueue, eventLoop, shutdownSemaphore);
            }
        }

        Thread scannerThread = new Thread(new Runnable() {

            long _lastScanId = -1;
            long _lastOutOfOrderDataDirId = -1L;

            static final int SCAN_INTERVAL_MS = 500;

            @Override
            public void run() {

                while (true) {

                    try {
                        if (shutdownFile.exists()) {
                            LOG.info("Shutdown File Detected in ScanTimer Outer Loop. Exiting Scan Thread");
                            return;
                        }

                        LOG.info("Scanning For Files based on filter. Last Known Scan Id is:" + _lastScanId);
                        FileStatus fileList[] = fs.listStatus(hdfsCacheDataPath, new PathFilter() {

                            @Override
                            public boolean accept(Path path) {
                                try {
                                    if (path.getName().startsWith("cacheData-")) {
                                        // extract file id ... 
                                        long currentFileId = Long
                                                .parseLong(path.getName().substring("cacheData-".length()));
                                        // figure out if we are going to process it ... 
                                        if (_lastScanId == -1 || currentFileId > _lastScanId) {
                                            return true;
                                        }
                                    }
                                } catch (Exception e) {
                                    LOG.error("Caught Exception Processing Path Filter:"
                                            + CCStringUtils.stringifyException(e));
                                }
                                return false;
                            }
                        });
                        LOG.info("Scan returned:" + fileList.length + " Number of Valid Files");

                        long latestFileId = 0L;
                        for (FileStatus file : fileList) {
                            // extract file id ... 
                            long currentFileId = Long
                                    .parseLong(file.getPath().getName().substring("cacheData-".length()));
                            // figure out if we are going to process it ... 
                            if (_lastScanId == -1 || currentFileId > _lastScanId) {
                                // cache max latest id ..
                                latestFileId = Math.max(latestFileId, currentFileId);
                                File logFile = hdfsCacheFileToLogFileLocation(transferLogDir, file);
                                if (logFile != null) {
                                    if (logFile.exists()) {
                                        LOG.info("Skipping:" + file.getPath().getName());
                                    } else {
                                        LOG.info("Queueing File:" + file.getPath().getName());
                                        itemQueue.add(new ProxyTransferItem(file.getPath(), logFile,
                                                file.getPath().getName()));
                                    }
                                }
                            }
                        }
                        // ok, update the latest file id
                        _lastScanId = Math.max(_lastScanId, latestFileId);

                        FileStatus outofOrderDataDirs[] = fs
                                .globStatus(new Path("crawl/proxy/dtAgentOutOfOrderTransfers/*"));

                        for (FileStatus outOfOrderDataDir : outofOrderDataDirs) {
                            long dataDirId = Long.parseLong(outOfOrderDataDir.getPath().getName());
                            if (dataDirId > _lastOutOfOrderDataDirId) {
                                FileStatus candidates[] = fs
                                        .globStatus(new Path(outOfOrderDataDir.getPath(), "part-*"));

                                for (FileStatus candidate : candidates) {
                                    File logFile = outOfOrderFileToLogFileLocation(transferLogDir,
                                            candidate.getPath());
                                    if (logFile != null) {
                                        String candidateName = candidate.getPath().getParent().getName() + "-"
                                                + candidate.getPath().getName();

                                        if (logFile.exists()) {
                                            LOG.info("Skipping OOB FILE:" + candidateName);

                                        } else {
                                            LOG.info("Queueing OOB FILE:" + candidateName);
                                            itemQueue.add(new ProxyTransferItem(candidate.getPath(), logFile,
                                                    candidateName));
                                        }
                                    }
                                }
                                _lastOutOfOrderDataDirId = dataDirId;
                            }
                        }

                        LOG.info("Finish Scan. Last Known Scan Id is now:" + _lastScanId);

                    } catch (Exception e) {
                        LOG.error(CCStringUtils.stringifyException(e));
                    }

                    try {
                        Thread.sleep(SCAN_INTERVAL_MS);
                    } catch (InterruptedException e) {
                    }
                }
            }
        });

        // start scanner thread ... 
        scannerThread.start();

        LOG.info("Waiting on Transfer Threads");
        shutdownSemaphore.acquireUninterruptibly(TRANSFER_THREADS_PER_HOST * mappingsTable.size());
        LOG.info("ALL Transfer Threads Dead.");
        // wait for scanner thread to die 
        LOG.info("Waiting for Scanner Thread to Die.");
        try {
            scannerThread.join();
        } catch (InterruptedException e) {
        }
        LOG.info("Killing Event Loop");
        eventLoop.stop();

    } catch (IOException e) {
        LOG.error(CCStringUtils.stringifyException(e));
    }

}
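
The shutdownSemaphore above shows a common idiom: a Semaphore(0) used as a countdown latch. Each transfer thread releases one permit when it exits, and the main thread blocks in acquireUninterruptibly(n) until all n permits have arrived. A minimal sketch of that idiom (worker count and class name are illustrative):

import java.util.concurrent.Semaphore;

public class ShutdownLatchSketch {
    public static void main(String[] args) {
        final int workers = 4;
        final Semaphore done = new Semaphore(0); // no permits until workers finish

        for (int i = 0; i < workers; i++) {
            new Thread(() -> {
                try {
                    // ... do work ...
                } finally {
                    done.release(); // each worker adds one permit on exit
                }
            }).start();
        }

        // blocks until all workers have released, ignoring interrupts
        done.acquireUninterruptibly(workers);
        System.out.println("all workers finished");
    }
}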

From source file: com.netflix.curator.framework.recipes.locks.TestInterProcessMutexBase.java

@Test
public void testReentrantSingleLock() throws Exception {
    final int THREAD_QTY = 10;

    CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), new RetryOneTime(1));
    client.start();
    try {
        final AtomicBoolean hasLock = new AtomicBoolean(false);
        final AtomicBoolean isFirst = new AtomicBoolean(true);
        final Semaphore semaphore = new Semaphore(1);
        final InterProcessLock mutex = makeLock(client);

        List<Future<Object>> threads = Lists.newArrayList();
        ExecutorService service = Executors.newCachedThreadPool();
        for (int i = 0; i < THREAD_QTY; ++i) {
            Future<Object> t = service.submit(new Callable<Object>() {
                @Override
                public Object call() throws Exception {
                    semaphore.acquire();
                    mutex.acquire();
                    Assert.assertTrue(hasLock.compareAndSet(false, true));
                    try {
                        if (isFirst.compareAndSet(true, false)) {
                            semaphore.release(THREAD_QTY - 1);
                            while (semaphore.availablePermits() > 0) {
                                Thread.sleep(100);
                            }
                        } else {
                            Thread.sleep(100);
                        }
                    } finally {
                        mutex.release();
                        hasLock.set(false);
                    }
                    return null;
                }
            });
            threads.add(t);
        }

        for (Future<Object> t : threads) {
            t.get();
        }
    } finally {
        client.close();
    }
}
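
This test also demonstrates that release can add several permits in one call: the first thread through the mutex invokes semaphore.release(THREAD_QTY - 1) to unblock all of its peers at once. A distilled sketch of that one-then-all gate (thread count and names are illustrative):

import java.util.concurrent.Semaphore;
import java.util.concurrent.atomic.AtomicBoolean;

public class MultiReleaseGateSketch {
    public static void main(String[] args) {
        final int threads = 5;
        final Semaphore gate = new Semaphore(1); // exactly one thread starts unblocked
        final AtomicBoolean first = new AtomicBoolean(true);

        for (int i = 0; i < threads; i++) {
            new Thread(() -> {
                gate.acquireUninterruptibly();
                if (first.compareAndSet(true, false)) {
                    // only the first thread opens the gate for all the others at once
                    gate.release(threads - 1);
                }
                System.out.println(Thread.currentThread().getName() + " passed the gate");
            }).start();
        }
    }
}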

From source file: com.arpnetworking.metrics.impl.ApacheHttpSinkTest.java

@Test
public void testPostSuccess() throws InterruptedException {
    final String start = Instant.now().minusMillis(812).atZone(ZoneId.of("UTC"))
            .format(DateTimeFormatter.ISO_INSTANT);
    final String end = Instant.now().atZone(ZoneId.of("UTC")).format(DateTimeFormatter.ISO_INSTANT);
    final String id = UUID.randomUUID().toString();

    _wireMockRule.stubFor(WireMock.requestMatching(new RequestValueMatcher(r -> {
        // Annotations
        Assert.assertEquals(7, r.getAnnotationsCount());
        assertAnnotation(r.getAnnotationsList(), "foo", "bar");
        assertAnnotation(r.getAnnotationsList(), "_service", "myservice");
        assertAnnotation(r.getAnnotationsList(), "_cluster", "mycluster");
        assertAnnotation(r.getAnnotationsList(), "_start", start);
        assertAnnotation(r.getAnnotationsList(), "_end", end);
        assertAnnotation(r.getAnnotationsList(), "_id", id);

        // Dimensions
        Assert.assertEquals(3, r.getDimensionsCount());
        assertDimension(r.getDimensionsList(), "host", "some.host.com");
        assertDimension(r.getDimensionsList(), "service", "myservice");
        assertDimension(r.getDimensionsList(), "cluster", "mycluster");

        // Samples
        assertSample(r.getTimersList(), "timerLong", 123L, ClientV2.Unit.Type.Value.SECOND,
                ClientV2.Unit.Scale.Value.NANO);
        assertSample(r.getTimersList(), "timerInt", 123, ClientV2.Unit.Type.Value.SECOND,
                ClientV2.Unit.Scale.Value.NANO);
        assertSample(r.getTimersList(), "timerShort", (short) 123, ClientV2.Unit.Type.Value.SECOND,
                ClientV2.Unit.Scale.Value.NANO);
        assertSample(r.getTimersList(), "timerByte", (byte) 123, ClientV2.Unit.Type.Value.SECOND,
                ClientV2.Unit.Scale.Value.NANO);
        assertSample(r.getCountersList(), "counter", 8d);
        assertSample(r.getGaugesList(), "gauge", 10d, ClientV2.Unit.Type.Value.BYTE,
                ClientV2.Unit.Scale.Value.UNIT);
    })).willReturn(WireMock.aResponse().withStatus(200)));

    final AtomicBoolean assertionResult = new AtomicBoolean(false);
    final Semaphore semaphore = new Semaphore(0);
    final Sink sink = new ApacheHttpSink.Builder()
            .setUri(URI.create("http://localhost:" + _wireMockRule.port() + PATH))
            .setEventHandler(new AttemptCompletedAssertionHandler(assertionResult, 1, 451, true,
                    new CompletionHandler(semaphore)))
            .build();

    final Map<String, String> annotations = new LinkedHashMap<>();
    annotations.put("foo", "bar");
    annotations.put("_start", start);
    annotations.put("_end", end);
    annotations.put("_host", "some.host.com");
    annotations.put("_service", "myservice");
    annotations.put("_cluster", "mycluster");
    annotations.put("_id", id);

    final TsdEvent event = new TsdEvent(annotations,
            createQuantityMap("timerLong", TsdQuantity.newInstance(123L, Units.NANOSECOND), "timerInt",
                    TsdQuantity.newInstance(123, Units.NANOSECOND), "timerShort",
                    TsdQuantity.newInstance((short) 123, Units.NANOSECOND), "timerByte",
                    TsdQuantity.newInstance((byte) 123, Units.NANOSECOND)),
            createQuantityMap("counter", TsdQuantity.newInstance(8d, null)),
            createQuantityMap("gauge", TsdQuantity.newInstance(10d, Units.BYTE)));

    sink.record(event);
    semaphore.acquire();

    // Ensure expected handler was invoked
    Assert.assertTrue(assertionResult.get());

    // Request matcher
    final RequestPatternBuilder requestPattern = WireMock.postRequestedFor(WireMock.urlEqualTo(PATH))
            .withHeader("Content-Type", WireMock.equalTo("application/octet-stream"));

    // Assert that data was sent
    _wireMockRule.verify(1, requestPattern);
    Assert.assertTrue(_wireMockRule.findUnmatchedRequests().getRequests().isEmpty());
}
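
Here the Semaphore(0) bridges an asynchronous completion handler and the test thread: the handler releases a permit when the HTTP attempt completes, and semaphore.acquire() parks the test until it does. A reduced sketch of that handshake, using a plain executor as a stand-in for the sink's callback machinery:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;

public class AsyncCompletionSketch {
    public static void main(String[] args) throws InterruptedException {
        final Semaphore completed = new Semaphore(0);
        ExecutorService executor = Executors.newSingleThreadExecutor();

        executor.submit(() -> {
            // ... perform the asynchronous work ...
            completed.release(); // signal the waiting thread
        });

        completed.acquire(); // park until the callback has fired
        System.out.println("async work observed");
        executor.shutdown();
    }
}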

From source file: org.apache.hadoop.hdfs.server.blockmanagement.TestBlockReportRateLimiting.java

/**
 * Start a 2-node cluster with only one block report lease.  When the
 * first datanode gets a lease, kill it.  Then wait for the lease to
 * expire, and the second datanode to send a full block report.
 */
@Test(timeout = 180000)
public void testLeaseExpiration() throws Exception {
    Configuration conf = new Configuration();
    conf.setInt(DFS_NAMENODE_MAX_FULL_BLOCK_REPORT_LEASES, 1);
    conf.setLong(DFS_NAMENODE_FULL_BLOCK_REPORT_LEASE_LENGTH_MS, 100L);

    final Semaphore gotFbrSem = new Semaphore(0);
    final AtomicReference<String> failure = new AtomicReference<>();
    final AtomicReference<MiniDFSCluster> cluster = new AtomicReference<>();
    final AtomicReference<String> datanodeToStop = new AtomicReference<>();
    final BlockManagerFaultInjector injector = new BlockManagerFaultInjector() {

        @Override
        public void incomingBlockReportRpc(DatanodeID nodeID, BlockReportContext context) throws IOException {
            if (context.getLeaseId() == 0) {
                setFailure(failure,
                        "Got unexpected rate-limiting-" + "bypassing full block report RPC from " + nodeID);
            }
            if (nodeID.getXferAddr().equals(datanodeToStop.get())) {
                throw new IOException("Injecting failure into block " + "report RPC for " + nodeID);
            }
            gotFbrSem.release();
        }

        @Override
        public void requestBlockReportLease(DatanodeDescriptor node, long leaseId) {
            if (leaseId == 0) {
                return;
            }
            datanodeToStop.compareAndSet(null, node.getXferAddr());
        }

        @Override
        public void removeBlockReportLease(DatanodeDescriptor node, long leaseId) {
        }
    };
    try {
        BlockManagerFaultInjector.instance = injector;
        cluster.set(new MiniDFSCluster.Builder(conf).numDataNodes(2).build());
        cluster.get().waitActive();
        Assert.assertNotNull(cluster.get().stopDataNode(datanodeToStop.get()));
        gotFbrSem.acquire();
        Assert.assertNull(failure.get());
    } finally {
        if (cluster.get() != null) {
            cluster.get().shutdown();
        }
    }
}

From source file: org.jimcat.services.jobs.Job.java

/**
 * This constructor creates a new job using the given fields.
 * 
 * @param manager -
 *            the jobManager used to execute this job.
 * @param name -
 *            the name of this job, e.g. Image Import
 * @param description -
 *            a short description for the job
 */
public Job(JobManager manager, String name, String description) {
    // Job values
    state = JobState.PREPARING;
    jobManager = manager;
    jobName = name;
    jobDescription = description;

    // Listener management
    listeners = new CopyOnWriteArrayList<JobListener>();

    // concurrency management
    suspendSemaphore = new Semaphore(1);
    stateLock = new Object();
    failerDescriptionLock = new Object();
}
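
suspendSemaphore is a binary semaphore: created with a single permit, it can act as a suspend/resume switch. The sketch below is a guess at how such a job might use it; the method names are assumptions, not taken from the Job class shown here:

import java.util.concurrent.Semaphore;

public class SuspendableWorkerSketch {
    private final Semaphore suspendSemaphore = new Semaphore(1);

    public void suspend() {
        suspendSemaphore.acquireUninterruptibly(); // take the single permit away
    }

    public void resume() {
        suspendSemaphore.release(); // give it back
    }

    public void doStep() {
        // the worker touches the permit at safe points; while suspended,
        // this call blocks until resume() is invoked
        suspendSemaphore.acquireUninterruptibly();
        suspendSemaphore.release();
        // ... perform one unit of work ...
    }
}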

From source file: com.netflix.curator.framework.recipes.cache.TestNodeCache.java

@Test
public void testBasics() throws Exception {
    NodeCache cache = null;
    Timing timing = new Timing();
    CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), timing.session(),
            timing.connection(), new RetryOneTime(1));
    client.start();
    try {
        client.create().forPath("/test");

        cache = new NodeCache(client, "/test/node");
        cache.start(true);

        final Semaphore semaphore = new Semaphore(0);
        cache.getListenable().addListener(new NodeCacheListener() {
            @Override
            public void nodeChanged() throws Exception {
                semaphore.release();
            }
        });

        Assert.assertNull(cache.getCurrentData());

        client.create().forPath("/test/node", "a".getBytes());
        Assert.assertTrue(timing.acquireSemaphore(semaphore));
        Assert.assertEquals(cache.getCurrentData().getData(), "a".getBytes());

        client.setData().forPath("/test/node", "b".getBytes());
        Assert.assertTrue(timing.acquireSemaphore(semaphore));
        Assert.assertEquals(cache.getCurrentData().getData(), "b".getBytes());

        client.delete().forPath("/test/node");
        Assert.assertTrue(timing.acquireSemaphore(semaphore));
        Assert.assertNull(cache.getCurrentData());
    } finally {
        IOUtils.closeQuietly(cache);
        IOUtils.closeQuietly(client);
    }
}
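
timing.acquireSemaphore(semaphore) is a Curator test helper; the underlying idea is a timed tryAcquire, which keeps the test from hanging forever if the expected event never fires. A plain-JDK sketch of the same idea (the timeout value is illustrative):

import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;

public class TimedAcquireSketch {
    public static void main(String[] args) throws InterruptedException {
        Semaphore events = new Semaphore(0);

        new Thread(events::release).start(); // the "event" being waited for

        // fail fast instead of blocking forever if the event never happens
        if (events.tryAcquire(10, TimeUnit.SECONDS)) {
            System.out.println("event observed");
        } else {
            throw new AssertionError("timed out waiting for event");
        }
    }
}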

From source file: net.sourceforge.fullsync.cli.Main.java

public static void startup(String[] args, Launcher launcher) throws Exception {
    initOptions();
    String configDir = getConfigDir();
    CommandLineParser parser = new DefaultParser();
    CommandLine line = null;

    try {
        line = parser.parse(options, args);
    } catch (ParseException ex) {
        System.err.println(ex.getMessage());
        printHelp();
        System.exit(1);
    }

    if (line.hasOption('V')) {
        System.out.println(String.format("FullSync version %s", Util.getFullSyncVersion())); //$NON-NLS-1$
        System.exit(0);
    }

    // Apply modifying options
    if (!line.hasOption("v")) { //$NON-NLS-1$
        System.setErr(new PrintStream(new FileOutputStream(getLogFileName())));
    }

    if (line.hasOption("h")) { //$NON-NLS-1$
        printHelp();
        System.exit(0);
    }

    upgradeLegacyPreferencesLocation(configDir);

    String profilesFile;
    if (line.hasOption("P")) { //$NON-NLS-1$
        profilesFile = line.getOptionValue("P"); //$NON-NLS-1$
    } else {
        profilesFile = configDir + FullSync.PROFILES_XML;
        upgradeLegacyProfilesXmlLocation(profilesFile);
    }
    final String preferencesFile = configDir + FullSync.PREFERENCES_PROPERTIES;
    final Injector injector = Guice.createInjector(new FullSyncModule(line, preferencesFile));
    final RuntimeConfiguration rtConfig = injector.getInstance(RuntimeConfiguration.class);
    injector.getInstance(ProfileManager.class).setProfilesFileName(profilesFile);
    final ScheduledExecutorService scheduledExecutorService = injector
            .getInstance(ScheduledExecutorService.class);
    final EventListener deadEventListener = new EventListener() {
        private final Logger logger = LoggerFactory.getLogger("DeadEventLogger"); //$NON-NLS-1$

        @Subscribe
        private void onDeadEvent(DeadEvent deadEvent) {
            if (!(deadEvent.getEvent() instanceof ShutdownEvent)) {
                logger.warn("Dead event triggered: {}", deadEvent); //$NON-NLS-1$
            }
        }
    };
    final EventBus eventBus = injector.getInstance(EventBus.class);
    eventBus.register(deadEventListener);

    final Semaphore sem = new Semaphore(0);
    Runtime.getRuntime().addShutdownHook(new Thread(() -> {
        Logger logger = LoggerFactory.getLogger(Main.class);
        logger.debug("shutdown hook called, starting orderly shutdown"); //$NON-NLS-1$
        eventBus.post(new ShutdownEvent());
        scheduledExecutorService.shutdown();
        try {
            scheduledExecutorService.awaitTermination(5, TimeUnit.MINUTES);
        } catch (InterruptedException e) {
            // not relevant
        }
        logger.debug("shutdown hook finished, releasing main thread semaphore"); //$NON-NLS-1$
        sem.release();
    }));
    if (rtConfig.isDaemon().orElse(false).booleanValue() || rtConfig.getProfileToRun().isPresent()) {
        finishStartup(injector);
        sem.acquireUninterruptibly();
        System.exit(0);
    } else {
        launcher.launchGui(injector);
        System.exit(0);
    }
}
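
Here sem exists solely to park the main thread: it starts with zero permits, so acquireUninterruptibly() blocks until the JVM shutdown hook releases it after cleanup. Reduced to its essentials (the cleanup body is a placeholder):

import java.util.concurrent.Semaphore;

public class DaemonMainSketch {
    public static void main(String[] args) {
        final Semaphore shutdown = new Semaphore(0);

        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            // ... orderly cleanup ...
            shutdown.release(); // let main() finish
        }));

        System.out.println("daemon running; send SIGTERM/Ctrl-C to stop");
        shutdown.acquireUninterruptibly(); // park the main thread
    }
}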

From source file: com.thoughtworks.studios.shine.cruise.stage.details.LazyStageGraphLoaderTest.java

@Test
public void shouldNotReuseTransformerAcrossConcurrentInvocations() throws InterruptedException {
    final StageIdentifier stageId = new StageIdentifier("pipeline-foo", 23, "stage-1", "1");

    final Semaphore invocationBlocker = new Semaphore(1);
    final DummyStageResourceImporter realLoader = new DummyStageResourceImporter(realGraph(), stageId,
            invocationBlocker);

    final LazyStageGraphLoader loader = new LazyStageGraphLoader(realLoader, stageStorage, 2);

    final XSLTTransformerRegistry[] transformerRegistryUsed = new XSLTTransformerRegistry[2];

    invocationBlocker.acquire();

    Thread firstThd = new Thread(new Runnable() {
        public void run() {
            loader.load(stageId);
            invocationBlocker.release();
        }
    });

    firstThd.start();

    while (invocationBlocker.getQueueLength() == 0) {
        Thread.sleep(10);
    }
    transformerRegistryUsed[0] = realLoader.transformerRegistry;

    Thread secondThd = new Thread(new Runnable() {
        public void run() {
            stageStorage.clear();
            loader.load(stageId);
        }
    });

    secondThd.start();

    while (invocationBlocker.getQueueLength() == 1) {
        Thread.sleep(10);
    }
    transformerRegistryUsed[1] = realLoader.transformerRegistry;

    invocationBlocker.release();

    firstThd.join();
    secondThd.join();

    assertThat(transformerRegistryUsed[0], not(sameInstance(transformerRegistryUsed[1])));
}

From source file: org.apache.hadoop.raid.ParallelStreamReader.java

/**
 * Reads data from multiple streams in parallel and puts the data in a queue.
 * @param streams The input streams to read from.
 * @param bufSize The amount of data to read from each stream in each go.
 * @param numThreads Number of threads to use for parallelism.
 * @param boundedBufferCapacity The capacity of the queue in which read results are placed.
 */

public ParallelStreamReader(Progressable reporter, InputStream[] streams, int bufSize, int numThreads,
        int boundedBufferCapacity, long maxBytesPerStream, boolean computeChecksum, OutputStream[] outs)
        throws IOException {
    this.reporter = reporter;
    this.computeChecksum = computeChecksum;
    this.streams = new InputStream[streams.length];
    this.endOffsets = new long[streams.length];
    if (computeChecksum) {
        this.checksums = new CRC32[streams.length];
    }
    this.outs = outs;
    for (int i = 0; i < streams.length; i++) {
        this.streams[i] = streams[i];
        if (this.streams[i] instanceof DFSDataInputStream) {
            DFSDataInputStream stream = (DFSDataInputStream) this.streams[i];
            // in directory raiding, the block size for each input stream 
            // might be different, so we need to determine the endOffset of
            // each stream by their own block size.
            List<LocatedBlock> blocks = stream.getAllBlocks();
            if (blocks.size() == 0) {
                this.endOffsets[i] = Long.MAX_VALUE;
                if (computeChecksum) {
                    this.checksums[i] = null;
                }
            } else {
                long blockSize = blocks.get(0).getBlockSize();
                this.endOffsets[i] = stream.getPos() + blockSize;
                if (computeChecksum) {
                    this.checksums[i] = new CRC32();
                }
            }
        } else {
            this.endOffsets[i] = Long.MAX_VALUE;
            if (computeChecksum) {
                this.checksums[i] = null;
            }
        }
        streams[i] = null; // Take over ownership of streams.
    }
    this.bufSize = bufSize;
    this.boundedBuffer = new ArrayBlockingQueue<ReadResult>(boundedBufferCapacity);
    if (numThreads > streams.length) {
        this.numThreads = streams.length;
    } else {
        this.numThreads = numThreads;
    }
    this.remainingBytesPerStream = maxBytesPerStream;
    this.slots = new Semaphore(this.numThreads);
    ThreadFactory ParallelStreamReaderFactory = new ThreadFactoryBuilder()
            .setNameFormat("ParallelStreamReader-read-pool-%d").build();
    this.readPool = Executors.newFixedThreadPool(this.numThreads, ParallelStreamReaderFactory);
    this.mainThread = new MainThread();
    mainThread.setName("ParallelStreamReader-main");
}
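
slots is created with one permit per reader thread, capping how many reads are outstanding at once: the dispatcher acquires a slot before handing work to the pool, and the worker releases it when done. A generic sketch of that bounded-dispatch idiom (pool size and task count are illustrative):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;

public class BoundedDispatchSketch {
    public static void main(String[] args) throws InterruptedException {
        final int slots = 4;
        final Semaphore available = new Semaphore(slots);
        ExecutorService pool = Executors.newFixedThreadPool(slots);

        for (int task = 0; task < 20; task++) {
            available.acquire(); // wait for a free slot before dispatching
            final int id = task;
            pool.submit(() -> {
                try {
                    System.out.println("processing task " + id);
                } finally {
                    available.release(); // free the slot for the dispatcher
                }
            });
        }
        pool.shutdown();
    }
}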

From source file: com.pinterest.pinlater.client.PinLaterQueryIssuer.java

private void issueEnqueueRequests(PinLater.ServiceIface iface) throws InterruptedException {
    Preconditions.checkNotNull(queueName, "Queue was not specified.");
    final AtomicLong queriesIssued = new AtomicLong(0);
    final Semaphore permits = new Semaphore(concurrency);
    while (numQueries == -1 || queriesIssued.get() < numQueries) {
        final PinLaterEnqueueRequest request = new PinLaterEnqueueRequest();
        request.setQueueName(queueName);
        for (int i = 0; i < batchSize; i++) {
            PinLaterJob job = new PinLaterJob(
                    ByteBuffer.wrap(new String("task_" + random.nextInt(Integer.MAX_VALUE)).getBytes()));
            job.setPriority(priority);
            request.addToJobs(job);
        }
        final long startTimeNanos = System.nanoTime();
        queriesIssued.incrementAndGet();
        permits.acquire();
        iface.enqueueJobs(REQUEST_CONTEXT, request)
                .respond(new Function<Try<PinLaterEnqueueResponse>, BoxedUnit>() {
                    @Override
                    public BoxedUnit apply(Try<PinLaterEnqueueResponse> responseTry) {
                        permits.release();
                        statsLogger
                                .requestComplete(Duration.fromNanoseconds(System.nanoTime() - startTimeNanos));
                        if (responseTry.isThrow()) {
                            LOG.info("Exception for request: " + request + " : " + ((Throw) responseTry).e());
                        }
                        return BoxedUnit.UNIT;
                    }
                });
    }
    permits.acquire(concurrency);
    LOG.info("Enqueue queries issued: " + queriesIssued);
}
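
This method combines two semaphore idioms: permits.acquire() before each call throttles the number of in-flight requests to concurrency, the asynchronous callback releases the permit, and the final permits.acquire(concurrency) blocks until every outstanding request has completed. A compact sketch of the same throttle-and-drain pattern, with the asynchronous call simulated by an executor:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;

public class ThrottleAndDrainSketch {
    public static void main(String[] args) throws InterruptedException {
        final int concurrency = 8;
        final Semaphore permits = new Semaphore(concurrency);
        ExecutorService async = Executors.newCachedThreadPool();

        for (int i = 0; i < 100; i++) {
            permits.acquire(); // at most 'concurrency' requests in flight
            async.submit(() -> {
                try {
                    // ... issue the request and handle the response ...
                } finally {
                    permits.release(); // the completion callback frees the permit
                }
            });
        }

        permits.acquire(concurrency); // drain: wait for all outstanding requests
        System.out.println("all requests complete");
        async.shutdown();
    }
}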