Example usage for java.util.concurrent Semaphore release

List of usage examples for java.util.concurrent Semaphore release

Introduction

On this page you can find example usage for java.util.concurrent Semaphore release().

Prototype

public void release() 

Document

Releases a permit, returning it to the semaphore.
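
Most of the examples below use release() as a cross-thread completion signal: a Semaphore created with zero (or negative) initial permits blocks the waiting thread until a worker thread or asynchronous callback releases a permit. Unlike a lock, a permit may be released by a thread that never acquired one, which is what makes this pattern work. A minimal sketch of the pattern, assuming only the JDK (the class and message names are illustrative):

import java.util.concurrent.Semaphore;

public class ReleaseSketch {

    public static void main(String[] args) throws InterruptedException {
        // zero initial permits: acquire() blocks until some thread calls release()
        final Semaphore done = new Semaphore(0);

        Thread worker = new Thread(() -> {
            // ... perform some work ...
            done.release(); // hand a permit to the waiting main thread
        });
        worker.start();

        done.acquire(); // blocks here until the worker releases
        System.out.println("worker finished");
    }
}

Several of the examples below extend this to N workers by constructing the semaphore with -(N - 1) initial permits, so the final acquire succeeds only after every worker has released.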

Usage

From source file:org.commoncrawl.util.MapReduceJobStatsWriter.java

public static void main(String[] args) {
    LOG.info("Initializing Hadoop Config");

    Configuration conf = new Configuration();

    conf.addResource("nutch-default.xml");
    conf.addResource("nutch-site.xml");
    conf.addResource("hadoop-default.xml");
    conf.addResource("hadoop-site.xml");
    conf.addResource("commoncrawl-default.xml");
    conf.addResource("commoncrawl-site.xml");

    CrawlEnvironment.setHadoopConfig(conf);
    CrawlEnvironment.setDefaultHadoopFSURI("hdfs://ccn01:9000/");

    // test the stats Writer ... 
    try {

        LOG.info("Opening Stats Writer");
        MapReduceJobStatsWriter<IntWritable, Text> statsWriter = new MapReduceJobStatsWriter<IntWritable, Text>(
                CrawlEnvironment.getDefaultFileSystem(), conf, IntWritable.class, Text.class, "test", "group1",
                12345L);

        LOG.info("Writing Entries");
        for (int i = 0; i < 1000; ++i) {
            statsWriter.appendLogEntry(new IntWritable(i), new Text("Log Entry #" + i));
        }
        LOG.info("Flushing / Closing");
        // zero initial permits: the acquire below blocks until the close() callback releases
        final Semaphore blockingSemaphore = new Semaphore(0);
        statsWriter.close(new Callback() {

            @Override
            public void execute() {
                LOG.info("Completion Callback Triggered");
                blockingSemaphore.release();
            }

        });
        LOG.info("Waiting on Semaphore");
        blockingSemaphore.acquireUninterruptibly();
        LOG.info("Acquired Semaphore");

        LOG.info("Closed");

        Path hdfsPath = new Path(Environment.HDFS_LOGCOLLECTOR_BASEDIR,
                "test" + "/" + "group1" + "/" + Long.toString(12345L));

        LOG.info("Opening Reader");
        SequenceFile.Reader reader = new SequenceFile.Reader(CrawlEnvironment.getDefaultFileSystem(), hdfsPath,
                conf);
        IntWritable key = new IntWritable();
        Text value = new Text();
        while (reader.next(key, value)) {
            LOG.info("Key:" + key.get() + " Value:" + value.toString());
        }
        reader.close();

    } catch (IOException e) {
        LOG.error(CCStringUtils.stringifyException(e));
    }

}

From source file:org.apache.pulsar.testclient.ManagedLedgerWriter.java

public static void main(String[] args) throws Exception {

    final Arguments arguments = new Arguments();
    JCommander jc = new JCommander(arguments);
    jc.setProgramName("pulsar-perf-producer");

    try {
        jc.parse(args);
    } catch (ParameterException e) {
        System.out.println(e.getMessage());
        jc.usage();
        System.exit(-1);
    }

    if (arguments.help) {
        jc.usage();
        System.exit(-1);
    }

    arguments.testTime = TimeUnit.SECONDS.toMillis(arguments.testTime);

    // Dump config variables
    ObjectMapper m = new ObjectMapper();
    ObjectWriter w = m.writerWithDefaultPrettyPrinter();
    log.info("Starting Pulsar managed-ledger perf writer with config: {}", w.writeValueAsString(arguments));

    byte[] payloadData = new byte[arguments.msgSize];
    ByteBuf payloadBuffer = PooledByteBufAllocator.DEFAULT.directBuffer(arguments.msgSize);
    payloadBuffer.writerIndex(arguments.msgSize);

    // Now processing command line arguments
    String managedLedgerPrefix = "test-" + DigestUtils.sha1Hex(UUID.randomUUID().toString()).substring(0, 5);

    ClientConfiguration bkConf = new ClientConfiguration();
    bkConf.setUseV2WireProtocol(true);
    bkConf.setAddEntryTimeout(30);
    bkConf.setReadEntryTimeout(30);
    bkConf.setThrottleValue(0);
    bkConf.setNumChannelsPerBookie(arguments.maxConnections);
    bkConf.setZkServers(arguments.zookeeperServers);

    ManagedLedgerFactoryConfig mlFactoryConf = new ManagedLedgerFactoryConfig();
    mlFactoryConf.setMaxCacheSize(0);
    ManagedLedgerFactory factory = new ManagedLedgerFactoryImpl(bkConf, mlFactoryConf);

    ManagedLedgerConfig mlConf = new ManagedLedgerConfig();
    mlConf.setEnsembleSize(arguments.ensembleSize);
    mlConf.setWriteQuorumSize(arguments.writeQuorum);
    mlConf.setAckQuorumSize(arguments.ackQuorum);
    mlConf.setMinimumRolloverTime(10, TimeUnit.MINUTES);
    mlConf.setMetadataEnsembleSize(arguments.ensembleSize);
    mlConf.setMetadataWriteQuorumSize(arguments.writeQuorum);
    mlConf.setMetadataAckQuorumSize(arguments.ackQuorum);
    mlConf.setDigestType(arguments.digestType);
    mlConf.setMaxSizePerLedgerMb(2048);

    List<CompletableFuture<ManagedLedger>> futures = new ArrayList<>();

    for (int i = 0; i < arguments.numManagedLedgers; i++) {
        String name = String.format("%s-%03d", managedLedgerPrefix, i);
        CompletableFuture<ManagedLedger> future = new CompletableFuture<>();
        futures.add(future);
        factory.asyncOpen(name, mlConf, new OpenLedgerCallback() {

            @Override
            public void openLedgerComplete(ManagedLedger ledger, Object ctx) {
                future.complete(ledger);
            }

            @Override
            public void openLedgerFailed(ManagedLedgerException exception, Object ctx) {
                future.completeExceptionally(exception);
            }
        }, null);
    }

    List<ManagedLedger> managedLedgers = futures.stream().map(CompletableFuture::join)
            .collect(Collectors.toList());

    log.info("Created {} managed ledgers", managedLedgers.size());

    Runtime.getRuntime().addShutdownHook(new Thread() {
        public void run() {
            printAggregatedStats();
        }
    });

    Collections.shuffle(managedLedgers);
    AtomicBoolean isDone = new AtomicBoolean();

    List<List<ManagedLedger>> managedLedgersPerThread = Lists.partition(managedLedgers,
            Math.max(1, managedLedgers.size() / arguments.numThreads));

    for (int i = 0; i < arguments.numThreads; i++) {
        List<ManagedLedger> managedLedgersForThisThread = managedLedgersPerThread.get(i);
        int numManagedLedgersForThisThread = managedLedgersForThisThread.size();
        long numMessagesForThisThread = arguments.numMessages / arguments.numThreads;
        int maxOutstandingForThisThread = arguments.maxOutstanding;

        executor.submit(() -> {
            try {
                final double msgRate = arguments.msgRate / (double) arguments.numThreads;
                final RateLimiter rateLimiter = RateLimiter.create(msgRate);

                // Acquire 1 sec worth of messages to have a slower ramp-up
                rateLimiter.acquire((int) msgRate);
                final long startTime = System.currentTimeMillis();

                // caps the number of outstanding asyncAddEntry operations;
                // each completed entry returns its permit in addComplete()
                final Semaphore semaphore = new Semaphore(maxOutstandingForThisThread);

                final AddEntryCallback addEntryCallback = new AddEntryCallback() {
                    @Override
                    public void addComplete(Position position, Object ctx) {
                        long sendTime = (Long) (ctx);
                        messagesSent.increment();
                        bytesSent.add(payloadData.length);

                        long latencyMicros = NANOSECONDS.toMicros(System.nanoTime() - sendTime);
                        recorder.recordValue(latencyMicros);
                        cumulativeRecorder.recordValue(latencyMicros);

                        semaphore.release();
                    }

                    @Override
                    public void addFailed(ManagedLedgerException exception, Object ctx) {
                        log.warn("Write error on message", exception);
                        System.exit(-1);
                    }
                };

                // Send messages on all topics/producers
                long totalSent = 0;
                while (true) {
                    for (int j = 0; j < numManagedLedgersForThisThread; j++) {
                        if (arguments.testTime > 0) {
                            if (System.currentTimeMillis() - startTime > arguments.testTime) {
                                log.info("------------------- DONE -----------------------");
                                printAggregatedStats();
                                isDone.set(true);
                                Thread.sleep(5000);
                                System.exit(0);
                            }
                        }

                        if (numMessagesForThisThread > 0) {
                            if (totalSent++ >= numMessagesForThisThread) {
                                log.info("------------------- DONE -----------------------");
                                printAggregatedStats();
                                isDone.set(true);
                                Thread.sleep(5000);
                                System.exit(0);
                            }
                        }

                        semaphore.acquire();
                        rateLimiter.acquire();

                        final long sendTime = System.nanoTime();
                        managedLedgersForThisThread.get(j).asyncAddEntry(payloadBuffer, addEntryCallback,
                                sendTime);
                    }
                }
            } catch (Throwable t) {
                log.error("Got error", t);
            }
        });
    }

    // Print report stats
    long oldTime = System.nanoTime();

    Histogram reportHistogram = null;

    while (true) {
        try {
            Thread.sleep(10000);
        } catch (InterruptedException e) {
            break;
        }

        if (isDone.get()) {
            break;
        }

        long now = System.nanoTime();
        double elapsed = (now - oldTime) / 1e9;

        double rate = messagesSent.sumThenReset() / elapsed;
        double throughput = bytesSent.sumThenReset() / elapsed / 1024 / 1024 * 8;

        reportHistogram = recorder.getIntervalHistogram(reportHistogram);

        log.info(
                "Throughput produced: {}  msg/s --- {} Mbit/s --- Latency: mean: {} ms - med: {} - 95pct: {} - 99pct: {} - 99.9pct: {} - 99.99pct: {} - Max: {}",
                throughputFormat.format(rate), throughputFormat.format(throughput),
                dec.format(reportHistogram.getMean() / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(50) / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(95) / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(99) / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(99.9) / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(99.99) / 1000.0),
                dec.format(reportHistogram.getMaxValue() / 1000.0));

        reportHistogram.reset();

        oldTime = now;
    }

    factory.shutdown();
}

From source file:co.paralleluniverse.photon.Photon.java

public static void main(final String[] args) throws InterruptedException, IOException {

    final Options options = new Options();
    options.addOption("rate", true, "Requests per second (default " + rateDefault + ")");
    options.addOption("duration", true,
            "Minimum test duration in seconds: will wait for <duration> * <rate> requests to terminate or, if progress check enabled, no progress after <duration> (default "
                    + durationDefault + ")");
    options.addOption("maxconnections", true,
            "Maximum number of open connections (default " + maxConnectionsDefault + ")");
    options.addOption("timeout", true,
            "Connection and read timeout in millis (default " + timeoutDefault + ")");
    options.addOption("print", true,
            "Print cycle in millis, 0 to disable intermediate statistics (default " + printCycleDefault + ")");
    options.addOption("check", true,
            "Progress check cycle in millis, 0 to disable progress check (default " + checkCycleDefault + ")");
    options.addOption("stats", false, "Print full statistics when finish (default false)");
    options.addOption("minmax", false, "Print min/mean/stddev/max stats when finish (default false)");
    options.addOption("name", true, "Test name to print in the statistics (default '" + testNameDefault + "')");
    options.addOption("help", false, "Print help");

    try {
        final CommandLine cmd = new BasicParser().parse(options, args);
        final String[] ar = cmd.getArgs();
        if (cmd.hasOption("help") || ar.length != 1)
            printUsageAndExit(options);

        final String url = ar[0];

        final int timeout = Integer.parseInt(cmd.getOptionValue("timeout", timeoutDefault));
        final int maxConnections = Integer
                .parseInt(cmd.getOptionValue("maxconnections", maxConnectionsDefault));
        final int duration = Integer.parseInt(cmd.getOptionValue("duration", durationDefault));
        final int printCycle = Integer.parseInt(cmd.getOptionValue("print", printCycleDefault));
        final int checkCycle = Integer.parseInt(cmd.getOptionValue("check", checkCycleDefault));
        final String testName = cmd.getOptionValue("name", testNameDefault);
        final int rate = Integer.parseInt(cmd.getOptionValue("rate", rateDefault));

        final MetricRegistry metrics = new MetricRegistry();
        final Meter requestMeter = metrics.meter("request");
        final Meter responseMeter = metrics.meter("response");
        final Meter errorsMeter = metrics.meter("errors");
        final Logger log = LoggerFactory.getLogger(Photon.class);
        final ConcurrentHashMap<String, AtomicInteger> errors = new ConcurrentHashMap<>();
        final HttpGet request = new HttpGet(url);
        final StripedTimeSeries<Long> sts = new StripedTimeSeries<>(30000, false);
        final StripedHistogram sh = new StripedHistogram(60000, 5);

        log.info("name: " + testName + " url:" + url + " rate:" + rate + " duration:" + duration
                + " maxconnections:" + maxConnections + ", " + "timeout:" + timeout);
        final DefaultConnectingIOReactor ioreactor = new DefaultConnectingIOReactor(IOReactorConfig.custom()
                .setConnectTimeout(timeout).setIoThreadCount(10).setSoTimeout(timeout).build());

        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            final List<ExceptionEvent> events = ioreactor.getAuditLog();
            if (events != null)
                events.stream().filter(event -> event != null).forEach(event -> {
                    System.err.println(
                            "Apache Async HTTP Client I/O Reactor Error Time: " + event.getTimestamp());
                    //noinspection ThrowableResultOfMethodCallIgnored
                    if (event.getCause() != null)
                        //noinspection ThrowableResultOfMethodCallIgnored
                        event.getCause().printStackTrace();
                });
            if (cmd.hasOption("stats"))
                printFinishStatistics(errorsMeter, sts, sh, testName);
            if (!errors.keySet().isEmpty())
                errors.entrySet().stream()
                        .forEach(p -> log.info(testName + " " + p.getKey() + " " + p.getValue() + "ms"));
            System.out.println(
                    testName + " responseTime(90%): " + sh.getHistogramData().getValueAtPercentile(90) + "ms");
            if (cmd.hasOption("minmax")) {
                final HistogramData hd = sh.getHistogramData();
                System.out.format("%s %8s%8s%8s%8s\n", testName, "min", "mean", "sd", "max");
                System.out.format("%s %8d%8.2f%8.2f%8d\n", testName, hd.getMinValue(), hd.getMean(),
                        hd.getStdDeviation(), hd.getMaxValue());
            }
        }));

        final PoolingNHttpClientConnectionManager mngr = new PoolingNHttpClientConnectionManager(ioreactor);
        mngr.setDefaultMaxPerRoute(maxConnections);
        mngr.setMaxTotal(maxConnections);
        final CloseableHttpAsyncClient ahc = HttpAsyncClientBuilder.create().setConnectionManager(mngr)
                .setDefaultRequestConfig(RequestConfig.custom().setLocalAddress(null).build()).build();
        try (final CloseableHttpClient client = new FiberHttpClient(ahc)) {
            final int num = duration * rate;

            final CountDownLatch cdl = new CountDownLatch(num);
            // caps the number of requests in flight at maxConnections;
            // each fiber returns its permit in the finally block below
            final Semaphore sem = new Semaphore(maxConnections);
            final RateLimiter rl = RateLimiter.create(rate);

            spawnStatisticsThread(printCycle, cdl, log, requestMeter, responseMeter, errorsMeter, testName);

            for (int i = 0; i < num; i++) {
                rl.acquire();
                if (sem.availablePermits() == 0)
                    log.debug("Maximum connections count reached, waiting...");
                sem.acquireUninterruptibly();

                new Fiber<Void>(() -> {
                    requestMeter.mark();
                    final long start = System.nanoTime();
                    try {
                        try (final CloseableHttpResponse ignored = client.execute(request)) {
                            responseMeter.mark();
                        } catch (final Throwable t) {
                            markError(errorsMeter, errors, t);
                        }
                    } catch (final Throwable t) {
                        markError(errorsMeter, errors, t);
                    } finally {
                        final long now = System.nanoTime();
                        final long millis = TimeUnit.NANOSECONDS.toMillis(now - start);
                        sts.record(start, millis);
                        sh.recordValue(millis);
                        sem.release();
                        cdl.countDown();
                    }
                }).start();
            }
            spawnProgressCheckThread(log, duration, checkCycle, cdl);
            cdl.await();
        }
    } catch (final ParseException ex) {
        System.err.println("Parsing failed.  Reason: " + ex.getMessage());
    }
}

From source file:org.commoncrawl.service.parser.client.Dispatcher.java

public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    CrawlEnvironment.setHadoopConfig(conf);
    String baseURL = "http://unknown.com/";
    if (args.length != 0) {
        baseURL = args[0];
    }
    URL baseURLObj;
    try {
        baseURLObj = new URL(baseURL);
    } catch (MalformedURLException e2) {
        throw new IOException("Invalid Base Link");
    }
    final URL finalBaseURL = baseURLObj;
    final DataOutputBuffer headerBuffer = new DataOutputBuffer();
    final DataOutputBuffer contentBuffer = new DataOutputBuffer();

    try {
        ByteStreams.readBytes(new InputSupplier<InputStream>() {

            @Override
            public InputStream getInput() throws IOException {
                return System.in;
            }
        }, new ByteProcessor<Long>() {

            @Override
            public Long getResult() {
                return 0L;
            }

            int currLineCharCount = 0;
            boolean processingHeaders = true;

            @Override
            public boolean processBytes(byte[] buf, int start, int length) throws IOException {

                if (processingHeaders) {
                    int current = start;
                    int end = current + length;
                    while (processingHeaders && current != end) {
                        if (buf[current] != '\r' && buf[current] != '\n') {
                            currLineCharCount++;
                        } else if (buf[current] == '\n') {
                            if (currLineCharCount == 0) {
                                headerBuffer.write(buf, start, current - start + 1);
                                processingHeaders = false;
                            }
                            currLineCharCount = 0;
                        }
                        current++;
                    }
                    if (processingHeaders) {
                        headerBuffer.write(buf, start, length);
                    } else {
                        length -= current - start;
                        start = current;
                    }
                }
                if (!processingHeaders) {
                    contentBuffer.write(buf, start, length);
                }
                return true;
            }
        });

        LOG.info("HEADER LEN:" + headerBuffer.getLength());
        // System.out.println(new String(headerBuffer.getData(),0,headerBuffer.getLength(),Charset.forName("UTF-8")));
        LOG.info("CONTENT LEN:" + contentBuffer.getLength());
        //System.out.println(new String(contentBuffer.getData(),0,contentBuffer.getLength(),Charset.forName("UTF-8")));
        // decode header bytes ... 
        String header = "";
        if (headerBuffer.getLength() != 0) {
            try {
                header = new String(headerBuffer.getData(), 0, headerBuffer.getLength(),
                        Charset.forName("UTF-8"));
            } catch (Exception e) {
                LOG.warn(CCStringUtils.stringifyException(e));
                header = new String(headerBuffer.getData(), 0, headerBuffer.getLength(),
                        Charset.forName("ASCII"));
            }
        }
        final String headersFinal = (header != null) ? header : "";

        LOG.info("Starting Event Loop");
        final EventLoop eventLoop = new EventLoop();
        eventLoop.start();

        try {
            // create fake hosts file ...  
            //String hosts = "10.0.20.101:8072";
            // reader 
            //Reader reader = new StringReader(hosts);
            // dispatcher init 
            LOG.info("initializing Dispatcher");
            final Dispatcher dispatcher = new Dispatcher(eventLoop, "parserNodes");
            LOG.info("Waiting for a few seconds");
            Thread.sleep(5000);
            Thread threads[] = new Thread[TEST_THREAD_COUNT];
            // negative initial permits: acquireUninterruptibly() below unblocks only after
            // all TEST_THREAD_COUNT worker threads have called release()
            final Semaphore threadWaitSem = new Semaphore(-(TEST_THREAD_COUNT - 1));
            // start TEST_THREAD_COUNT worker threads
            for (int threadIdx = 0; threadIdx < TEST_THREAD_COUNT; ++threadIdx) {
                threads[threadIdx] = new Thread(new Runnable() {

                    @Override
                    public void run() {
                        for (int i = 0; i < ITERATIONS_PER_THREAD; ++i) {
                            // build parse request 
                            ParseRequest request = new ParseRequest();
                            request.setDocId(1);
                            request.setDomainId(1);
                            request.setDocURL(finalBaseURL.toString());
                            request.setDocHeaders(headersFinal);
                            request.setDocContent(
                                    new FlexBuffer(contentBuffer.getData(), 0, contentBuffer.getLength()));
                            //LOG.info("Dispatching parse request");
                            ParseResult result = dispatcher.dispatchRequest(request);
                            LOG.info("TID[" + Thread.currentThread().getId() + "]ReqID[" + i + "]" + " Success:"
                                    + ((result != null) ? result.getParseSuccessful() : false) + " LinkCount:"
                                    + ((result != null) ? result.getExtractedLinks().size() : 0));
                        }
                        LOG.info("Thread:" + Thread.currentThread().getId() + " Exiting");
                        threadWaitSem.release();
                    }

                });
                threads[threadIdx].start();
            }

            LOG.info("Waiting for threads to die");
            threadWaitSem.acquireUninterruptibly();
            LOG.info("All Threads dead.");

        } finally {
            eventLoop.stop();
        }
    } catch (IOException e) {
        LOG.error(CCStringUtils.stringifyException(e));
    } catch (InterruptedException e) {
    }
}

From source file:org.commoncrawl.util.HDFSBlockTransferUtility.java

public static void main(String[] args) {
    final String transferFromDisk = args[0];
    final String transferToDisks[] = args[1].split(",");
    final LinkedBlockingQueue<String> queues[] = new LinkedBlockingQueue[transferToDisks.length];
    // negative initial permits: acquire() below unblocks only after every worker thread releases
    final Semaphore waitSemaphore = new Semaphore(-(transferToDisks.length - 1));
    for (int i = 0; i < transferToDisks.length; ++i) {
        queues[i] = new LinkedBlockingQueue<String>();
    }

    File transferSource = new File(transferFromDisk);
    for (File transferFile : transferSource.listFiles()) {
        if (transferFile.isDirectory()) {
            int partition = Math.abs(transferFile.getName().hashCode() % transferToDisks.length);
            try {
                queues[partition].put(transferFile.getAbsolutePath());
            } catch (InterruptedException e) {
            }
        } else {
            try {
                doCopyFile(transferFile, new File(transferToDisks[0], transferFile.getName()), true);
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }

    Thread threads[] = new Thread[transferToDisks.length];
    for (int i = 0; i < transferToDisks.length; ++i) {

        final int threadIdx = i;

        try {
            queues[threadIdx].put("");
        } catch (InterruptedException e1) {
        }

        threads[i] = new Thread(new Runnable() {

            @Override
            public void run() {

                try {
                    File transferToDisk = new File(transferToDisks[threadIdx]);

                    LinkedBlockingQueue<String> queue = queues[threadIdx];

                    while (true) {
                        try {
                            String nextDir = queue.take();
                            if (nextDir.length() == 0) {
                                break;
                            } else {
                                File sourceDir = new File(nextDir);
                                File targetDir = new File(transferToDisk, sourceDir.getName());

                                try {
                                    copyFiles(sourceDir, targetDir, true);
                                } catch (IOException e) {
                                    e.printStackTrace();
                                }

                            }
                        } catch (InterruptedException e) {
                        }
                    }
                } finally {
                    waitSemaphore.release();
                }
            }

        });
        threads[i].start();
    }

    System.out.println("Waiting for Worker Threads");
    try {
        waitSemaphore.acquire();
    } catch (InterruptedException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
    System.out.println("Worker Threads Dead");
}

From source file:org.commoncrawl.service.listcrawler.CacheManager.java

/********************************************************************************************************/

public static void main(String[] args) {

    final EventLoop eventLoop = new EventLoop();
    eventLoop.start();

    final CacheManager manager = new CacheManager(eventLoop);
    // delete active log if it exists ... 
    manager.getActiveLogFilePath().delete();
    try {
        manager.initialize(INIT_FLAG_SKIP_CACHE_WRITER_INIT | INIT_FLAG_SKIP_HDFS_WRITER_INIT);
    } catch (IOException e1) {
        LOG.error(CCStringUtils.stringifyException(e1));
        return;
    }

    MessageDigest digester;
    try {
        digester = MessageDigest.getInstance("MD5");
    } catch (NoSuchAlgorithmException e1) {
        LOG.error(CCStringUtils.stringifyException(e1));
        return;
    }

    final byte[] randomBytes = new byte[1 << 15];
    LOG.info("Building Random Digest");
    for (int i = 0; i < randomBytes.length; i += 16) {
        long time = System.nanoTime();
        digester.update((new UID() + "@" + time).getBytes());
        System.arraycopy(digester.digest(), 0, randomBytes, i, 16);
    }

    // zero initial permits: the final acquireUninterruptibly() blocks until release() is called
    final Semaphore semaphore = new Semaphore(0);

    if (args[0].equals("populate")) {

        manager.startCacheWriterThread();
        manager.startHDFSFlusherThread();

        try {

            LOG.info("Done Building Random Digest");

            LOG.info("Writing Items To Disk");
            for (int i = 0; i < 1000000; ++i) {

                if (i % 1000 == 0) {
                    LOG.info("Wrote:" + i + " entries");
                }

                final CacheItem item1 = new CacheItem();
                item1.setUrl(manager.normalizeURL("http://www.domain.com/foobar/" + i));
                item1.setContent(new Buffer(randomBytes));
                item1.setUrlFingerprint(URLFingerprint.generate64BitURLFPrint(item1.getUrl()));
                manager.cacheItem(item1, null);
                Thread.sleep(1);

                if (i != 0 && i % 10000 == 0) {
                    LOG.info("Hit 10000 items.. sleeping for 20 seconds");
                    Thread.sleep(20 * 1000);
                }
            }

            Thread.sleep(30000);

            for (int i = 0; i < 1000000; ++i) {

                final String url = new String("http://www.domain.com/foobar/" + i);
                manager.checkCacheForItem(url, new CacheItemCheckCallback() {

                    @Override
                    public void cacheItemAvailable(String url, CacheItem item) {
                        Assert.assertTrue(item.getUrl().equals(url));
                        String itemIndex = url.substring("http://www.domain.com/foobar/".length());
                        int itemNumber = Integer.parseInt(itemIndex);
                        if (itemNumber == 999999) {
                            semaphore.release();
                        }
                    }

                    @Override
                    public void cacheItemNotFound(String url) {
                        Assert.assertTrue(false);
                    }
                });
            }
        } catch (IOException e) {
            LOG.error(CCStringUtils.stringifyException(e));
        } catch (InterruptedException e2) {

        }
    } else if (args[0].equals("read")) {

        try {
            final CacheItem item1 = new CacheItem();
            item1.setUrl(manager.normalizeURL("http://www.domain.com/barz/"));
            item1.setUrlFingerprint(URLFingerprint.generate64BitURLFPrint(item1.getUrl()));
            item1.setContent(new Buffer(randomBytes));
            manager.cacheItem(item1, null);

            // queue up cache load requests .... 
            for (int i = 0; i < 10000; ++i) {

                final String url = new String("http://www.domain.com/foobar/" + i);

                eventLoop.setTimer(new Timer(1, false, new Timer.Callback() {

                    @Override
                    public void timerFired(Timer timer) {
                        manager.checkCacheForItem(url, new CacheItemCheckCallback() {

                            @Override
                            public void cacheItemAvailable(String url, CacheItem item) {
                                LOG.info("FOUND Item for URL:" + url + " ContentSize:"
                                        + item.getContent().getCount());
                            }

                            @Override
                            public void cacheItemNotFound(String url) {
                                LOG.info("DIDNOT Find Item for URL:" + url);
                            }

                        });
                    }
                }));
            }

            eventLoop.setTimer(new Timer(1, false, new Timer.Callback() {

                @Override
                public void timerFired(Timer timer) {
                    manager.checkCacheForItem(item1.getUrl(), new CacheItemCheckCallback() {

                        @Override
                        public void cacheItemAvailable(String url, CacheItem item) {
                            LOG.info("FOUND Item for URL:" + url + " ContentSize:"
                                    + item.getContent().getCount());
                        }

                        @Override
                        public void cacheItemNotFound(String url) {
                            LOG.info("DIDNOT Find Item for URL:" + url);
                        }

                    });
                }

            }));
        } catch (IOException e) {
            LOG.error(CCStringUtils.stringifyException(e));
        }
    }
    semaphore.acquireUninterruptibly();

}

From source file:Main.java

/**
 * Releases the locked semaphore for the given key.
 *
 * @param key key for getting the locked semaphore out of the hashmap.
 */
public static void releaseChat(String key) {
    Semaphore semaphore = semaphoreMap.get(key);
    if (semaphore != null) {
        semaphore.release();
    }
}
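
The semaphoreMap field referenced above is not part of this excerpt; a typical declaration for it (an assumption, not taken from the original source) would be:

// assumed declaration; the original excerpt does not show it
private static final java.util.concurrent.ConcurrentHashMap<String, Semaphore> semaphoreMap =
        new java.util.concurrent.ConcurrentHashMap<>();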

From source file:Main.java

/** Blocks until the semaphore has a permit available, then immediately restores it. */
public static void waitForRelease(Semaphore semaphore) {
    try {
        semaphore.acquire();
    } catch (InterruptedException iex) {
        //ignore
    }
    semaphore.release();
}

From source file:org.jbpm.EventCallback.java

private static void registerNotification(final String event) {
    Synchronization notification = new Synchronization() {

        public void beforeCompletion() {
        }

        public void afterCompletion(int status) {
            if (status == Status.STATUS_COMMITTED) {
                log.debug("sending '" + event + "' notification");
                Semaphore eventSemaphore = getEventSemaphore(event);
                eventSemaphore.release();
            }
        }

    };
    JbpmContext.getCurrentJbpmContext().getSession().getTransaction().registerSynchronization(notification);
}

From source file:org.marekasf.troughput.XYHistogramChart.java

public static void display(final AdaptiveHistogram h, final String title) {

    final XYHistogramChart demo = new XYHistogramChart(h, title);
    demo.pack();
    RefineryUtilities.centerFrameOnScreen(demo);
    demo.setVisible(true);
    // zero initial permits: acquire() below blocks until the window-closing listener releases
    final Semaphore semaphore = new Semaphore(0);
    demo.addWindowListener(new WindowAdapter() {
        @Override
        public void windowClosing(final WindowEvent we) {
            semaphore.release();
        }
    });
    try {
        semaphore.acquire();
    } catch (final InterruptedException e) {
        //
    }
}