Example usage for java.util.concurrent BlockingQueue take

Introduction

On this page you can find usage examples for java.util.concurrent.BlockingQueue.take().

Prototype

E take() throws InterruptedException;

Document

Retrieves and removes the head of this queue, waiting if necessary until an element becomes available.
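
A minimal sketch of the blocking behavior (the class and queue names here are illustrative, not taken from the examples below): the consumer thread calls take() and parks until a producer puts an element.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class TakeDemo {
    public static void main(String[] args) throws InterruptedException {
        final BlockingQueue<String> queue = new ArrayBlockingQueue<String>(10);

        new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    Thread.sleep(500); // simulate slow production
                    queue.put("hello");
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }
        }).start();

        // take() blocks here until the producer has put an element
        System.out.println(queue.take()); // prints "hello" after ~500 ms
    }
}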

Usage

From source file:edu.cmu.cs.lti.ark.fn.Semafor.java

/**
 * Reads conll sentences, parses them, and writes the json-serialized results.
 *
 * @param inputSupplier where to read conll sentences from
 * @param outputSupplier where to write the results to
 * @param numThreads the number of threads to use
 * @throws IOException
 * @throws InterruptedException
 */
public void runParser(final InputSupplier<? extends Readable> inputSupplier,
        final OutputSupplier<? extends Writer> outputSupplier, final int numThreads)
        throws IOException, InterruptedException {
    // use the producer-worker-consumer pattern to parse all sentences in multiple threads, while keeping
    // output in order.
    final BlockingQueue<Future<Optional<SemaforParseResult>>> results = Queues
            .newLinkedBlockingDeque(5 * numThreads);
    final ExecutorService workerThreadPool = newFixedThreadPool(numThreads);
    // try to shutdown gracefully. don't worry too much if it doesn't work
    Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                workerThreadPool.shutdown();
                workerThreadPool.awaitTermination(5, TimeUnit.SECONDS);
            } catch (InterruptedException ignored) {
            }
        }
    }));

    final PrintWriter output = new PrintWriter(outputSupplier.getOutput());
    try {
        // Start thread to fetch computed results and write to file
        final Thread consumer = new Thread(new Runnable() {
            @Override
            public void run() {
                while (!Thread.currentThread().isInterrupted()) {
                    try {
                        final Optional<SemaforParseResult> oResult = results.take().get();
                        if (!oResult.isPresent())
                            break; // got poison pill. we're done
                        output.println(oResult.get().toJson());
                        output.flush();
                    } catch (Exception e) {
                        e.printStackTrace();
                        throw new RuntimeException(e);
                    }
                }
            }
        });
        consumer.start();
        // in main thread, put placeholders on results queue (so results stay in order), then
        // tell a worker thread to fill up the placeholder
        final SentenceCodec.SentenceIterator sentences = ConllCodec.readInput(inputSupplier.getInput());
        try {
            int i = 0;
            while (sentences.hasNext()) {
                final Sentence sentence = sentences.next();
                final int sentenceId = i;
                results.put(workerThreadPool.submit(new Callable<Optional<SemaforParseResult>>() {
                    @Override
                    public Optional<SemaforParseResult> call() throws Exception {
                        final long start = System.currentTimeMillis();
                        try {
                            final SemaforParseResult result = parseSentence(sentence);
                            final long end = System.currentTimeMillis();
                            System.err.printf("parsed sentence %d in %d millis.%n", sentenceId, end - start);
                            return Optional.of(result);
                        } catch (Exception e) {
                            e.printStackTrace();
                            throw e;
                        }
                    }
                }));
                i++;
            }
            // put a poison pill on the queue to signal that we're done
            results.put(workerThreadPool.submit(new Callable<Optional<SemaforParseResult>>() {
                @Override
                public Optional<SemaforParseResult> call() throws Exception {
                    return Optional.absent();
                }
            }));
            workerThreadPool.shutdown();
        } finally {
            closeQuietly(sentences);
        }
        // wait for consumer to finish
        consumer.join();
    } finally {
        closeQuietly(output);
    }
    System.err.println("Done.");
}
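
Note how this example keeps output in order even though sentences are parsed concurrently: the main thread puts Futures onto the queue in submission order, the consumer take()s them in that same order, and the bounded capacity (5 * numThreads) applies backpressure to the producer when parsing falls behind. A Future yielding Optional.absent() serves as the poison pill that tells the consumer to stop.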

From source file:com.precioustech.fxtrading.tradingbot.strategies.CopyTwitterStrategyTest.java

@Test
public void harvestAndTradeTest() throws InterruptedException {
    CopyTwitterStrategy<String> copyTwitterStrategy = new CopyTwitterStrategy<String>();
    FXTweetHandler<String> tweetHandlerFoo = createTweetHandlerFoo(copyTwitterStrategy);
    BlockingQueue<TradingDecision<String>> orderQueue = new LinkedBlockingQueue<TradingDecision<String>>();
    copyTwitterStrategy.orderQueue = orderQueue;
    copyTwitterStrategy.init();
    Tweet tweet1 = mock(Tweet.class);
    Collection<Tweet> footweets = Lists.newArrayList(tweet1);
    NewFXTradeTweet<String> newTrade = mock(NewFXTradeTweet.class);
    when(tweetHandlerFoo.findNewTweets()).thenReturn(footweets);
    when(tweetHandlerFoo.handleTweet(tweet1)).thenReturn(newTrade);
    when(newTrade.getAction()).thenReturn(TradingSignal.SHORT);
    TradeableInstrument<String> euraud = new TradeableInstrument<String>("EUR_AUD");
    when(newTrade.getInstrument()).thenReturn(euraud);
    double[] profits = { 11.32, 17.7, 8.2, 19.0, 44.5, -11.0, 10, 25.5 };
    Collection<CloseFXTradeTweet<String>> closedTrades = createClosedTrades(profits);
    footweets = Lists.newArrayList();
    for (CloseFXTradeTweet<String> closeTradeTweet : closedTrades) {
        Tweet tweet = mock(Tweet.class);
        when(tweetHandlerFoo.handleTweet(tweet)).thenReturn(closeTradeTweet);
        footweets.add(tweet);
    }
    when(tweetHandlerFoo.findHistoricPnlTweetsForInstrument(euraud)).thenReturn(footweets);
    copyTwitterStrategy.harvestAndTrade();
    TradingDecision<String> decision = orderQueue.take();
    assertEquals(TradingSignal.SHORT, decision.getSignal());
    assertEquals(euraud, decision.getInstrument());
}
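
One caveat when calling take() in a test: if the code under test never enqueues a decision, the test hangs indefinitely. A common alternative (a sketch, not part of the original test, assuming org.junit.Assert.assertNotNull and java.util.concurrent.TimeUnit are imported) is poll with a timeout:

    TradingDecision<String> decision = orderQueue.poll(5, TimeUnit.SECONDS);
    assertNotNull("no trading decision arrived within 5 seconds", decision);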

From source file:org.apache.hadoop.hdfs.server.datanode.IABlockSender.java

/**
 * Sends up to maxChunks chunks of data. Used by Encoded Read.
 *
 * When blockInPosition is >= 0, assumes 'out' is a 
 * {@link SocketOutputStream} and tries 
 * {@link SocketOutputStream#transferToFully(FileChannel, long, int)} to
 * send data (and updates blockInPosition).
 */
private int sendChunks(ByteBuffer pkt, int maxChunks, OutputStream out, BlockingQueue<ByteBuffer> q)
        throws IOException {
    //LOG.info("anchor Send_packet "+seqno);
    // Sends multiple chunks in one packet with a single write().

    int len = (int) Math.min((endOffset - offset), (((long) bytesPerChecksum) * ((long) maxChunks)));
    int numChunks = (len + bytesPerChecksum - 1) / bytesPerChecksum;
    //boolean lastDataPacket = offset + len == endOffset && len > 0;
    int packetLen = len + numChunks * checksumSize + 4;
    //initial packet
    pkt.clear();

    //header
    PacketHeader header = new PacketHeader(packetLen, offset, seqno, (len == 0), len);
    header.putInBuffer(pkt);

    int checksumOff = pkt.position();
    int checksumLen = numChunks * checksumSize;
    byte[] buf = pkt.array();

    int dataOff = checksumOff + checksumLen;
    /*
    LOG.info("real length of the packet " + (dataOff + len) + " maxchunks " + maxChunks
        + " num chunks " + numChunks);
    */
    //read data from the ring buffer. Due to some padding problems, we need a global cache.
    //may have a better design
    if (cache == null) {
        try {
            cache = q.take();
        } catch (InterruptedException e) {
            // restore the interrupt and abort: continuing with a null cache would NPE below
            Thread.currentThread().interrupt();
            throw new IOException("interrupted while waiting for ring buffer data", e);
        }
    }

    int r = cache.remaining();
    int taken = 0;
    while (r < len) {
        cache.get(buf, dataOff + taken, r - taken);
        try {
            LOG.info("taking a new packet; remaining: " + r);
            cache = q.take();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new IOException("interrupted while waiting for ring buffer data", e);
        }
        taken = r;
        r += cache.remaining();
    }

    //LOG.info("dataOff: "+dataOff+" taken: "+taken+" len:"+len);
    cache.get(buf, dataOff + taken, len - taken);

    //create checksum
    for (int i = checksumOff; i < checksumOff + checksumLen; i += checksumSize) {
        checksum.reset();
        int bufOff = (i - checksumOff) / checksumSize * bytesPerChecksum + dataOff;
        checksum.update(buf, bufOff, bytesPerChecksum);
        checksum.writeValue(buf, i, true);
    }
    //LOG.info("anchor Send_packet "+seqno+" Checksum_generated");

    try {
        if (blockInPosition >= 0) {
            //should not be used.
            LOG.warn("encoded read should not used transferTo().");
            //use transferTo(). Checks on out and blockIn are already done. 

            //SocketOutputStream sockOut = (SocketOutputStream)out;
            //first write the packet
            //sockOut.write(buf, 0, dataOff);
            // no need to flush. since we know out is not a buffered stream. 

            //sockOut.transferToFully(((FileInputStream)blockIn).getChannel(), 
            //                        blockInPosition, len);

            //blockInPosition += len;
        } else {
            // normal transfer
            /* LOG.info("send packet with Length: "+len+" Offset: "+offset); */
            out.write(buf, 0, dataOff + len);
        }
        //LOG.info("anchor Send_packet "+seqno+" Sent");

    } catch (IOException e) {
        /* Exception while writing to the client. Connection closure from
         * the other end is mostly the case and we do not care much about
         * it. But other things can go wrong, especially in transferTo(),
         * which we do not want to ignore.
         *
         * The message parsing below should not be considered as a good
         * coding example. NEVER do it to drive a program logic. NEVER.
         * It was done here because the NIO throws an IOException for EPIPE.
         */
        String ioem = e.getMessage();
        if (!ioem.startsWith("Broken pipe") && !ioem.startsWith("Connection reset")) {
            LOG.error("BlockSender.sendChunks() exception: ", e);
        }
        throw ioeToSocketException(e);
    }

    if (throttler != null) { // rebalancing so throttle
        throttler.throttle(packetLen);
    }

    return len;
}
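
The cache field carries bytes left over from the previous take() across calls, because the ByteBuffers arriving on the queue do not line up with packet boundaries. Distilled into a standalone helper, the accumulation loop looks roughly like this (a sketch; PacketFiller and fill are illustrative names, not part of the original class):

import java.nio.ByteBuffer;
import java.util.concurrent.BlockingQueue;

public class PacketFiller {
    private ByteBuffer cache; // leftover bytes from the previous packet

    /** Copies exactly len bytes from the queue into dst at off, blocking as needed. */
    public void fill(BlockingQueue<ByteBuffer> q, byte[] dst, int off, int len)
            throws InterruptedException {
        if (cache == null || !cache.hasRemaining()) {
            cache = q.take(); // block until the first buffer arrives
        }
        int copied = 0;
        while (copied < len) {
            int n = Math.min(cache.remaining(), len - copied);
            cache.get(dst, off + copied, n);
            copied += n;
            if (copied < len) {
                cache = q.take(); // buffer exhausted; wait for the next one
            }
        }
        // cache may still hold leftover bytes for the next call
    }
}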

From source file:org.apache.hadoop.hdfs.server.datanode.PMBlockSender.java

/**
 * Sends up to maxChunks chunks of data. Used by Encoded Read.
 *
 * When blockInPosition is >= 0, assumes 'out' is a 
 * {@link SocketOutputStream} and tries 
 * {@link SocketOutputStream#transferToFully(FileChannel, long, int)} to
 * send data (and updates blockInPosition).
 */
private int sendChunks(ByteBuffer pkt, int maxChunks, OutputStream out, BlockingQueue<ByteBuffer> q)
        throws IOException {
    //LOG.info("anchor Send_packet "+seqno);
    // Sends multiple chunks in one packet with a single write().

    int len = (int) Math.min((endOffset - offset), (((long) bytesPerChecksum) * ((long) maxChunks)));
    int numChunks = (len + bytesPerChecksum - 1) / bytesPerChecksum;
    //boolean lastDataPacket = offset + len == endOffset && len > 0;
    int packetLen = len + numChunks * checksumSize + 4;
    //initial packet
    pkt.clear();

    //header
    PacketHeader header = new PacketHeader(packetLen, offset, seqno, (len == 0), len);
    header.putInBuffer(pkt);

    int checksumOff = pkt.position();
    int checksumLen = numChunks * checksumSize;
    byte[] buf = pkt.array();

    int dataOff = checksumOff + checksumLen;
    /*
    LOG.info("real length of the packet " + (dataOff + len) + " maxchunks " + maxChunks
        + " num chunks " + numChunks);
    */
    //read data from the ring buffer. Due to some padding problems, we need a global cache.
    //may have a better design

    if (cache == null) {
        try {
            cache = q.take();
        } catch (InterruptedException e) {
            // restore the interrupt and abort: continuing with a null cache would NPE below
            Thread.currentThread().interrupt();
            throw new IOException("interrupted while waiting for ring buffer data", e);
        }
    }

    int r = cache.remaining();
    int taken = 0;
    while (r < len) {
        cache.get(buf, dataOff + taken, r - taken);
        try {
            cache = q.take();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new IOException("interrupted while waiting for ring buffer data", e);
        }
        taken = r;
        r += cache.remaining();
    }

    //LOG.info("dataOff: "+dataOff+" taken: "+taken+" len:"+len);
    cache.get(buf, dataOff + taken, len - taken);

    //create checksum
    for (int i = checksumOff; i < checksumOff + checksumLen; i += checksumSize) {
        checksum.reset();
        int bufOff = (i - checksumOff) / checksumSize * bytesPerChecksum + dataOff;
        checksum.update(buf, bufOff, bytesPerChecksum);
        checksum.writeValue(buf, i, true);
    }
    //LOG.info("anchor Send_packet "+seqno+" Checksum_generated");

    try {
        if (blockInPosition >= 0) {
            //should not be used.
            LOG.warn("encoded read should not used transferTo().");
            //use transferTo(). Checks on out and blockIn are already done. 

            //SocketOutputStream sockOut = (SocketOutputStream)out;
            //first write the packet
            //sockOut.write(buf, 0, dataOff);
            // no need to flush. since we know out is not a buffered stream. 

            //sockOut.transferToFully(((FileInputStream)blockIn).getChannel(), 
            //                        blockInPosition, len);

            //blockInPosition += len;
        } else {
            // normal transfer
            /* LOG.info("send packet with Length: "+len+" Offset: "+offset); */
            out.write(buf, 0, dataOff + len);
        }
        //LOG.info("anchor Send_packet "+seqno+" Sent");

    } catch (IOException e) {
        /* Exception while writing to the client. Connection closure from
         * the other end is mostly the case and we do not care much about
         * it. But other things can go wrong, especially in transferTo(),
         * which we do not want to ignore.
         *
         * The message parsing below should not be considered as a good
         * coding example. NEVER do it to drive a program logic. NEVER.
         * It was done here because the NIO throws an IOException for EPIPE.
         */
        String ioem = e.getMessage();
        if (!ioem.startsWith("Broken pipe") && !ioem.startsWith("Connection reset")) {
            LOG.error("BlockSender.sendChunks() exception: ", e);
        }
        throw ioeToSocketException(e);
    }

    if (throttler != null) { // rebalancing so throttle
        throttler.throttle(packetLen);
    }

    return len;
}

From source file:org.cc86.MMC.modules.audio.SWTTYProvider.java

@Override
public void uartHandler(final Consumer<Byte[]> out, final BlockingQueue<byte[]> ctrl, final boolean addPrefix) {
    new Thread(() -> {
        InputStream fis = null;
        OutputStream fos = null;

        try {
            //final Socket s = new Socket("127.0.0.1", 8888);
            //fis = new FileInputStream();
            if (!new File("/sys/class/softuart/softuart/data").exists()) {
                System.out.println("no running softuart driver, shutting down");
                System.exit(0);
            }

            setup();
            //("/home/pi/codestuff/uartdmp");//
            fos = new FileOutputStream("/sys/class/softuart/softuart/data");
            PrintStream ps = new PrintStream(fos);
            //fos = s.getOutputStream();/*new InputStreamReader(s.getInputStream()*/

            new Thread(() -> {
                FileInputStream br;
                try {
                    byte[] data = new byte[3192];
                    //br = 
                    //br.mark(4096);
                    ByteArrayOutputStream bs = new ByteArrayOutputStream();
                    while (true) {
                        List<Byte> bfr = new ArrayList<Byte>();
                        FileInputStream fi = new FileInputStream("/sys/class/softuart/softuart/data");
                        //Tools.runCmdWithPassthru(new PrintStream(bs), "cat","/sys/class/softuart/softuart/data");

                        int len = fi.read(data);
                        //bs.reset();
                        //int len=data.length;
                        if (len < 0) {
                            try {
                                Thread.sleep(10);
                            } catch (InterruptedException ex) {
                            }
                            continue;
                        }
                        l.trace("data rcvd, len={}" + len);
                        for (int i = 0; i < len; i += 2) {
                            int datapkg = ((data[i] & 0xF0) >>> 4) | ((data[i + 1] & 0xF0));
                            datapkg |= ((data[i + 1] & 0x08) << 5);
                            boolean parity = numberOfSetBits(datapkg) % 2 == 0;
                            l.trace(String.format("%03X", datapkg));
                            bytesReceived++;
                            if (parity) {
                                l.trace(datapkg);
                                bfr.add((byte) (datapkg & 0xff));

                            } else {
                                bytesErrored++;
                            }
                        }
                        out.accept(bfr.toArray(new Byte[0]));

                        try {
                            Thread.sleep(10);
                        } catch (InterruptedException ex) {
                        }
                    }
                } catch (IOException ex) {
                    ex.printStackTrace();
                }
            }).start();
            //BufferedReader bs = new BufferedReader(new InputStreamReader(ctrl));
            while (true) {
                //l.warn("d'arvit");
                byte[] thebyte = ctrl.take();
                l.trace("SENT_UART:" + Arrays.toString(thebyte));
                StringBuilder sb = new StringBuilder();
                for (Byte pkgbyte : thebyte) {
                    sb.append("\\x").append(String.format("%02X", pkgbyte));
                }
                String echo = sb + "";
                l.trace(echo);
                Tools.runCmdWithPassthru(System.out, "/bin/bash", "-c",
                        "echo -ne '" + echo + "'>/sys/class/softuart/softuart/data");
                //ps.write(new byte[]{thebyte});
                //ps.flush();
                //fos.flush();
                //Thread.sleep(1000);
            }
        } catch (Exception ex) {
            ex.printStackTrace();
        } finally {
            try {
                //fis.close();
                fos.close();
            } catch (IOException ex) {
                ex.printStackTrace();
            }
        }
    }).start();
}
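
Two I/O strategies sit side by side in this method: the reader thread has no blocking API for the sysfs device file, so it polls in a loop with Thread.sleep(10), while the writer loop simply blocks on ctrl.take() and wakes only when a caller enqueues bytes to send. Where a BlockingQueue is available, take() avoids both the latency and the wasted wakeups of sleep-based polling.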

From source file:com.alibaba.otter.node.etl.common.pipe.impl.http.archive.ArchiveBean.java

/**
 * Packs the given file data entries into the target archive file, retrieving
 * each entry's content via the callback (in worker threads where applicable).
 *
 * @return true if at least one entry was written to the archive
 */
@SuppressWarnings("resource")
private boolean doPack(final File targetArchiveFile, List<FileData> fileDatas,
        final ArchiveRetriverCallback<FileData> callback) {
    // remove any stale archive file left over from a previous run
    if (true == targetArchiveFile.exists() && false == NioUtils.delete(targetArchiveFile, 3)) {
        throw new ArchiveException(
                String.format("[%s] exist and delete failed", targetArchiveFile.getAbsolutePath()));
    }

    boolean exist = false;
    ZipOutputStream zipOut = null;
    Set<String> entryNames = new HashSet<String>();
    BlockingQueue<Future<ArchiveEntry>> queue = new LinkedBlockingQueue<Future<ArchiveEntry>>(); // completion queue shared with the ExecutorCompletionService
    ExecutorCompletionService completionService = new ExecutorCompletionService(executor, queue);

    final File targetDir = new File(targetArchiveFile.getParentFile(),
            FilenameUtils.getBaseName(targetArchiveFile.getPath()));
    try {
        // create the working directory for temporary entry files
        FileUtils.forceMkdir(targetDir);

        zipOut = new ZipOutputStream(new BufferedOutputStream(new FileOutputStream(targetArchiveFile)));
        zipOut.setLevel(Deflater.BEST_SPEED);
        // add one archive entry per file data item
        for (final FileData fileData : fileDatas) {
            if (fileData.getEventType().isDelete()) {
                continue; // delete events carry no content to archive
            }

            String namespace = fileData.getNameSpace();
            String path = fileData.getPath();
            boolean isLocal = StringUtils.isBlank(namespace);
            String entryName = null;
            if (true == isLocal) {
                entryName = FilenameUtils.getPath(path) + FilenameUtils.getName(path);
            } else {
                entryName = namespace + File.separator + path;
            }

            // skip duplicate entry names
            if (entryNames.contains(entryName) == false) {
                entryNames.add(entryName);
            } else {
                continue;
            }

            final String name = entryName;
            if (true == isLocal && !useLocalFileMutliThread) {
                // local file: retrieve synchronously and wrap in a pre-completed future
                queue.add(new DummyFuture(new ArchiveEntry(name, callback.retrive(fileData))));
            } else {
                completionService.submit(new Callable<ArchiveEntry>() {

                    public ArchiveEntry call() throws Exception {
                        // retrieve the content in a worker thread
                        InputStream input = null;
                        OutputStream output = null;
                        try {
                            input = callback.retrive(fileData);

                            if (input instanceof LazyFileInputStream) {
                                input = ((LazyFileInputStream) input).getInputSteam(); // unwrap the lazily opened stream
                            }

                            if (input != null) {
                                File tmp = new File(targetDir, name);
                                NioUtils.create(tmp.getParentFile(), false, 3); // ensure the parent directory exists
                                output = new FileOutputStream(tmp);
                                NioUtils.copy(input, output); // copy the content to a temp file
                                return new ArchiveEntry(name, new File(targetDir, name));
                            } else {
                                return new ArchiveEntry(name);
                            }
                        } finally {
                            IOUtils.closeQuietly(input);
                            IOUtils.closeQuietly(output);
                        }
                    }
                });
            }
        }

        for (int i = 0; i < entryNames.size(); i++) {
            // wait for the next retrieved entry
            ArchiveEntry input = null;
            InputStream stream = null;
            try {
                input = queue.take().get();
                if (input == null) {
                    continue;
                }

                stream = input.getStream();
                if (stream == null) {
                    continue;
                }

                if (stream instanceof LazyFileInputStream) {
                    stream = ((LazyFileInputStream) stream).getInputSteam(); // unwrap the lazily opened stream
                }

                exist = true;
                zipOut.putNextEntry(new ZipEntry(input.getName()));
                NioUtils.copy(stream, zipOut); // write the entry content into the zip
                zipOut.closeEntry();
            } finally {
                IOUtils.closeQuietly(stream);
            }
        }

        if (exist) {
            zipOut.finish();
        }
    } catch (Exception e) {
        throw new ArchiveException(e);
    } finally {
        IOUtils.closeQuietly(zipOut);
        try {
            FileUtils.deleteDirectory(targetDir); // clean up the working directory
        } catch (IOException e) {
            // ignore
        }
    }

    return exist;
}
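
Note the pairing here: the LinkedBlockingQueue is handed to an ExecutorCompletionService, which pushes each Future onto the queue as its task finishes, so every Future returned by queue.take() is already complete (local entries are short-circuited onto the same queue as pre-completed DummyFutures). A minimal self-contained sketch of that pattern (the task bodies are illustrative):

import java.util.concurrent.*;

public class CompletionServiceDemo {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        BlockingQueue<Future<String>> done = new LinkedBlockingQueue<Future<String>>();
        ExecutorCompletionService<String> ecs = new ExecutorCompletionService<String>(pool, done);

        for (int i = 0; i < 10; i++) {
            final int id = i;
            ecs.submit(new Callable<String>() {
                public String call() throws Exception {
                    Thread.sleep(id * 10); // tasks finish at different times
                    return "entry-" + id;
                }
            });
        }
        for (int i = 0; i < 10; i++) {
            // take() returns Futures in completion order, not submission order
            System.out.println(done.take().get());
        }
        pool.shutdown();
    }
}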

From source file:net.yacy.http.servlets.YaCyDefaultServlet.java

/**
 * TODO: add same functionality & checks as in HTTPDemon.parseMultipart
 *
 * Parses multi-part form data for form fields; see also the original
 * implementation in HTTPDemon.parseMultipart.
 *
 * For file data, the parameter for the form field contains the filename, and an
 * additional parameter with the suffix [fieldname]$file contains the upload content
 * (e.g. <input type="file" name="upload">  upload="local/filename" upload$file=[content])
 *
 * @param request the servlet request to parse
 * @param args found fields/values are added to the map
 */
protected void parseMultipart(final HttpServletRequest request, final serverObjects args) throws IOException {

    // reject too large uploads
    if (request.getContentLength() > SIZE_FILE_THRESHOLD)
        throw new IOException("FileUploadException: uploaded file too large = " + request.getContentLength());

    // check if we have enough memory
    if (!MemoryControl.request(request.getContentLength() * 3, false)) {
        throw new IOException("not enough memory available for request. request.getContentLength() = "
                + request.getContentLength() + ", MemoryControl.available() = " + MemoryControl.available());
    }
    ServletFileUpload upload = new ServletFileUpload(DISK_FILE_ITEM_FACTORY);
    upload.setFileSizeMax(SIZE_FILE_THRESHOLD);
    try {
        // Parse the request to get form field items
        List<FileItem> fileItems = upload.parseRequest(request);
        // Process the uploaded file items
        Iterator<FileItem> i = fileItems.iterator();
        final BlockingQueue<Map.Entry<String, byte[]>> files = new LinkedBlockingQueue<>();
        while (i.hasNext()) {
            FileItem item = i.next();
            if (item.isFormField()) {
                // simple text
                if (item.getContentType() == null || !item.getContentType().contains("charset")) {
                    // old yacy clients use their local default charset, on most systems UTF-8 (I hope ;)
                    args.add(item.getFieldName(), item.getString(StandardCharsets.UTF_8.name()));
                } else {
                    // use default encoding (given as header or ISO-8859-1)
                    args.add(item.getFieldName(), item.getString());
                }
            } else {
                // read file upload
                args.add(item.getFieldName(), item.getName()); // add the filename to the parameters
                InputStream filecontent = null;
                try {
                    filecontent = item.getInputStream();
                    files.put(new AbstractMap.SimpleEntry<String, byte[]>(item.getFieldName(),
                            FileUtils.read(filecontent)));
                } catch (IOException e) {
                    ConcurrentLog.info("FILEHANDLER", e.getMessage());
                } finally {
                    if (filecontent != null)
                        try {
                            filecontent.close();
                        } catch (IOException e) {
                            ConcurrentLog.info("FILEHANDLER", e.getMessage());
                        }
                }
            }
        }
        if (files.size() <= 1) { // TODO: should include additional checks to limit parameter size below the relatively large SIZE_FILE_THRESHOLD
            for (Map.Entry<String, byte[]> job : files) { // add the file content to parameter fieldname$file
                String n = job.getKey();
                byte[] v = job.getValue();
                String filename = args.get(n);
                if (filename != null && filename.endsWith(".gz")) {
                    // transform this value into base64
                    String b64 = Base64Order.standardCoder.encode(v);
                    args.put(n + "$file", b64);
                    args.remove(n);
                    args.put(n, filename + ".base64");
                } else {
                    args.put(n + "$file", v); // the byte[] is transformed into UTF8. You cannot push binaries here
                }
            }
        } else {
            // do this concurrently (this would all be superfluous if serverObjects could store byte[] instead only String)
            int t = Math.min(files.size(), Runtime.getRuntime().availableProcessors());
            final Map.Entry<String, byte[]> POISON = new AbstractMap.SimpleEntry<>(null, null);
            Thread[] p = new Thread[t];
            for (int j = 0; j < t; j++) {
                files.put(POISON);
                p[j] = new Thread("YaCyDefaultServlet.parseMultipart-" + j) {
                    @Override
                    public void run() {
                        Map.Entry<String, byte[]> job;
                        try {
                            while ((job = files.take()) != POISON) {
                                String n = job.getKey();
                                byte[] v = job.getValue();
                                String filename = args.get(n);
                                String b64 = Base64Order.standardCoder.encode(v);
                                synchronized (args) {
                                    args.put(n + "$file", b64);
                                    args.remove(n);
                                    args.put(n, filename + ".base64");
                                }
                            }
                        } catch (InterruptedException e) {
                        }
                    }
                };
                p[j].start();
            }
            for (int j = 0; j < t; j++)
                p[j].join();
        }
    } catch (Exception ex) {
        ConcurrentLog.info("FILEHANDLER", ex.getMessage());
    }
}
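
Note the shutdown protocol in the concurrent branch: there are t consumer threads and each exits on the first sentinel it takes, so exactly one POISON entry is enqueued per thread, and the sentinel is compared by identity (!= POISON) rather than by equals(). A standalone sketch of the same idiom (all names illustrative):

import java.util.concurrent.*;

public class PoisonPillDemo {
    public static void main(String[] args) throws InterruptedException {
        final BlockingQueue<String> jobs = new LinkedBlockingQueue<String>();
        for (int i = 0; i < 20; i++) {
            jobs.put("job-" + i);
        }

        final String POISON = new String("poison"); // sentinel, compared by identity
        int t = Runtime.getRuntime().availableProcessors();
        Thread[] workers = new Thread[t];
        for (int j = 0; j < t; j++) {
            jobs.put(POISON); // one pill per worker, queued after all real jobs
            workers[j] = new Thread(() -> {
                try {
                    String job;
                    while ((job = jobs.take()) != POISON) {
                        System.out.println(Thread.currentThread().getName() + ": " + job);
                    }
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            });
            workers[j].start();
        }
        for (Thread w : workers) {
            w.join();
        }
    }
}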

From source file:alluxio.master.file.DefaultFileSystemMaster.java

/**
 * Checks the consistency of the root in a multi-threaded and incremental fashion. This method
 * will only READ lock the directories and files actively being checked and release them after the
 * check on the file / directory is complete.
 *
 * @return a list of paths in Alluxio which are not consistent with the under storage
 * @throws InterruptedException if the thread is interrupted during execution
 * @throws IOException if an error occurs interacting with the under storage
 */
private List<AlluxioURI> startupCheckConsistency(final ExecutorService service)
        throws InterruptedException, IOException {
    /** A marker {@link StartupConsistencyChecker}s add to the queue to signal completion */
    final long completionMarker = -1;
    /** A shared queue of directories which have yet to be checked */
    final BlockingQueue<Long> dirsToCheck = new LinkedBlockingQueue<>();

    /**
     * A {@link Callable} which checks the consistency of a directory.
     */
    final class StartupConsistencyChecker implements Callable<List<AlluxioURI>> {
        /** The path to check, guaranteed to be a directory in Alluxio. */
        private final Long mFileId;

        /**
         * Creates a new callable which checks the consistency of a directory.
         * @param fileId the path to check
         */
        private StartupConsistencyChecker(Long fileId) {
            mFileId = fileId;
        }

        /**
         * Checks the consistency of the directory and all immediate children which are files. All
         * immediate children which are directories are added to the shared queue of directories to
         * check. The parent directory is READ locked during the entire call while the children are
         * READ locked only during the consistency check of the children files.
         *
         * @return a list of inconsistent uris
         * @throws IOException if an error occurs interacting with the under storage
         */
        @Override
        public List<AlluxioURI> call() throws IOException {
            List<AlluxioURI> inconsistentUris = new ArrayList<>();
            try (LockedInodePath dir = mInodeTree.lockFullInodePath(mFileId, InodeTree.LockMode.READ)) {
                Inode parentInode = dir.getInode();
                AlluxioURI parentUri = dir.getUri();
                if (!checkConsistencyInternal(parentInode, parentUri)) {
                    inconsistentUris.add(parentUri);
                }
                for (Inode childInode : ((InodeDirectory) parentInode).getChildren()) {
                    try {
                        childInode.lockReadAndCheckParent(parentInode);
                    } catch (InvalidPathException e) {
                        // This should be safe, continue.
                        LOG.debug("Error during startup check consistency, ignoring and continuing.", e);
                        continue;
                    }
                    try {
                        AlluxioURI childUri = parentUri.join(childInode.getName());
                        if (childInode.isDirectory()) {
                            dirsToCheck.add(childInode.getId());
                        } else {
                            if (!checkConsistencyInternal(childInode, childUri)) {
                                inconsistentUris.add(childUri);
                            }
                        }
                    } finally {
                        childInode.unlockRead();
                    }
                }
            } catch (FileDoesNotExistException e) {
                // This should be safe, continue.
                LOG.debug("A file scheduled for consistency check was deleted before the check.");
            } catch (InvalidPathException e) {
                // This should not happen.
                LOG.error("An invalid path was discovered during the consistency check, skipping.", e);
            }
            dirsToCheck.add(completionMarker);
            return inconsistentUris;
        }
    }

    // Add the root to the directories to check.
    dirsToCheck.add(mInodeTree.getRoot().getId());
    List<Future<List<AlluxioURI>>> results = new ArrayList<>();
    // Tracks how many checkers have been started.
    long started = 0;
    // Tracks how many checkers have completed.
    long completed = 0;
    do {
        Long fileId = dirsToCheck.take();
        if (fileId == completionMarker) { // A thread signaled completion.
            completed++;
        } else { // A new directory needs to be checked.
            StartupConsistencyChecker checker = new StartupConsistencyChecker(fileId);
            results.add(service.submit(checker));
            started++;
        }
    } while (started != completed);

    // Return the total set of inconsistent paths discovered.
    List<AlluxioURI> inconsistentUris = new ArrayList<>();
    for (Future<List<AlluxioURI>> result : results) {
        try {
            inconsistentUris.addAll(result.get());
        } catch (Exception e) {
            // This shouldn't happen, all futures should be complete.
            Throwables.propagate(e);
        }
    }
    service.shutdown();
    return inconsistentUris;
}
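
The coordination here deserves a closer look: the number of directories is not known up front, so every checker ends by adding a completionMarker to the same queue it receives work from, and the main loop blocks on take() until the count of markers matches the count of checkers started. A minimal sketch of the idiom over a hypothetical tree of numeric ids:

import java.util.concurrent.*;

public class CompletionMarkerDemo {
    private static final long MARKER = -1;

    public static void main(String[] args) throws InterruptedException {
        final ExecutorService service = Executors.newFixedThreadPool(4);
        final BlockingQueue<Long> queue = new LinkedBlockingQueue<Long>();
        queue.put(0L); // seed with the root id
        long started = 0;
        long completed = 0;
        do {
            final long id = queue.take();
            if (id == MARKER) { // a task signaled completion
                completed++;
            } else { // newly discovered work: check it in a worker
                started++;
                service.submit(() -> {
                    if (id < 7) { // "discover" two children of this node
                        queue.add(2 * id + 1);
                        queue.add(2 * id + 2);
                    }
                    queue.add(MARKER); // signal this task is done
                });
            }
        } while (started != completed);
        service.shutdown();
    }
}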