Example usage for java.util.concurrent BlockingQueue offer

Introduction

On this page you can find example usage for java.util.concurrent BlockingQueue offer.

Prototype

boolean offer(E e);

Document

Inserts the specified element into this queue if it is possible to do so immediately without violating capacity restrictions, returning true upon success and false if no space is currently available.
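
As a quick illustration of that contract, the following stand-alone sketch (not drawn from the projects below) shows offer returning false immediately, rather than blocking, when a bounded queue is full:

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class OfferDemo {
    public static void main(String[] args) {
        // a bounded queue with capacity 1
        BlockingQueue<String> queue = new ArrayBlockingQueue<>(1);
        System.out.println(queue.offer("first"));  // true: space was available
        System.out.println(queue.offer("second")); // false: full, returns without blocking
    }
}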

Usage

From source file:org.apache.hadoop.hbase.io.hfile.bucket.BucketCache.java

/**
 * Cache the block to ramCache
 * @param cacheKey block's cache key
 * @param cachedItem block buffer
 * @param inMemory if block is in-memory
 * @param wait if true, blocking wait when queue is full
 */
public void cacheBlockWithWait(BlockCacheKey cacheKey, Cacheable cachedItem, boolean inMemory, boolean wait) {
    if (!cacheEnabled)
        return;

    if (backingMap.containsKey(cacheKey) || ramCache.containsKey(cacheKey))
        return;

    /*
     * Stuff the entry into the RAM cache so it can get drained to the
     * persistent store
     */
    RAMQueueEntry re = new RAMQueueEntry(cacheKey, cachedItem, accessCount.incrementAndGet(), inMemory);
    ramCache.put(cacheKey, re);
    int queueNum = (cacheKey.hashCode() & 0x7FFFFFFF) % writerQueues.size();
    BlockingQueue<RAMQueueEntry> bq = writerQueues.get(queueNum);
    boolean successfulAddition = bq.offer(re);
    if (!successfulAddition && wait) {
        synchronized (cacheWaitSignals[queueNum]) {
            try {
                cacheWaitSignals[queueNum].wait(DEFAULT_CACHE_WAIT_TIME);
            } catch (InterruptedException ie) {
                Thread.currentThread().interrupt();
            }
        }
        successfulAddition = bq.offer(re);
    }
    if (!successfulAddition) {
        ramCache.remove(cacheKey);
        failedBlockAdditions.incrementAndGet();
    } else {
        this.blockNumber.incrementAndGet();
        this.heapSize.addAndGet(cachedItem.heapSize());
        blocksByHFile.put(cacheKey.getHfileName(), cacheKey);
    }
}
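
The wait-and-retry logic above hand-rolls a bounded wait around the non-blocking offer. The timed overload offer(E, long, TimeUnit) expresses a similar "wait briefly for space, then give up" idea directly; a minimal stand-alone sketch, not part of the BucketCache code:

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;

public class TimedOfferDemo {
    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<Integer> queue = new ArrayBlockingQueue<>(1);
        queue.offer(1);
        // wait up to 100 ms for space to appear before giving up
        boolean added = queue.offer(2, 100, TimeUnit.MILLISECONDS);
        System.out.println(added); // false: the queue stayed full
    }
}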

From source file:org.apache.hadoop.hbase.ipc.BalancedQueueRpcExecutor.java

@Override
public boolean dispatch(final CallRunner callTask) throws InterruptedException {
    int queueIndex = balancer.getNextQueue();
    BlockingQueue<CallRunner> queue = queues.get(queueIndex);
    // that means we can overflow by at most <num reader> size (5), that's ok
    if (queue.size() >= currentQueueLimit) {
        return false;
    }
    return queue.offer(callTask);
}
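
Note that the size() check and the offer are two separate operations, which is why the comment accepts a small overflow: several dispatcher threads can pass the check before any of them offers. If the limit were fixed, a capacity-bounded queue could enforce it atomically through offer itself; a hypothetical sketch, not the HBase implementation:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class CapacityBoundedOffer {
    public static void main(String[] args) {
        // the capacity bound makes offer itself reject new elements when full
        BlockingQueue<Runnable> queue = new LinkedBlockingQueue<>(2);
        System.out.println(queue.offer(() -> { })); // true
        System.out.println(queue.offer(() -> { })); // true
        System.out.println(queue.offer(() -> { })); // false: at capacity
    }
}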

From source file:org.apache.hadoop.ipc.FairCallQueue.java

/**
 * Put and offer follow the same pattern:
 * 1. Get the assigned priorityLevel from the call via the scheduler
 * 2. Get the nth sub-queue matching this priorityLevel
 * 3. Delegate the call to this sub-queue.
 *
 * They differ in how they handle overflow:
 * - Put will move on to the next queue until it lands on the last queue
 * - Offer does not attempt other queues on overflow
 */
@Override
public void put(E e) throws InterruptedException {
    int priorityLevel = e.getPriorityLevel();

    final int numLevels = this.queues.size();
    while (true) {
        BlockingQueue<E> q = this.queues.get(priorityLevel);
        boolean res = q.offer(e);
        if (!res) {
            // Update stats
            this.overflowedCalls.get(priorityLevel).getAndIncrement();

            // If we failed to insert, try again on the next level
            priorityLevel++;

            if (priorityLevel == numLevels) {
                // That was the last one, we will block on put in the last queue
                // Delete this line to drop the call
                this.queues.get(priorityLevel - 1).put(e);
                break;
            }
        } else {
            break;
        }
    }

    signalNotEmpty();
}
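
Note the asymmetry this creates: every priority level is first tried with the non-blocking offer, and only the last (lowest-priority) queue may block via put, so a producer stalls only once every level is full.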

From source file:org.apache.hadoop.ipc.FairCallQueue.java

@Override
public boolean offer(E e) {
    int priorityLevel = e.getPriorityLevel();
    BlockingQueue<E> q = this.queues.get(priorityLevel);
    boolean ret = q.offer(e);
    if (ret) {
        signalNotEmpty();
    }
    return ret;
}

From source file:org.apache.nifi.controller.repository.FileSystemRepository.java

private long destroyExpiredArchives(final String containerName, final Path container) throws IOException {
    archiveExpirationLog.debug("Destroying Expired Archives for Container {}", containerName);
    final List<ArchiveInfo> notYetExceedingThreshold = new ArrayList<>();
    long removalTimeThreshold = System.currentTimeMillis() - maxArchiveMillis;
    long oldestArchiveDateFound = System.currentTimeMillis();

    // determine how much space we must have in order to stop deleting old data
    final Long minRequiredSpace = minUsableContainerBytesForArchive.get(containerName);
    if (minRequiredSpace == null) {
        archiveExpirationLog
                .debug("Could not determine minimum required space so will not destroy any archived data");
        return -1L;
    }

    final long usableSpace = getContainerUsableSpace(containerName);
    final ContainerState containerState = containerStateMap.get(containerName);

    // First, delete files from our queue
    final long startNanos = System.nanoTime();
    final long toFree = minRequiredSpace - usableSpace;
    final BlockingQueue<ArchiveInfo> fileQueue = archivedFiles.get(containerName);
    if (archiveExpirationLog.isDebugEnabled()) {
        if (toFree < 0) {
            archiveExpirationLog.debug(
                    "Currently {} bytes free for Container {}; requirement is {} byte free, so no need to free space until an additional {} bytes are used",
                    usableSpace, containerName, minRequiredSpace, Math.abs(toFree));
        } else {
            archiveExpirationLog.debug(
                    "Currently {} bytes free for Container {}; requirement is {} byte free, so need to free {} bytes",
                    usableSpace, containerName, minRequiredSpace, toFree);
        }
    }

    ArchiveInfo toDelete;
    int deleteCount = 0;
    long freed = 0L;
    while ((toDelete = fileQueue.peek()) != null) {
        try {
            final long fileSize = toDelete.getSize();

            removalTimeThreshold = System.currentTimeMillis() - maxArchiveMillis;

            // we use fileQueue.peek above instead of fileQueue.poll() because we don't always want to
            // remove the head of the queue. Instead, we want to remove it only if we plan to delete it.
            // In order to accomplish this, we just peek at the head and check if it should be deleted.
            // If so, then we call poll() to remove it
            if (freed < toFree || getLastModTime(toDelete.toPath()) < removalTimeThreshold) {
                toDelete = fileQueue.poll(); // remove the head of the queue, which is already stored in 'toDelete'
                Files.deleteIfExists(toDelete.toPath());
                containerState.decrementArchiveCount();
                LOG.debug(
                        "Deleted archived ContentClaim with ID {} from Container {} because the archival size was exceeding the max configured size",
                        toDelete.getName(), containerName);
                freed += fileSize;
                deleteCount++;
            }

            // If we'd freed up enough space, we're done... unless the next file needs to be destroyed based on time.
            if (freed >= toFree) {
                // If the last mod time indicates that it should be removed, just continue loop.
                if (deleteBasedOnTimestamp(fileQueue, removalTimeThreshold)) {
                    archiveExpirationLog.debug(
                            "Freed enough space ({} bytes freed, needed to free {} bytes) but will continue to expire data based on timestamp",
                            freed, toFree);
                    continue;
                }

                archiveExpirationLog.debug(
                        "Freed enough space ({} bytes freed, needed to free {} bytes). Finished expiring data",
                        freed, toFree);

                final ArchiveInfo archiveInfo = fileQueue.peek();
                final long oldestArchiveDate = archiveInfo == null ? System.currentTimeMillis()
                        : getLastModTime(archiveInfo.toPath());

                // Otherwise, we're done. Return the last mod time of the oldest file in the container's archive.
                final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
                if (deleteCount > 0) {
                    LOG.info(
                            "Deleted {} files from archive for Container {}; oldest Archive Date is now {}; container cleanup took {} millis",
                            deleteCount, containerName, new Date(oldestArchiveDate), millis);
                } else {
                    LOG.debug(
                            "Deleted {} files from archive for Container {}; oldest Archive Date is now {}; container cleanup took {} millis",
                            deleteCount, containerName, new Date(oldestArchiveDate), millis);
                }

                return oldestArchiveDate;
            }
        } catch (final IOException ioe) {
            LOG.warn("Failed to delete {} from archive due to {}", toDelete, ioe.toString());
            if (LOG.isDebugEnabled()) {
                LOG.warn("", ioe);
            }
        }
    }

    // Go through each container and grab the archived data into a List
    archiveExpirationLog.debug("Searching for more archived data to expire");
    final StopWatch stopWatch = new StopWatch(true);
    for (int i = 0; i < SECTIONS_PER_CONTAINER; i++) {
        final Path sectionContainer = container.resolve(String.valueOf(i));
        final Path archive = sectionContainer.resolve("archive");
        if (!Files.exists(archive)) {
            continue;
        }

        try {
            final long timestampThreshold = removalTimeThreshold;
            Files.walkFileTree(archive, new SimpleFileVisitor<Path>() {
                @Override
                public FileVisitResult visitFile(final Path file, final BasicFileAttributes attrs)
                        throws IOException {
                    if (attrs.isDirectory()) {
                        return FileVisitResult.CONTINUE;
                    }

                    final long lastModTime = getLastModTime(file);
                    if (lastModTime < timestampThreshold) {
                        try {
                            Files.deleteIfExists(file);
                            containerState.decrementArchiveCount();
                            LOG.debug(
                                    "Deleted archived ContentClaim with ID {} from Container {} because it was older than the configured max archival duration",
                                    file.toFile().getName(), containerName);
                        } catch (final IOException ioe) {
                            LOG.warn(
                                    "Failed to remove archived ContentClaim with ID {} from Container {} due to {}",
                                    file.toFile().getName(), containerName, ioe.toString());
                            if (LOG.isDebugEnabled()) {
                                LOG.warn("", ioe);
                            }
                        }
                    } else if (usableSpace < minRequiredSpace) {
                        notYetExceedingThreshold
                                .add(new ArchiveInfo(container, file, attrs.size(), lastModTime));
                    }

                    return FileVisitResult.CONTINUE;
                }
            });
        } catch (final IOException ioe) {
            LOG.warn("Failed to cleanup archived files in {} due to {}", archive, ioe.toString());
            if (LOG.isDebugEnabled()) {
                LOG.warn("", ioe);
            }
        }
    }
    final long deleteExpiredMillis = stopWatch.getElapsed(TimeUnit.MILLISECONDS);

    // Sort the list according to last modified time
    Collections.sort(notYetExceedingThreshold, new Comparator<ArchiveInfo>() {
        @Override
        public int compare(final ArchiveInfo o1, final ArchiveInfo o2) {
            return Long.compare(o1.getLastModTime(), o2.getLastModTime());
        }
    });

    final long sortRemainingMillis = stopWatch.getElapsed(TimeUnit.MILLISECONDS) - deleteExpiredMillis;

    // Delete the oldest data
    archiveExpirationLog.debug("Deleting data based on timestamp");
    final Iterator<ArchiveInfo> itr = notYetExceedingThreshold.iterator();
    int counter = 0;
    while (itr.hasNext()) {
        final ArchiveInfo archiveInfo = itr.next();

        try {
            final Path path = archiveInfo.toPath();
            Files.deleteIfExists(path);
            containerState.decrementArchiveCount();
            LOG.debug(
                    "Deleted archived ContentClaim with ID {} from Container {} because the archival size was exceeding the max configured size",
                    archiveInfo.getName(), containerName);

            // Check if we've freed enough space every 25 files that we destroy
            if (++counter % 25 == 0) {
                if (getContainerUsableSpace(containerName) > minRequiredSpace) { // check if we can stop now
                    LOG.debug("Finished cleaning up archive for Container {}", containerName);
                    break;
                }
            }
        } catch (final IOException ioe) {
            LOG.warn("Failed to delete {} from archive due to {}", archiveInfo, ioe.toString());
            if (LOG.isDebugEnabled()) {
                LOG.warn("", ioe);
            }
        }

        itr.remove();
    }

    final long deleteOldestMillis = stopWatch.getElapsed(TimeUnit.MILLISECONDS) - sortRemainingMillis
            - deleteExpiredMillis;

    long oldestContainerArchive;
    if (notYetExceedingThreshold.isEmpty()) {
        oldestContainerArchive = System.currentTimeMillis();
    } else {
        oldestContainerArchive = notYetExceedingThreshold.get(0).getLastModTime();
    }

    if (oldestContainerArchive < oldestArchiveDateFound) {
        oldestArchiveDateFound = oldestContainerArchive;
    }

    // Queue up the files in the order that they should be destroyed so that we don't have to scan the directories for a while.
    for (final ArchiveInfo toEnqueue : notYetExceedingThreshold.subList(0,
            Math.min(100000, notYetExceedingThreshold.size()))) {
        fileQueue.offer(toEnqueue);
    }

    final long cleanupMillis = stopWatch.getElapsed(TimeUnit.MILLISECONDS) - deleteOldestMillis
            - sortRemainingMillis - deleteExpiredMillis;
    LOG.debug(
            "Oldest Archive Date for Container {} is {}; delete expired = {} ms, sort remaining = {} ms, delete oldest = {} ms, cleanup = {} ms",
            containerName, new Date(oldestContainerArchive), deleteExpiredMillis, sortRemainingMillis,
            deleteOldestMillis, cleanupMillis);
    return oldestContainerArchive;
}
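
A failed offer near the end of this method is apparently harmless: the per-container queue is only a cache of the directory scan, so any ArchiveInfo that does not fit is simply rediscovered by the file-tree walk on a later invocation.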

From source file:org.apache.nifi.processors.standard.FetchFileTransfer.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final StopWatch stopWatch = new StopWatch(true);
    final String host = context.getProperty(HOSTNAME).evaluateAttributeExpressions(flowFile).getValue();
    final int port = context.getProperty(UNDEFAULTED_PORT).evaluateAttributeExpressions(flowFile).asInteger();
    final String filename = context.getProperty(REMOTE_FILENAME).evaluateAttributeExpressions(flowFile)
            .getValue();

    // Try to get a FileTransfer object from our cache.
    BlockingQueue<FileTransferIdleWrapper> transferQueue;
    synchronized (fileTransferMap) {
        final Tuple<String, Integer> tuple = new Tuple<>(host, port);

        transferQueue = fileTransferMap.get(tuple);
        if (transferQueue == null) {
            transferQueue = new LinkedBlockingQueue<>();
            fileTransferMap.put(tuple, transferQueue);
        }

        // periodically close idle connections
        if (System.currentTimeMillis() - lastClearTime > IDLE_CONNECTION_MILLIS) {
            closeConnections(false);
            lastClearTime = System.currentTimeMillis();
        }
    }

    // we have a queue of FileTransfer Objects. Get one from the queue or create a new one.
    FileTransfer transfer;
    FileTransferIdleWrapper transferWrapper = transferQueue.poll();
    if (transferWrapper == null) {
        transfer = createFileTransfer(context);
    } else {
        transfer = transferWrapper.getFileTransfer();
    }

    // Pull data from remote system.
    final InputStream in;
    try {
        in = transfer.getInputStream(filename, flowFile);

        flowFile = session.write(flowFile, new OutputStreamCallback() {
            @Override
            public void process(final OutputStream out) throws IOException {
                StreamUtils.copy(in, out);
                transfer.flush();
            }
        });
        transferQueue.offer(new FileTransferIdleWrapper(transfer, System.nanoTime()));
    } catch (final FileNotFoundException e) {
        getLogger().error(
                "Failed to fetch content for {} from filename {} on remote host {} because the file could not be found on the remote system; routing to {}",
                new Object[] { flowFile, filename, host, REL_NOT_FOUND.getName() });
        session.transfer(session.penalize(flowFile), REL_NOT_FOUND);
        session.getProvenanceReporter().route(flowFile, REL_NOT_FOUND);
        return;
    } catch (final PermissionDeniedException e) {
        getLogger().error(
                "Failed to fetch content for {} from filename {} on remote host {} due to insufficient permissions; routing to {}",
                new Object[] { flowFile, filename, host, REL_PERMISSION_DENIED.getName() });
        session.transfer(session.penalize(flowFile), REL_PERMISSION_DENIED);
        session.getProvenanceReporter().route(flowFile, REL_PERMISSION_DENIED);
        return;
    } catch (final ProcessException | IOException e) {
        try {
            transfer.close();
        } catch (final IOException e1) {
            getLogger().warn("Failed to close connection to {}:{} due to {}",
                    new Object[] { host, port, e.toString() }, e);
        }

        getLogger().error(
                "Failed to fetch content for {} from filename {} on remote host {}:{} due to {}; routing to comms.failure",
                new Object[] { flowFile, filename, host, port, e.toString() }, e);
        session.transfer(session.penalize(flowFile), REL_COMMS_FAILURE);
        return;
    }

    // Add FlowFile attributes
    final String protocolName = transfer.getProtocolName();
    final Map<String, String> attributes = new HashMap<>();
    attributes.put(protocolName + ".remote.host", host);
    attributes.put(protocolName + ".remote.port", String.valueOf(port));
    attributes.put(protocolName + ".remote.filename", filename);

    if (filename.contains("/")) {
        final String path = StringUtils.substringBeforeLast(filename, "/");
        final String filenameOnly = StringUtils.substringAfterLast(filename, "/");
        attributes.put(CoreAttributes.PATH.key(), path);
        attributes.put(CoreAttributes.FILENAME.key(), filenameOnly);
    } else {
        attributes.put(CoreAttributes.FILENAME.key(), filename);
    }
    flowFile = session.putAllAttributes(flowFile, attributes);

    // emit provenance event and transfer FlowFile
    session.getProvenanceReporter().fetch(flowFile, protocolName + "://" + host + ":" + port + "/" + filename,
            stopWatch.getElapsed(TimeUnit.MILLISECONDS));
    session.transfer(flowFile, REL_SUCCESS);

    // it is critical that we commit the session before moving/deleting the remote file. Otherwise, we could have a situation where
    // we ingest the data, delete/move the remote file, and then NiFi dies/is shut down before the session is committed. This would
    // result in data loss! If we commit the session first, we are safe.
    session.commit();

    final String completionStrategy = context.getProperty(COMPLETION_STRATEGY).getValue();
    if (COMPLETION_DELETE.getValue().equalsIgnoreCase(completionStrategy)) {
        try {
            transfer.deleteFile(null, filename);
        } catch (final FileNotFoundException e) {
            // file doesn't exist -- effectively the same as removing it. Move on.
        } catch (final IOException ioe) {
            getLogger().warn(
                    "Successfully fetched the content for {} from {}:{}{} but failed to remove the remote file due to {}",
                    new Object[] { flowFile, host, port, filename, ioe }, ioe);
        }
    } else if (COMPLETION_MOVE.getValue().equalsIgnoreCase(completionStrategy)) {
        String targetDir = context.getProperty(MOVE_DESTINATION_DIR).evaluateAttributeExpressions(flowFile)
                .getValue();
        if (!targetDir.endsWith("/")) {
            targetDir = targetDir + "/";
        }
        final String simpleFilename = StringUtils.substringAfterLast(filename, "/");
        final String target = targetDir + simpleFilename;

        try {
            transfer.rename(filename, target);
        } catch (final IOException ioe) {
            getLogger().warn(
                    "Successfully fetched the content for {} from {}:{}{} but failed to rename the remote file due to {}",
                    new Object[] { flowFile, host, port, filename, ioe }, ioe);
        }
    }
}
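
Since transferQueue is created above as an unbounded LinkedBlockingQueue, the offer after a successful fetch effectively always succeeds; it simply returns the idle connection, wrapped with its last-used timestamp, to the per-host pool for reuse.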

From source file:org.springframework.integration.ftp.outbound.FtpServerOutboundTests.java

private Session<FTPFile> spyOnSession() {
    Session<FTPFile> session = spy(this.ftpSessionFactory.getSession());
    session.close();
    @SuppressWarnings("unchecked")
    BlockingQueue<Session<FTPFile>> cache = TestUtils.getPropertyValue(ftpSessionFactory, "pool.available",
            BlockingQueue.class);
    assertNotNull(cache.poll());
    cache.offer(session);
    @SuppressWarnings("unchecked")
    Set<Session<FTPFile>> allocated = TestUtils.getPropertyValue(ftpSessionFactory, "pool.allocated",
            Set.class);
    allocated.clear();
    allocated.add(session);
    return session;
}
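
Here offer is used to plant a Mockito spy in the session factory's pool: the real pooled session is drained with poll() and the spy is offered in its place, so the code under test receives the instrumented session on its next checkout.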