Example usage for java.util.concurrent LinkedBlockingDeque takeFirst

List of usage examples for java.util.concurrent LinkedBlockingDeque takeFirst

Introduction

On this page you can find example usage for java.util.concurrent LinkedBlockingDeque takeFirst.

Prototype

public E takeFirst() throws InterruptedException 
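
Before the full example below, here is a minimal, self-contained sketch (not taken from the source file; the class and element names are made up for illustration) showing what takeFirst does: the calling thread blocks until an element is available at the head of the deque, and the wait can be cut short by interruption, which surfaces as an InterruptedException.

import java.util.concurrent.LinkedBlockingDeque;

public class TakeFirstDemo {
    public static void main(String[] args) throws InterruptedException {
        // A deque shared between a producer thread and the main thread (demo only)
        final LinkedBlockingDeque<String> deque = new LinkedBlockingDeque<String>();

        Thread producer = new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    Thread.sleep(500);          // simulate work before the element is ready
                    deque.putLast("result-1");  // insert at the tail
                } catch (InterruptedException ex) {
                    Thread.currentThread().interrupt();
                }
            }
        });
        producer.start();

        // takeFirst() blocks until an element becomes available at the head of the
        // deque and throws InterruptedException if the waiting thread is interrupted
        String head = deque.takeFirst();
        System.out.println("Took: " + head);

        producer.join();
    }
}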

Source Link

Usage

From source file:edu.brown.hstore.PartitionExecutor.java

/**
 * Execute the given tasks and then block the current thread waiting for the
 * list of dependency_ids to come back from whatever it was we were supposed
 * to do... This is the slowest way to execute a bunch of WorkFragments and
 * therefore should only be invoked for batches that need to access
 * non-local Partitions
 * 
 * @param ts
 * @param fragments
 * @param parameters
 * @return
 */
public VoltTable[] dispatchWorkFragments(final LocalTransaction ts, final int batchSize,
        Collection<WorkFragment> fragments, final ParameterSet parameters[]) {
    assert (fragments.isEmpty() == false) : "Unexpected empty WorkFragment list for " + ts;

    // *********************************** DEBUG ***********************************
    if (d) {
        LOG.debug(String.format("%s - Preparing to dispatch %d messages and wait for the results", ts,
                fragments.size()));
        if (t) {
            StringBuilder sb = new StringBuilder();
            sb.append(ts + " - WorkFragments:\n");
            for (WorkFragment fragment : fragments) {
                sb.append(StringUtil.box(fragment.toString()) + "\n");
            } // FOR
            sb.append(ts + " - ParameterSets:\n");
            for (ParameterSet ps : parameters) {
                sb.append(ps + "\n");
            } // FOR
            LOG.trace(sb);
        }
    }
    // *********************************** DEBUG ***********************************

    // OPTIONAL: Check to make sure that this request is valid
    // (1) At least one of the WorkFragments needs to be executed on a remote partition
    // (2) All of the PlanFragment ids in the WorkFragments match this txn's Procedure
    if (hstore_conf.site.exec_validate_work && ts.isSysProc() == false) {
        LOG.warn(String.format("%s - Checking whether all of the WorkFragments are valid", ts));
        boolean has_remote = false;
        for (WorkFragment frag : fragments) {
            if (frag.getPartitionId() != this.partitionId) {
                has_remote = true;
            }
            for (int frag_id : frag.getFragmentIdList()) {
                PlanFragment catalog_frag = CatalogUtil.getPlanFragment(database, frag_id);
                Statement catalog_stmt = catalog_frag.getParent();
                assert (catalog_stmt != null);
                Procedure catalog_proc = catalog_stmt.getParent();
                if (catalog_proc.equals(ts.getProcedure()) == false) {
                    LOG.warn(ts.debug() + "\n" + fragments + "\n---- INVALID ----\n" + frag);
                    String msg = String.format("%s - Unexpected %s", ts, catalog_frag.fullName());
                    throw new ServerFaultException(msg, ts.getTransactionId());
                }
            }
        } // FOR
        if (has_remote == false) {
            LOG.warn(ts.debug() + "\n" + fragments);
            String msg = String.format(
                    "%s - Trying to execute all local single-partition queries using the slow-path!", ts);
            throw new ServerFaultException(msg, ts.getTransactionId());
        }
    }

    // We have to store all of the tasks in the TransactionState before we start
    // executing, otherwise there is a race condition where a task with input
    // dependencies will start running as soon as we get one response back from
    // another executor
    ts.initRound(this.partitionId, this.getNextUndoToken());
    ts.setBatchSize(batchSize);

    final boolean prefetch = ts.hasPrefetchQueries();
    final boolean predict_singlePartition = ts.isPredictSinglePartition();

    // Attach the ParameterSets to our transaction handle so that anybody on this
    // HStoreSite can access them directly without needing to deserialize them
    // from the WorkFragments
    ts.attachParameterSets(parameters);

    // Now if we have some work sent out to other partitions, we need to wait
    // until they come back. In the first part, we wait until all of our blocked
    // FragmentTaskMessages become unblocked
    LinkedBlockingDeque<Collection<WorkFragment>> queue = ts.getUnblockedWorkFragmentsQueue();

    boolean first = true;
    boolean serializedParams = false;
    CountDownLatch latch = null;
    boolean all_local = true;
    boolean is_localSite;
    boolean is_localPartition;
    int num_localPartition = 0;
    int num_localSite = 0;
    int num_remote = 0;
    int num_skipped = 0;
    int total = 0;

    // Run through this loop if:
    // (1) We have no pending errors
    // (2) This is our first time in the loop (first == true)
    // (3) If we know that there are still messages being blocked
    // (4) If we know that there are still unblocked messages that we need to process
    // (5) The latch for this round is still greater than zero
    while (ts.hasPendingError() == false
            && (first == true || ts.stillHasWorkFragments() || (latch != null && latch.getCount() > 0))) {
        if (t)
            LOG.trace(String.format("%s - [first=%s, stillHasWorkFragments=%s, latch=%s]", ts, first,
                    ts.stillHasWorkFragments(), queue.size(), latch));

        // If this is not the first time through the loop, then poll the queue
        // to get our list of fragments
        if (first == false) {
            all_local = true;
            is_localSite = false;
            is_localPartition = false;
            num_localPartition = 0;
            num_localSite = 0;
            num_remote = 0;
            num_skipped = 0;
            total = 0;

            if (t)
                LOG.trace(String.format("%s - Waiting for unblocked tasks on partition %d", ts,
                        this.partitionId));
            if (hstore_conf.site.txn_profiling)
                ts.profiler.startExecDtxnWork();
            try {
                fragments = queue.takeFirst(); // BLOCKING
            } catch (InterruptedException ex) {
                if (this.hstore_site.isShuttingDown() == false) {
                    LOG.error(String.format("%s - We were interrupted while waiting for blocked tasks", ts),
                            ex);
                }
                return (null);
            } finally {
                if (hstore_conf.site.txn_profiling)
                    ts.profiler.stopExecDtxnWork();
            }
        }
        assert (fragments != null);

        // If the list of unblocked fragments is empty, then we know that we have
        // dispatched all of the WorkFragments for the transaction's current SQLStmt
        // batch. That means we can just wait until all the results return to us.
        if (fragments.isEmpty()) {
            if (t)
                LOG.trace(ts + " - Got an empty list of WorkFragments. Blocking until dependencies arrive");
            break;
        }

        this.tmp_localWorkFragmentList.clear();
        if (predict_singlePartition == false) {
            this.tmp_remoteFragmentList.clear();
            this.tmp_localSiteFragmentList.clear();
        }

        // -------------------------------
        // FAST PATH: Assume everything is local
        // -------------------------------
        if (predict_singlePartition) {
            for (WorkFragment ftask : fragments) {
                if (first == false || ts.addWorkFragment(ftask) == false) {
                    this.tmp_localWorkFragmentList.add(ftask);
                    total++;
                    num_localPartition++;
                }
            } // FOR

            // We have to tell the TransactionState to start the round before we
            // send off the FragmentTasks for execution, since they might start
            // executing locally!
            if (first) {
                ts.startRound(this.partitionId);
                latch = ts.getDependencyLatch();
            }

            // Execute all of our WorkFragments quickly at our local
            // ExecutionEngine
            for (WorkFragment fragment : this.tmp_localWorkFragmentList) {
                if (d)
                    LOG.debug(String.format("Got unblocked FragmentTaskMessage for %s. Executing locally...",
                            ts));
                assert (fragment.getPartitionId() == this.partitionId) : String.format(
                        "Trying to process FragmentTaskMessage for %s on partition %d but it should have been sent to partition %d [singlePartition=%s]\n%s",
                        ts, this.partitionId, fragment.getPartitionId(), predict_singlePartition, fragment);
                ParameterSet fragmentParams[] = this.getFragmentParameters(ts, fragment, parameters);
                this.processWorkFragment(ts, fragment, fragmentParams);
            } // FOR
        }
        // -------------------------------
        // SLOW PATH: Mixed local and remote messages
        // -------------------------------
        else {
            // Look at each task and figure out whether it needs to be executed at
            // a remote HStoreSite or whether we can execute it at one of our local
            // PartitionExecutors.
            for (WorkFragment fragment : fragments) {
                int partition = fragment.getPartitionId();
                is_localSite = hstore_site.isLocalPartition(partition);
                is_localPartition = (partition == this.partitionId);
                all_local = all_local && is_localPartition;
                if (first == false || ts.addWorkFragment(fragment) == false) {
                    total++;

                    // At this point we know that the WorkFragment has been registered
                    // in the LocalTransaction, so it's safe for us to look to see
                    // whether we already have a prefetched result that we need
                    if (prefetch && is_localPartition == false) {
                        boolean skip_queue = true;
                        for (int i = 0, cnt = fragment.getFragmentIdCount(); i < cnt; i++) {
                            int fragId = fragment.getFragmentId(i);
                            int paramIdx = fragment.getParamIndex(i);

                            VoltTable vt = this.queryCache.getTransactionCachedResult(ts.getTransactionId(),
                                    fragId, partition, parameters[paramIdx]);
                            if (vt != null) {
                                ts.addResult(partition, fragment.getOutputDepId(i), vt);
                            } else {
                                skip_queue = false;
                            }
                        } // FOR
                          // If we were able to get cached results for all
                          // of the fragmentIds in
                          // this WorkFragment, then there is no need for
                          // us
                          // to send the message
                          // So we'll just skip queuing it up! How nice!
                        if (skip_queue) {
                            if (d)
                                LOG.debug(String.format(
                                        "%s - Using prefetch result for all fragments from partition %d", ts,
                                        partition));
                            num_skipped++;
                            continue;
                        }
                    }

                    // Otherwise add it to our list of WorkFragments that we want to
                    // queue up right now
                    if (is_localPartition) {
                        this.tmp_localWorkFragmentList.add(fragment);
                        num_localPartition++;
                    } else if (is_localSite) {
                        this.tmp_localSiteFragmentList.add(fragment);
                        num_localSite++;
                    } else {
                        this.tmp_remoteFragmentList.add(fragment);
                        num_remote++;
                    }
                }
            } // FOR
            assert (total == (num_remote + num_localSite + num_localPartition + num_skipped)) : String.format(
                    "Total:%d / Remote:%d / LocalSite:%d / LocalPartition:%d / Skipped:%d", total, num_remote,
                    num_localSite, num_localPartition, num_skipped);
            if (num_localPartition == 0 && num_localSite == 0 && num_remote == 0 && num_skipped == 0) {
                String msg = String.format("Deadlock! All tasks for %s are blocked waiting on input!", ts);
                throw new ServerFaultException(msg, ts.getTransactionId());
            }

            // We have to tell the TransactionState to start the round before we
            // send off the FragmentTasks for execution, since they might start
            // executing locally!
            if (first) {
                ts.startRound(this.partitionId);
                latch = ts.getDependencyLatch();
            }

            // Now request the fragments that aren't local
            // We want to push these out as soon as possible
            if (num_remote > 0) {
                // We only need to serialize the ParameterSets once
                if (serializedParams == false) {
                    if (hstore_conf.site.txn_profiling)
                        ts.profiler.startSerialization();
                    tmp_serializedParams.clear();
                    for (int i = 0; i < parameters.length; i++) {
                        if (parameters[i] == null) {
                            tmp_serializedParams.add(ByteString.EMPTY);
                        } else {
                            this.fs.clear();
                            try {
                                parameters[i].writeExternal(this.fs);
                                ByteString bs = ByteString.copyFrom(this.fs.getBBContainer().b);
                                tmp_serializedParams.add(bs);
                            } catch (Exception ex) {
                                throw new ServerFaultException(
                                        "Failed to serialize ParameterSet " + i + " for " + ts, ex);
                            }
                        }
                    } // FOR
                    if (hstore_conf.site.txn_profiling)
                        ts.profiler.stopSerialization();
                }
                if (d)
                    LOG.debug(String.format(
                            "%s - Requesting %d FragmentTaskMessages to be executed on remote partitions", ts,
                            num_remote));
                this.requestWork(ts, tmp_remoteFragmentList, tmp_serializedParams);
            }

            // Then dispatch the tasks that are needed at the same HStoreSite but
            // at a different partition than this one
            if (num_localSite > 0) {
                if (d)
                    LOG.debug(String.format("%s - Executing %d FragmentTaskMessages on local site's partitions",
                            ts, num_localSite));
                for (WorkFragment fragment : this.tmp_localSiteFragmentList) {
                    FragmentTaskMessage ftask = ts.getFragmentTaskMessage(fragment);
                    hstore_site.getPartitionExecutor(fragment.getPartitionId()).queueWork(ts, ftask);
                } // FOR
            }

            // Then execute all of the tasks that need to access the partitions at
            // this HStoreSite. We'll dispatch the remote-partition-local-site
            // fragments first because they're going to need to get queued up at
            // the other PartitionExecutors
            if (num_localPartition > 0) {
                if (d)
                    LOG.debug(String.format("%s - Executing %d FragmentTaskMessages on local partition", ts,
                            num_localPartition));
                for (WorkFragment fragment : this.tmp_localWorkFragmentList) {
                    ParameterSet fragmentParams[] = this.getFragmentParameters(ts, fragment, parameters);
                    this.processWorkFragment(ts, fragment, fragmentParams);
                } // FOR
            }
        }
        if (t)
            LOG.trace(String.format(
                    "%s - Dispatched %d WorkFragments [remoteSite=%d, localSite=%d, localPartition=%d]", ts,
                    total, num_remote, num_localSite, num_localPartition));
        first = false;
    } // WHILE
    this.fs.getBBContainer().discard();

    if (t)
        LOG.trace(String.format("%s - BREAK OUT [first=%s, stillHasWorkFragments=%s, latch=%s]", ts, first,
                ts.stillHasWorkFragments(), latch));
    // assert(ts.stillHasWorkFragments() == false) :
    // String.format("Trying to block %s before all of its WorkFragments have been dispatched!\n%s\n%s",
    // ts,
    // StringUtil.join("** ", "\n", tempDebug),
    // this.getVoltProcedure(ts.getProcedureName()).getLastBatchPlan());

    // Now that we know all of our FragmentTaskMessages have been dispatched,
    // we can then wait for all of the results to come back in.
    if (latch == null)
        latch = ts.getDependencyLatch();
    if (latch.getCount() > 0) {
        if (d) {
            LOG.debug(String.format("%s - All blocked messages dispatched. Waiting for %d dependencies", ts,
                    latch.getCount()));
            if (t)
                LOG.trace(ts.toString());
        }
        if (hstore_conf.site.txn_profiling)
            ts.profiler.startExecDtxnWork();
        boolean done = false;
        // XXX this.utilityWork(latch);
        try {
            done = latch.await(hstore_conf.site.exec_response_timeout, TimeUnit.MILLISECONDS);
        } catch (InterruptedException ex) {
            if (this.hstore_site.isShuttingDown() == false) {
                LOG.error(String.format("%s - We were interrupted while waiting for results", ts), ex);
            }
            done = true;
        } catch (Throwable ex) {
            new ServerFaultException(String.format("Fatal error for %s while waiting for results", ts), ex);
        } finally {
            if (hstore_conf.site.txn_profiling)
                ts.profiler.stopExecDtxnWork();
        }
        if (done == false && this.isShuttingDown() == false) {
            LOG.warn(String.format("Still waiting for responses for %s after %d ms [latch=%d]\n%s", ts,
                    hstore_conf.site.exec_response_timeout, latch.getCount(), ts.debug()));
            LOG.warn("Procedure Parameters:\n" + ts.getInvocation().getParams());
            hstore_conf.site.exec_profiling = true;
            LOG.warn(hstore_site.statusSnapshot());

            String msg = "PartitionResponses for " + ts + " never arrived!";
            throw new ServerFaultException(msg, ts.getTransactionId());
        }
    }

    // IMPORTANT: Check whether the fragments failed somewhere and we got a
    // response with an error. We will rethrow this so that it pops the stack
    // all the way back to VoltProcedure.call() where we can generate a message
    // to the client
    if (ts.hasPendingError()) {
        if (d)
            LOG.warn(
                    String.format("%s was hit with a %s", ts, ts.getPendingError().getClass().getSimpleName()));
        throw ts.getPendingError();
    }

    // IMPORTANT: Don't try to check whether we got back the right number of
    // tables because the batch may have hit an error and we didn't execute
    // all of them.
    VoltTable results[] = ts.getResults();
    ts.finishRound(this.partitionId);
    if (d) {
        if (t)
            LOG.trace(ts + " is now running and looking for love in all the wrong places...");
        LOG.debug(ts + " is returning back " + results.length + " tables to VoltProcedure");
    }
    return (results);
}
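
The takeFirst call in the example above is the blocking dequeue of unblocked WorkFragment batches: the dispatch loop blocks on ts.getUnblockedWorkFragmentsQueue().takeFirst() until another thread unblocks a batch, treats an empty batch as a signal that everything has been dispatched, and handles InterruptedException as part of shutdown. The following stripped-down sketch (hypothetical names, not part of H-Store) shows just that pattern in isolation.

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.LinkedBlockingDeque;

public class FragmentDispatchLoopSketch {
    // Hypothetical stand-in for the transaction's queue of unblocked fragment batches
    private final LinkedBlockingDeque<List<String>> unblockedQueue = new LinkedBlockingDeque<List<String>>();

    private volatile boolean shuttingDown = false;

    // Called by other threads when a batch of fragments becomes runnable
    public void unblock(List<String> fragments) {
        this.unblockedQueue.offerLast(fragments);
    }

    // Simplified dispatch loop: block on takeFirst() until a batch arrives, stop on
    // an empty batch (sentinel), and treat an interrupt as a shutdown signal
    public void dispatchLoop() {
        while (true) {
            List<String> fragments;
            try {
                fragments = this.unblockedQueue.takeFirst(); // BLOCKING
            } catch (InterruptedException ex) {
                if (this.shuttingDown == false) {
                    System.err.println("Interrupted while waiting for unblocked fragments");
                }
                return;
            }
            if (fragments.isEmpty()) {
                // An empty list means everything has been dispatched; results are awaited elsewhere
                break;
            }
            for (String fragment : fragments) {
                System.out.println("Dispatching " + fragment);
            }
        }
    }

    public static void main(String[] args) throws InterruptedException {
        final FragmentDispatchLoopSketch sketch = new FragmentDispatchLoopSketch();
        Thread consumer = new Thread(new Runnable() {
            @Override
            public void run() {
                sketch.dispatchLoop();
            }
        });
        consumer.start();

        sketch.unblock(Arrays.asList("frag-1", "frag-2"));
        Thread.sleep(100);
        sketch.unblock(Collections.<String>emptyList()); // sentinel: nothing left to dispatch
        consumer.join();
    }
}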