Example usage for java.util.concurrent LinkedBlockingDeque size

List of usage examples for java.util.concurrent LinkedBlockingDeque size

Introduction

On this page you can find example usages of java.util.concurrent LinkedBlockingDeque.size().

Prototype

public int size() 

Document

Returns the number of elements in this deque.
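
Before the real-world usages below, here is a minimal, self-contained sketch (not taken from either project) showing what size() reports as elements are added and removed:

import java.util.concurrent.LinkedBlockingDeque;

public class DequeSizeDemo {
    public static void main(String[] args) throws InterruptedException {
        // Unbounded deque; size() returns the current number of elements.
        LinkedBlockingDeque<String> deque = new LinkedBlockingDeque<>();

        deque.put("first");
        deque.put("second");
        System.out.println("size after two puts: " + deque.size()); // 2

        deque.takeFirst();
        System.out.println("size after one take: " + deque.size()); // 1

        // In concurrent code the returned value is only a snapshot and may be
        // stale by the time it is used.
    }
}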

Usage

From source file:edu.brown.hstore.PartitionExecutor.java

/**
 * Execute the given tasks and then block the current thread waiting for the
 * list of dependency_ids to come back from whatever it was we were supposed
 * to do... This is the slowest way to execute a bunch of WorkFragments and
 * therefore should only be invoked for batches that need to access
 * non-local Partitions
 * 
 * @param ts
 * @param fragments
 * @param parameters
 * @return
 */
public VoltTable[] dispatchWorkFragments(final LocalTransaction ts, final int batchSize,
        Collection<WorkFragment> fragments, final ParameterSet parameters[]) {
    assert (fragments.isEmpty() == false) : "Unexpected empty WorkFragment list for " + ts;

    // *********************************** DEBUG ***********************************
    if (d) {
        LOG.debug(String.format("%s - Preparing to dispatch %d messages and wait for the results", ts,
                fragments.size()));
        if (t) {
            StringBuilder sb = new StringBuilder();
            sb.append(ts + " - WorkFragments:\n");
            for (WorkFragment fragment : fragments) {
                sb.append(StringUtil.box(fragment.toString()) + "\n");
            } // FOR
            sb.append(ts + " - ParameterSets:\n");
            for (ParameterSet ps : parameters) {
                sb.append(ps + "\n");
            } // FOR
            LOG.trace(sb);
        }
    }
    // *********************************** DEBUG ***********************************

    // OPTIONAL: Check to make sure that this request is valid
    // (1) At least one of the WorkFragments needs to be executed on a remote partition
    // (2) All of the PlanFragment ids in the WorkFragments match this txn's Procedure
    if (hstore_conf.site.exec_validate_work && ts.isSysProc() == false) {
        LOG.warn(String.format("%s - Checking whether all of the WorkFragments are valid", ts));
        boolean has_remote = false;
        for (WorkFragment frag : fragments) {
            if (frag.getPartitionId() != this.partitionId) {
                has_remote = true;
            }
            for (int frag_id : frag.getFragmentIdList()) {
                PlanFragment catalog_frag = CatalogUtil.getPlanFragment(database, frag_id);
                Statement catalog_stmt = catalog_frag.getParent();
                assert (catalog_stmt != null);
                Procedure catalog_proc = catalog_stmt.getParent();
                if (catalog_proc.equals(ts.getProcedure()) == false) {
                    LOG.warn(ts.debug() + "\n" + fragments + "\n---- INVALID ----\n" + frag);
                    String msg = String.format("%s - Unexpected %s", ts, catalog_frag.fullName());
                    throw new ServerFaultException(msg, ts.getTransactionId());
                }
            }
        } // FOR
        if (has_remote == false) {
            LOG.warn(ts.debug() + "\n" + fragments);
            String msg = String.format(
                    "%s - Trying to execute all local single-partition queries using the slow-path!", ts);
            throw new ServerFaultException(msg, ts.getTransactionId());
        }
    }

    // We have to store all of the tasks in the TransactionState before we start
    // executing, otherwise there is a race condition where a task with input
    // dependencies will start running as soon as we get one response back from
    // another executor
    ts.initRound(this.partitionId, this.getNextUndoToken());
    ts.setBatchSize(batchSize);

    final boolean prefetch = ts.hasPrefetchQueries();
    final boolean predict_singlePartition = ts.isPredictSinglePartition();

    // Attach the ParameterSets to our transaction handle so that anybody on this
    // HStoreSite can access them directly without needing to deserialize them
    // from the WorkFragments
    ts.attachParameterSets(parameters);

    // Now if we have some work sent out to other partitions, we need to wait
    // until they come back. In the first part, we wait until all of our blocked
    // FragmentTaskMessages become unblocked
    LinkedBlockingDeque<Collection<WorkFragment>> queue = ts.getUnblockedWorkFragmentsQueue();

    boolean first = true;
    boolean serializedParams = false;
    CountDownLatch latch = null;
    boolean all_local = true;
    boolean is_localSite;
    boolean is_localPartition;
    int num_localPartition = 0;
    int num_localSite = 0;
    int num_remote = 0;
    int num_skipped = 0;
    int total = 0;

    // Run through this loop if:
    // (1) We have no pending errors
    // (2) This is our first time in the loop (first == true)
    // (3) If we know that there are still messages being blocked
    // (4) If we know that there are still unblocked messages that we need to process
    // (5) The latch for this round is still greater than zero
    while (ts.hasPendingError() == false
            && (first == true || ts.stillHasWorkFragments() || (latch != null && latch.getCount() > 0))) {
        if (t)
            LOG.trace(String.format("%s - [first=%s, stillHasWorkFragments=%s, queueSize=%d, latch=%s]", ts,
                    first, ts.stillHasWorkFragments(), queue.size(), latch));

        // If this is not the first time through the loop, then poll the queue
        // to get our list of fragments
        if (first == false) {
            all_local = true;
            is_localSite = false;
            is_localPartition = false;
            num_localPartition = 0;
            num_localSite = 0;
            num_remote = 0;
            num_skipped = 0;
            total = 0;

            if (t)
                LOG.trace(String.format("%s - Waiting for unblocked tasks on partition %d", ts,
                        this.partitionId));
            if (hstore_conf.site.txn_profiling)
                ts.profiler.startExecDtxnWork();
            try {
                fragments = queue.takeFirst(); // BLOCKING
            } catch (InterruptedException ex) {
                if (this.hstore_site.isShuttingDown() == false) {
                    LOG.error(String.format("%s - We were interrupted while waiting for blocked tasks", ts),
                            ex);
                }
                return (null);
            } finally {
                if (hstore_conf.site.txn_profiling)
                    ts.profiler.stopExecDtxnWork();
            }
        }
        assert (fragments != null);

        // If the list of fragments to unblock is empty, then we know that we have
        // dispatched all of the WorkFragments for the transaction's current SQLStmt
        // batch. That means we can just wait until all the results return to us.
        if (fragments.isEmpty()) {
            if (t)
                LOG.trace(ts + " - Got an empty list of WorkFragments. Blocking until dependencies arrive");
            break;
        }

        this.tmp_localWorkFragmentList.clear();
        if (predict_singlePartition == false) {
            this.tmp_remoteFragmentList.clear();
            this.tmp_localSiteFragmentList.clear();
        }

        // -------------------------------
        // FAST PATH: Assume everything is local
        // -------------------------------
        if (predict_singlePartition) {
            for (WorkFragment ftask : fragments) {
                if (first == false || ts.addWorkFragment(ftask) == false) {
                    this.tmp_localWorkFragmentList.add(ftask);
                    total++;
                    num_localPartition++;
                }
            } // FOR

            // We have to tell the TransactionState to start the round before we send
            // off the FragmentTasks for execution, since they might start executing
            // locally!
            if (first) {
                ts.startRound(this.partitionId);
                latch = ts.getDependencyLatch();
            }

            // Execute all of our WorkFragments quickly at our local
            // ExecutionEngine
            for (WorkFragment fragment : this.tmp_localWorkFragmentList) {
                if (d)
                    LOG.debug(String.format("Got unblocked FragmentTaskMessage for %s. Executing locally...",
                            ts));
                assert (fragment.getPartitionId() == this.partitionId) : String.format(
                        "Trying to process FragmentTaskMessage for %s on partition %d but it should have been sent to partition %d [singlePartition=%s]\n%s",
                        ts, this.partitionId, fragment.getPartitionId(), predict_singlePartition, fragment);
                ParameterSet fragmentParams[] = this.getFragmentParameters(ts, fragment, parameters);
                this.processWorkFragment(ts, fragment, fragmentParams);
            } // FOR
        }
        // -------------------------------
        // SLOW PATH: Mixed local and remote messages
        // -------------------------------
        else {
            // Look at each task and figure out whether it needs to be executed at a
            // remote HStoreSite or whether we can execute it at one of our local
            // PartitionExecutors.
            for (WorkFragment fragment : fragments) {
                int partition = fragment.getPartitionId();
                is_localSite = hstore_site.isLocalPartition(partition);
                is_localPartition = (partition == this.partitionId);
                all_local = all_local && is_localPartition;
                if (first == false || ts.addWorkFragment(fragment) == false) {
                    total++;

                    // At this point we know that all of the WorkFragments have been
                    // registered in the LocalTransaction, so it's safe for us to check
                    // whether we already have a prefetched result that we need
                    if (prefetch && is_localPartition == false) {
                        boolean skip_queue = true;
                        for (int i = 0, cnt = fragment.getFragmentIdCount(); i < cnt; i++) {
                            int fragId = fragment.getFragmentId(i);
                            int paramIdx = fragment.getParamIndex(i);

                            VoltTable vt = this.queryCache.getTransactionCachedResult(ts.getTransactionId(),
                                    fragId, partition, parameters[paramIdx]);
                            if (vt != null) {
                                ts.addResult(partition, fragment.getOutputDepId(i), vt);
                            } else {
                                skip_queue = false;
                            }
                        } // FOR
                        // If we were able to get cached results for all of the
                        // fragmentIds in this WorkFragment, then there is no need for us
                        // to send the message, so we'll just skip queuing it up! How nice!
                        if (skip_queue) {
                            if (d)
                                LOG.debug(String.format(
                                        "%s - Using prefetch result for all fragments from partition %d", ts,
                                        partition));
                            num_skipped++;
                            continue;
                        }
                    }

                    // Otherwise add it to our list of WorkFragments that we want to
                    // queue up right now
                    if (is_localPartition) {
                        this.tmp_localWorkFragmentList.add(fragment);
                        num_localPartition++;
                    } else if (is_localSite) {
                        this.tmp_localSiteFragmentList.add(fragment);
                        num_localSite++;
                    } else {
                        this.tmp_remoteFragmentList.add(fragment);
                        num_remote++;
                    }
                }
            } // FOR
            assert (total == (num_remote + num_localSite + num_localPartition + num_skipped)) : String.format(
                    "Total:%d / Remote:%d / LocalSite:%d / LocalPartition:%d / Skipped:%d", total, num_remote,
                    num_localSite, num_localPartition, num_skipped);
            if (num_localPartition == 0 && num_localSite == 0 && num_remote == 0 && num_skipped == 0) {
                String msg = String.format("Deadlock! All tasks for %s are blocked waiting on input!", ts);
                throw new ServerFaultException(msg, ts.getTransactionId());
            }

            // We have to tell the TransactionState to start the round before we send
            // off the FragmentTasks for execution, since they might start executing
            // locally!
            if (first) {
                ts.startRound(this.partitionId);
                latch = ts.getDependencyLatch();
            }

            // Now request the fragments that aren't local
            // We want to push these out as soon as possible
            if (num_remote > 0) {
                // We only need to serialize the ParameterSets once
                if (serializedParams == false) {
                    if (hstore_conf.site.txn_profiling)
                        ts.profiler.startSerialization();
                    tmp_serializedParams.clear();
                    for (int i = 0; i < parameters.length; i++) {
                        if (parameters[i] == null) {
                            tmp_serializedParams.add(ByteString.EMPTY);
                        } else {
                            this.fs.clear();
                            try {
                                parameters[i].writeExternal(this.fs);
                                ByteString bs = ByteString.copyFrom(this.fs.getBBContainer().b);
                                tmp_serializedParams.add(bs);
                            } catch (Exception ex) {
                                throw new ServerFaultException(
                                        "Failed to serialize ParameterSet " + i + " for " + ts, ex);
                            }
                        }
                    } // FOR
                    if (hstore_conf.site.txn_profiling)
                        ts.profiler.stopSerialization();
                }
                if (d)
                    LOG.debug(String.format(
                            "%s - Requesting %d FragmentTaskMessages to be executed on remote partitions", ts,
                            num_remote));
                this.requestWork(ts, tmp_remoteFragmentList, tmp_serializedParams);
            }

            // Then dispatch the tasks that are needed at the same HStoreSite but at
            // a different partition than this one
            if (num_localSite > 0) {
                if (d)
                    LOG.debug(String.format("%s - Executing %d FragmentTaskMessages on local site's partitions",
                            ts, num_localSite));
                for (WorkFragment fragment : this.tmp_localSiteFragmentList) {
                    FragmentTaskMessage ftask = ts.getFragmentTaskMessage(fragment);
                    hstore_site.getPartitionExecutor(fragment.getPartitionId()).queueWork(ts, ftask);
                } // FOR
            }

            // Then execute all of the tasks that need to access the partitions at this
            // HStoreSite. We'll dispatch the remote-partition-local-site fragments first
            // because they're going to need to get queued up at the other PartitionExecutors
            if (num_localPartition > 0) {
                if (d)
                    LOG.debug(String.format("%s - Executing %d FragmentTaskMessages on local partition", ts,
                            num_localPartition));
                for (WorkFragment fragment : this.tmp_localWorkFragmentList) {
                    ParameterSet fragmentParams[] = this.getFragmentParameters(ts, fragment, parameters);
                    this.processWorkFragment(ts, fragment, fragmentParams);
                } // FOR
            }
        }
        if (t)
            LOG.trace(String.format(
                    "%s - Dispatched %d WorkFragments [remoteSite=%d, localSite=%d, localPartition=%d]", ts,
                    total, num_remote, num_localSite, num_localPartition));
        first = false;
    } // WHILE
    this.fs.getBBContainer().discard();

    if (t)
        LOG.trace(String.format("%s - BREAK OUT [first=%s, stillHasWorkFragments=%s, latch=%s]", ts, first,
                ts.stillHasWorkFragments(), latch));
    // assert(ts.stillHasWorkFragments() == false) :
    // String.format("Trying to block %s before all of its WorkFragments have been dispatched!\n%s\n%s",
    // ts,
    // StringUtil.join("** ", "\n", tempDebug),
    // this.getVoltProcedure(ts.getProcedureName()).getLastBatchPlan());

    // Now that we know all of our FragmentTaskMessages have been dispatched,
    // we can then wait for all of the results to come back in.
    if (latch == null)
        latch = ts.getDependencyLatch();
    if (latch.getCount() > 0) {
        if (d) {
            LOG.debug(String.format("%s - All blocked messages dispatched. Waiting for %d dependencies", ts,
                    latch.getCount()));
            if (t)
                LOG.trace(ts.toString());
        }
        if (hstore_conf.site.txn_profiling)
            ts.profiler.startExecDtxnWork();
        boolean done = false;
        // XXX this.utilityWork(latch);
        try {
            done = latch.await(hstore_conf.site.exec_response_timeout, TimeUnit.MILLISECONDS);
        } catch (InterruptedException ex) {
            if (this.hstore_site.isShuttingDown() == false) {
                LOG.error(String.format("%s - We were interrupted while waiting for results", ts), ex);
            }
            done = true;
        } catch (Throwable ex) {
            throw new ServerFaultException(String.format("Fatal error for %s while waiting for results", ts), ex);
        } finally {
            if (hstore_conf.site.txn_profiling)
                ts.profiler.stopExecDtxnWork();
        }
        if (done == false && this.isShuttingDown() == false) {
            LOG.warn(String.format("Still waiting for responses for %s after %d ms [latch=%d]\n%s", ts,
                    hstore_conf.site.exec_response_timeout, latch.getCount(), ts.debug()));
            LOG.warn("Procedure Parameters:\n" + ts.getInvocation().getParams());
            hstore_conf.site.exec_profiling = true;
            LOG.warn(hstore_site.statusSnapshot());

            String msg = "PartitionResponses for " + ts + " never arrived!";
            throw new ServerFaultException(msg, ts.getTransactionId());
        }
    }

    // IMPORTANT: Check whether the fragments failed somewhere and we got a
    // response with an error. We will rethrow this so that it pops the stack
    // all the way back to VoltProcedure.call() where we can generate a message
    // to the client
    if (ts.hasPendingError()) {
        if (d)
            LOG.warn(
                    String.format("%s was hit with a %s", ts, ts.getPendingError().getClass().getSimpleName()));
        throw ts.getPendingError();
    }

    // IMPORTANT: Don't try to check whether we got back the right number of tables
    // because the batch may have hit an error and we didn't execute all of them.
    VoltTable results[] = ts.getResults();
    ts.finishRound(this.partitionId);
    if (d) {
        if (t)
            LOG.trace(ts + " is now running and looking for love in all the wrong places...");
        LOG.debug(ts + " is returning back " + results.length + " tables to VoltProcedure");
    }
    return (results);
}
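
In the H-Store example above, the deque size is read only for trace logging; the actual synchronization happens through the blocking takeFirst() call and a CountDownLatch, with an empty batch acting as the signal that everything has been dispatched. A stripped-down, hypothetical sketch of that pattern (the class and names are illustrative, not from H-Store):

import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.TimeUnit;

// Hypothetical dispatcher: consumes batches of work from a deque and then waits
// for outstanding results. size() is used for diagnostics only, never for control flow.
public class BatchDispatcher {

    private final LinkedBlockingDeque<List<Runnable>> unblockedBatches = new LinkedBlockingDeque<>();

    public void submit(List<Runnable> batch) {
        unblockedBatches.addLast(batch);
    }

    public void dispatchAndWait(CountDownLatch resultsLatch) throws InterruptedException {
        while (true) {
            List<Runnable> batch = unblockedBatches.takeFirst(); // BLOCKING, as in the loop above
            if (batch.isEmpty()) {
                break; // empty batch = everything dispatched; just wait for the results
            }
            System.out.printf("dispatching %d tasks, %d batches still queued%n",
                    batch.size(), unblockedBatches.size());
            batch.forEach(Runnable::run);
        }
        // Bounded wait for the remaining dependencies, mirroring latch.await(...) above.
        if (!resultsLatch.await(10, TimeUnit.SECONDS)) {
            throw new IllegalStateException("responses never arrived");
        }
    }
}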

From source file:net.tourbook.photo.internal.manager.PhotoImageLoader.java

/**
 * Image could not be loaded with {@link #loadImage()}, try to load high quality image.
 *
 * @param thumbImageWaitingQueue
 *            waiting queue for small images
 * @param exifWaitingQueue
 */
public void loadImageHQ(final LinkedBlockingDeque<PhotoImageLoader> thumbImageWaitingQueue,
        final LinkedBlockingDeque<PhotoExifLoader> exifWaitingQueue) {

    //      if (isImageVisible() == false) {
    //         setStateUndefined();
    //         return;
    //      }

    /*
     * wait until exif data and small images are loaded
     */
    try {
        while (thumbImageWaitingQueue.size() > 0 || exifWaitingQueue.size() > 0) {
            Thread.sleep(PhotoLoadManager.DELAY_TO_CHECK_WAITING_QUEUE);
        }
    } catch (final InterruptedException e) {
        // should not happen, I hope so
    }

    boolean isLoadingError = false;
    Image hqImage = null;

    try {

        /**
         * sometimes (when images are loaded concurrently) larger images could not be loaded
         * with SWT methods in Win7 (Eclipse 3.8 M6), try to load image with AWT. This bug fix
         * <code>https://bugs.eclipse.org/bugs/show_bug.cgi?id=350783</code> has not solved this
         * problem
         */

        // load original image and create thumbs

        if (_imageFramework.equals(PhotoLoadManager.IMAGE_FRAMEWORK_SWT)
                // use SWT when image format is not supported by AWT which is the case for tiff images
                || isAWTImageSupported() == false) {

            hqImage = loadImageHQ_10_WithSWT();
        } else {
            hqImage = loadImageHQ_20_WithAWT();
        }

    } catch (final Exception e) {

        setStateLoadingError();

        isLoadingError = true;

    } finally {

        disposeTrackedImages();

        if (hqImage == null) {

            System.out.println(NLS.bind(//
                    UI.timeStampNano() + " image == NULL when loading with {0}: \"{1}\"", //$NON-NLS-1$
                    _imageFramework.toUpperCase(), _photo.imageFilePathName));

            if (_imageFramework.equals(PhotoLoadManager.IMAGE_FRAMEWORK_AWT)) {

                /*
                 * AWT fails, try to load image with SWT
                 */

                try {

                    hqImage = loadImageHQ_10_WithSWT();

                } catch (final Exception e2) {

                    setStateLoadingError();

                    isLoadingError = true;

                } finally {

                    if (hqImage == null) {
                        System.out.println(NLS.bind(//
                                UI.timeStampNano() + " image == NULL when loading with SWT: \"{0}\"", //$NON-NLS-1$
                                _photo.imageFilePathName));
                    }
                }
            }
        }

        // update image state
        final boolean isImageLoaded = hqImage != null;
        if (isImageLoaded) {

            setStateUndefined();

        } else {

            setStateLoadingError();

            isLoadingError = true;
        }

        // display image in the loading callback
        _loadCallBack.callBackImageIsLoaded(isImageLoaded || isLoadingError);
    }
}
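
loadImageHQ() (and loadImageThumb() further below) uses size() purely as a throttle: it polls until the other loader queues are drained before doing expensive work. A minimal sketch of that pattern, assuming a hypothetical delay constant in place of PhotoLoadManager.DELAY_TO_CHECK_WAITING_QUEUE:

import java.util.concurrent.LinkedBlockingDeque;

public class QueueDrainWait {

    private static final long CHECK_DELAY_MS = 100; // illustrative value, not from the net.tourbook sources

    // Poll size() until both queues are empty, then return. This trades a little
    // latency (one sleep interval) for not blocking this thread inside take()/poll()
    // on queues it does not actually consume.
    static void waitUntilDrained(LinkedBlockingDeque<?> thumbQueue,
                                 LinkedBlockingDeque<?> exifQueue) throws InterruptedException {
        while (thumbQueue.size() > 0 || exifQueue.size() > 0) {
            Thread.sleep(CHECK_DELAY_MS);
        }
    }
}

Since size() only reports a snapshot, the loop condition could equally be written with isEmpty(); the behaviour is the same, but the intent (waiting for the queues to drain) reads a little more directly.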

From source file:net.tourbook.photo.internal.manager.PhotoImageLoader.java

/**
 * This is called from the executor when the loading task is starting. It loads an image and
 * puts it into the image cache from where it is fetched when painted.
 *
 * <pre>
 * 
 * 2 Threads
 * =========
 * 
 * SWT
 * Photo-Image-Loader-1   IMG_1219_10.JPG   load:   1165   resize:   645   save:   110   total:   1920
 * Photo-Image-Loader-0   IMG_1219_9.JPG   load:   1165   resize:   650   save:   110   total:   1925
 * Photo-Image-Loader-1   IMG_1219.JPG   load:   566      resize:   875   save:   60   total:   1501
 * Photo-Image-Loader-0   IMG_1219_2.JPG   load:   835      resize:   326   save:   55   total:   1216
 * Photo-Image-Loader-1   IMG_1219_3.JPG   load:   1150   resize:   625   save:   55   total:   1830
 * Photo-Image-Loader-0   IMG_1219_4.JPG   load:   565      resize:   630   save:   60   total:   1255
 * Photo-Image-Loader-1   IMG_1219_5.JPG   load:   566      resize:   880   save:   60   total:   1506
 * Photo-Image-Loader-0   IMG_1219_6.JPG   load:   845      resize:   341   save:   65   total:   1251
 * Photo-Image-Loader-1   IMG_1219_7.JPG   load:   575      resize:   875   save:   50   total:   1500
 * Photo-Image-Loader-0   IMG_1219_8.JPG   load:   845      resize:   356   save:   45   total:   1246
 *                                     8277         6203      670         15150
 * 
 * 
 * AWT
 * Photo-Image-Loader-1   IMG_1219_9.JPG   load:   1005   resize:   770      save AWT:   25   load SWT:   10   total:   1810
 * Photo-Image-Loader-0   IMG_1219_10.JPG   load:   1015   resize:   1311   save AWT:   145   load SWT:   5   total:   2476
 * Photo-Image-Loader-1   IMG_1219.JPG   load:   931      resize:   755      save AWT:   65   load SWT:   5   total:   1756
 * Photo-Image-Loader-0   IMG_1219_2.JPG   load:   960      resize:   737      save AWT:   30   load SWT:   5   total:   1732
 * Photo-Image-Loader-1   IMG_1219_3.JPG   load:   1340   resize:   700      save AWT:   25   load SWT:   10   total:   2075
 * Photo-Image-Loader-0   IMG_1219_4.JPG   load:   935      resize:   751      save AWT:   25   load SWT:   10   total:   1721
 * Photo-Image-Loader-1   IMG_1219_5.JPG   load:   981      resize:   810      save AWT:   25   load SWT:   5   total:   1821
 * Photo-Image-Loader-0   IMG_1219_6.JPG   load:   970      resize:   821      save AWT:   30   load SWT:   5   total:   1826
 * Photo-Image-Loader-1   IMG_1219_7.JPG   load:   950      resize:   710      save AWT:   25   load SWT:   5   total:   1690
 * Photo-Image-Loader-0   IMG_1219_8.JPG   load:   950      resize:   706      save AWT:   30   load SWT:   5   total:   1691
 *                                     10037         8071            425            65         18598
 * 
 * 1 Thread
 * ========
 * 
 * SWT
 * Photo-Image-Loader-0   IMG_1219_10.JPG   load:   595   resize:   330   save:   70   total:   995
 * Photo-Image-Loader-0   IMG_1219.JPG   load:   561   resize:   325   save:   80   total:   966
 * Photo-Image-Loader-0   IMG_1219_2.JPG   load:   560   resize:   330   save:   50   total:   940
 * Photo-Image-Loader-0   IMG_1219_3.JPG   load:   561   resize:   325   save:   45   total:   931
 * Photo-Image-Loader-0   IMG_1219_4.JPG   load:   570   resize:   325   save:   50   total:   945
 * Photo-Image-Loader-0   IMG_1219_5.JPG   load:   570   resize:   340   save:   50   total:   960
 * Photo-Image-Loader-0   IMG_1219_6.JPG   load:   575   resize:   330   save:   45   total:   950
 * Photo-Image-Loader-0   IMG_1219_7.JPG   load:   560   resize:   335   save:   50   total:   945
 * Photo-Image-Loader-0   IMG_1219_8.JPG   load:   565   resize:   330   save:   45   total:   940
 * Photo-Image-Loader-0   IMG_1219_9.JPG   load:   565   resize:   330   save:   45   total:   940
 *                                     5682      3300      530         9512
 * 
 * AWT
 * Photo-Image-Loader-0   IMG_1219.JPG   load:   1115   resize:   790   save AWT:   45   load SWT:   5   total:   1955
 * Photo-Image-Loader-0   IMG_1219_2.JPG   load:   1070   resize:   695   save AWT:   30   load SWT:   5   total:   1800
 * Photo-Image-Loader-0   IMG_1219_3.JPG   load:   1035   resize:   695   save AWT:   25   load SWT:   5   total:   1760
 * Photo-Image-Loader-0   IMG_1219_4.JPG   load:   1040   resize:   695   save AWT:   25   load SWT:   5   total:   1765
 * Photo-Image-Loader-0   IMG_1219_5.JPG   load:   1040   resize:   695   save AWT:   25   load SWT:   110   total:   1870
 * Photo-Image-Loader-0   IMG_1219_6.JPG   load:   1050   resize:   690   save AWT:   25   load SWT:   5   total:   1770
 * Photo-Image-Loader-0   IMG_1219_7.JPG   load:   1035   resize:   690   save AWT:   145   load SWT:   5   total:   1875
 * Photo-Image-Loader-0   IMG_1219_8.JPG   load:   1032   resize:   700   save AWT:   20   load SWT:   10   total:   1762
 * Photo-Image-Loader-0   IMG_1219_9.JPG   load:   1030   resize:   700   save AWT:   25   load SWT:   5   total:   1760
 * Photo-Image-Loader-0   IMG_1219_10.JPG   load:   1032   resize:   700   save AWT:   25   load SWT:   5   total:   1762
 *                                     10479         7050         390            160         18079
 * 
 * </pre>
 * 
 * @param waitingQueueOriginal
 * @return Returns <code>true</code> when image should be loaded in HQ.
 */
public boolean loadImageThumb(final LinkedBlockingDeque<PhotoImageLoader> waitingQueueOriginal) {

    /*
     * wait until original images are loaded
     */
    try {
        while (waitingQueueOriginal.size() > 0) {
            Thread.sleep(PhotoLoadManager.DELAY_TO_CHECK_WAITING_QUEUE);
        }
    } catch (final InterruptedException e) {
        // should not happen, I hope so
    }

    boolean isLoadedImageInRequestedQuality = false;
    Image loadedExifImage = null;
    String imageKey = null;
    boolean isLoadingError = false;

    boolean isHQRequired = false;

    try {

        // 1. get image with the requested quality from the image store
        final Image storeImage = loadImageFromStore(_requestedImageQuality);
        if (storeImage != null) {

            isLoadedImageInRequestedQuality = true;

            imageKey = _requestedImageKey;
            loadedExifImage = storeImage;

        } else {

            // 2. get image from thumbnail image in the EXIF data

            //  debug (delay) image loading
            //            Thread.sleep(500);

            final IPath storeThumbImageFilePath = ThumbnailStore.getStoreImagePath(_photo, ImageQuality.THUMB);

            final Image exifThumbnail = loadImageFromEXIFThumbnail(storeThumbImageFilePath);
            if (exifThumbnail != null) {

                // EXIF image is available

                isLoadedImageInRequestedQuality = _requestedImageQuality == ImageQuality.THUMB;

                imageKey = _photo.getImageKey(ImageQuality.THUMB);
                loadedExifImage = exifThumbnail;
            }
        }

    } catch (final Exception e) {

        setStateLoadingError();

        isLoadingError = true;

    } finally {

        disposeTrackedImages();

        final boolean isImageLoaded = loadedExifImage != null;

        /*
         * keep image in cache
         */
        if (isImageLoaded) {

            final String originalImagePathName = _photo.imageFilePathName;

            // ensure metadata are loaded
            _photo.getImageMetaData();

            int imageWidth = _photo.getPhotoImageWidth();
            int imageHeight = _photo.getPhotoImageHeight();

            // check if width is set
            if (imageWidth == Integer.MIN_VALUE) {

                // photo image width/height is not set from metadata, set it from the image

                final Rectangle imageBounds = loadedExifImage.getBounds();
                imageWidth = imageBounds.width;
                imageHeight = imageBounds.height;

                // update dimension
                updateImageSize(imageWidth, imageHeight, false);
            }

            PhotoImageCache.putImage(imageKey, loadedExifImage, originalImagePathName);
        }

        /*
         * update loading state
         */
        if (isLoadedImageInRequestedQuality) {

            // image is loaded with requested quality, reset image state

            setStateUndefined();

        } else {

            // load image with higher quality

            isHQRequired = true;
        }

        // show in the UI that metadata is loaded; the loading message is displayed in another color
        final boolean isUpdateUI = _photo.getImageMetaDataRaw() != null;

        // display image in the loading callback
        _loadCallBack.callBackImageIsLoaded(isUpdateUI || isImageLoaded || isLoadingError);
    }

    return isHQRequired;
}