Example usage for java.lang.Thread.interrupted()

A list of usage examples for the java.lang.Thread.interrupted() method, collected from open-source projects.

Introduction

On this page you can find example usages of java.lang.Thread.interrupted() drawn from real open-source projects.

Prototype

public static boolean interrupted() 

Document

Tests whether the current thread has been interrupted. The interrupted status of the thread is cleared by this method: a second call will return false unless the thread is interrupted again after the first call has cleared its status.
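
Note that interrupted() is a static method that both tests and clears the interrupted status of the current thread; the instance method isInterrupted() tests it without clearing. A minimal standalone sketch of this clear-on-read behavior:

public class InterruptedDemo {
    public static void main(String[] args) {
        // Set the current thread's interrupt flag.
        Thread.currentThread().interrupt();
        // The first call observes the flag and clears it.
        System.out.println(Thread.interrupted()); // prints true
        // The second call finds the flag already cleared.
        System.out.println(Thread.interrupted()); // prints false
    }
}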

Usage

From source file:com.parse.ParseCommandCache.java

/**
 * The main function of the run loop thread. This function basically loops forever (unless pause
 * is called). On each iteration, if it hasn't been told to stop, it calls maybeRunAllCommandsNow
 * to try to execute everything queued up on disk. Then it waits until it gets signaled again by
 * lock.notify(). Usually that happens as a result of either (1) Parse being initialized, (2)
 * runEventually being called, or (3) the OS notifying that the network connection has been
 * re-established.
 */
private void runLoop() {
    if (Parse.LOG_LEVEL_INFO >= Parse.getLogLevel()) {
        log.info("Parse command cache has started processing queued commands.");
    }
    // Make sure we're marked as running.
    synchronized (runningLock) {
        if (running) {
            // Don't run this thread more than once.
            return;
        } else {
            running = true;
            runningLock.notifyAll();
        }
    }

    boolean shouldRun;
    synchronized (lock) {
        shouldRun = !(shouldStop || Thread.interrupted());
    }
    while (shouldRun) {
        synchronized (lock) {
            try {
                maybeRunAllCommandsNow(timeoutMaxRetries);
                if (!shouldStop) {
                    try {
                        /*
                         * If an unprocessed command was added, avoid waiting because we want
                         * maybeRunAllCommandsNow to run at least once to potentially process that command.
                         */
                        if (!unprocessedCommandsExist) {
                            lock.wait();
                        }
                    } catch (InterruptedException e) {
                        shouldStop = true;
                    }
                }
            } catch (Exception e) {
                if (Parse.LOG_LEVEL_ERROR >= Parse.getLogLevel()) {
                    log.log(Level.SEVERE, "saveEventually thread had an error.", e);
                }
            } finally {
                shouldRun = !shouldStop;
            }
        }
    }

    synchronized (runningLock) {
        running = false;
        runningLock.notifyAll();
    }
    if (Parse.LOG_LEVEL_INFO >= Parse.getLogLevel()) {
        log.info("saveEventually thread has stopped processing commands.");
    }
}
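
The loop above parks in lock.wait() until another thread signals it. For context, the signaling side of that protocol might look like the following hypothetical sketch (the real runEventually in ParseCommandCache does more work):

private void signalRunLoop() {
    synchronized (lock) {
        // Record that work is pending, then wake the run loop, which
        // re-checks unprocessedCommandsExist before waiting again.
        unprocessedCommandsExist = true;
        lock.notifyAll();
    }
}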

From source file:org.apache.solr.util.SolrCLI.java

/**
 * Utility function for sending HTTP GET request to Solr with built-in retry support.
 */
public static Map<String, Object> getJson(HttpClient httpClient, String getUrl, int attempts,
        boolean isFirstAttempt) throws Exception {
    Map<String, Object> json = null;
    if (attempts >= 1) {
        try {
            json = getJson(httpClient, getUrl);
        } catch (Exception exc) {
            if (exceptionIsAuthRelated(exc)) {
                throw exc;
            }
            if (--attempts > 0 && checkCommunicationError(exc)) {
                if (!isFirstAttempt) // only show the log warning after the second attempt fails
                    log.warn("Request to " + getUrl + " failed due to: " + exc.getMessage()
                            + ", sleeping for 5 seconds before re-trying the request ...");
                try {
                    Thread.sleep(5000);
                } catch (InterruptedException ie) {
                    Thread.interrupted();
                }

                // retry using recursion with one-less attempt available
                json = getJson(httpClient, getUrl, attempts, false);
            } else {
                // no more attempts or error is not retry-able
                throw exc;
            }
        }
    }

    return json;
}
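
The catch block above calls Thread.interrupted() to clear the interrupt status before retrying, so an interrupt delivered during the sleep is deliberately discarded. When the interrupt should remain visible to callers, a common alternative idiom (a sketch, not part of SolrCLI) restores the flag instead:

public class SleepUtil {
    /** Sleeps without swallowing the caller's interrupt status. */
    static void sleepPreservingInterrupt(long millis) {
        try {
            Thread.sleep(millis);
        } catch (InterruptedException ie) {
            // Re-assert the flag so code further up the stack can react.
            Thread.currentThread().interrupt();
        }
    }

    public static void main(String[] args) {
        sleepPreservingInterrupt(100);
    }
}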

From source file:com.sxit.crawler.utils.ArchiveUtils.java

/**
 * Perform checks as to whether normal execution should proceed.
 *
 * If an external interrupt is detected, throw an interrupted exception.
 * Used before anything that should not be attempted by a 'zombie' thread
 * that the Frontier/Crawl has given up on.
 * 
 * @throws InterruptedException
 */
public static void continueCheck() throws InterruptedException {
    if (Thread.interrupted()) {
        throw new InterruptedException("interrupt detected");
    }
}
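
A hypothetical caller (not part of ArchiveUtils) would invoke this check at the top of each unit of work so that an abandoned thread stops promptly:

public class CrawlWorker {
    public void processQueue(java.util.Queue<String> urls) throws InterruptedException {
        while (!urls.isEmpty()) {
            // Bail out promptly if the Frontier/Crawl has given up on this thread.
            ArchiveUtils.continueCheck();
            fetch(urls.poll());
        }
    }

    private void fetch(String url) {
        // fetch and archive the URL ...
    }
}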

From source file:fr.gael.dhus.sync.impl.ODataProductSynchronizer.java

/**
 * Retrieve new/updated products.
 * @return how many products have been retrieved.
 */
private int getNewProducts() throws InterruptedException {
    int res = 0;
    try {
        // Makes the query parameters
        Map<String, String> query_param = new HashMap<>();

        String lup_s = EdmSimpleTypeKind.DateTime.getEdmSimpleTypeInstance().valueToString(lastCreated,
                EdmLiteralKind.URI, null);
        // 'GreaterEqual' because of products with the same IngestionDate
        String filter = "IngestionDate ge " + lup_s;

        // Appends custom $filter parameter
        if (filterParam != null) {
            filter += " and (" + filterParam + ")";
        }

        query_param.put("$filter", filter);

        query_param.put("$top", String.valueOf(pageSize));

        query_param.put("$orderby", "IngestionDate");

        // Executes the query
        long delta = System.currentTimeMillis();
        ODataFeed pdf = client.readFeed(sourceCollection + "/Products", query_param);
        logODataPerf("Products", System.currentTimeMillis() - delta);

        // For each entry, creates a DataBase Object
        for (ODataEntry pdt : pdf.getEntries()) {
            Map<String, Object> props = pdt.getProperties();

            // Checks if a product with the same UUID already exists
            // (`UUID` and `PATH` have unique constraints; PATH references the UUID)
            String uuid = (String) props.get("Id");
            if (PRODUCT_SERVICE.systemGetProduct(uuid) != null) {
                // FIXME: might not be the same product
                this.lastCreated = (((GregorianCalendar) props.get("IngestionDate")).getTime());
                this.dateChanged = true;
                continue;
            }

            // Makes the product resource path
            String pdt_p = "/Products('" + uuid + "')";

            Product product = new Product();
            product.setUuid(uuid);

            // Reads the properties
            product.setIdentifier((String) props.get("Name"));
            product.setIngestionDate(((GregorianCalendar) props.get("IngestionDate")).getTime());
            product.setCreated(((GregorianCalendar) props.get("CreationDate")).getTime());
            product.setFootPrint((String) props.get("ContentGeometry"));
            product.setProcessed(Boolean.TRUE);
            product.setSize((Long) props.get("ContentLength"));

            // Reads the ContentDate complex type
            Map contentDate = (Map) props.get("ContentDate");
            product.setContentStart(((GregorianCalendar) contentDate.get("Start")).getTime());
            product.setContentEnd(((GregorianCalendar) contentDate.get("End")).getTime());

            // Sets the origin to the remote URI
            product.setOrigin(client.getServiceRoot() + pdt_p + "/$value");
            product.setPath(new URL(pdt.getMetadata().getId() + "/$value"));

            // Sets the download path to LocalPath (if LocalPaths are exposed)
            if (this.remoteIncoming != null && !this.copyProduct) {
                String path = (String) props.get("LocalPath");
                if (path != null && !path.isEmpty()) {
                    Map<String, String> checksum = (Map) props.get("Checksum");

                    Product.Download d = new Product.Download();
                    d.setPath(Paths.get(this.remoteIncoming, path).toString());
                    d.setSize(product.getSize());
                    d.setType((String) props.get("ContentType"));
                    d.setChecksums(Collections.singletonMap(checksum.get(V1Model.ALGORITHM),
                            checksum.get(V1Model.VALUE)));
                    product.setDownload(d);

                    File f = new File(d.getPath());
                    if (!f.exists()) {
                        // The incoming path is probably wrong
                        // Throws an exception to notify the admin about this issue
                        throw new RuntimeException("ODataSynchronizer: Local file '" + path
                                + "' not found in remote incoming '" + this.remoteIncoming + '\'');
                    }
                    product.setPath(new URL("file://" + d.getPath()));
                } else {
                    throw new RuntimeException("RemoteIncoming is set"
                            + " but the LocalPath property is missing in remote products");
                }
            }

            // Retrieves the Product Class
            delta = System.currentTimeMillis();
            ODataEntry pdt_class_e = client.readEntry(pdt_p + "/Class", null);
            logODataPerf(pdt_p + "/Class", System.currentTimeMillis() - delta);

            Map<String, Object> pdt_class_pm = pdt_class_e.getProperties();
            String pdt_class = (String) pdt_class_pm.get("Uri");
            product.setItemClass(pdt_class);

            // Retrieves Metadata Indexes (aka Attributes on odata)
            delta = System.currentTimeMillis();
            ODataFeed mif = client.readFeed(pdt_p + "/Attributes", null);
            logODataPerf(pdt_p + "/Attributes", System.currentTimeMillis() - delta);

            List<MetadataIndex> mi_l = new ArrayList<>(mif.getEntries().size());
            for (ODataEntry mie : mif.getEntries()) {
                props = mie.getProperties();
                MetadataIndex mi = new MetadataIndex();
                String mi_name = (String) props.get("Name");
                mi.setName(mi_name);
                mi.setType((String) props.get("ContentType"));
                mi.setValue((String) props.get("Value"));
                MetadataType mt = METADATA_TYPE_SERVICE.getMetadataTypeByName(pdt_class, mi_name);
                if (mt != null) {
                    mi.setCategory(mt.getCategory());
                    if (mt.getSolrField() != null) {
                        mi.setQueryable(mt.getSolrField().getName());
                    }
                } else if (mi_name.equals("Identifier")) {
                    mi.setCategory("");
                    mi.setQueryable("identifier");
                } else if (mi_name.equals("Ingestion Date")) {
                    mi.setCategory("product");
                    mi.setQueryable("ingestionDate");
                } else {
                    mi.setCategory("");
                }
                mi_l.add(mi);
            }
            product.setIndexes(mi_l);

            // Retrieves subProducts
            delta = System.currentTimeMillis();
            ODataFeed subp = client.readFeed(pdt_p + "/Products", null);
            logODataPerf(pdt_p + "/Products", System.currentTimeMillis() - delta);

            for (ODataEntry subpe : subp.getEntries()) {
                String id = (String) subpe.getProperties().get("Id");
                Long content_len = (Long) subpe.getProperties().get("ContentLength");

                String path = (String) subpe.getProperties().get("LocalPath");
                if (this.remoteIncoming != null && !this.copyProduct && path != null && !path.isEmpty()) {
                    path = Paths.get(this.remoteIncoming, path).toString();
                } else {
                    path = client.getServiceRoot() + pdt_p + "/Products('" + subpe.getProperties().get("Id")
                            + "')/$value";
                }

                // Retrieves the Quicklook
                if (id.equals("Quicklook")) {
                    product.setQuicklookSize(content_len);
                    product.setQuicklookPath(path);
                }

                // Retrieves the Thumbnail
                else if (id.equals("Thumbnail")) {
                    product.setThumbnailSize(content_len);
                    product.setThumbnailPath(path);
                }
            }

            // `processed` must be set to TRUE
            product.setProcessed(Boolean.TRUE);

            // Downloads the product if required
            if (this.copyProduct) {
                downloadProduct(product);
            }

            // Stores `product` in the database
            product = PRODUCT_SERVICE.addProduct(product);
            product.setIndexes(mi_l); // DELME lazy loading not working atm ...

            // Sets the target collection both in the DB and Solr
            if (this.targetCollection != null) {
                try {
                    COLLECTION_SERVICE.systemAddProduct(this.targetCollection, product.getId(), false);
                } catch (HibernateException e) {
                    LOGGER.error("Synchronizer#" + getId() + " Failed to set collection#"
                            + this.targetCollection + " for product " + product.getIdentifier(), e);
                    // Reverting ...
                    PRODUCT_SERVICE.systemDeleteProduct(product.getId());
                    throw e;
                } catch (Exception e) {
                    LOGGER.error("Synchronizer#" + getId() + " Failed to update product "
                            + product.getIdentifier() + " in Solr's index", e);
                }
            }

            // Stores `product` in the index
            try {
                delta = System.currentTimeMillis();
                SEARCH_SERVICE.index(product);
                LOGGER.debug("Synchronizer#" + getId() + " indexed product " + product.getIdentifier() + " in "
                        + (System.currentTimeMillis() - delta) + "ms");
            } catch (Exception e) {
                // Solr errors are not considered fatal
                LOGGER.error("Synchronizer#" + getId() + " Failed to index product " + product.getIdentifier()
                        + " in Solr's index", e);
            }

            this.lastCreated = product.getIngestionDate();
            this.dateChanged = true;

            LOGGER.info("Synchronizer#" + getId() + " Product " + product.getIdentifier() + " ("
                    + product.getSize() + " bytes compressed) " + "successfully synchronized from "
                    + this.client.getServiceRoot());

            res++;

            // Checks if we have to abandon the current pass
            if (Thread.interrupted()) {
                throw new InterruptedException();
            }
        }
    } catch (IOException | ODataException ex) {
        LOGGER.error("OData failure", ex);
    } finally {
        // Save the ingestionDate of the last created Product
        this.syncConf.setConfig("last_created", String.valueOf(this.lastCreated.getTime()));
    }

    return res;
}

From source file:org.apache.tinkerpop.gremlin.server.op.traversal.TraversalOpProcessor.java

protected void handleIterator(final Context context, final Iterator itty, final Graph graph)
        throws TimeoutException, InterruptedException {
    final ChannelHandlerContext ctx = context.getChannelHandlerContext();
    final RequestMessage msg = context.getRequestMessage();
    final Settings settings = context.getSettings();
    final MessageSerializer serializer = ctx.channel().attr(StateKey.SERIALIZER).get();
    final boolean useBinary = ctx.channel().attr(StateKey.USE_BINARY).get();
    boolean warnOnce = false;

    // we have an empty iterator - happens on stuff like: g.V().iterate()
    if (!itty.hasNext()) {
        // as there is nothing left to iterate if we are transaction managed then we should execute a
        // commit here before we send back a NO_CONTENT which implies success
        onTraversalSuccess(graph, context);
        ctx.writeAndFlush(ResponseMessage.build(msg).code(ResponseStatusCode.NO_CONTENT).create());
        return;
    }

    // timer for the total serialization time
    final StopWatch stopWatch = new StopWatch();
    stopWatch.start();

    // the batch size can be overridden by the request
    final int resultIterationBatchSize = (Integer) msg.optionalArgs(Tokens.ARGS_BATCH_SIZE)
            .orElse(settings.resultIterationBatchSize);
    List<Object> aggregate = new ArrayList<>(resultIterationBatchSize);

    // use an external control to manage the loop as opposed to just checking hasNext() in the while.  this
    // prevents situations where auto transactions create a new transaction after calls to commit() within
    // the loop on calls to hasNext().
    boolean hasMore = itty.hasNext();

    while (hasMore) {
        if (Thread.interrupted())
            throw new InterruptedException();

        // check if an implementation needs to force flush the aggregated results before the iteration batch
        // size is reached.
        final boolean forceFlush = isForceFlushed(ctx, msg, itty);

        // have to check the aggregate size because it is possible that the channel is not writeable (below)
        // so iterating next() if the message is not written and flushed would bump the aggregate size beyond
        // the expected resultIterationBatchSize.  Total serialization time for the response remains in
        // effect so if the client is "slow" it may simply timeout.
        //
        // there is a need to check hasNext() on the iterator because if the channel is not writeable the
        // previous pass through the while loop will have next()'d the iterator and if it is "done" then a
        // NoSuchElementException will raise its head. also need a check to ensure that this iteration doesn't
        // require a forced flush which can be forced by sub-classes.
        //
        // this could be placed inside the isWriteable() portion of the if-then below but it seems better to
        // allow iteration to continue into a batch if that is possible rather than just doing nothing at all
        // while waiting for the client to catch up
        if (aggregate.size() < resultIterationBatchSize && itty.hasNext() && !forceFlush)
            aggregate.add(itty.next());

        // send back a page of results if batch size is met or if it's the end of the results being iterated.
        // also check writeability of the channel to prevent OOME for slow clients.
        if (ctx.channel().isWritable()) {
            if (forceFlush || aggregate.size() == resultIterationBatchSize || !itty.hasNext()) {
                final ResponseStatusCode code = itty.hasNext() ? ResponseStatusCode.PARTIAL_CONTENT
                        : ResponseStatusCode.SUCCESS;

                // serialize here because in sessionless requests the serialization must occur in the same
                // thread as the eval.  as eval occurs in the GremlinExecutor there's no way to get back to the
                // thread that processed the eval of the script so, we have to push serialization down into that
                Frame frame = null;
                try {
                    frame = makeFrame(ctx, msg, serializer, useBinary, aggregate, code,
                            generateMetaData(ctx, msg, code, itty));
                } catch (Exception ex) {
                    // a frame may use a Bytebuf which is a countable release - if it does not get written
                    // downstream it needs to be released here
                    if (frame != null)
                        frame.tryRelease();

                    // exception is handled in makeFrame() - serialization error gets written back to driver
                    // at that point
                    onError(graph, context);
                    break;
                }

                try {
                    // only need to reset the aggregation list if there's more stuff to write
                    if (itty.hasNext())
                        aggregate = new ArrayList<>(resultIterationBatchSize);
                    else {
                        // iteration and serialization are both complete which means this finished successfully. note that
                        // errors internal to script eval or timeout will roll back given GremlinServer's global configurations.
                        // local errors will get rolled back below because the exceptions aren't thrown in those cases to be
                        // caught by the GremlinExecutor for global rollback logic. this only needs to be committed if
                        // there are no more items to iterate and serialization is complete
                        onTraversalSuccess(graph, context);

                        // exit the result iteration loop as there are no more results left.  using this external control
                        // because of the above commit.  some graphs may open a new transaction on the call to
                        // hasNext()
                        hasMore = false;
                    }
                } catch (Exception ex) {
                    // a frame may use a Bytebuf which is a countable release - if it does not get written
                    // downstream it needs to be released here
                    if (frame != null)
                        frame.tryRelease();
                    throw ex;
                }

                if (!itty.hasNext())
                    iterateComplete(ctx, msg, itty);

                // the flush is called after the commit has potentially occurred.  in this way, if a commit was
                // required then it will be 100% complete before the client receives it. the "frame" at this point
                // should have completely detached objects from the transaction (i.e. serialization has occurred)
                // so a new one should not be opened on the flush down the netty pipeline
                ctx.writeAndFlush(frame);
            }
        } else {
            // don't keep triggering this warning over and over again for the same request
            if (!warnOnce) {
                logger.warn(
                        "Pausing response writing as writeBufferHighWaterMark exceeded on {} - writing will continue once client has caught up",
                        msg);
                warnOnce = true;
            }

            // since the client is lagging we can hold here for a period of time for the client to catch up.
            // this isn't blocking the IO thread - just a worker.
            TimeUnit.MILLISECONDS.sleep(10);
        }

        stopWatch.split();
        if (settings.serializedResponseTimeout > 0
                && stopWatch.getSplitTime() > settings.serializedResponseTimeout) {
            final String timeoutMsg = String.format(
                    "Serialization of the entire response exceeded the 'serializeResponseTimeout' setting %s",
                    warnOnce ? "[Gremlin Server paused writes to client as messages were not being consumed quickly enough]"
                            : "");
            throw new TimeoutException(timeoutMsg.trim());
        }

        stopWatch.unsplit();
    }

    stopWatch.stop();
}

From source file:io.requery.android.database.sqlite.SQLiteConnectionPool.java

private SQLiteConnection waitForConnection(String sql, int connectionFlags,
        CancellationSignal cancellationSignal) {
    final boolean wantPrimaryConnection = (connectionFlags & CONNECTION_FLAG_PRIMARY_CONNECTION_AFFINITY) != 0;

    final ConnectionWaiter waiter;
    final int nonce;
    synchronized (mLock) {
        throwIfClosedLocked();

        // Abort if canceled.
        if (cancellationSignal != null) {
            cancellationSignal.throwIfCanceled();
        }

        // Try to acquire a connection.
        SQLiteConnection connection = null;
        if (!wantPrimaryConnection) {
            connection = tryAcquireNonPrimaryConnectionLocked(sql, connectionFlags); // might throw
        }
        if (connection == null) {
            connection = tryAcquirePrimaryConnectionLocked(connectionFlags); // might throw
        }
        if (connection != null) {
            return connection;
        }

        // No connections available.  Enqueue a waiter in priority order.
        final int priority = getPriority(connectionFlags);
        final long startTime = SystemClock.uptimeMillis();
        waiter = obtainConnectionWaiterLocked(Thread.currentThread(), startTime, priority,
                wantPrimaryConnection, sql, connectionFlags);
        ConnectionWaiter predecessor = null;
        ConnectionWaiter successor = mConnectionWaiterQueue;
        while (successor != null) {
            if (priority > successor.mPriority) {
                waiter.mNext = successor;
                break;
            }
            predecessor = successor;
            successor = successor.mNext;
        }
        if (predecessor != null) {
            predecessor.mNext = waiter;
        } else {
            mConnectionWaiterQueue = waiter;
        }

        nonce = waiter.mNonce;
    }

    // Set up the cancellation listener.
    if (cancellationSignal != null) {
        cancellationSignal.setOnCancelListener(new CancellationSignal.OnCancelListener() {
            @Override
            public void onCancel() {
                synchronized (mLock) {
                    if (waiter.mNonce == nonce) {
                        cancelConnectionWaiterLocked(waiter);
                    }
                }
            }
        });
    }
    try {
        // Park the thread until a connection is assigned or the pool is closed.
        // Rethrow an exception from the wait, if we got one.
        long busyTimeoutMillis = CONNECTION_POOL_BUSY_MILLIS;
        long nextBusyTimeoutTime = waiter.mStartTime + busyTimeoutMillis;
        for (;;) {
            // Detect and recover from connection leaks.
            if (mConnectionLeaked.compareAndSet(true, false)) {
                synchronized (mLock) {
                    wakeConnectionWaitersLocked();
                }
            }

            // Wait to be unparked (may already have happened), a timeout, or interruption.
            LockSupport.parkNanos(this, busyTimeoutMillis * 1000000L);

            // Clear the interrupted flag, just in case.
            Thread.interrupted();

            // Check whether we are done waiting yet.
            synchronized (mLock) {
                throwIfClosedLocked();

                final SQLiteConnection connection = waiter.mAssignedConnection;
                final RuntimeException ex = waiter.mException;
                if (connection != null || ex != null) {
                    recycleConnectionWaiterLocked(waiter);
                    if (connection != null) {
                        return connection;
                    }
                    throw ex; // rethrow!
                }

                final long now = SystemClock.uptimeMillis();
                if (now < nextBusyTimeoutTime) {
                    // Park again for the time remaining before the busy timeout elapses.
                    busyTimeoutMillis = nextBusyTimeoutTime - now;
                } else {
                    logConnectionPoolBusyLocked(now - waiter.mStartTime, connectionFlags);
                    busyTimeoutMillis = CONNECTION_POOL_BUSY_MILLIS;
                    nextBusyTimeoutTime = now + busyTimeoutMillis;
                }
            }
        }
    } finally {
        // Remove the cancellation listener.
        if (cancellationSignal != null) {
            cancellationSignal.setOnCancelListener(null);
        }
    }
}

From source file:org.apache.solr.common.cloud.SolrZkClient.java

/**
 * Check to see if a Throwable is an InterruptedException, and if it is, set the thread interrupt flag
 * @param e the Throwable
 * @return the Throwable
 */
public static Throwable checkInterrupted(Throwable e) {
    if (e instanceof InterruptedException) {
        // Re-assert the interrupt flag; Thread.interrupted() would test and
        // clear it, which is the opposite of the documented intent here.
        Thread.currentThread().interrupt();
    }
    return e;
}
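
A hypothetical call site (names are illustrative) passes a caught exception through this helper before wrapping and rethrowing it, so the interrupt status survives the translation:

public class ZkCaller {
    void run() {
        try {
            doZkOperation();
        } catch (Exception e) {
            // checkInterrupted re-asserts the interrupt flag if e is an
            // InterruptedException, then returns e for use as the cause.
            throw new RuntimeException("ZooKeeper operation failed",
                    SolrZkClient.checkInterrupted(e));
        }
    }

    private void doZkOperation() throws Exception {
        // talk to ZooKeeper ...
    }
}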

From source file:com.cloudbees.jenkins.plugins.bitbucket.server.client.BitbucketServerAPIClient.java

private <V> List<V> getResources(UriTemplate template, Class<? extends PagedApiResponse<V>> clazz)
        throws IOException, InterruptedException {
    List<V> resources = new ArrayList<>();

    PagedApiResponse<V> page;
    Integer pageNumber = 0;
    Integer limit = DEFAULT_PAGE_LIMIT;
    do {
        if (Thread.interrupted()) {
            throw new InterruptedException();
        }
        String url = template //
                .set("start", pageNumber) //
                .set("limit", limit) //
                .expand();
        String response = getRequest(url);
        try {
            page = JsonParser.toJava(response, clazz);
        } catch (IOException e) {
            throw new IOException("I/O error when parsing response from URL: " + url, e);
        }
        resources.addAll(page.getValues());

        limit = page.getLimit();
        pageNumber = page.getNextPageStart();
    } while (!page.isLastPage());

    return resources;
}

From source file:com.amazon.alexa.avs.AVSAudioPlayer.java

/**
 * Play the alarm sound.
 */
public void startAlert() {
    if (!isAlarming()) {
        interruptContent();
        if (isSpeaking()) {
            // alerts are in the background when Alexa is speaking
            alertState = AlertState.INTERRUPTED;
        } else {
            alertState = AlertState.PLAYING;

            alarmThread = new Thread() {
                @Override
                public void run() {
                    while (isAlarming() && !isSpeaking()) {
                        if (Thread.interrupted()) {
                            break;
                        }
                        InputStream inpStream = resLoader.getResourceAsStream("res/alarm.mp3");
                        synchronized (playLock) {
                            try {
                                play(inpStream);
                                while (inpStream.available() > 0) {
                                    playLock.wait(TIMEOUT_IN_MS);
                                }
                            } catch (InterruptedException | IOException e) {
                                // Ignore: the enclosing while loop re-checks
                                // isAlarming() and isSpeaking() and exits when
                                // the alert is stopped.
                            }
                        }
                    }
                }
            };
            alarmThread.start();
        }
    }
}

From source file:fr.gael.dhus.sync.impl.ODataProductSynchronizer.java

@Override
public boolean synchronize() throws InterruptedException {
    int retrieved = 0, updated = 0, deleted = 0;

    LOGGER.info("Synchronizer#" + getId() + " started");
    try {
        retrieved = getNewProducts();
        if (Thread.interrupted()) {
            throw new InterruptedException();
        }

        updated = getUpdatedProducts();
        if (Thread.interrupted()) {
            throw new InterruptedException();
        }

        deleted = getDeletedProducts();
    } catch (LockAcquisitionException | CannotAcquireLockException e) {
        throw new InterruptedException(e.getMessage());
    } finally {
        // Logs a summary of what it has done this session
        StringBuilder sb = new StringBuilder("Synchronizer#");
        sb.append(getId()).append(" done:    ");
        sb.append(retrieved).append(" new Products,    ");
        sb.append(updated).append(" updated Products,    ");
        sb.append(deleted).append(" deleted Products");
        sb.append("    from ").append(this.client.getServiceRoot());
        LOGGER.info(sb.toString());

        // Writes the database only if there is a modification
        if (this.dateChanged) {
            SYNC_SERVICE.saveSynchronizer(this);
            this.dateChanged = false;
        }
    }

    return retrieved < pageSize && updated < pageSize && deleted < pageSize;
}