Example usage for java.lang Thread interrupted

List of usage examples for java.lang Thread interrupted

Introduction

This page collects usage examples for java.lang.Thread.interrupted() from a range of open-source projects.

Prototype

public static boolean interrupted() 

Document

Tests whether the current thread has been interrupted. The interrupted status of the thread is cleared by this method.
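
Note the side effect: unlike Thread.currentThread().isInterrupted(), interrupted() clears the thread's interrupt status, so two back-to-back calls can return different values. A minimal sketch (not taken from any of the sources below) illustrating this:

public class InterruptedDemo {
    public static void main(String[] args) {
        // Set the current thread's interrupt status.
        Thread.currentThread().interrupt();

        // The first call reports true and clears the status...
        System.out.println(Thread.interrupted()); // true

        // ...so the second call reports false.
        System.out.println(Thread.interrupted()); // false
    }
}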

Usage

From source file:io.subutai.plugin.appscale.rest.RestServiceImpl.java

private OperationState waitUntilOperationFinish(UUID uuid) {
    // OperationState state = OperationState.RUNNING;
    OperationState state = null;
    long start = System.currentTimeMillis();
    while (!Thread.interrupted()) {
        TrackerOperationView po = tracker.getTrackerOperation(AppScaleConfig.PRODUCT_NAME, uuid);
        // Check for null before touching the operation view.
        if (po != null) {
            LOG.info("*********\n" + po.getState() + "\n********");
            if (po.getState() != OperationState.RUNNING) {
                state = po.getState();
                break;
            }
        }
        try {
            Thread.sleep(1000);
        } catch (InterruptedException ex) {
            break;
        }
        // Bail out after 100 minutes (6000 * 1000 ms).
        if (System.currentTimeMillis() - start > (6000 * 1000)) {
            break;
        }
    }

    return state;
}

From source file:com.yahoo.ads.pb.kafka.KafkaSimpleConsumer.java

private long getOffset(boolean earliest) throws InterruptedException {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partitionId);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(
            earliest ? kafka.api.OffsetRequest.EarliestTime() : kafka.api.OffsetRequest.LatestTime(), 1));
    OffsetRequest request = new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientId);
    OffsetResponse response = null;
    try {
        response = consumer.getOffsetsBefore(request);
    } catch (Exception e) {
        // e could be an instance of ClosedByInterruptException as SimpleConsumer.getOffsetsBefore uses nio
        if (Thread.interrupted()) {
            logger.info("catch exception of {} with interrupted in getOffset({}) for {} - {}",
                    e.getClass().getName(), earliest, topic, partitionId);

            throw new InterruptedException();
        }

        logger.error("caught exception in getOffsetsBefore {} - {}", topic, partitionId, e);
        return -1;
    }
    if (response.hasError()) {
        logger.error("error fetching data Offset from the Broker {}. reason: {}", leaderBroker.host(),
                response.errorCode(topic, partitionId));
        return -1;
    }
    long[] offsets = response.offsets(topic, partitionId);
    return earliest ? offsets[0] : offsets[offsets.length - 1];
}
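
The Thread.interrupted() check above is needed because SimpleConsumer performs its I/O through java.nio: interrupting a thread blocked on an interruptible channel closes the channel and raises ClosedByInterruptException, while the thread's interrupt status stays set. A minimal, self-contained sketch of the same idiom, with hypothetical names standing in for the Kafka calls:

import java.io.IOException;

public class InterruptAwareFetch {
    // Hypothetical stand-in for a blocking call that fails when its thread is
    // interrupted, the way a java.nio read fails with ClosedByInterruptException
    // while leaving the interrupt status set.
    static long fetchOffset() throws IOException {
        if (Thread.currentThread().isInterrupted()) {
            throw new IOException("channel closed by interrupt");
        }
        return 42L;
    }

    static long getOffset() throws InterruptedException {
        try {
            return fetchOffset();
        } catch (Exception e) {
            // Was the failure caused by an interrupt? interrupted() also
            // clears the flag before we surface the InterruptedException.
            if (Thread.interrupted()) {
                throw new InterruptedException();
            }
            return -1L; // a genuine failure; return a sentinel as above
        }
    }

    public static void main(String[] args) {
        Thread.currentThread().interrupt();
        try {
            getOffset();
        } catch (InterruptedException e) {
            System.out.println("interrupt detected and re-thrown");
        }
    }
}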

From source file:org.archive.modules.fetcher.BdbCookieStoreTest.java

public void testConcurrentLoad() throws IOException, InterruptedException {
    bdbCookieStore().clear();

    Runnable runnable = new Runnable() {
        @Override
        public void run() {
            try {
                while (!Thread.interrupted()) {
                    Collection<Cookie> cookies = bdbCookieStore().getCookies();
                    // Copy the live collection to force a full iteration while
                    // other threads are mutating the store.
                    new ArrayList<Cookie>(cookies);
                    BasicClientCookie cookie = new BasicClientCookie(UUID.randomUUID().toString(),
                            UUID.randomUUID().toString());
                    bdbCookieStore().addCookie(cookie);
                }
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    };

    Thread[] threads = new Thread[200];
    for (int i = 0; i < threads.length; i++) {
        threads[i] = new Thread(runnable);
        threads[i].setName("cookie-load-test-" + i);
        threads[i].start();
    }

    Thread.sleep(5000);

    for (int i = 0; i < threads.length; i++) {
        threads[i].interrupt();
    }
    for (int i = 0; i < threads.length; i++) {
        threads[i].join();
    }

    assertTrue(bdbCookieStore().getCookies().size() > 3000);
    assertCookieListsEquivalent(bdbCookieStore().getCookies(), bdbCookieStore().getCookiesBypassCache());
}

From source file:ch.epfl.scapetoad.CartogramGastner.java

/**
 * Integrates the non-linear Volterra equation.
 * @return true if the displacement field has converged, false otherwise.
 */
private boolean integrateNonlinearVolterraEquation() throws InterruptedException {
    boolean stepsize_ok;
    double h, maxchange = this.INFTY, t, vxplus, vyplus, xguess, yguess;
    int i, j, k;

    do {
        this.initcond();
        this.nblurs++;
        //if (this.minpop < 0.0)
        //   double sigmaVal = SIGMA * Math.pow(this.SIGMAFAC, this.nblurs);

    } while (this.minpop < 0.0);

    h = HINITIAL;
    t = 0;

    for (j = 0; j <= this.lx; j++) {
        for (k = 0; k <= this.ly; k++) {
            this.x[j][k] = j;
            this.y[j][k] = k;
        }
    }

    this.calculateVelocityField(0.0);

    for (j = 0; j <= this.lx; j++) {
        for (k = 0; k <= this.ly; k++) {
            vx[j][k] = gridvx[j][k];
            vy[j][k] = gridvy[j][k];
        }
    }

    i = 1;

    do {
        // Stop if the user has interrupted the process.
        if (Thread.interrupted()) {
            // Raise an InterruptedException.
            throw new InterruptedException("Computation has been interrupted by the user.");
        }

        stepsize_ok = true;
        this.calculateVelocityField(t + h);

        for (j = 0; j <= this.lx; j++) {
            for (k = 0; k <= this.ly; k++) {

                double xinterpol = this.x[j][k] + (h * this.vx[j][k]);
                double yinterpol = this.y[j][k] + (h * this.vy[j][k]);
                if (xinterpol < 0.0 || yinterpol < 0.0) {
                    if (AppContext.DEBUG)
                        System.out.println("[ERROR] Cartogram out of bounds !");
                }

                vxplus = this.interpolateBilinear(this.gridvx, xinterpol, yinterpol);

                vyplus = this.interpolateBilinear(this.gridvy, xinterpol, yinterpol);

                xguess = this.x[j][k] + (0.5 * h * (this.vx[j][k] + vxplus));

                yguess = this.y[j][k] + (0.5 * h * (this.vy[j][k] + vyplus));

                double[] ptappr = new double[2];
                ptappr[0] = this.xappr[j][k];
                ptappr[1] = this.yappr[j][k];
                boolean solving_ok = this.newt2(h, ptappr, xguess, yguess, j, k);
                this.xappr[j][k] = ptappr[0];
                this.yappr[j][k] = ptappr[1];
                if (!solving_ok)
                    return false;

                if (((xguess - this.xappr[j][k]) * (xguess - this.xappr[j][k]))
                        + ((yguess - this.yappr[j][k]) * (yguess - this.yappr[j][k])) > this.TOLINT) {
                    if (h < this.MINH) {
                        //double sigmaVal = this.SIGMA * Math.pow(
                        //   this.SIGMAFAC, this.nblurs);
                        this.nblurs++;
                        return false;
                    }
                    h = h / 10;
                    stepsize_ok = false;
                    break;
                }

            } // for (k = 0; k <= this.ly; k++)

        } // for (j = 0; j <= this.lx; j++)

        if (!stepsize_ok) {
            continue;
        } else {
            t += h;
            maxchange = 0.0;

            for (j = 0; j <= this.lx; j++) {
                for (k = 0; k <= this.ly; k++) {
                    if (((this.x[j][k] - this.xappr[j][k]) * (this.x[j][k] - this.xappr[j][k]))
                            + ((this.y[j][k] - this.yappr[j][k])
                                    * (this.y[j][k] - this.yappr[j][k])) > maxchange) {
                        maxchange = ((this.x[j][k] - this.xappr[j][k]) * (this.x[j][k] - this.xappr[j][k]))
                                + ((this.y[j][k] - this.yappr[j][k]) * (this.y[j][k] - this.yappr[j][k]));
                    }

                    this.x[j][k] = this.xappr[j][k];
                    this.y[j][k] = this.yappr[j][k];
                    this.vx[j][k] = this.interpolateBilinear(this.gridvx, this.xappr[j][k], this.yappr[j][k]);
                    this.vy[j][k] = this.interpolateBilinear(this.gridvy, this.xappr[j][k], this.yappr[j][k]);

                } // for (k=0; k<=ly; k++)

            } // for (j = 0; j <= this.lx; j++)

        }

        h = 1.2 * h;

        int progress = mProgressEnd;
        if (i < 200)
            progress = mProgressStart + (i * ((mProgressEnd - mProgressStart) / 200));
        mCartogramWizard.updateRunningStatus(progress, mProgressText, "Doing time step " + i);

        i++;

    } while (i < this.MAXINTSTEPS && t < this.TIMELIMIT && maxchange > this.CONVERGENCE);

    return true;

}

From source file:com.feedzai.commons.sql.abstraction.engine.AbstractDatabaseEngine.java

/**
 * Checks whether the connection is available and returns it. If the connection is down, it tries to
 * reconnect, up to the number of retries and with the delay configured in the properties.
 *
 * @return The connection.
 * @throws RetryLimitExceededException If the retry limit is exceeded.
 * @throws InterruptedException        If the thread is interrupted during reconnection.
 */
@Override
public synchronized Connection getConnection()
        throws RetryLimitExceededException, InterruptedException, RecoveryException {

    if (!properties.isReconnectOnLost()) {
        return conn;
    }

    int retries = 1;

    if (checkConnection(conn)) {
        return conn;
    }

    logger.debug("Connection is down.");

    // reconnect.
    while (true) {
        if (Thread.interrupted()) {
            throw new InterruptedException();
        }

        try {

            if (maximumNumberOfTries > 0) {
                if (retries == (maximumNumberOfTries / 2) || retries == (maximumNumberOfTries - 1)) {
                    logger.error("The connection to the database was lost. Remaining retries: {}",
                            (maximumNumberOfTries - retries));
                    notificationLogger.error("The connection to the database was lost. Remaining retries: {}",
                            (maximumNumberOfTries - retries));
                } else {
                    logger.debug("Retrying ({}/{}) in {} seconds...", new Object[] { retries,
                            maximumNumberOfTries, TimeUnit.MILLISECONDS.toSeconds(retryInterval) });
                }
            } else {
                logger.debug("Retry number {} in {} seconds...", retries,
                        TimeUnit.MILLISECONDS.toSeconds(retryInterval));
                if (retries % 10 == 0) {
                    notificationLogger.error("The connection to the database was lost. Retry number {} in {}",
                            retries, TimeUnit.MILLISECONDS.toSeconds(retryInterval));
                }
            }
            Thread.sleep(retryInterval);
            connect(); // this sets the new object.

            // recover state.

            try {
                recover();
            } catch (Exception e) {
                throw new RecoveryException("Error recovering from lost connection.", e);
            }

            // return it.
            return conn;
        } catch (SQLException ex) {

            logger.debug("Connection failed.");

            if (maximumNumberOfTries > 0 && retries > maximumNumberOfTries) {
                throw new RetryLimitExceededException("Maximum number of retries for a connection exceeded.",
                        ex);
            }

            retries++;
        } catch (Exception e) {
            logger.error("An unexpected error occurred.", e);
        }
    }
}

From source file:org.apache.kudu.client.MiniKdc.java

/**
 * Waits for the process to exit, checking the return code. Any output to the
 * process' stdout is optionally logged to SLF4J.
 * @param process the process to check
 * @param name the name of the process
 * @param log whether to log the process' stdout.
 */
private static void checkReturnCode(Process process, String name, boolean log) throws IOException {
    int ret;
    try {
        ret = process.waitFor();
        if (log) {
            // Reading the output *after* waiting for the process to close can deadlock
            // if the process overwhelms the output buffer, however none of the krb5
            // utilities are known to do that.
            try (BufferedReader in = new BufferedReader(new InputStreamReader(process.getInputStream()))) {
                String line;
                while ((line = in.readLine()) != null) {
                    LOG.debug(line);
                }
            }
        }
    } catch (InterruptedException e) {
        // Clear the interrupt status (if still set) before translating the
        // interruption into an IOException.
        Thread.interrupted();
        throw new IOException(String.format("process '%s' interrupted", name));
    }
    if (ret != 0) {
        throw new IOException(String.format("process '%s' failed: %s", name, ret));
    }
}
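
In this example Thread.interrupted() deliberately clears the interrupt status before the interruption is translated into an IOException. When callers further up the stack should still be able to observe the interruption, a common variant restores the flag instead; a sketch of that alternative (class and method names are illustrative):

import java.io.IOException;

class ProcessWaiter {
    // Illustrative variant: re-assert the interrupt status instead of
    // discarding it, so code above this frame can still detect it.
    static int waitRestoringInterrupt(Process process) throws IOException {
        try {
            return process.waitFor();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the flag
            throw new IOException("interrupted while waiting for process", e);
        }
    }
}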

From source file:com.btoddb.fastpersitentqueue.InMemorySegmentMgrTest.java

@Test
public void testThreading() throws IOException, ExecutionException {
    final int entrySize = 1000;
    final int numEntries = 3000;
    final int numPushers = 3;
    int numPoppers = 3;

    final Random pushRand = new Random(1000L);
    final Random popRand = new Random(1000000L);
    final AtomicInteger pusherFinishCount = new AtomicInteger();
    final AtomicInteger numPops = new AtomicInteger();
    final AtomicLong pushSum = new AtomicLong();
    final AtomicLong popSum = new AtomicLong();

    mgr.setMaxSegmentSizeInBytes(10000);
    mgr.init();

    ExecutorService execSrvc = Executors.newFixedThreadPool(numPushers + numPoppers);

    Set<Future> futures = new HashSet<Future>();

    // start pushing
    for (int i = 0; i < numPushers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                for (int i = 0; i < numEntries; i++) {
                    try {
                        long x = idGen.incrementAndGet();
                        pushSum.addAndGet(x);
                        FpqEntry entry = new FpqEntry(x, new byte[entrySize]);
                        mgr.push(entry);
                        if (x % 500 == 0) {
                            System.out.println("pushed ID = " + x);
                        }
                        Thread.sleep(pushRand.nextInt(5));
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
                pusherFinishCount.incrementAndGet();
            }
        });
        futures.add(future);
    }

    // start popping
    for (int i = 0; i < numPoppers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                while (pusherFinishCount.get() < numPushers || !mgr.isEmpty()) {
                    try {
                        FpqEntry entry;
                        while (null != (entry = mgr.pop())) {
                            if (entry.getId() % 500 == 0) {
                                System.out.println("popped ID = " + entry.getId());
                            }

                            popSum.addAndGet(entry.getId());
                            numPops.incrementAndGet();
                            Thread.sleep(popRand.nextInt(5));
                        }
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
            }
        });
        futures.add(future);
    }

    boolean finished = false;
    while (!finished) {
        try {
            for (Future f : futures) {
                f.get();
            }
            finished = true;
        } catch (InterruptedException e) {
            // ignore
            Thread.interrupted();
        }
    }

    assertThat(numPops.get(), is(numEntries * numPushers));
    assertThat(popSum.get(), is(pushSum.get()));
    assertThat(mgr.getNumberOfEntries(), is(0L));
    assertThat(mgr.getNumberOfActiveSegments(), is(1));
    assertThat(mgr.getSegments(), hasSize(1));
    assertThat(FileUtils.listFiles(theDir, TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE), is(empty()));

    // make sure we tested paging in/out
    assertThat(mgr.getNumberOfSwapOut(), is(greaterThan(0L)));
    assertThat(mgr.getNumberOfSwapIn(), is(mgr.getNumberOfSwapOut()));
}

From source file:com.btoddb.fastpersitentqueue.JournalMgr.java

public void shutdown() {
    shutdownInProgress = true;
    if (null != flushExec) {
        flushExec.shutdown();
        try {
            flushExec.awaitTermination(60, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            Thread.interrupted();
            // ignore
        }
    }
    if (null != generalExec) {
        generalExec.shutdown();
        try {
            generalExec.awaitTermination(60, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            Thread.interrupted();
            // ignore
        }
    }

    // for journals that are completely popped, but not removed (because a client still could have pushed)
    Set<JournalDescriptor> removeThese = new HashSet<>();
    try {
        for (JournalDescriptor desc : journalIdMap.values()) {
            if (0 == desc.getNumberOfUnconsumedEntries()) {
                removeThese.add(desc);
            } else if (desc.getFile().isOpen()) {
                try {
                    desc.getFile().forceFlush();
                } catch (IOException e) {
                    logger.error("on shutdown - could not fsync journal file, {} -- ignoring",
                            desc.getFile().getFile().getAbsolutePath());
                }
            }
        }
    } finally {
        for (JournalDescriptor desc : removeThese) {
            removeJournal(desc);
        }
    }
}

From source file:org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl.java

@Test(timeout = 20000)
public void testRebootedDuringSetup() throws Exception {
    Configuration conf = new Configuration();
    conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
    AsyncDispatcher dispatcher = new AsyncDispatcher();
    dispatcher.init(conf);
    dispatcher.start();
    OutputCommitter committer = new StubbedOutputCommitter() {
        @Override
        public synchronized void setupJob(JobContext jobContext) throws IOException {
            // Spin until the committer thread observes an interrupt.
            while (!Thread.interrupted()) {
                try {
                    wait();
                } catch (InterruptedException e) {
                }
            }
        }
    };
    CommitterEventHandler commitHandler = createCommitterEventHandler(dispatcher, committer);
    commitHandler.init(conf);
    commitHandler.start();

    AppContext mockContext = mock(AppContext.class);
    when(mockContext.isLastAMRetry()).thenReturn(false);
    JobImpl job = createStubbedJob(conf, dispatcher, 2, mockContext);
    JobId jobId = job.getID();
    job.handle(new JobEvent(jobId, JobEventType.JOB_INIT));
    assertJobState(job, JobStateInternal.INITED);
    job.handle(new JobStartEvent(jobId));
    assertJobState(job, JobStateInternal.SETUP);

    job.handle(new JobEvent(job.getID(), JobEventType.JOB_AM_REBOOT));
    assertJobState(job, JobStateInternal.REBOOT);
    // return the external state as RUNNING since otherwise JobClient will
    // exit when it polls the AM for job state
    Assert.assertEquals(JobState.RUNNING, job.getState());

    dispatcher.stop();
    commitHandler.stop();
}

From source file:org.apache.hadoop.hbase.io.hfile.HFileReaderImpl.java

/**
 * Opens a HFile. You must load the index before you can use it by calling
 * {@link #loadFileInfo()}.
 * @param path
 *          Path to HFile.
 * @param trailer
 *          File trailer.
 * @param fsdis
 *          input stream.
 * @param fileSize
 *          Length of the stream.
 * @param cacheConf
 *          Cache configuration.
 * @param hfs
 *          The file system.
 * @param conf
 *          Configuration
 */
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD")
public HFileReaderImpl(final Path path, FixedFileTrailer trailer, final FSDataInputStreamWrapper fsdis,
        final long fileSize, final CacheConfig cacheConf, final HFileSystem hfs, final Configuration conf)
        throws IOException {
    this.trailer = trailer;
    this.compressAlgo = trailer.getCompressionCodec();
    this.cacheConf = cacheConf;
    this.fileSize = fileSize;
    this.path = path;
    this.name = path.getName();
    this.conf = conf;
    checkFileVersion();
    this.hfileContext = createHFileContext(fsdis, fileSize, hfs, path, trailer);
    this.fsBlockReader = new HFileBlock.FSReaderImpl(fsdis, fileSize, hfs, path, hfileContext);

    // Comparator class name is stored in the trailer in version 2.
    comparator = trailer.createComparator();
    dataBlockIndexReader = new HFileBlockIndex.CellBasedKeyBlockIndexReader(comparator,
            trailer.getNumDataIndexLevels(), this);
    metaBlockIndexReader = new HFileBlockIndex.ByteArrayKeyBlockIndexReader(1);

    // Parse load-on-open data.

    HFileBlock.BlockIterator blockIter = fsBlockReader.blockRange(trailer.getLoadOnOpenDataOffset(),
            fileSize - trailer.getTrailerSize());

    // Data index. We also read statistics about the block index written after
    // the root level.
    dataBlockIndexReader.readMultiLevelIndexRoot(blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX),
            trailer.getDataIndexCount());

    // Meta index.
    metaBlockIndexReader.readRootIndex(blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX),
            trailer.getMetaIndexCount());

    // File info
    fileInfo = new FileInfo();
    fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream());
    byte[] creationTimeBytes = fileInfo.get(FileInfo.CREATE_TIME_TS);
    this.hfileContext.setFileCreateTime(creationTimeBytes == null ? 0 : Bytes.toLong(creationTimeBytes));
    if (fileInfo.get(FileInfo.LASTKEY) != null) {
        lastKeyCell = new KeyValue.KeyOnlyKeyValue(fileInfo.get(FileInfo.LASTKEY));
    }
    avgKeyLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_KEY_LEN));
    avgValueLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_VALUE_LEN));
    byte[] keyValueFormatVersion = fileInfo.get(HFileWriterImpl.KEY_VALUE_VERSION);
    includesMemstoreTS = keyValueFormatVersion != null
            && Bytes.toInt(keyValueFormatVersion) == HFileWriterImpl.KEY_VALUE_VER_WITH_MEMSTORE;
    fsBlockReader.setIncludesMemstoreTS(includesMemstoreTS);
    if (includesMemstoreTS) {
        decodeMemstoreTS = Bytes.toLong(fileInfo.get(HFileWriterImpl.MAX_MEMSTORE_TS_KEY)) > 0;
    }

    // Read data block encoding algorithm name from file info.
    dataBlockEncoder = HFileDataBlockEncoderImpl.createFromFileInfo(fileInfo);
    fsBlockReader.setDataBlockEncoder(dataBlockEncoder);

    // Store all other load-on-open blocks for further consumption.
    HFileBlock b;
    while ((b = blockIter.nextBlock()) != null) {
        loadOnOpenBlocks.add(b);
    }

    // Prefetch file blocks upon open if requested
    if (cacheConf.shouldPrefetchOnOpen()) {
        PrefetchExecutor.request(path, new Runnable() {
            public void run() {
                try {
                    long offset = 0;
                    long end = fileSize - getTrailer().getTrailerSize();
                    HFileBlock prevBlock = null;
                    while (offset < end) {
                        if (Thread.interrupted()) {
                            break;
                        }
                        long onDiskSize = -1;
                        if (prevBlock != null) {
                            onDiskSize = prevBlock.getNextBlockOnDiskSizeWithHeader();
                        }
                        HFileBlock block = readBlock(offset, onDiskSize, true, false, false, false, null, null);
                        // Need not update the current block. Ideally here the readBlock won't find the
                        // block in cache. We call this readBlock so that block data is read from FS and
                        // cached in BC. So there is no reference count increment that happens here.
                        // The return will ideally be a noop because the block is not of MemoryType SHARED.
                        returnBlock(block);
                        prevBlock = block;
                        offset += block.getOnDiskSizeWithHeader();
                    }
                } catch (IOException e) {
                    // IOExceptions are probably due to region closes (relocation, etc.)
                    if (LOG.isTraceEnabled()) {
                        LOG.trace("Exception encountered while prefetching " + path + ":", e);
                    }
                } catch (Exception e) {
                    // Other exceptions are interesting
                    LOG.warn("Exception encountered while prefetching " + path + ":", e);
                } finally {
                    PrefetchExecutor.complete(path);
                }
            }
        });
    }

    byte[] tmp = fileInfo.get(FileInfo.MAX_TAGS_LEN);
    // max tag length is not present in the HFile means tags were not at all written to file.
    if (tmp != null) {
        hfileContext.setIncludesTags(true);
        tmp = fileInfo.get(FileInfo.TAGS_COMPRESSED);
        if (tmp != null && Bytes.toBoolean(tmp)) {
            hfileContext.setCompressTags(true);
        }
    }
}