Example usage for java.lang Thread MIN_PRIORITY

Introduction

On this page you can find example usages of java.lang.Thread.MIN_PRIORITY collected from open-source projects.

Prototype

public static final int MIN_PRIORITY

Documentation

The minimum priority that a thread can have. Its value is 1; for comparison, NORM_PRIORITY is 5 and MAX_PRIORITY is 10.
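
Before the project examples below, here is a minimal, self-contained sketch of the usual pattern: lowering a background thread with setPriority. The class and thread names are illustrative, not taken from any of the examples.

public class LowPriorityDemo {
    public static void main(String[] args) throws InterruptedException {
        Runnable task = () -> System.out.println(
                Thread.currentThread().getName() + " running at priority "
                        + Thread.currentThread().getPriority());

        Thread worker = new Thread(task, "low-prio-worker");
        // Priorities are only scheduling hints: MIN_PRIORITY (1) suggests that
        // this thread should yield to NORM_PRIORITY (5) and higher threads.
        worker.setPriority(Thread.MIN_PRIORITY);
        worker.start();
        worker.join();
    }
}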

Usage

From source file: org.geowebcache.seed.SeedTask.java

@Override
protected void doActionInternal() throws GeoWebCacheException, InterruptedException {
    super.state = GWCTask.STATE.RUNNING;

    // Lower the thread priority to (NORM_PRIORITY + MIN_PRIORITY) / 2 == 3, midway between normal and minimum
    Thread.currentThread().setPriority((java.lang.Thread.NORM_PRIORITY + java.lang.Thread.MIN_PRIORITY) / 2);

    checkInterrupted();

    // approximate thread creation time
    final long START_TIME = System.currentTimeMillis();

    final String layerName = tl.getName();
    log.info(Thread.currentThread().getName() + " begins seeding layer : " + layerName);

    TileRange tr = trIter.getTileRange();

    checkInterrupted();
    // TODO move to TileRange object, or distinguish between thread and task
    super.tilesTotal = tileCount(tr);

    final int metaTilingFactorX = tl.getMetaTilingFactors()[0];
    final int metaTilingFactorY = tl.getMetaTilingFactors()[1];

    final boolean tryCache = !reseed;

    checkInterrupted();
    long[] gridLoc = trIter.nextMetaGridLocation(new long[3]);

    long seedCalls = 0;
    while (gridLoc != null && !this.terminate) {

        checkInterrupted();
        Map<String, String> fullParameters = tr.getParameters();

        ConveyorTile tile = new ConveyorTile(storageBroker, layerName, tr.getGridSetId(), gridLoc,
                tr.getMimeType(), fullParameters, null, null);

        for (int fetchAttempt = 0; fetchAttempt <= tileFailureRetryCount; fetchAttempt++) {
            try {
                checkInterrupted();
                tl.seedTile(tile, tryCache);
                break;// success, let it go
            } catch (Exception e) {
                // if GWC_SEED_RETRY_COUNT was not set then none of the retry settings take
                // effect, in order to keep backwards compatibility with the old behaviour
                if (tileFailureRetryCount == 0) {
                    if (e instanceof GeoWebCacheException) {
                        throw (GeoWebCacheException) e;
                    }
                    throw new GeoWebCacheException(e);
                }

                long sharedFailureCount = sharedFailureCounter.incrementAndGet();
                if (sharedFailureCount >= totalFailuresBeforeAborting) {
                    log.info("Aborting seed thread " + Thread.currentThread().getName()
                            + ". Error count reached configured maximum of " + totalFailuresBeforeAborting);
                    super.state = GWCTask.STATE.DEAD;
                    return;
                }
                String logMsg = "Seed failed at " + tile.toString() + " after " + (fetchAttempt + 1) + " of "
                        + (tileFailureRetryCount + 1) + " attempts.";
                if (fetchAttempt < tileFailureRetryCount) {
                    log.debug(logMsg);
                    if (tileFailureRetryWaitTime > 0) {
                        log.trace("Waiting " + tileFailureRetryWaitTime + " before trying again");
                        Thread.sleep(tileFailureRetryCount);
                    }
                } else {
                    log.info(logMsg + " Skipping and continuing with next tile. Original error: "
                            + e.getMessage());
                }
            }
        }

        if (log.isTraceEnabled()) {
            log.trace(Thread.currentThread().getName() + " seeded " + Arrays.toString(gridLoc));
        }

        // final long totalTilesCompleted = trIter.getTilesProcessed();
        // note: computing the # of tiles processed by this thread instead of by the whole group
        // also reduces thread contention as the trIter methods are synchronized and profiler
        // shows 16 threads block on synchronization about 40% of the time
        final long tilesCompletedByThisThread = seedCalls * metaTilingFactorX * metaTilingFactorY;

        updateStatusInfo(tl, tilesCompletedByThisThread, START_TIME);

        checkInterrupted();
        seedCalls++;
        gridLoc = trIter.nextMetaGridLocation(gridLoc);
    }

    if (this.terminate) {
        log.info("Job on " + Thread.currentThread().getName() + " was terminated after " + this.tilesDone
                + " tiles");
    } else {
        log.info(Thread.currentThread().getName() + " completed (re)seeding layer " + layerName + " after "
                + this.tilesDone + " tiles and " + this.timeSpent + " seconds.");
    }

    checkInterrupted();
    if (threadOffset == 0 && doFilterUpdate) {
        runFilterUpdates(tr.getGridSetId());
    }

    super.state = GWCTask.STATE.DONE;
}

From source file: com.searchcode.app.jobs.IndexSvnRepoJob.java

public void execute(JobExecutionContext context) throws JobExecutionException {
    if (!this.ENABLED) {
        return;
    }

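    // Indexing is background work: run this job's thread at the lowest priority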
    Thread.currentThread().setPriority(Thread.MIN_PRIORITY);

    if (CodeIndexer.shouldPauseAdding()) {
        Singleton.getLogger().info("Pausing parser.");
        return;
    }

    // Pull the next repo to index from the queue
    UniqueRepoQueue repoQueue = Singleton.getUniqueSvnRepoQueue();

    RepoResult repoResult = repoQueue.poll();
    AbstractMap<String, Integer> runningIndexRepoJobs = Singleton.getRunningIndexRepoJobs();

    if (repoResult != null && !runningIndexRepoJobs.containsKey(repoResult.getName())) {
        Singleton.getLogger().info("Indexing " + repoResult.getName());
        try {
            runningIndexRepoJobs.put(repoResult.getName(), (int) (System.currentTimeMillis() / 1000));

            JobDataMap data = context.getJobDetail().getJobDataMap();

            String repoName = repoResult.getName();
            String repoRemoteLocation = repoResult.getUrl();
            String repoUserName = repoResult.getUsername();
            String repoPassword = repoResult.getPassword();

            String repoLocations = data.get("REPOLOCATIONS").toString();
            this.LOWMEMORY = Boolean.parseBoolean(data.get("LOWMEMORY").toString());

            // Check if successfully cloned, and if not delete and restart
            boolean cloneSuccess = checkCloneUpdateSucess(repoLocations + repoName);
            if (!cloneSuccess) {
                // Delete the folder
                try {
                    FileUtils.deleteDirectory(new File(repoLocations + repoName + "/"));
                    CodeIndexer.deleteByReponame(repoName);
                } catch (IOException ex) {
                    Singleton.getLogger().warning("ERROR - caught a " + ex.getClass() + " in " + this.getClass()
                            + "\n with message: " + ex.getMessage());
                    return;
                }
            }
            deleteCloneUpdateSuccess(repoLocations + repoName);

            String repoGitLocation = repoLocations + repoName + "/.svn/";

            File f = new File(repoGitLocation);
            boolean existingRepo = f.exists();
            boolean useCredentials = repoUserName != null && !repoUserName.isEmpty();
            RepositoryChanged repositoryChanged;

            if (existingRepo) {
                repositoryChanged = this.updateSvnRepository(repoName, repoRemoteLocation, repoUserName,
                        repoPassword, repoLocations, useCredentials);
            } else {
                repositoryChanged = this.checkoutSvnRepository(repoName, repoRemoteLocation, repoUserName,
                        repoPassword, repoLocations, useCredentials);
            }

            // Write file indicating we have successfully cloned
            createCloneUpdateSuccess(repoLocations + repoName);

            if (repositoryChanged.isChanged()) {
                Singleton.getLogger().info("Update found indexing " + repoRemoteLocation);
                this.updateIndex(repoName, repoLocations, repoRemoteLocation, existingRepo, repositoryChanged);
            }
        } finally {
            // Clean up the job
            runningIndexRepoJobs.remove(repoResult.getName());
        }
    }
}

From source file: at.ac.ait.ubicity.fileloader.FileLoader.java

/**
 * Loads the given file into Cassandra, skipping any lines that were already ingested.
 * @param _fileInfo A FileInformation object representing usage information on the file we are supposed to load: line count already ingested, last usage time...
 * @param _keySpace Cassandra key space into which to ingest
 * @param _host Cassandra host / server
 * @param _batchSize MutationBatch size
 * @throws Exception Shouldn't happen, although the Disruptor may throw an Exception under duress
 */
@SuppressWarnings("unchecked")
public final static void load(final FileInformation _fileInfo, final String _keySpace, final String _host,
        final int _batchSize) throws Exception {

    if (!cassandraInitialized) {
        keySpace = AstyanaxInitializer.doInit("Test Cluster", _host, _keySpace);
        cassandraInitialized = true;
    }

    LongTimeStampSorter tsSorter = new LongTimeStampSorter();
    Thread tTSSorter = new Thread(tsSorter);
    tTSSorter.setPriority(Thread.MAX_PRIORITY - 1);
    tTSSorter.setName("long timestamp sorter ");
    tTSSorter.start();
    //get the log id from the file's URI
    final String log_id = _fileInfo.getURI().toString();

    final MutationBatch batch = keySpace.prepareMutationBatch();

    logger.info("got keyspace " + keySpace.getKeyspaceName() + " from Astyanax initializer");

    final LineIterator onLines = FileUtils.lineIterator(new File(_fileInfo.getURI()));

    final ExecutorService exec = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors() * 2);

    ColumnFamily crawl_stats = null;

    AggregationJob aggregationJob = new AggregationJob(keySpace, crawl_stats);
    Thread tAggJob = new Thread(aggregationJob);
    tAggJob.setName("Monitrix loader / aggregation job ");
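    // MIN_PRIORITY + 1 keeps aggregation slightly ahead of threads at the absolute minimum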
    tAggJob.setPriority(Thread.MIN_PRIORITY + 1);
    tAggJob.start();
    logger.info("[FILELOADER] started aggregation job, ring buffer running");

    final Disruptor<SingleLogLineAsString> disruptor = new Disruptor(SingleLogLineAsString.EVENT_FACTORY,
            (int) Math.pow(TWO, 17), exec);
    SingleLogLineAsStringEventHandler.batch = batch;
    SingleLogLineAsStringEventHandler.keySpace = keySpace;
    SingleLogLineAsStringEventHandler.batchSize = _batchSize;
    SingleLogLineAsStringEventHandler.LOG_ID = log_id;
    SingleLogLineAsStringEventHandler.tsSorter = tsSorter;
    SingleLogLineAsStringEventHandler.aggregationJob = aggregationJob;

    //The EventHandler contains the actual logic for ingesting
    final EventHandler<SingleLogLineAsString> handler = new SingleLogLineAsStringEventHandler();

    disruptor.handleEventsWith(handler);

    //get our Aggregate job in place

    //we are almost ready to start
    final RingBuffer<SingleLogLineAsString> rb = disruptor.start();

    int _lineCount = 0;
    long _start, _lapse;
    _start = System.nanoTime();

    int _linesAlreadyProcessed = _fileInfo.getLineCount();

    //cycle through the lines already processed
    while (_lineCount < _linesAlreadyProcessed) {
        onLines.nextLine();
        _lineCount++;
    }

    //now get down to the work we actually must do, and fill the ring buffer
    logger.info("begin proccessing of file " + _fileInfo.getURI() + " @line #" + _lineCount);
    while (onLines.hasNext()) {

        final long _seq = rb.next();
        final SingleLogLineAsString event = rb.get(_seq);
        event.setValue(onLines.nextLine());
        rb.publish(_seq);
        _lineCount++;
    }
    _lapse = System.nanoTime() - _start;
    logger.info("ended proccessing of file " + _fileInfo.getURI() + " @line #" + _lineCount);

    //stop, waiting for last threads still busy to finish their work
    disruptor.shutdown();

    //update the file info; this will land in the cache
    _fileInfo.setLineCount(_lineCount);
    _fileInfo.setLastAccess(System.currentTimeMillis());
    int _usageCount = _fileInfo.getUsageCount();
    _fileInfo.setUsageCount(_usageCount + 1);

    //make sure we release resources
    onLines.close();

    logger.info(
            "handled " + (_lineCount - _linesAlreadyProcessed) + " log lines in " + _lapse + " nanoseconds");

    //now go to aggregation step
    SortedSet<Long> timeStamps = new TreeSet<>(tsSorter.timeStamps);

    long _minTs = timeStamps.first();
    long _maxTs = timeStamps.last();
    logger.info("**** min TimeStamp = " + _minTs);
    logger.info("**** max TimeStamp = " + _maxTs);

    StatsTableActualizer.update(_fileInfo.getURI().toString(), _minTs, _maxTs, _lineCount);

    //        AggregationJob aggJob = new AggregationJob( keySpace, _host, _batchSize );
    //        Thread tAgg = new Thread( aggJob );
    //        tAgg.setName( "aggregation job " );
    //        tAgg.setPriority( Thread.MAX_PRIORITY - 1 );
    //        tAgg.start();

}

From source file: net.sf.jabref.JabRefExecutorService.java

public void executeWithLowPriorityInOwnThread(final Runnable runnable, String name) {
    AutoCleanupRunnable target = new AutoCleanupRunnable(runnable, startedThreads);
    final Thread thread = new Thread(target);
    target.thread = thread;
    thread.setName("JabRef - " + name + " - low prio");
    startedThreads.add(thread);
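    // lowest priority, as the method name promises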
    thread.setPriority(Thread.MIN_PRIORITY);
    thread.start();
}

From source file: jahspotify.web.api.ServerBroadcaster.java

@PostConstruct
private void initialize() {

    _senderThread.setDaemon(true);
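    // a low-priority daemon: broadcasting should never hold up shutdown or busier threads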
    _senderThread.setPriority(Thread.MIN_PRIORITY);
    _senderThread.start();
}

From source file: edu.lternet.pasta.portal.search.BrowseCrawlerServlet.java

/**
 * Initializes the servlet by starting a separate thread in which to
 * run the BrowseCrawler main program.
 * 
 * @param config   the ServletConfig object, holding servlet configuration
 *                 info
 * @throws         ServletException
 */
public void init(ServletConfig config) throws ServletException {
    super.init(config);
    PropertiesConfiguration options = ConfigurationListener.getOptions();
    String browseDirPath = options.getString("browse.dir");
    BrowseSearch.setBrowseCacheDir(browseDirPath);
    Integer crawlPeriodInt;
    this.servletContext = getServletContext();

    crawlPeriodInt = Integer.valueOf(24);
    this.crawlPeriod = crawlPeriodInt.intValue();

    browseCrawlerThread = new Thread(this);
    browseCrawlerThread.setPriority(Thread.MIN_PRIORITY); // be a good citizen
    browseCrawlerThread.start();
}

From source file: GetOpenProperties.java

public void run() {
    String value = null;

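    // this worker lowers its own priority; reading properties is not urgent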
    Thread.currentThread().setPriority(Thread.MIN_PRIORITY);

    //Pause to let the reader see the default strings.
    pause(3000);

    for (int i = 0; i < numProperties; i++) {
        //Pause for dramatic effect.
        pause(250);

        try {
            value = System.getProperty(propertyNames[i]);
            values[i].setText(value);
        } catch (SecurityException e) {
            values[i].setText("Could not read: " + "SECURITY EXCEPTION!");
        }
    }
}

From source file: uk.ac.diamond.scisoft.JythonCreator.java

@Override
public void earlyStartup() {

    // initialiseInterpreter only when 
    // loader factory and function factory plugins 
    // are known.
    final Runnable runner = new Runnable() {
        @Override
        public void run() {

            try {
                Thread.sleep(500); // 1/2 second
            } catch (InterruptedException e) {
                logger.error("Cannot wait on worker thread", e);
            }

            while (!LoaderFactoryStartup.isStarted() || !FunctionFactoryStartup.isStarted()) {

                try {
                    Thread.sleep(500); // 1/2 second
                } catch (InterruptedException e) {
                    logger.error("Cannot sleep on worker thread", e);
                }
            }
            try {
                initialiseConsole();
                initialiseInterpreter(new NullProgressMonitor());
            } catch (Exception e) {
                logger.error("Cannot initialize the Jython interpreter.", e);
            }
        }
    };

    final Thread daemon = new Thread(runner);
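    // a minimum-priority daemon thread keeps interpreter start-up in the background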
    daemon.setPriority(Thread.MIN_PRIORITY);
    daemon.setDaemon(true);
    daemon.start();
}

From source file: org.opencms.search.CmsIndexingThreadManager.java

/**
 * Creates and starts a new indexing thread for a resource.<p>
 *
 * After an indexing thread was started, the manager suspends itself 
 * and waits for an amount of time specified by the <code>timeout</code>
 * value. If the timeout value is reached, the indexing thread is
 * aborted by an interrupt signal.<p>
 * 
 * @param indexer the VFS indexer to create the index thread for 
 * @param writer the index writer that can update the index
 * @param res the resource
 */
public void createIndexingThread(CmsVfsIndexer indexer, I_CmsIndexWriter writer, CmsResource res) {

    I_CmsReport report = indexer.getReport();
    m_startedCounter++;
    CmsIndexingThread thread = new CmsIndexingThread(indexer.getCms(), res, indexer.getIndex(),
            m_startedCounter, report);
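    // index at minimum priority; this method waits on join() below anyway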
    thread.setPriority(Thread.MIN_PRIORITY);
    thread.start();
    try {
        thread.join(m_timeout);
    } catch (InterruptedException e) {
        // ignore
    }
    if (thread.isAlive()) {
        // the thread has not finished - so it must be marked as an abandoned thread 
        m_abandonedCounter++;
        thread.interrupt();
        if (LOG.isWarnEnabled()) {
            LOG.warn(Messages.get().getBundle().key(Messages.LOG_INDEXING_TIMEOUT_1, res.getRootPath()));
        }
        if (report != null) {
            report.println();
            report.print(org.opencms.report.Messages.get().container(org.opencms.report.Messages.RPT_FAILED_0),
                    I_CmsReport.FORMAT_WARNING);
            report.println(Messages.get().container(Messages.RPT_SEARCH_INDEXING_TIMEOUT_1, res.getRootPath()),
                    I_CmsReport.FORMAT_WARNING);
        }
    } else {
        // the thread finished normally
        m_returnedCounter++;
    }
    Document doc = thread.getResult();
    if (doc != null) {
        // write the document to the index
        indexer.updateResource(writer, res.getRootPath(), doc);
    }
    if ((m_startedCounter % m_maxModificationsBeforeCommit) == 0) {
        try {
            writer.commit();
        } catch (IOException e) {
            if (LOG.isWarnEnabled()) {
                LOG.warn(Messages.get().getBundle().key(Messages.LOG_IO_INDEX_WRITER_COMMIT_2,
                        indexer.getIndex().getName(), indexer.getIndex().getPath()), e);
            }
        }
    }
}

From source file: org.geowebcache.s3.S3Ops.java

private ExecutorService createDeleteExecutorService() {
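    // bulk deletes are background housekeeping: daemon threads at minimum priority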
    ThreadFactory tf = new ThreadFactoryBuilder().setDaemon(true)
            .setNameFormat("GWC S3BlobStore bulk delete thread-%d. Bucket: " + bucketName)
            .setPriority(Thread.MIN_PRIORITY).build();
    return Executors.newCachedThreadPool(tf);
}