Example usage for java.lang Thread MIN_PRIORITY

List of usage examples for java.lang Thread MIN_PRIORITY

Introduction

On this page you can find usage examples for java.lang Thread MIN_PRIORITY.

Prototype

public static final int MIN_PRIORITY

To view the source code for java.lang Thread MIN_PRIORITY, follow the source link.

Document

The minimum priority that a thread can have.
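
For reference, Thread.MIN_PRIORITY has the value 1; the related constants are Thread.NORM_PRIORITY (5, the default) and Thread.MAX_PRIORITY (10), and Thread.setPriority only accepts values within that range. A minimal, self-contained sketch:

public class ThreadPriorityConstants {
    public static void main(String[] args) {
        // The three priority constants defined on java.lang.Thread
        System.out.println("MIN_PRIORITY  = " + Thread.MIN_PRIORITY);  // 1
        System.out.println("NORM_PRIORITY = " + Thread.NORM_PRIORITY); // 5
        System.out.println("MAX_PRIORITY  = " + Thread.MAX_PRIORITY);  // 10

        // setPriority accepts only values in [MIN_PRIORITY, MAX_PRIORITY];
        // anything outside that range throws IllegalArgumentException.
        Thread.currentThread().setPriority(Thread.MIN_PRIORITY);
        System.out.println("current priority = " + Thread.currentThread().getPriority()); // 1
    }
}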

Usage

From source file:org.apache.cocoon.thread.impl.DefaultRunnableManager.java

/**
 * Converts a thread priority given as a string value ("MIN", "NORM" or "MAX") to the corresponding Thread priority constant.
 *
 * @param priority
 *            The priority to set as string value.
 *
 * @return The priority as int value.
 */
private int convertPriority(final String priority) {
    if ("MIN".equalsIgnoreCase(priority)) {
        return Thread.MIN_PRIORITY;
    } else if ("NORM".equalsIgnoreCase(priority)) {
        return Thread.NORM_PRIORITY;
    } else if ("MAX".equalsIgnoreCase(priority)) {
        return Thread.MAX_PRIORITY;
    } else {
        getLogger().warn("Unknown thread priority \"" + priority + "\". Set to \"NORM\".");

        return Thread.NORM_PRIORITY;
    }
}
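
A minimal, hypothetical sketch (the class and names below are illustrative, not Cocoon API) of how such a string-to-priority mapping is typically applied when a worker thread is created from configuration:

public class ConfiguredPriorityDemo {
    public static void main(String[] args) throws InterruptedException {
        String configured = "MIN"; // e.g. read from a thread-pool configuration entry

        // Condensed form of the mapping performed by convertPriority() above
        int priority = "MAX".equalsIgnoreCase(configured) ? Thread.MAX_PRIORITY
                : "MIN".equalsIgnoreCase(configured) ? Thread.MIN_PRIORITY
                : Thread.NORM_PRIORITY;

        Thread worker = new Thread(() -> System.out.println(
                "worker running at priority " + Thread.currentThread().getPriority()));
        worker.setPriority(priority); // Thread.MIN_PRIORITY (1) for "MIN"
        worker.start();
        worker.join();
    }
}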

From source file:com.zia.freshdocs.widget.adapter.CMISAdapter.java

/**
 * Download the content for the given NodeRef
 *
 * @param ref
 * @param handler
 */
protected void downloadContent(final NodeRef ref, final Handler handler) {
    startProgressDlg(false);
    mProgressDlg.setMax(Long.valueOf(ref.getContentLength()).intValue());

    mDlThread = new ChildDownloadThread(handler, new Downloadable() {
        public Object execute() {
            File f = null;

            try {
                CMISApplication app = (CMISApplication) getContext().getApplicationContext();
                URL url = new URL(ref.getContent());
                String name = ref.getName();
                long fileSize = ref.getContentLength();
                f = app.getFile(name, fileSize);

                if (f != null && f.length() != fileSize) {
                    Thread.currentThread().setPriority(Thread.MIN_PRIORITY);

                    FileOutputStream fos = new FileOutputStream(f);
                    InputStream is = mCmis.get(url.getPath());

                    byte[] buffer = new byte[BUF_SIZE];
                    int len = is.read(buffer);
                    int total = len;
                    Message msg = null;
                    Bundle b = null;

                    while (len != -1) {
                        msg = handler.obtainMessage();
                        b = new Bundle();
                        b.putInt("progress", total);
                        msg.setData(b);
                        handler.sendMessage(msg);

                        fos.write(buffer, 0, len);
                        len = is.read(buffer);
                        if (len > 0) {
                            total += len; // don't add the -1 returned at end of stream
                        }

                        if (Thread.interrupted()) {
                            fos.close();
                            f = null;
                            throw new InterruptedException();
                        }
                    }

                    fos.flush();
                    fos.close();
                    is.close();
                }
            } catch (Exception e) {
                Log.e(CMISAdapter.class.getSimpleName(), "", e);
            }

            return f;
        }
    });
    mDlThread.start();
}
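
The example above lowers the current thread's priority for the whole download and never raises it again. A small illustrative variant (the helper below is not part of CMISAdapter) saves the caller's priority and restores it once the blocking copy is done:

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

public final class LowPriorityCopy {
    // Runs a blocking stream copy at MIN_PRIORITY, then restores the original priority.
    public static void copyAtLowPriority(InputStream in, OutputStream out) throws IOException {
        Thread current = Thread.currentThread();
        int original = current.getPriority();
        current.setPriority(Thread.MIN_PRIORITY);
        try {
            byte[] buffer = new byte[8192];
            int len;
            while ((len = in.read(buffer)) != -1) {
                out.write(buffer, 0, len);
            }
            out.flush();
        } finally {
            current.setPriority(original); // undo the temporary drop for later work on this thread
        }
    }
}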

From source file:org.apache.jackrabbit.core.RepositoryImpl.java

/**
 * Protected constructor.
 *
 * @param repConfig the repository configuration.
 * @throws RepositoryException if there is already another repository
 *                             instance running on the given configuration
 *                             or another error occurs.
 */
protected RepositoryImpl(RepositoryConfig repConfig) throws RepositoryException {
    // Acquire a lock on the repository home
    repLock = repConfig.getRepositoryLockMechanism();
    repLock.init(repConfig.getHomeDir());
    repLock.acquire();

    long t0 = System.currentTimeMillis();
    log.info("Starting repository...");

    boolean succeeded = false;
    try {
        this.repConfig = repConfig;

        context.setFileSystem(repConfig.getFileSystem());

        // Load root node identifier
        context.setRootNodeId(loadRootNodeId());

        // initialize repository descriptors
        initRepositoryDescriptors();

        // create registries
        context.setNamespaceRegistry(createNamespaceRegistry());
        context.setNodeTypeRegistry(createNodeTypeRegistry());
        context.setPrivilegeRegistry(
                new PrivilegeRegistry(context.getNamespaceRegistry(), context.getFileSystem()));

        // Create item state cache manager
        context.setItemStateCacheFactory(new ManagedMLRUItemStateCacheFactory(cacheMgr));

        DataStore dataStore = repConfig.getDataStore();
        if (dataStore != null) {
            context.setDataStore(dataStore);
        }

        nodeIdFactory = new NodeIdFactory(repConfig.getHomeDir());
        nodeIdFactory.open();
        context.setNodeIdFactory(nodeIdFactory);

        context.setWorkspaceManager(new WorkspaceManager(this));

        // init workspace configs
        for (WorkspaceConfig config : repConfig.getWorkspaceConfigs()) {
            WorkspaceInfo info = createWorkspaceInfo(config);
            wspInfos.put(config.getName(), info);
        }

        // initialize optional clustering before setting up any other
        // external event source that a cluster node will be interested in
        ClusterNode clusterNode = null;
        if (repConfig.getClusterConfig() != null) {
            clusterNode = createClusterNode();
            context.setClusterNode(clusterNode);
            context.getNamespaceRegistry().setEventChannel(clusterNode);
            context.getNodeTypeRegistry().setEventChannel(clusterNode);
            context.getPrivilegeRegistry().setEventChannel(clusterNode);

            createWorkspaceEventChannel = clusterNode;
            clusterNode.setListener(this);
        }

        // init version manager
        InternalVersionManagerImpl vMgr = createVersionManager(repConfig.getVersioningConfig(),
                delegatingDispatcher);
        context.setInternalVersionManager(vMgr);
        if (clusterNode != null) {
            vMgr.setEventChannel(clusterNode.createUpdateChannel(null));
        }

        // init virtual node type manager
        virtNTMgr = new VirtualNodeTypeStateManager(context.getNodeTypeRegistry(), delegatingDispatcher,
                NODETYPES_NODE_ID, SYSTEM_ROOT_NODE_ID);

        // initialize startup workspaces
        initStartupWorkspaces();

        // initialize system search manager
        getSystemSearchManager(repConfig.getDefaultWorkspaceName());

        // Initialise the security manager;
        initSecurityManager();

        // after the workspace is initialized we pass a system session to
        // the virtual node type manager

        // todo FIXME the *global* virtual node type manager is using a session that is bound to a single specific workspace...
        virtNTMgr.setSession(getSystemSession(repConfig.getDefaultWorkspaceName()));

        // now start cluster node as last step
        if (clusterNode != null) {
            setDescriptor(JACKRABBIT_CLUSTER_ID, repConfig.getClusterConfig().getId());
            try {
                clusterNode.start();
            } catch (ClusterException e) {
                String msg = "Unable to start clustered node, forcing shutdown...";
                log.error(msg, e);
                shutdown();
                throw new RepositoryException(msg, e);
            }
        }

        // amount of time in seconds before an idle workspace is automatically
        // shut down
        int maxIdleTime = repConfig.getWorkspaceMaxIdleTime();
        if (maxIdleTime != 0) {
            // start workspace janitor thread
            Thread wspJanitor = new Thread(new WorkspaceJanitor(maxIdleTime * 1000));
            wspJanitor.setName("WorkspaceJanitor");
            wspJanitor.setPriority(Thread.MIN_PRIORITY);
            wspJanitor.setDaemon(true);
            wspJanitor.start();
        }

        succeeded = true;
        log.info("Repository started (" + (System.currentTimeMillis() - t0) + "ms)");
    } catch (RepositoryException e) {
        log.error("failed to start Repository: " + e.getMessage(), e);
        throw e;
    } finally {
        if (!succeeded) {
            try {
                // repository startup failed, clean up...
                shutdown();
            } catch (Throwable t) {
                // ensure this exception does not overlay the original
                // startup exception and only log it
                log.error("In addition to startup fail, another unexpected problem "
                        + "occurred while shutting down the repository again.", t);
                // Clear the repository lock if it was left in place
                repLock.release();
            }
        }
    }
}

From source file:org.apache.hadoop.hive.metastore.AggregateStatsCache.java

/**
 * Cleans the expired nodes or removes LRU nodes of the cache,
 * until the cache size reduces to cleanUntil% full.
 */
private void spawnCleaner() {
    // This spawns a separate thread to walk through the cache and removes expired nodes.
    // Only one cleaner thread should be running at any point.
    synchronized (this) {
        if (isCleaning) {
            return;
        }
        isCleaning = true;
    }
    Thread cleaner = new Thread("AggregateStatsCache-CleanerThread") {
        @Override
        public void run() {
            numRemovedTTL = 0;
            numRemovedLRU = 0;
            long cleanerStartTime = System.currentTimeMillis();
            LOG.info("AggregateStatsCache is " + getFullPercent() + "% full, with " + getCurrentNodes()
                    + " nodes; starting cleaner thread");
            try {
                Iterator<Map.Entry<Key, AggrColStatsList>> mapIterator = cacheStore.entrySet().iterator();
                while (mapIterator.hasNext()) {
                    Map.Entry<Key, AggrColStatsList> pair = (Map.Entry<Key, AggrColStatsList>) mapIterator
                            .next();
                    AggrColStats node;
                    AggrColStatsList candidateList = (AggrColStatsList) pair.getValue();
                    List<AggrColStats> nodes = candidateList.nodes;
                    if (nodes.size() == 0) {
                        mapIterator.remove();
                        continue;
                    }
                    boolean isLocked = false;
                    try {
                        isLocked = candidateList.writeLock.tryLock(maxWriterWaitTime, TimeUnit.MILLISECONDS);
                        if (isLocked) {
                            for (Iterator<AggrColStats> listIterator = nodes.iterator(); listIterator
                                    .hasNext();) {
                                node = listIterator.next();
                                // Remove the node if it has expired
                                if (isExpired(node)) {
                                    listIterator.remove();
                                    numRemovedTTL++;
                                    currentNodes.getAndDecrement();
                                }
                            }
                        }
                    } catch (InterruptedException e) {
                        LOG.debug(e);
                    } finally {
                        if (isLocked) {
                            candidateList.writeLock.unlock();
                        }
                    }
                    // We want to make sure this runs at a low priority in the background
                    Thread.yield();
                }
                // If the expired nodes did not result in cache being cleanUntil% in size,
                // start removing LRU nodes
                while (getCurrentNodes() / maxCacheNodes > cleanUntil) {
                    evictOneNode();
                }
            } finally {
                isCleaning = false;
                LOG.info("Stopping cleaner thread; AggregateStatsCache is now " + getFullPercent()
                        + "% full, with " + getCurrentNodes() + " nodes");
                LOG.info("Number of expired nodes removed: " + numRemovedTTL);
                LOG.info("Number of LRU nodes removed: " + numRemovedLRU);
                LOG.info("Cleaner ran for: " + (System.currentTimeMillis() - cleanerStartTime) + "ms");
            }
        }
    };
    cleaner.setPriority(Thread.MIN_PRIORITY);
    cleaner.setDaemon(true);
    cleaner.start();
}

From source file:weka.server.WekaServer.java

/**
 * Starts the Jetty server.
 * 
 * @throws Exception if a problem occurs
 */
protected void startJettyServer() throws Exception {
    // load any persisted scheduled tasks
    loadTasks();

    if (m_jettyServer != null) {
        throw new Exception("Server is already running. Stop it first.");
    }

    if (m_hostname == null) {
        throw new Exception("No hostname has been specified!!");
    }

    weka.core.logging.Logger.log(weka.core.logging.Logger.Level.INFO, "Logging started");

    m_jettyServer = new Server();

    String wekaServerPasswordPath = WekaPackageManager.WEKA_HOME.toString() + File.separator + "server"
            + File.separator + "weka.pwd";
    File wekaServerPasswordFile = new File(wekaServerPasswordPath);
    boolean useAuth = wekaServerPasswordFile.exists();

    SecurityHandler securityHandler = null;
    if (useAuth) {
        System.out.println("[WekaServer] installing security handler");
        Constraint constraint = new Constraint();
        constraint.setName(Constraint.__BASIC_AUTH);
        constraint.setRoles(new String[] { Constraint.ANY_ROLE });
        constraint.setAuthenticate(true);

        ConstraintMapping constraintMapping = new ConstraintMapping();
        constraintMapping.setConstraint(constraint);
        constraintMapping.setPathSpec("/*");

        securityHandler = new SecurityHandler();
        securityHandler.setUserRealm(new HashUserRealm("WekaServer", wekaServerPasswordFile.toString()));
        securityHandler.setConstraintMappings(new ConstraintMapping[] { constraintMapping });

        BufferedReader br = null;
        try {
            br = new BufferedReader(new FileReader(wekaServerPasswordFile));
            String line = null;
            while ((line = br.readLine()) != null) {
                // not a comment line, so assume it's the data
                if (!line.startsWith("#")) {
                    String[] parts = line.split(":");
                    if (parts.length > 3 || parts.length < 2) {
                        continue;
                    }
                    m_username = parts[0].trim();
                    m_password = parts[1].trim();
                    if (parts.length == 3 && parts[1].trim().startsWith("OBF")) {
                        m_password = m_password + ":" + parts[2];
                        String deObbs = Password.deobfuscate(m_password);
                        m_password = deObbs;
                    }
                    break;
                }
            }
        } catch (Exception ex) {
            System.err.println("[WekaServer} Error reading password file: " + ex.getMessage());
        } finally {
            if (br != null) {
                br.close();
            }
        }
    }

    // Servlets
    ContextHandlerCollection contexts = new ContextHandlerCollection();

    // Root context
    Context root = new Context(contexts, RootServlet.CONTEXT_PATH, Context.SESSIONS);
    RootServlet rootServlet = new RootServlet(m_taskMap, this);
    root.addServlet(new ServletHolder(rootServlet), "/*");

    // Execute task
    Context executeTask = new Context(contexts, ExecuteTaskServlet.CONTEXT_PATH, Context.SESSIONS);
    executeTask.addServlet(new ServletHolder(new ExecuteTaskServlet(m_taskMap, this)), "/*");

    // Task status
    Context taskStatus = new Context(contexts, GetTaskStatusServlet.CONTEXT_PATH, Context.SESSIONS);
    taskStatus.addServlet(new ServletHolder(new GetTaskStatusServlet(m_taskMap, this)), "/*");

    // Purge task
    Context purgeTask = new Context(contexts, PurgeTaskServlet.CONTEXT_PATH, Context.SESSIONS);
    purgeTask.addServlet(new ServletHolder(new PurgeTaskServlet(m_taskMap, this)), "/*");

    // Add slave
    Context addSlave = new Context(contexts, AddSlaveServlet.CONTEXT_PATH, Context.SESSIONS);
    addSlave.addServlet(new ServletHolder(new AddSlaveServlet(m_taskMap, this)), "/*");

    // Server load factor
    Context loadFactor = new Context(contexts, GetServerLoadServlet.CONTEXT_PATH, Context.SESSIONS);
    loadFactor.addServlet(new ServletHolder(new GetServerLoadServlet(m_taskMap, this)), "/*");

    // Set task status (from slave)
    Context setTaskStatus = new Context(contexts, SetTaskStatusServlet.CONTEXT_PATH, Context.SESSIONS);
    setTaskStatus.addServlet(new ServletHolder(new SetTaskStatusServlet(m_taskMap, this)), "/*");

    // Set last execution for task (from slave)
    Context setLastExecution = new Context(contexts, SetLastExecutionServlet.CONTEXT_PATH, Context.SESSIONS);
    setLastExecution.addServlet(new ServletHolder(new SetLastExecutionServlet(m_taskMap, this)), "/*");

    // Get task list servlet
    Context getTaskList = new Context(contexts, GetTaskListServlet.CONTEXT_PATH, Context.SESSIONS);
    getTaskList.addServlet(new ServletHolder(new GetTaskListServlet(m_taskMap, this)), "/*");

    // Get task result servlet
    Context getTaskResult = new Context(contexts, GetTaskResultServlet.CONTEXT_PATH, Context.SESSIONS);
    getTaskResult.addServlet(new ServletHolder(new GetTaskResultServlet(m_taskMap, this)), "/*");

    // Get schedule servlet
    Context getSchedule = new Context(contexts, GetScheduleServlet.CONTEXT_PATH, Context.SESSIONS);
    getSchedule.addServlet(new ServletHolder(new GetScheduleServlet(m_taskMap, this)), "/*");

    m_jettyServer.setHandlers((securityHandler != null) ? new Handler[] { securityHandler, contexts }
            : new Handler[] { contexts });

    // start execution

    SocketConnector connector = new SocketConnector();
    connector.setPort(m_port);
    connector.setHost(m_hostname);
    connector.setName("WekaServer@" + m_hostname);

    m_jettyServer.setConnectors(new Connector[] { connector });

    m_jettyServer.start();
    startExecutorPool();

    // start a purge thread that purges stale tasks
    Thread purgeThread = new Thread() {
        @Override
        public void run() {
            while (true) {
                purgeTasks(m_staleTime);
                try {
                    Thread.sleep(m_staleTime);
                } catch (InterruptedException ie) {
                }
            }
        }
    };

    if (m_staleTime > 0) {
        System.out.println("[WekaServer] Starting purge thread.");
        purgeThread.setPriority(Thread.MIN_PRIORITY);
        purgeThread.setDaemon(m_daemon);
        purgeThread.start();
    } else {
        System.out.println("[WekaServer] Purge thread disabled.");
    }

    // start a thread for executing scheduled tasks
    Thread scheduleChecker = new Thread() {
        GregorianCalendar m_cal = new GregorianCalendar();

        @Override
        public void run() {
            while (true) {
                List<WekaTaskEntry> tasks = m_taskMap.getTaskList();
                for (WekaTaskEntry t : tasks) {
                    NamedTask task = m_taskMap.getTask(t);
                    if (task instanceof Scheduled
                            && task.getTaskStatus().getExecutionStatus() != TaskStatusInfo.PROCESSING) {
                        // Date lastExecution = m_taskMap.getExecutionTime(t);
                        Date lastExecution = t.getLastExecution();
                        Schedule schedule = ((Scheduled) task).getSchedule();
                        boolean runIt = false;
                        try {
                            runIt = schedule.execute(lastExecution);
                        } catch (Exception ex) {
                            System.err.println("[WekaServer] There is a problem with scheduled task "
                                    + t.toString() + "\n\n" + ex.getMessage());
                        }
                        if (runIt) {
                            System.out.println("[WekaServer] Starting scheduled task " + t.toString());
                            executeTask(t);
                        }
                    }
                }

                try {
                    // check every 60 seconds
                    // wait enough seconds to be on the minute
                    Date now = new Date();
                    m_cal.setTime(now);
                    int seconds = (60 - m_cal.get(Calendar.SECOND));
                    Thread.sleep(seconds * 1000);
                } catch (InterruptedException ie) {
                }
            }
        }
    };

    System.out.println("[WekaServer] Starting schedule checker.");
    scheduleChecker.setPriority(Thread.MIN_PRIORITY);
    scheduleChecker.setDaemon(m_daemon);
    scheduleChecker.start();

    // Register with a master server?
    if (m_master != null && m_master.length() > 0 && m_master.lastIndexOf(":") > 0) {
        registerWithMaster();
    }

    if (!m_daemon) {
        m_jettyServer.join();
    }
}
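
Both the purge thread and the schedule checker above are hand-rolled sleep loops set to MIN_PRIORITY. An alternative sketch (illustrative only, not Weka code; staleTimeMillis and purgeTasks stand in for m_staleTime and purgeTasks(m_staleTime)) does the same with a ScheduledExecutorService whose single worker is a low-priority daemon:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

ScheduledExecutorService startPurgeScheduler(long staleTimeMillis) {
    ScheduledExecutorService purger = Executors.newSingleThreadScheduledExecutor(r -> {
        Thread t = new Thread(r, "PurgeThread");
        t.setPriority(Thread.MIN_PRIORITY); // keep housekeeping work at the lowest priority
        t.setDaemon(true);                  // do not keep the JVM alive for this thread
        return t;
    });
    purger.scheduleWithFixedDelay(() -> purgeTasks(staleTimeMillis),
            staleTimeMillis, staleTimeMillis, TimeUnit.MILLISECONDS);
    return purger;
}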

From source file:org.apache.cocoon.thread.impl.DefaultThreadPool.java

/**
 * Converts a thread priority given as a string value ("MIN", "NORM" or "MAX") to the corresponding Thread priority constant.
 *
 * @param priority
 *                The priority to set as string value.
 *
 * @return The priority as int value.
 */
private int convertPriority(final String priority) {
    if ("MIN".equalsIgnoreCase(priority)) {
        return Thread.MIN_PRIORITY;
    } else if ("NORM".equalsIgnoreCase(priority)) {
        return Thread.NORM_PRIORITY;
    } else if ("MAX".equalsIgnoreCase(priority)) {
        return Thread.MAX_PRIORITY;
    } else {
        logger.warn("Unknown thread priority \"" + priority + "\". Set to \"NORM\".");

        return Thread.NORM_PRIORITY;
    }
}

From source file:com.silentcircle.contacts.calllognew.CallLogAdapter.java

/**
 * Starts a background thread to process contact-lookup requests, unless one
 * has already been started.
 */
private synchronized void startRequestProcessing() {
    // For unit-testing.
    if (mRequestProcessingDisabled)
        return;

    // Idempotence... if a thread is already started, don't start another.
    if (mCallerIdThread != null)
        return;

    mCallerIdThread = new QueryThread();
    mCallerIdThread.setPriority(Thread.MIN_PRIORITY);
    mCallerIdThread.start();
}

From source file:lucee.runtime.engine.CFMLEngineImpl.java

private CFMLEngineImpl(CFMLEngineFactory factory, BundleCollection bc) {
    this.factory = factory;
    this.bundleCollection = bc;

    // happen when Lucee is loaded directly
    if (bundleCollection == null) {
        try {
            Properties prop = InfoImpl.getDefaultProperties(null);

            // read the config from default.properties
            Map<String, Object> config = new HashMap<String, Object>();
            Iterator<Entry<Object, Object>> it = prop.entrySet().iterator();
            Entry<Object, Object> e;
            String k;
            while (it.hasNext()) {
                e = it.next();
                k = (String) e.getKey();
                if (!k.startsWith("org.") && !k.startsWith("felix."))
                    continue;
                config.put(k, CFMLEngineFactorySupport.removeQuotes((String) e.getValue(), true));
            }

            /*/ TODO no idea what is going on, but this is necessary atm
            config.put(
               Constants.FRAMEWORK_SYSTEMPACKAGES,
               "org.w3c.dom,org.w3c.dom.bootstrap,org.w3c.dom.events,org.w3c.dom.ls,org.xml.sax,org.xml.sax.ext,org.xml.sax.helpers,javax.crypto,javax.crypto.spec");
                    
            config.put(
                  Constants.FRAMEWORK_BOOTDELEGATION,
                  "coldfusion,coldfusion.image,coldfusion.runtime,coldfusion.runtime.java,coldfusion.server,coldfusion.sql,org,org.apache,org.apache.axis,org.apache.axis.encoding,org.apache.axis.encoding.ser,org.apache.taglibs,org.apache.taglibs.datetime,org.jfree,org.jfree.chart,org.jfree.chart.block,org.objectweb,org.objectweb.asm,org.opencfml,org.opencfml.cfx,lucee,lucee.commons,lucee.commons.activation,lucee.commons.cli,lucee.commons.collection,lucee.commons.collection.concurrent,lucee.commons.color,lucee.commons.date,lucee.commons.db,lucee.commons.digest,lucee.commons.i18n,lucee.commons.img,lucee.commons.io,lucee.commons.io.auto,lucee.commons.io.cache,lucee.commons.io.compress,lucee.commons.io.ini,lucee.commons.io.log,lucee.commons.io.log.log4j,lucee.commons.io.log.log4j.appender,lucee.commons.io.log.log4j.appender.task,lucee.commons.io.log.log4j.layout,lucee.commons.io.log.sl4j,lucee.commons.io.reader,lucee.commons.io.res,lucee.commons.io.res.filter,lucee.commons.io.res.type,lucee.commons.io.res.type.cache,lucee.commons.io.res.type.cfml,lucee.commons.io.res.type.compress,lucee.commons.io.res.type.datasource,lucee.commons.io.res.type.datasource.core,lucee.commons.io.res.type.file,lucee.commons.io.res.type.ftp,lucee.commons.io.res.type.http,lucee.commons.io.res.type.ram,lucee.commons.io.res.type.s3,lucee.commons.io.res.type.tar,lucee.commons.io.res.type.tgz,lucee.commons.io.res.type.zip,lucee.commons.io.res.util,lucee.commons.io.retirement,lucee.commons.lang,lucee.commons.lang.font,lucee.commons.lang.lock,lucee.commons.lang.mimetype,lucee.commons.lang.types,lucee.commons.lock,lucee.commons.lock.rw,lucee.commons.management,lucee.commons.math,lucee.commons.net,lucee.commons.net.http,lucee.commons.net.http.httpclient3,lucee.commons.net.http.httpclient3.entity,lucee.commons.net.http.httpclient4,lucee.commons.net.http.httpclient4.entity,lucee.commons.pdf,lucee.commons.res,lucee.commons.res.io,lucee.commons.res.io.filter,lucee.commons.security,lucee.commons.sql,lucee.commons.surveillance,lucee.commons.util,lucee.deployer,lucee.deployer.filter,lucee.intergral,lucee.intergral.fusiondebug,lucee.intergral.fusiondebug.server,lucee.intergral.fusiondebug.server.type,lucee.intergral.fusiondebug.server.type.coll,lucee.intergral.fusiondebug.server.type.nat,lucee.intergral.fusiondebug.server.type.qry,lucee.intergral.fusiondebug.server.type.simple,lucee.intergral.fusiondebug.server.util,lucee.runtime,lucee.runtime.cache,lucee.runtime.cache.eh,lucee.runtime.cache.eh.remote,lucee.runtime.cache.eh.remote.rest,lucee.runtime.cache.eh.remote.rest.sax,lucee.runtime.cache.eh.remote.soap,lucee.runtime.cache.legacy,lucee.runtime.cache.ram,lucee.runtime.cache.tag,lucee.runtime.cache.tag.include,lucee.runtime.cache.tag.query,lucee.runtime.cache.tag.request,lucee.runtime.cache.tag.smart,lucee.runtime.cache.tag.timespan,lucee.runtime.cache.tag.udf,lucee.runtime.cache.util,lucee.runtime.cfx,lucee.runtime.cfx.customtag,lucee.runtime.chart,lucee.runtime.coder,lucee.runtime.com,lucee.runtime.compiler,lucee.runtime.component,lucee.runtime.concurrency,lucee.runtime.config,lucee.runtime.config.ajax,lucee.runtime.config.component,lucee.runtime.converter,lucee.runtime.converter.bin,lucee.runtime.crypt,lucee.runtime.customtag,lucee.runtime.db,lucee.runtime.db.driver,lucee.runtime.db.driver.state,lucee.runtime.debug,lucee.runtime.debug.filter,lucee.runtime.dump,lucee.runtime.engine,lucee.runtime.err,lucee.runtime.exp,lucee.runtime.ext,lucee.runtime.ext.tag,lucee.runtime.extension,lucee.runtime.flash,lucee.runtime.format,lucee.ru
ntime.functions,lucee.runtime.functions.arrays,lucee.runtime.functions.cache,lucee.runtime.functions.closure,lucee.runtime.functions.component,lucee.runtime.functions.conversion,lucee.runtime.functions.csrf,lucee.runtime.functions.dateTime,lucee.runtime.functions.decision,lucee.runtime.functions.displayFormatting,lucee.runtime.functions.dynamicEvaluation,lucee.runtime.functions.file,lucee.runtime.functions.gateway,lucee.runtime.functions.image,lucee.runtime.functions.international,lucee.runtime.functions.list,lucee.runtime.functions.math,lucee.runtime.functions.orm,lucee.runtime.functions.other,lucee.runtime.functions.owasp,lucee.runtime.functions.poi,lucee.runtime.functions.query,lucee.runtime.functions.rest,lucee.runtime.functions.s3,lucee.runtime.functions.string,lucee.runtime.functions.struct,lucee.runtime.functions.system,lucee.runtime.functions.video,lucee.runtime.functions.xml,lucee.runtime.gateway,lucee.runtime.gateway.proxy,lucee.runtime.helpers,lucee.runtime.i18n,lucee.runtime.img,lucee.runtime.img.coder,lucee.runtime.img.composite,lucee.runtime.img.filter,lucee.runtime.img.gif,lucee.runtime.img.interpolation,lucee.runtime.img.math,lucee.runtime.img.vecmath,lucee.runtime.instrumentation,lucee.runtime.interpreter,lucee.runtime.interpreter.ref,lucee.runtime.interpreter.ref.cast,lucee.runtime.interpreter.ref.func,lucee.runtime.interpreter.ref.literal,lucee.runtime.interpreter.ref.op,lucee.runtime.interpreter.ref.util,lucee.runtime.interpreter.ref.var,lucee.runtime.java,lucee.runtime.listener,lucee.runtime.lock,lucee.runtime.monitor,lucee.runtime.net,lucee.runtime.net.amf,lucee.runtime.net.ftp,lucee.runtime.net.http,lucee.runtime.net.imap,lucee.runtime.net.ipsettings,lucee.runtime.net.ldap,lucee.runtime.net.mail,lucee.runtime.net.ntp,lucee.runtime.net.pop,lucee.runtime.net.proxy,lucee.runtime.net.rpc,lucee.runtime.net.rpc.client,lucee.runtime.net.rpc.server,lucee.runtime.net.s3,lucee.runtime.net.smtp,lucee.runtime.op,lucee.runtime.op.date,lucee.runtime.op.validators,lucee.runtime.orm,lucee.runtime.osgi,lucee.runtime.poi,lucee.runtime.query,lucee.runtime.query.caster,lucee.runtime.reflection,lucee.runtime.reflection.pairs,lucee.runtime.reflection.storage,lucee.runtime.regex,lucee.runtime.registry,lucee.runtime.rest,lucee.runtime.rest.path,lucee.runtime.schedule,lucee.runtime.search,lucee.runtime.search.lucene2,lucee.runtime.search.lucene2.analyzer,lucee.runtime.search.lucene2.docs,lucee.runtime.search.lucene2.highlight,lucee.runtime.search.lucene2.html,lucee.runtime.search.lucene2.net,lucee.runtime.search.lucene2.query,lucee.runtime.security,lucee.runtime.services,lucee.runtime.spooler,lucee.runtime.spooler.mail,lucee.runtime.spooler.remote,lucee.runtime.spooler.test,lucee.runtime.sql,lucee.runtime.sql.exp,lucee.runtime.sql.exp.op,lucee.runtime.sql.exp.value,lucee.runtime.sql.old,lucee.runtime.tag,lucee.runtime.tag.util,lucee.runtime.text,lucee.runtime.text.csv,lucee.runtime.text.feed,lucee.runtime.text.pdf,lucee.runtime.text.xml,lucee.runtime.text.xml.storage,lucee.runtime.text.xml.struct,lucee.runtime.thread,lucee.runtime.timer,lucee.runtime.type,lucee.runtime.type.cfc,lucee.runtime.type.comparator,lucee.runtime.type.dt,lucee.runtime.type.it,lucee.runtime.type.query,lucee.runtime.type.ref,lucee.runtime.type.scope,lucee.runtime.type.scope.client,lucee.runtime.type.scope.session,lucee.runtime.type.scope.storage,lucee.runtime.type.scope.storage.clean,lucee.runtime.type.scope.storage.db,lucee.runtime.type.scope.util,lucee.runtime.type.sql,lucee.runtime.type.trace,lucee.runtime.type.util,
lucee.runtime.type.wrap,lucee.runtime.user,lucee.runtime.util,lucee.runtime.util.pool,lucee.runtime.video,lucee.runtime.vm,lucee.runtime.writer,lucee.servlet,lucee.servlet.pic,lucee.transformer,lucee.transformer.bytecode,lucee.transformer.bytecode.cast,lucee.transformer.bytecode.expression,lucee.transformer.bytecode.expression.type,lucee.transformer.bytecode.expression.var,lucee.transformer.bytecode.literal,lucee.transformer.bytecode.op,lucee.transformer.bytecode.reflection,lucee.transformer.bytecode.statement,lucee.transformer.bytecode.statement.tag,lucee.transformer.bytecode.statement.udf,lucee.transformer.bytecode.util,lucee.transformer.bytecode.visitor,lucee.transformer.cfml,lucee.transformer.cfml.attributes,lucee.transformer.cfml.attributes.impl,lucee.transformer.cfml.evaluator,lucee.transformer.cfml.evaluator.func,lucee.transformer.cfml.evaluator.func.impl,lucee.transformer.cfml.evaluator.impl,lucee.transformer.cfml.expression,lucee.transformer.cfml.script,lucee.transformer.cfml.tag,lucee.transformer.expression,lucee.transformer.expression.literal,lucee.transformer.expression.var,lucee.transformer.library,lucee.transformer.library.function,lucee.transformer.library.tag,lucee.transformer.util");
            */
            config.put(Constants.FRAMEWORK_BOOTDELEGATION, "lucee.*");

            Felix felix = factory.getFelix(factory.getResourceRoot(), config);

            bundleCollection = new BundleCollection(felix, felix, null);
            //bundleContext=bundleCollection.getBundleContext();
        } catch (Throwable t) {
            throw new RuntimeException(t);
        }
    }

    this.info = new InfoImpl(bundleCollection == null ? null : bundleCollection.core);
    Thread.currentThread().setContextClassLoader(this.getClass().getClassLoader()); // MUST better location for this

    CFMLEngineFactory.registerInstance((this));// patch, not really good but it works
    ConfigServerImpl cs = getConfigServerImpl();

    // start the controler
    SystemOut.printDate(SystemUtil.getPrintWriter(SystemUtil.OUT), "Start CFML Controller");
    Controler controler = new Controler(cs, initContextes, 5 * 1000, controlerState);
    controler.setDaemon(true);
    controler.setPriority(Thread.MIN_PRIORITY);
    controler.start();

    // install extension defined
    String extensionIds = System.getProperty("lucee-extensions");
    if (!StringUtil.isEmpty(extensionIds, true)) {
        Log log = cs.getLog("deploy", true);
        String[] ids = lucee.runtime.type.util.ListUtil.listToStringArray(extensionIds, ';');
        String id;
        for (int i = 0; i < ids.length; i++) {
            id = ids[i].trim();
            if (StringUtil.isEmpty(id, true))
                continue;
            DeployHandler.deployExtension(cs, id, log);
        }
    }

    //print.e(System.getProperties());

    touchMonitor(cs);
    this.uptime = System.currentTimeMillis();
    //this.config=config; 
}

From source file:lucee.runtime.engine.CFMLEngineImpl.java

public void touchMonitor(ConfigServerImpl cs) {
    if (monitor != null && monitor.isAlive())
        return;
    monitor = new Monitor(cs, controlerState);
    monitor.setDaemon(true);
    monitor.setPriority(Thread.MIN_PRIORITY);
    monitor.start();
}

From source file:org.opendedup.collections.ProgressiveFileBasedCSMap.java

/**
 * initializes the Object set of this hash table.
 *
 * @param initialCapacity
 *            an <code>int</code> value
 * @return an <code>int</code> value
 * @throws HashtableFullException
 * @throws FileNotFoundException
 */
public long setUp() throws Exception {
    File _fs = new File(fileName);
    if (!_fs.getParentFile().exists()) {
        _fs.getParentFile().mkdirs();
    }
    SDFSLogger.getLog().info("Folder = " + _fs.getPath());
    SDFSLogger.getLog().info("Loading freebits bitset");
    long rsz = 0;
    this.setMaxSize(maxSz);
    File[] files = _fs.getParentFile().listFiles(new DBFileFilter());
    if (files.length > 0) {
        CommandLineProgressBar bar = new CommandLineProgressBar("Loading Existing Hash Tables", files.length,
                System.out);
        this.loadEvent.maxCt = files.length + 128;

        for (int i = 0; i < files.length; i++) {
            this.loadEvent.curCt = this.loadEvent.curCt + 1;
            int sz = NextPrime.getNextPrimeI((int) (this.hashTblSz));
            // SDFSLogger.getLog().debug("will create byte array of size "
            // + sz + " propsize was " + propsize);
            ProgressiveFileByteArrayLongMap m = null;
            String pth = files[i].getPath();
            String pfx = pth.substring(0, pth.length() - 5);
            m = new ProgressiveFileByteArrayLongMap(pfx, sz);
            long mep = m.setUp();
            if (mep > endPos)
                endPos = mep;
            maps.add(m);
            rsz = rsz + m.size();
            bar.update(i);
            if (!m.isFull() && this.activeWriteMaps.size() < AMS) {
                m.activate();
                this.activeWriteMaps.add(m);
            } else {
                m.inActive();
                m.full = true;
            }
        }
        bar.finish();
    }

    this.loadEvent.shortMsg = "Loading BloomFilters";

    if (maps.size() != 0 && !LargeBloomFilter.exists(_fs.getParentFile())) {
        lbf = new LargeBloomFilter(_fs.getParentFile(), maxSz, .01, true, true, false);
        SDFSLogger.getLog().warn("Recreating BloomFilters...");
        this.loadEvent.shortMsg = "Recreating BloomFilters";

        executor = new ThreadPoolExecutor(Main.writeThreads, Main.writeThreads, 10, TimeUnit.SECONDS,
                worksQueue, new ProcessPriorityThreadFactory(Thread.MIN_PRIORITY), executionHandler);
        CommandLineProgressBar bar = new CommandLineProgressBar("ReCreating BloomFilters", maps.size(),
                System.out);
        Iterator<AbstractShard> iter = maps.iterator();
        int i = 0;
        ArrayList<LBFReconstructThread> al = new ArrayList<LBFReconstructThread>();
        while (iter.hasNext()) {
            AbstractShard m = iter.next();
            LBFReconstructThread th = new LBFReconstructThread(lbf, m);
            executor.execute(th);
            al.add(th);
            i++;
            bar.update(i);
        }
        executor.shutdown();
        bar.finish();
        try {
            System.out.print("Waiting for all BloomFilters creation threads to finish");
            while (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
                SDFSLogger.getLog().debug("Awaiting fdisk completion of threads.");
                System.out.print(".");

            }
            for (LBFReconstructThread th : al) {
                if (th.ex != null)
                    throw th.ex;
            }
            System.out.println(" done");
        } catch (Exception e1) {
            throw new IOException(e1);
        }
    } else {
        lbf = new LargeBloomFilter(_fs.getParentFile(), maxSz, .01, true, true, false);
    }
    while (this.activeWriteMaps.size() < AMS) {
        boolean written = false;
        while (!written) {
            String guid = RandomGUID.getGuid();

            File f = new File(fileName + "-" + guid + ".keys");
            if (!f.exists()) {
                ProgressiveFileByteArrayLongMap activeWMap = new ProgressiveFileByteArrayLongMap(
                        fileName + "-" + guid, this.hashTblSz);
                activeWMap.activate();
                activeWMap.setUp();

                this.maps.add(activeWMap);
                written = true;

                this.activeWriteMaps.add(activeWMap);
            }
        }
    }
    this.loadEvent.endEvent("Loaded entries " + rsz);
    System.out.println("Loaded entries " + rsz);
    SDFSLogger.getLog().info("Loaded entries " + rsz);
    SDFSLogger.getLog().info("Loading BloomFilters " + rsz);
    this.kSz.set(rsz);
    this.closed = false;
    return size;
}
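
ProcessPriorityThreadFactory above is an opendedup/SDFS class whose implementation is not shown here. A minimal stand-in with the same apparent effect (an assumption, not the project's actual code) creates every pool thread at the requested priority:

import java.util.concurrent.ThreadFactory;

class FixedPriorityThreadFactory implements ThreadFactory {
    private final int priority;

    FixedPriorityThreadFactory(int priority) {
        this.priority = priority; // e.g. Thread.MIN_PRIORITY for background rebuild work
    }

    @Override
    public Thread newThread(Runnable r) {
        Thread t = new Thread(r);
        t.setPriority(priority); // every thread handed to the executor starts at this priority
        return t;
    }
}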