Example usage for java.lang.ThreadGroup ThreadGroup(String name)

Introduction

This page lists example usages of the java.lang.ThreadGroup constructor ThreadGroup(String name), collected from open-source projects.

Prototype

public ThreadGroup(String name) 

Document

Constructs a new thread group.
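
As a quick orientation before the project examples below, here is a minimal sketch of the constructor in isolation; the group name, thread name, and class name are hypothetical.

public class ThreadGroupDemo {
    public static void main(String[] args) throws InterruptedException {
        // Construct a new thread group with a descriptive name (hypothetical name).
        ThreadGroup workers = new ThreadGroup("worker-group");

        // Create a thread that belongs to the new group.
        Thread worker = new Thread(workers, new Runnable() {
            public void run() {
                System.out.println("running in " + Thread.currentThread().getThreadGroup().getName());
            }
        }, "worker-1");
        worker.start();
        worker.join();

        // activeCount() is an estimate of the live threads in the group (0 here, since the worker has finished).
        System.out.println("group = " + workers.getName() + ", active threads = " + workers.activeCount());
    }
}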

Usage

From source file:com.ibm.jaggr.core.impl.deps.DepTree.java

/**
 * Object constructor. Attempts to de-serialize the cached dependency lists
 * from disk and then validates the dependency lists based on last-modified
 * dates, looking for any new or removed files. If the cached dependency
 * list data cannot be de-serialized, new lists are constructed. Once the
 * dependency lists have been validated, the list data is serialized back
 * out to disk.
 *
 * @param paths
 *            Collection of URIs which specify the target resources
 *            to be scanned for javascript files.
 * @param aggregator
 *            The servlet instance for this object
 * @param stamp
 *            timestamp associated with external override/customization
 *            resources that are checked on every server restart
 * @param clean
 *            If true, then the dependency lists are generated from scratch
 *            rather than by de-serializing and then validating the cached
 *            dependency lists.
 * @param validateDeps
 *            If true, then validate existing cached dependencies using
 *            file last-modified times.
 * @throws IOException
 */
public DepTree(Collection<URI> paths, IAggregator aggregator, long stamp, boolean clean, boolean validateDeps)
        throws IOException {
    final String sourceMethod = "<ctor>"; //$NON-NLS-1$
    boolean isTraceLogging = log.isLoggable(Level.FINER);
    if (isTraceLogging) {
        log.entering(DepTree.class.getName(), sourceMethod,
                new Object[] { paths, aggregator, stamp, clean, validateDeps });
    }
    this.stamp = stamp;
    IConfig config = aggregator.getConfig();
    rawConfig = config.toString();
    cacheBust = AggregatorUtil.getCacheBust(aggregator);

    File cacheDir = new File(aggregator.getWorkingDirectory(), DEPCACHE_DIRNAME);
    File cacheFile = new File(cacheDir, CACHE_FILE);

    /*
     * The de-serialized dependency map. If we have a cached dependency map,
     * then it will be validated against the last-modified dates of the
     * current files and only the files that have changed will need to be
     * re-parsed to update the dependency lists.
     */
    DepTree cached = null;

    if (!clean) {
        // If we're not starting clean, try to de-serialize the map from
        // cache
        try {
            ObjectInputStream is = new ObjectInputStream(new FileInputStream(cacheFile));
            try {
                if (isTraceLogging) {
                    log.finer("Attempting to read cached dependencies from " + cacheFile.toString()); //$NON-NLS-1$
                }
                cached = (DepTree) is.readObject();
            } finally {
                try {
                    is.close();
                } catch (Exception ignore) {
                }
            }
        } catch (FileNotFoundException e) {
            /*
             * Not an error. Just means that the cache file hasn't been
             * written yet or else it's been deleted.
             */
            if (log.isLoggable(Level.INFO))
                log.log(Level.INFO, Messages.DepTree_1);
        } catch (Exception e) {
            if (log.isLoggable(Level.SEVERE))
                log.log(Level.SEVERE, e.getMessage(), e);
        }
    }

    // If the cacheBust config param has changed, then do a clean build
    // of the dependencies.
    if (cached != null) {
        if (stamp == 0) {
            // no init stamp provided.  Preserve the cached one.
            stamp = cached.stamp;
        }
        if (stamp > cached.stamp) {
            // init stamp has been updated.  Validate dependencies.
            validateDeps = true;
        }
        if (!StringUtils.equals(cacheBust, cached.cacheBust)) {
            if (isTraceLogging) {
                log.finer("Current cacheBust = " + cacheBust + ", cached cacheBust = " + cached.cacheBust); //$NON-NLS-1$//$NON-NLS-2$
            }
            if (log.isLoggable(Level.INFO)) {
                log.info(Messages.DepTree_2);
            }
            cached = null;
        }
        if (cached != null && !StringUtils.equals(rawConfig, cached.rawConfig)) {
            if (isTraceLogging) {
                log.finer("Current config = " + rawConfig); //$NON-NLS-1$
                log.finer("Cached config = " + cached.rawConfig); //$NON-NLS-1$
            }
            validateDeps = true;
        }
    }

    /*
     * If we de-serialized a previously saved dependency map, then go with
     * that.
     */
    if (cached != null && !validateDeps && !clean) {
        depMap = cached.depMap;
        fromCache = true;
        return;
    } else if (isTraceLogging) {
        log.finer("Building/validating deps: cached = " + cached + ", validateDeps = " + validateDeps //$NON-NLS-1$//$NON-NLS-2$
                + ", clean = " + clean); //$NON-NLS-1$
    }

    // Initialize the dependency map
    depMap = new ConcurrentHashMap<URI, DepTreeNode>();

    // This can take a while, so print something to the console
    String msg = MessageFormat.format(Messages.DepTree_3, new Object[] { aggregator.getName() });

    ConsoleService cs = new ConsoleService();
    cs.println(msg);

    if (log.isLoggable(Level.INFO)) {
        log.info(msg);
    }
    // Make sure that all the paths are unique and orthogonal
    paths = DepUtils.removeRedundantPaths(paths);

    /*
     * Create the thread pools, one for the tree builders and one for the
     * parsers. Since a tree builder thread will wait for all the outstanding
     * parser threads started by that builder to complete, we need to use two
     * independent thread pools to guard against the possibility of deadlock
     * caused by all the threads in the pool being consumed by tree builders
     * and leaving none available to service the parsers.
     */
    final ThreadGroup treeBuilderTG = new ThreadGroup(TREEBUILDER_TGNAME),
            parserTG = new ThreadGroup(JSPARSER_TGNAME);
    ExecutorService treeBuilderExc = Executors.newFixedThreadPool(10, new ThreadFactory() {
        public Thread newThread(Runnable r) {
            return new Thread(treeBuilderTG, r, MessageFormat.format(THREADNAME,
                    new Object[] { treeBuilderTG.getName(), treeBuilderTG.activeCount() }));
        }
    }), parserExc = Executors.newFixedThreadPool(20, new ThreadFactory() {
        public Thread newThread(Runnable r) {
            return new Thread(parserTG, r, MessageFormat.format(THREADNAME,
                    new Object[] { parserTG.getName(), parserTG.activeCount() }));
        }
    });

    // Counter to keep track of number of tree builder threads started
    AtomicInteger treeBuilderCount = new AtomicInteger(0);

    // The completion services for the thread pools
    final CompletionService<URI> parserCs = new ExecutorCompletionService<URI>(parserExc);
    CompletionService<DepTreeBuilder.Result> treeBuilderCs = new ExecutorCompletionService<DepTreeBuilder.Result>(
            treeBuilderExc);

    Set<String> nonJSExtensions = Collections.unmodifiableSet(getNonJSExtensions(aggregator));
    // Start the tree builder threads to process the paths
    for (final URI path : paths) {
        /*
         * Create or get from cache the root node for this path and
         * add it to the new map.
         */
        DepTreeNode root = new DepTreeNode("", path); //$NON-NLS-1$
        DepTreeNode cachedNode = null;
        if (cached != null) {
            cachedNode = cached.depMap.get(path);
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_4, new Object[] { path }));
            }
        } else {
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_5, new Object[] { path }));
            }
        }
        depMap.put(path, root);

        treeBuilderCount.incrementAndGet();
        treeBuilderCs.submit(new DepTreeBuilder(aggregator, parserCs, path, root, cachedNode, nonJSExtensions));
    }

    // List of parser exceptions
    LinkedList<Exception> parserExceptions = new LinkedList<Exception>();

    /*
     * Pull the completed tree builder tasks from the completion queue until
     * all the paths have been processed
     */
    while (treeBuilderCount.decrementAndGet() >= 0) {
        try {
            DepTreeBuilder.Result result = treeBuilderCs.take().get();
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_6,
                        new Object[] { result.parseCount, result.dirName }));
            }
        } catch (Exception e) {
            if (log.isLoggable(Level.SEVERE))
                log.log(Level.SEVERE, e.getMessage(), e);
            parserExceptions.add(e);
        }
    }

    // shutdown the thread pools now that we're done with them
    parserExc.shutdown();
    treeBuilderExc.shutdown();

    // If parser exceptions occurred, then rethrow the first one
    if (parserExceptions.size() > 0) {
        throw new RuntimeException(parserExceptions.get(0));
    }

    // Prune dead nodes (folder nodes with no children)
    for (Map.Entry<URI, DepTreeNode> entry : depMap.entrySet()) {
        entry.getValue().prune();
    }

    /*
     * Make sure the cache directory exists before we try to serialize the
     * dependency map.
     */
    if (!cacheDir.exists())
        if (!cacheDir.mkdirs()) {
            throw new IOException(
                    MessageFormat.format(Messages.DepTree_0, new Object[] { cacheDir.getAbsolutePath() }));
        }

    // Serialize the map to the cache directory
    ObjectOutputStream os;
    os = new ObjectOutputStream(new FileOutputStream(cacheFile));
    try {
        if (isTraceLogging) {
            log.finer("Writing cached dependencies to " + cacheFile.toString()); //$NON-NLS-1$
        }
        os.writeObject(this);
    } finally {
        try {
            os.close();
        } catch (Exception ignore) {
        }
    }
    msg = MessageFormat.format(Messages.DepTree_7, new Object[] { aggregator.getName() });

    // Output that we're done.
    cs.println(msg);
    if (log.isLoggable(Level.INFO)) {
        log.info(msg);
    }
    if (isTraceLogging) {
        log.exiting(DepTree.class.getName(), sourceMethod);
    }
}
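
The constructor above creates two independent thread pools whose ThreadFactory places each new thread in a named ThreadGroup and derives the thread name from the group's name and current active count. Below is a minimal standalone sketch of that pattern; the group name, pool size, and name format are hypothetical, not the values used by DepTree.

import java.text.MessageFormat;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;

public class GroupedThreadPool {
    public static ExecutorService newGroupedPool(final String groupName, int size) {
        // All worker threads of this pool live in a single named ThreadGroup.
        final ThreadGroup group = new ThreadGroup(groupName);
        return Executors.newFixedThreadPool(size, new ThreadFactory() {
            public Thread newThread(Runnable r) {
                // Name each thread after the group plus its current active count.
                return new Thread(group, r,
                        MessageFormat.format("{0}-{1}", group.getName(), group.activeCount()));
            }
        });
    }

    public static void main(String[] args) {
        // Hypothetical group name and pool size, for illustration only.
        ExecutorService pool = newGroupedPool("treeBuilder", 4);
        pool.submit(new Runnable() {
            public void run() {
                System.out.println(Thread.currentThread().getName());
            }
        });
        pool.shutdown();
    }
}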

From source file:axiom.framework.core.Application.java

/**
 * Build an application with the given name, server instance, sources and
 * db directory.
 */
public Application(String name, Server server, Repository[] repositories, File customAppDir)
        throws RemoteException, IllegalArgumentException, Exception {
    if ((name == null) || (name.trim().length() == 0)) {
        throw new IllegalArgumentException("Invalid application name: " + name);
    }

    this.name = name;

    this.server = server;

    appDir = customAppDir;

    // system-wide properties, default to null
    ResourceProperties sysProps;

    // system-wide db properties, default to null
    ResourceProperties sysDbProps;

    sysProps = sysDbProps = null;
    axiomHome = null;

    if (server != null) {
        axiomHome = server.getAxiomHome();

        // get system-wide properties
        sysProps = server.getProperties();
        sysDbProps = server.getDbProperties();
    }

    // give the Thread group a name so the threads can be recognized
    threadgroup = new ThreadGroup("TX-" + name);

    this.repositories = new ArrayList<Repository>();
    try {
        // assume that the appdir is, in fact, a directory...
        Repository newRepository = new FileRepository(appDir);
        this.repositories.add(newRepository);
    } catch (Exception ex) {
        System.out.println("Adding application directory " + appDir + " failed. "
                + "Will not use that repository. Check your initArgs!");
    }

    // create app-level properties
    props = new ResourceProperties(this, "app.properties", sysProps);

    if (repositories == null) {
        repositories = this.initRepositories();
    }
    if (repositories.length == 0) {
        throw new java.lang.IllegalArgumentException("No sources defined for application: " + name);
    }

    this.repositories.addAll(Arrays.asList(repositories));
    resourceComparator = new ResourceComparator(this);

    if (appDir == null) {
        if (repositories[0] instanceof FileRepository) {
            appDir = new File(repositories[0].getName());
            SampleApp sa = new SampleApp();
            sa.setupSampleApp(appDir);
        }
    }

    String dbdir = props.getProperty("dbdir");
    if (dbdir != null) {
        dbDir = new File(dbdir);
        if (!dbDir.isAbsolute()) {
            dbDir = new File(server.getAxiomHome(), dbdir);
        }
    } else {
        dbDir = new File(server.getDbHome(), name);
    }
    if (!dbDir.exists()) {
        dbDir.mkdirs();
    }

    updateDbLocation(name);

    this.cookieDomain = props.getProperty("cookieDomain", "");
    this.staticMountpoint = props.getProperty("staticMountpoint",
            props.getProperty("mountpoint", "/" + this.name) + "/static");

    this.rewriteRules = setupRewriteRules();

    // get log names
    accessLogName = props.getProperty("accessLog",
            new StringBuffer("axiom.").append(name).append(".access").toString());
    eventLogName = props.getProperty("eventLog", new StringBuffer("axiom.").append(name).toString());
    errorLogName = props.getProperty("errorLog");
    requestLogName = props.getProperty("requestLog",
            new StringBuffer("axiom.").append(name).append(".request.log").toString());
    if (!requestLogName.endsWith(".log")) {
        requestLogName += ".log";
    }

    // insert xml declarations into rendered tal?
    omitXmlDecl = props.containsKey("omitxmldeclaration")
            ? (new Boolean((String) props.get("omitXmlDeclaration"))).booleanValue()
            : true;

    ResourceProperties dhprops = props.getSubProperties("draftHost.");
    int count = 1;
    final int dhpropsSize = dhprops.size();
    for (; count <= dhpropsSize; count++) {
        String dhvalue = dhprops.getProperty("" + count);
        if (dhvalue != null) {
            String[] dhvalues = dhvalue.split(",");
            for (int j = 0; j < dhvalues.length; j++) {
                this.draftHosts.put(dhvalues[j].trim(), new Integer(count));
            }
        }
    }
    this.highestPreviewLayer = dhpropsSize == 0 ? 0 : count;

    // create app-level db sources
    dbProps = new ResourceProperties(this, "db.properties", sysDbProps, false);

    setupDefaultDb(dbProps);

    searchProps = new ResourceProperties(this, "search.properties", null, false);

    // reads in and creates a transaction manager properties file for this app 
    try {
        this.tsource = new TransSource(this, dbProps.getSubProperties("_default."));
    } catch (Exception ex) {
        throw new IllegalArgumentException(
                "Could not create the transaction database source: " + ex.getMessage());
    }

    // the properties that map java class names to prototype names
    classMapping = new ResourceProperties(this, "class.properties");
    classMapping.setIgnoreCase(false);

    // get class name of root object if defined. Otherwise native Axiom objectmodel will be used.
    rootObjectClass = classMapping.getProperty("root");

    onstartFunctions = new LinkedHashSet<String>();
    updateProperties();

    dbSources = new Hashtable<String, DbSource>();

    cachenode = new TransientNode("app");

    ArrayList<String> names = this.getDbNames();
    for (int i = 0; i < names.size(); i++) {
        String dbname = names.get(i).toString();
        DbSource dbsource = this.getDbSource(dbname);
        String initClass = dbsource.getProperty("initClass", null);
        if (initClass != null) {
            Class[] parameters = { Application.class };
            IDBSourceInitializer dbsi = (IDBSourceInitializer) Class.forName(initClass)
                    .getConstructor(parameters).newInstance(new Object[] { this });
            dbsi.init();
        }
    }
}

From source file:org.cloudata.core.tabletserver.TabletServer.java

public void init(CloudataConf conf) throws IOException {
    this.serverStartTime = new Date();
    this.testMode = conf.getBoolean("testmode", false);
    this.conf = conf;

    this.maxMajorCompactionThread = this.conf.getInt("tabletServer.maxMajorCompactionThread", 5);
    this.maxSplitThread = this.conf.getInt("tabletServer.maxSplitThread", 5);

    this.compactionExecutor = (ThreadPoolExecutor) Executors.newFixedThreadPool(maxMajorCompactionThread);
    this.splitExecutor = (ThreadPoolExecutor) Executors.newFixedThreadPool(maxSplitThread);
    this.actionExecutor = (ThreadPoolExecutor) Executors
            .newFixedThreadPool(this.conf.getInt("tabletServer.maxMinorCompactionThread", 10));

    this.maxTabletCount = conf.getInt("tabletServer.max.tablet.count", 2000);

    this.maxResultRecord = conf.getInt("client.max.resultRecord", 5000);

    this.maxMemoryCacheCapacity = conf.getLong("memory.maxColumnCacheCapacity", 200) * 1024 * 1024;

    this.fs = CloudataFileSystem.get(conf);

    if (fs == null || !fs.isReady()) {
        LOG.fatal("FileSystem is not ready. TabletServer shutdown");
        shutdown();
    }

    InetSocketAddress serverAddress = NetworkUtil.getAddress(
            InetAddress.getLocalHost().getHostName() + ":" + conf.getInt("tabletServer.port", 7001));

    this.hostName = serverAddress.getHostName() + ":" + serverAddress.getPort();

    this.threadGroup = new ThreadGroup("TabletServer_" + hostName);

    this.leaseHolder = new LeaseHolder(threadGroup);

    this.tabletServerLockPath = Constants.SERVER + "/" + hostName;

    this.zk = LockUtil.getZooKeeper(conf, hostName, this);

    //<Split Lock >
    try {
        LockUtil.delete(zk, LockUtil.getZKPath(conf, Constants.TABLETSERVER_SPLIT + "/" + hostName), true);
    } catch (Exception e) {
        throw new IOException(e);
    }
    //</Split Lock >

    schemaMap = new TableSchemaMap(conf, zk);

    tabletServerMetrics = new TabletServerMetrics(conf, this);

    this.server = CRPC.getServer(zk, this, serverAddress.getHostName(), serverAddress.getPort(),
            conf.getInt("tabletServer.handler.count", 10), false, conf, tabletServerMetrics);

    ServerSocket ss = null;
    int port = conf.getInt("tabletServer.scanner.port", 50100);
    String bindAddress = "0.0.0.0";

    try {
        //      LOG.info("Opened Scanner Handler at " + hostName  + ", port=" + port);
        ss = new ServerSocket(port, 0, InetAddress.getByName(bindAddress));
        ss.setReuseAddress(true);
    } catch (IOException ie) {
        LOG.error("Could not open scanner server at " + port + ", stop server and Stop tablet server", ie);
        exit();
    }
    this.dataXceiveServer = new Daemon(new DataXceiveServer(ss));

    try {
        LockUtil.createNodes(zk, LockUtil.getZKPath(conf, tabletServerLockPath), hostName.getBytes(),
                CreateMode.EPHEMERAL);
        LOG.info("TableServer lock created:" + LockUtil.getZKPath(conf, tabletServerLockPath));
    } catch (Exception e) {
        LOG.fatal("TabletServer stopped. Can't server lock:" + tabletServerLockPath, e);
        exit();
    }

    if (tabletDistributionMode.get()) {
        LOG.info("Turn on tablet distribution mode");
    }

    heartbeatThread = new HeartbeatThread();
    heartbeatThread.setDaemon(true);
    heartbeatThread.start();
}

From source file:net.sf.jhylafax.JHylaFAX.java

/**
 * @param args
 */
public static void main(final String[] args) {
    final ArgsHandler handler = new ArgsHandler();
    handler.evaluate(args);

    evaluateArgumentsPreVisible(handler);

    ThreadGroup tg = new ThreadGroup("JHylaFAXThreadGroup") {
        public void uncaughtException(Thread t, Throwable e) {
            e.printStackTrace();
        }
    };

    //      System.setProperty("sun.awt.exception.handler", 
    //                     "xnap.util.XNapAWTExceptionHandler");
    Thread mainRunner = new Thread(tg, "JHylaFAXMain") {
        public void run() {
            setContextClassLoader(JHylaFAX.class.getClassLoader());

            JHylaFAX app = new JHylaFAX();
            app.setVisible(true);

            app.evaluateArgumentsPostVisible(handler);
        }
    };
    mainRunner.start();
}

From source file:org.apache.axis2.transport.jms.JMSListener.java

/**
 * Start this JMS Listener (Transport Listener)
 *
 * @throws AxisFault
 */
public void start() throws AxisFault {
    // create thread pool of workers
    workerPool = new ThreadPoolExecutor(1, WORKERS_MAX_THREADS, WORKER_KEEP_ALIVE, TIME_UNIT,
            new LinkedBlockingQueue(), new org.apache.axis2.util.threadpool.DefaultThreadFactory(
                    new ThreadGroup("JMS Worker thread group"), "JMSWorker"));

    Iterator iter = connectionFactories.values().iterator();
    while (iter.hasNext()) {
        JMSConnectionFactory conFac = (JMSConnectionFactory) iter.next();
        JMSMessageReceiver msgRcvr = new JMSMessageReceiver(conFac, workerPool, configCtx);

        try {
            conFac.listen(msgRcvr);
        } catch (JMSException e) {
            handleException("Error starting connection factory : " + conFac.getName(), e);
        }
    }
}

From source file:CachedThread.java

/**
 * Create a thread cache, after creating a new thread group.
 * @param name The name of the thread group to create.
 */

public ThreadCache(String name) {
    this(new ThreadGroup(name));
}

From source file:org.quartz.core.QuartzScheduler.java

/**
 * <p>
 * Returns the <code>ThreadGroup</code> in which the <code>QuartzScheduler</code>'s threads run.
 * </p>
 */
public ThreadGroup getSchedulerThreadGroup() {
    if (threadGroup == null) {
        threadGroup = new ThreadGroup("QuartzScheduler:" + getSchedulerName());
        if (resources.getMakeSchedulerThreadDaemon()) {
            threadGroup.setDaemon(true);
        }
    }

    return threadGroup;
}

From source file:org.alfresco.repo.model.filefolder.FileFolderPerformanceTester.java

/**
 * Creates <code>folderCount</code> folders below the given parent and populates each folder with
 * <code>fileCount</code> files.  The folders will be created as siblings in one go, but the files
 * are added one to each folder until each folder has the prescribed number of files within it.
 * This can therefore be used to test the performance when the L2 cache sizes are exceeded.
 * <p>
 * Each creation (file or folder) uses the <b>PROPAGATION REQUIRED</b> transaction declaration.
 * 
 * @param parentNodeRef         the level zero parent
 * @param threadCount           the number of worker threads to run concurrently
 * @param randomOrder           true if each thread must put the children into the folders in a random order
 * @param folderCount           the number of sibling folders to create below the parent
 * @param batchCount            the number of batches of files each thread writes to every folder
 * @param filesPerBatch         the number of files added to each folder per batch
 * @param realFile              <tt>true</tt> if a real binary must be streamed into the node
 * @param dumpPoints            fractions of the batch count at which intermediate timings are dumped
 */
private void buildStructure(final NodeRef parentNodeRef, final int threadCount, final boolean randomOrder,
        final int folderCount, final int batchCount, final int filesPerBatch, final boolean realFile,
        final double[] dumpPoints) {
    RetryingTransactionCallback<NodeRef[]> createFoldersCallback = new RetryingTransactionCallback<NodeRef[]>() {
        public NodeRef[] execute() throws Exception {
            NodeRef[] folders = new NodeRef[folderCount];
            for (int i = 0; i < folderCount; i++) {
                FileInfo folderInfo = fileFolderService.create(parentNodeRef, GUID.generate(),
                        ContentModel.TYPE_FOLDER);
                // keep the reference
                folders[i] = folderInfo.getNodeRef();
            }
            return folders;
        }
    };
    final NodeRef[] folders = retryingTransactionHelper.doInTransaction(createFoldersCallback);
    // the worker that will load the files into the folders
    Runnable runnable = new Runnable() {
        private long start;

        public void run() {
            // authenticate
            authenticate(USERNAME);

            // progress around the folders until they have been populated
            start = System.currentTimeMillis();
            int nextDumpNumber = 0;
            for (int i = 0; i < batchCount; i++) {
                // must we dump results
                double completedCount = (double) i;
                double nextDumpCount = (dumpPoints == null || dumpPoints.length == 0
                        || nextDumpNumber >= dumpPoints.length) ? -1.0
                                : (double) batchCount * dumpPoints[nextDumpNumber];
                if ((nextDumpCount - 0.5) < completedCount && completedCount < (nextDumpCount + 0.5)) {
                    dumpResults(i);
                    nextDumpNumber++;
                }
                // shuffle folders if required
                List<NodeRef> foldersList = Arrays.asList(folders);
                if (randomOrder) {
                    // shuffle folder list
                    Collections.shuffle(foldersList);
                }
                for (int j = 0; j < folders.length; j++) {
                    final NodeRef folderRef = folders[j];
                    RetryingTransactionCallback<Void> createFileCallback = new RetryingTransactionCallback<Void>() {
                        public Void execute() throws Exception {
                            for (int i = 0; i < filesPerBatch; i++) {
                                FileInfo fileInfo = fileFolderService.create(folderRef, GUID.generate(),
                                        ContentModel.TYPE_CONTENT);
                                NodeRef nodeRef = fileInfo.getNodeRef();
                                if (realFile) {
                                    // write the content
                                    ContentWriter writer = fileFolderService.getWriter(nodeRef);
                                    writer.setEncoding("UTF-8");
                                    writer.setMimetype(MimetypeMap.MIMETYPE_TEXT_PLAIN);
                                    writer.putContent(dataFile);
                                } else {
                                    // Spoof some content
                                    String contentUrl = SpoofedTextContentReader.createContentUrl(
                                            Locale.ENGLISH, (long) Math.random() * 1000L,
                                            (long) Math.random() * 1024L);
                                    SpoofedTextContentReader reader = new SpoofedTextContentReader(contentUrl);
                                    ContentData contentData = reader.getContentData();
                                    nodeService.setProperty(nodeRef, ContentModel.PROP_CONTENT, contentData);
                                }
                            }
                            // done
                            return null;
                        }
                    };
                    retryingTransactionHelper.doInTransaction(createFileCallback);
                }
            }
            dumpResults(batchCount);
        }

        private void dumpResults(int currentBatchCount) {
            long end = System.currentTimeMillis();
            long time = (end - start);
            double average = (double) time / (double) (folderCount * currentBatchCount * filesPerBatch);
            double percentComplete = (double) currentBatchCount / (double) batchCount * 100.0;

            if (percentComplete > 0) {
                System.out.println("\n" + "[" + Thread.currentThread().getName() + "] \n" + "   Created "
                        + (currentBatchCount * filesPerBatch) + " files in each of " + folderCount
                        + " folders (" + (randomOrder ? "shuffled" : "in order") + ")" + " with "
                        + (realFile ? "real files" : "spoofed content") + " :\n" + "   Progress: "
                        + String.format("%9.2f", percentComplete) + " percent complete \n" + "   Average: "
                        + String.format("%10.2f", average) + " ms per file \n" + "   Average: "
                        + String.format("%10.2f", 1000.0 / average) + " files per second");
            }
        }
    };

    // kick off the required number of threads
    System.out.println("\n" + "Starting " + threadCount + " threads loading " + (batchCount * filesPerBatch)
            + " files in each of " + folderCount + " folders (" + (randomOrder ? "shuffled" : "in order")
            + (filesPerBatch > 1 ? (" and " + filesPerBatch + " files per txn") : "") + ").");
    ThreadGroup threadGroup = new ThreadGroup(getName());
    Thread[] threads = new Thread[threadCount];
    for (int i = 0; i < threadCount; i++) {
        threads[i] = new Thread(threadGroup, runnable, String.format("FileLoader-%02d", i));
        threads[i].start();
    }
    // join each thread so that we wait for them all to finish
    for (int i = 0; i < threads.length; i++) {
        try {
            threads[i].join();
        } catch (InterruptedException e) {
            // not too serious - the worker threads are non-daemon
        }
    }
}

From source file:org.apache.hadoop.dfs.DataNode.java

/**
 * This method starts the data node with the specified conf.
 *
 * @param conf - the configuration
 *  if conf's CONFIG_PROPERTY_SIMULATED property is set
 *  then a simulated storage based data node is created.
 * 
 * @param dataDirs - only for a non-simulated storage data node
 * @throws IOException
 */
void startDataNode(Configuration conf, AbstractList<File> dataDirs) throws IOException {
    // use configured nameserver & interface to get local hostname
    if (conf.get("slave.host.name") != null) {
        machineName = conf.get("slave.host.name");
    }
    if (machineName == null) {
        machineName = DNS.getDefaultHost(conf.get("dfs.datanode.dns.interface", "default"),
                conf.get("dfs.datanode.dns.nameserver", "default"));
    }
    InetSocketAddress nameNodeAddr = NameNode.getAddress(conf);

    this.estimateBlockSize = conf.getLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
    this.socketTimeout = conf.getInt("dfs.socket.timeout", FSConstants.READ_TIMEOUT);
    this.socketWriteTimeout = conf.getInt("dfs.datanode.socket.write.timeout", FSConstants.WRITE_TIMEOUT);
    /* Based on results on different platforms, we might need set the default 
     * to false on some of them. */
    this.transferToAllowed = conf.getBoolean("dfs.datanode.transferTo.allowed", true);
    this.writePacketSize = conf.getInt("dfs.write.packet.size", 64 * 1024);
    String address = NetUtils.getServerAddress(conf, "dfs.datanode.bindAddress", "dfs.datanode.port",
            "dfs.datanode.address");
    InetSocketAddress socAddr = NetUtils.createSocketAddr(address);
    int tmpPort = socAddr.getPort();
    storage = new DataStorage();
    // construct registration
    this.dnRegistration = new DatanodeRegistration(machineName + ":" + tmpPort);

    // connect to name node
    this.namenode = (DatanodeProtocol) RPC.waitForProxy(DatanodeProtocol.class, DatanodeProtocol.versionID,
            nameNodeAddr, conf);
    // get version and id info from the name-node
    NamespaceInfo nsInfo = handshake();
    StartupOption startOpt = getStartupOption(conf);
    assert startOpt != null : "Startup option must be set.";

    boolean simulatedFSDataset = conf.getBoolean("dfs.datanode.simulateddatastorage", false);
    if (simulatedFSDataset) {
        setNewStorageID(dnRegistration);
        dnRegistration.storageInfo.layoutVersion = FSConstants.LAYOUT_VERSION;
        dnRegistration.storageInfo.namespaceID = nsInfo.namespaceID;
        // it would have been better to pass storage as a parameter to
        // constructor below - need to augment ReflectionUtils used below.
        conf.set("StorageId", dnRegistration.getStorageID());
        try {
            //Equivalent of following (can't do because Simulated is in test dir)
            //  this.data = new SimulatedFSDataset(conf);
            this.data = (FSDatasetInterface) ReflectionUtils
                    .newInstance(Class.forName("org.apache.hadoop.dfs.SimulatedFSDataset"), conf);
        } catch (ClassNotFoundException e) {
            throw new IOException(StringUtils.stringifyException(e));
        }
    } else { // real storage
        // read storage info, lock data dirs and transition fs state if necessary
        storage.recoverTransitionRead(nsInfo, dataDirs, startOpt);
        // adjust
        this.dnRegistration.setStorageInfo(storage);
        // initialize data node internal structure
        this.data = new FSDataset(storage, conf);
    }

    // find free port
    ServerSocket ss = (socketWriteTimeout > 0) ? ServerSocketChannel.open().socket() : new ServerSocket();
    Server.bind(ss, socAddr, 0);
    ss.setReceiveBufferSize(DEFAULT_DATA_SOCKET_SIZE);
    ss.setSoTimeout(conf.getInt("dfs.dataXceiver.timeoutInMS", 30000)); //30s
    // adjust machine name with the actual port
    tmpPort = ss.getLocalPort();
    selfAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(), tmpPort);
    this.dnRegistration.setName(machineName + ":" + tmpPort);
    LOG.info("Opened info server at " + tmpPort);

    this.maxXceiverCount = conf.getInt("dfs.datanode.max.xcievers", MAX_XCEIVER_COUNT);
    this.threadGroup = new ThreadGroup("dataXceiveServer");
    this.dataXceiveServer = new Daemon(threadGroup, new DataXceiveServer(ss));
    this.threadGroup.setDaemon(true); // auto destroy when empty

    this.blockReportInterval = conf.getLong("dfs.blockreport.intervalMsec", BLOCKREPORT_INTERVAL);
    this.initialBlockReportDelay = conf.getLong("dfs.blockreport.initialDelay", BLOCKREPORT_INITIAL_DELAY)
            * 1000L;
    if (this.initialBlockReportDelay >= blockReportInterval) {
        this.initialBlockReportDelay = 0;
        LOG.info("dfs.blockreport.initialDelay is greater than " + "dfs.blockreport.intervalMsec."
                + " Setting initial delay to 0 msec:");
    }
    this.heartBeatInterval = conf.getLong("dfs.heartbeat.interval", HEARTBEAT_INTERVAL) * 1000L;
    DataNode.nameNodeAddr = nameNodeAddr;

    this.balancingThrottler = new BlockBalanceThrottler(
            conf.getLong("dfs.balance.bandwidthPerSec", 1024L * 1024));

    //initialize periodic block scanner
    String reason = null;
    if (conf.getInt("dfs.datanode.scan.period.hours", 0) < 0) {
        reason = "verification is turned off by configuration";
    } else if (!(data instanceof FSDataset)) {
        reason = "verification is supported only with FSDataset";
    }
    if (reason == null) {
        blockScanner = new DataBlockScanner(this, (FSDataset) data, conf);
    } else {
        LOG.info("Periodic Block Verification is disabled because " + reason + ".");
    }

    //create a servlet to serve full-file content
    String infoAddr = NetUtils.getServerAddress(conf, "dfs.datanode.info.bindAddress", "dfs.datanode.info.port",
            "dfs.datanode.http.address");
    InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
    String infoHost = infoSocAddr.getHostName();
    int tmpInfoPort = infoSocAddr.getPort();
    this.infoServer = new StatusHttpServer("datanode", infoHost, tmpInfoPort, tmpInfoPort == 0);
    InetSocketAddress secInfoSocAddr = NetUtils
            .createSocketAddr(conf.get("dfs.datanode.https.address", infoHost + ":" + 0));
    Configuration sslConf = new Configuration(conf);
    sslConf.addResource(conf.get("https.keystore.info.rsrc", "sslinfo.xml"));
    String keyloc = sslConf.get("https.keystore.location");
    if (null != keyloc) {
        this.infoServer.addSslListener(secInfoSocAddr, keyloc, sslConf.get("https.keystore.password", ""),
                sslConf.get("https.keystore.keypassword", ""));
    }
    this.infoServer.addServlet(null, "/streamFile/*", StreamFile.class);
    this.infoServer.setAttribute("datanode.blockScanner", blockScanner);
    this.infoServer.addServlet(null, "/blockScannerReport", DataBlockScanner.Servlet.class);
    this.infoServer.start();
    // adjust info port
    this.dnRegistration.setInfoPort(this.infoServer.getPort());
    myMetrics = new DataNodeMetrics(conf, dnRegistration.getStorageID());

    //init ipc server
    InetSocketAddress ipcAddr = NetUtils.createSocketAddr(conf.get("dfs.datanode.ipc.address"));
    ipcServer = RPC.getServer(this, ipcAddr.getHostName(), ipcAddr.getPort(),
            conf.getInt("dfs.datanode.handler.count", 3), false, conf);
    ipcServer.start();
    dnRegistration.setIpcPort(ipcServer.getListenerAddress().getPort());

    LOG.info("dnRegistration = " + dnRegistration);
}

From source file:common.DataNode.java

/**
 * This method starts the data node with the specified conf.
 *
 * @param conf - the configuration
 *  if conf's CONFIG_PROPERTY_SIMULATED property is set
 *  then a simulated storage based data node is created.
 * 
 * @param dataDirs - only for a non-simulated storage data node
 * @throws IOException
 */
void startDataNode(Configuration conf, AbstractList<File> dataDirs, DatanodeProtocol namenode)
        throws IOException {
    // use configured nameserver & interface to get local hostname
    if (conf.get(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY) != null) {
        machineName = conf.get(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY);
    }
    if (machineName == null) {
        machineName = DNS.getDefaultHost(conf.get("dfs.datanode.dns.interface", "default"),
                conf.get("dfs.datanode.dns.nameserver", "default"));
    }
    this.nameNodeAddr = NameNode.getAddress(conf);

    this.socketTimeout = conf.getInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, HdfsConstants.READ_TIMEOUT);
    this.socketWriteTimeout = conf.getInt("dfs.datanode.socket.write.timeout", HdfsConstants.WRITE_TIMEOUT);
    /* Based on results on different platforms, we might need set the default 
     * to false on some of them. */
    this.transferToAllowed = conf.getBoolean("dfs.datanode.transferTo.allowed", true);
    this.writePacketSize = conf.getInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
            DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
    InetSocketAddress socAddr = NetUtils.createSocketAddr(conf.get("dfs.datanode.address", "0.0.0.0:50010"));
    int tmpPort = socAddr.getPort();
    storage = new DataStorage();
    // construct registration
    this.dnRegistration = new DatanodeRegistration(machineName + ":" + tmpPort);

    // connect to name node
    this.namenode = namenode;

    // get version and id info from the name-node
    NamespaceInfo nsInfo = handshake();
    StartupOption startOpt = getStartupOption(conf);
    assert startOpt != null : "Startup option must be set.";

    boolean simulatedFSDataset = conf.getBoolean("dfs.datanode.simulateddatastorage", false);
    if (simulatedFSDataset) {
        setNewStorageID(dnRegistration);
        dnRegistration.storageInfo.layoutVersion = FSConstants.LAYOUT_VERSION;
        dnRegistration.storageInfo.namespaceID = nsInfo.namespaceID;
        // it would have been better to pass storage as a parameter to
        // constructor below - need to augment ReflectionUtils used below.
        conf.set(DFSConfigKeys.DFS_DATANODE_STORAGEID_KEY, dnRegistration.getStorageID());
        try {
            //Equivalent of following (can't do because Simulated is in test dir)
            //  this.data = new SimulatedFSDataset(conf);
            this.data = (FSDatasetInterface) ReflectionUtils.newInstance(
                    Class.forName("org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset"), conf);
        } catch (ClassNotFoundException e) {
            throw new IOException(StringUtils.stringifyException(e));
        }
    } else { // real storage
        // read storage info, lock data dirs and transition fs state if necessary
        storage.recoverTransitionRead(nsInfo, dataDirs, startOpt);
        // adjust
        this.dnRegistration.setStorageInfo(storage);
        // initialize data node internal structure
        this.data = new FSDataset(storage, conf);
    }

    // find free port
    ServerSocket ss = (socketWriteTimeout > 0) ? ServerSocketChannel.open().socket() : new ServerSocket();
    Server.bind(ss, socAddr, 0);
    ss.setReceiveBufferSize(DEFAULT_DATA_SOCKET_SIZE);
    // adjust machine name with the actual port
    tmpPort = ss.getLocalPort();
    selfAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(), tmpPort);
    this.dnRegistration.setName(machineName + ":" + tmpPort);
    LOG.info("Opened info server at " + tmpPort);

    this.threadGroup = new ThreadGroup("dataXceiverServer");
    this.dataXceiverServer = new Daemon(threadGroup, new DataXceiverServer(ss, conf, this));
    this.threadGroup.setDaemon(true); // auto destroy when empty

    this.blockReportInterval = conf.getLong("dfs.blockreport.intervalMsec", BLOCKREPORT_INTERVAL);
    this.initialBlockReportDelay = conf.getLong("dfs.blockreport.initialDelay", BLOCKREPORT_INITIAL_DELAY)
            * 1000L;
    if (this.initialBlockReportDelay >= blockReportInterval) {
        this.initialBlockReportDelay = 0;
        LOG.info("dfs.blockreport.initialDelay is greater than " + "dfs.blockreport.intervalMsec."
                + " Setting initial delay to 0 msec:");
    }
    this.heartBeatInterval = conf.getLong("dfs.heartbeat.interval", HEARTBEAT_INTERVAL) * 1000L;

    //initialize periodic block scanner
    String reason = null;
    if (conf.getInt("dfs.datanode.scan.period.hours", 0) < 0) {
        reason = "verification is turned off by configuration";
    } else if (!(data instanceof FSDataset)) {
        reason = "verification is supported only with FSDataset";
    }
    if (reason == null) {
        blockScanner = new DataBlockScanner(this, (FSDataset) data, conf);
    } else {
        LOG.info("Periodic Block Verification is disabled because " + reason + ".");
    }

    //create a servlet to serve full-file content
    InetSocketAddress infoSocAddr = NetUtils
            .createSocketAddr(conf.get("dfs.datanode.http.address", "0.0.0.0:50075"));
    String infoHost = infoSocAddr.getHostName();
    int tmpInfoPort = infoSocAddr.getPort();
    this.infoServer = new HttpServer("datanode", infoHost, tmpInfoPort, tmpInfoPort == 0, conf);
    if (conf.getBoolean("dfs.https.enable", false)) {
        boolean needClientAuth = conf.getBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
                DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
        InetSocketAddress secInfoSocAddr = NetUtils
                .createSocketAddr(conf.get("dfs.datanode.https.address", infoHost + ":" + 0));
        Configuration sslConf = new HdfsConfiguration(false);
        sslConf.addResource(conf.get("dfs.https.server.keystore.resource", "ssl-server.xml"));
        this.infoServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth);
    }
    this.infoServer.addInternalServlet(null, "/streamFile/*", StreamFile.class);
    this.infoServer.addInternalServlet(null, "/getFileChecksum/*", FileChecksumServlets.GetServlet.class);
    this.infoServer.setAttribute("datanode.blockScanner", blockScanner);
    this.infoServer.setAttribute("datanode.conf", conf);
    this.infoServer.addServlet(null, "/blockScannerReport", DataBlockScanner.Servlet.class);
    this.infoServer.start();
    // adjust info port
    this.dnRegistration.setInfoPort(this.infoServer.getPort());
    myMetrics = new DataNodeMetrics(conf, dnRegistration.getName());

    // set service-level authorization security policy
    if (conf.getBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
        ServiceAuthorizationManager.refresh(conf, new HDFSPolicyProvider());
    }

    //init ipc server
    InetSocketAddress ipcAddr = NetUtils.createSocketAddr(conf.get("dfs.datanode.ipc.address"));
    ipcServer = RPC.getServer(DataNode.class, this, ipcAddr.getHostName(), ipcAddr.getPort(),
            conf.getInt("dfs.datanode.handler.count", 3), false, conf);
    ipcServer.start();
    dnRegistration.setIpcPort(ipcServer.getListenerAddress().getPort());

    LOG.info("dnRegistration = " + dnRegistration);

    plugins = conf.getInstances("dfs.datanode.plugins", ServicePlugin.class);
    for (ServicePlugin p : plugins) {
        try {
            p.start(this);
            LOG.info("Started plug-in " + p);
        } catch (Throwable t) {
            LOG.warn("ServicePlugin " + p + " could not be started", t);
        }
    }
}