Example usage for java.lang System identityHashCode

List of usage examples for java.lang System identityHashCode

Introduction

On this page you can find example usages of java.lang.System.identityHashCode, drawn from real projects.

Prototype

@HotSpotIntrinsicCandidate
public static native int identityHashCode(Object x);

Document

Returns the same hash code for the given object as would be returned by the default method hashCode(), whether or not the given object's class overrides hashCode().
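For instance, two distinct but equal objects share a value-based hashCode() yet normally differ under identityHashCode(). A minimal, self-contained sketch (note that identity hashes are not guaranteed unique, so the second comparison is only almost always false):

public class IdentityHashDemo {
    public static void main(String[] args) {
        String a = "hello";
        String b = new String("hello"); // equal value, distinct instance

        // String overrides hashCode(), so equal values hash equally.
        System.out.println(a.hashCode() == b.hashCode()); // true

        // identityHashCode ignores the override and reflects object identity.
        System.out.println(System.identityHashCode(a) == System.identityHashCode(b)); // almost always false
    }
}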

Usage

From source file:android.app.LoaderManager.java

@Override
public String toString() {
    StringBuilder sb = new StringBuilder(128);
    sb.append("LoaderManager{");
    sb.append(Integer.toHexString(System.identityHashCode(this)));
    sb.append(" in ");
    DebugUtils.buildShortClassTag(mActivity, sb);
    sb.append("}}");
    return sb.toString();
}
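The hex-encoded identity hash reproduces the instance tag that the default Object.toString() would print, even though toString() is overridden here. As a minimal sketch, the pattern extracts to a one-line helper (identityTag is our name, not part of the Android API):

// Hypothetical helper (our name, not Android's): the same hex identity tag
// that LoaderManager builds above.
final class DebugTags {
    static String identityTag(Object o) {
        return Integer.toHexString(System.identityHashCode(o));
    }
}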

From source file:org.apache.jmeter.protocol.http.sampler.HTTPHC4Impl.java

private HttpClient setupClient(URL url, SampleResult res) {

    Map<HttpClientKey, HttpClient> mapHttpClientPerHttpClientKey = HTTPCLIENTS_CACHE_PER_THREAD_AND_HTTPCLIENTKEY
            .get();

    final String host = url.getHost();
    String proxyHost = getProxyHost();
    int proxyPort = getProxyPortInt();
    String proxyPass = getProxyPass();
    String proxyUser = getProxyUser();

    // Static proxy is the globally defined proxy, e.g. from the command line or properties
    boolean useStaticProxy = isStaticProxy(host);
    // dynamic proxy is the proxy defined for this sampler
    boolean useDynamicProxy = isDynamicProxy(proxyHost, proxyPort);
    boolean useProxy = useStaticProxy || useDynamicProxy;

    // if both dynamic and static are used, the dynamic proxy has priority over static
    if (!useDynamicProxy) {
        proxyHost = PROXY_HOST;
        proxyPort = PROXY_PORT;
        proxyUser = PROXY_USER;
        proxyPass = PROXY_PASS;
    }

    // Lookup key - must agree with all the values used to create the HttpClient.
    HttpClientKey key = new HttpClientKey(url, useProxy, proxyHost, proxyPort, proxyUser, proxyPass);

    HttpClient httpClient = null;
    if (this.testElement.isConcurrentDwn()) {
        httpClient = (HttpClient) JMeterContextService.getContext().getSamplerContext().get(HTTPCLIENT_TOKEN);
    }

    if (httpClient == null) {
        httpClient = mapHttpClientPerHttpClientKey.get(key);
    }

    if (httpClient != null && resetSSLContext
            && HTTPConstants.PROTOCOL_HTTPS.equalsIgnoreCase(url.getProtocol())) {
        ((AbstractHttpClient) httpClient).clearRequestInterceptors();
        ((AbstractHttpClient) httpClient).clearResponseInterceptors();
        httpClient.getConnectionManager().closeIdleConnections(1L, TimeUnit.MICROSECONDS);
        httpClient = null;
        JsseSSLManager sslMgr = (JsseSSLManager) SSLManager.getInstance();
        sslMgr.resetContext();
        resetSSLContext = false;
    }

    if (httpClient == null) { // One-time init for this client

        HttpParams clientParams = new DefaultedHttpParams(new BasicHttpParams(), DEFAULT_HTTP_PARAMS);

        DnsResolver resolver = this.testElement.getDNSResolver();
        if (resolver == null) {
            resolver = SystemDefaultDnsResolver.INSTANCE;
        }
        MeasuringConnectionManager connManager = new MeasuringConnectionManager(createSchemeRegistry(),
                resolver, TIME_TO_LIVE, VALIDITY_AFTER_INACTIVITY_TIMEOUT);

        // Modern browsers use more connections per host than the current httpclient default (2).
        // When using parallel download, the httpclient and connection manager are shared by the download threads,
        // so to be realistic JMeter must set a higher value for DefaultMaxPerRoute.
        if (this.testElement.isConcurrentDwn()) {
            try {
                int maxConcurrentDownloads = Integer.parseInt(this.testElement.getConcurrentPool());
                connManager.setDefaultMaxPerRoute(
                        Math.max(maxConcurrentDownloads, connManager.getDefaultMaxPerRoute()));
            } catch (NumberFormatException nfe) {
                // no need to log -> will be done by the sampler
            }
        }

        httpClient = new DefaultHttpClient(connManager, clientParams) {
            @Override
            protected HttpRequestRetryHandler createHttpRequestRetryHandler() {
                return new DefaultHttpRequestRetryHandler(RETRY_COUNT, false); // set retry count
            }
        };

        if (IDLE_TIMEOUT > 0) {
            ((AbstractHttpClient) httpClient).setKeepAliveStrategy(IDLE_STRATEGY);
        }
        // see https://issues.apache.org/jira/browse/HTTPCORE-397
        ((AbstractHttpClient) httpClient).setReuseStrategy(DefaultClientConnectionReuseStrategy.INSTANCE);
        ((AbstractHttpClient) httpClient).addResponseInterceptor(RESPONSE_CONTENT_ENCODING);
        ((AbstractHttpClient) httpClient).addResponseInterceptor(METRICS_SAVER); // HACK
        ((AbstractHttpClient) httpClient).addRequestInterceptor(METRICS_RESETTER);

        // Override the default schemes as necessary
        SchemeRegistry schemeRegistry = httpClient.getConnectionManager().getSchemeRegistry();

        if (SLOW_HTTP != null) {
            schemeRegistry.register(SLOW_HTTP);
        }

        // Set up proxy details
        if (useProxy) {

            HttpHost proxy = new HttpHost(proxyHost, proxyPort);
            clientParams.setParameter(ConnRoutePNames.DEFAULT_PROXY, proxy);

            if (proxyUser.length() > 0) {
                ((AbstractHttpClient) httpClient).getCredentialsProvider().setCredentials(
                        new AuthScope(proxyHost, proxyPort),
                        new NTCredentials(proxyUser, proxyPass, localHost, PROXY_DOMAIN));
            }
        }

        // Bug 52126 - we do our own cookie handling
        clientParams.setParameter(ClientPNames.COOKIE_POLICY, CookieSpecs.IGNORE_COOKIES);

        if (log.isDebugEnabled()) {
            log.debug("Created new HttpClient: @" + System.identityHashCode(httpClient) + " " + key.toString());
        }

        mapHttpClientPerHttpClientKey.put(key, httpClient); // save the agent for next time round
    } else {
        if (log.isDebugEnabled()) {
            log.debug("Reusing the HttpClient: @" + System.identityHashCode(httpClient) + " " + key.toString());
        }
    }

    if (this.testElement.isConcurrentDwn()) {
        JMeterContextService.getContext().getSamplerContext().put(HTTPCLIENT_TOKEN, httpClient);
    }

    // TODO - should this be done when the client is created?
    // If so, then the details need to be added as part of HttpClientKey
    setConnectionAuthorization(httpClient, url, getAuthManager(), key);

    return httpClient;
}
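Both debug statements above log "@" followed by System.identityHashCode(httpClient) so that distinct cached client instances can be told apart even when their configuration is identical. A self-contained sketch of the same created-vs-reused logging idiom, with illustrative names that are not part of JMeter:

import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

// Illustrative cache: the identity hash distinguishes instances that
// would otherwise log identically.
class InstanceCache<K, V> {
    private final Map<K, V> cache = new HashMap<>();

    V getOrCreate(K key, Function<K, V> factory) {
        V value = cache.get(key);
        if (value == null) {
            value = factory.apply(key);
            cache.put(key, value);
            System.out.println("Created new instance: @" + System.identityHashCode(value) + " for " + key);
        } else {
            System.out.println("Reusing instance: @" + System.identityHashCode(value) + " for " + key);
        }
        return value;
    }
}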

From source file:org.apache.hadoop.hive.ql.exec.tez.WorkloadManager.java

/**
 * This is the main method of the master thread that processes one set of events.
 * Be mindful of the fact that events can be queued while we are processing events, so
 * in addition to making sure we keep the current set consistent (e.g. no need to handle
 * update errors for a session that should already be destroyed), this needs to guard itself
 * against the future iterations - e.g. what happens if we kill a query due to plan change,
 * but the DAG finished before the kill happens and the user queues a "return" event? Etc.
 * DO NOT block for a long time in this method.
 * @param e Input events.
 * @param syncWork Output tasks that cannot be called via async methods.
 */
private void processCurrentEvents(EventState e, WmThreadSyncWork syncWork) throws Exception {
    // The order of processing is as follows. We'd reclaim or kill all the sessions that we can
    // reclaim from various user actions and errors, then apply the new plan if any,
    // then give out all we can give out (restart, get and reopen callers) and rebalance the
    // resource allocations in all the affected pools.
    // For every session, we'd check all the concurrent things happening to it.

    // TODO: also account for Tez-internal session restarts;
    //       AM reg info changes; add notifications, ignore errors, and update alloc.
    HashSet<String> poolsToRedistribute = new HashSet<>();

    // 0. Handle initialization results.
    for (SessionInitContext sw : e.initResults) {
        handleInitResultOnMasterThread(sw, syncWork, poolsToRedistribute);
    }
    e.initResults.clear();

    // 1. Handle kill query results - part 1, just put them in place. We will resolve what
    //    to do with the sessions after we go thru all the concurrent user actions.
    for (Map.Entry<WmTezSession, Boolean> entry : e.killQueryResults.entrySet()) {
        WmTezSession killQuerySession = entry.getKey();
        boolean killResult = entry.getValue();
        LOG.debug("Processing KillQuery {} for {}", killResult ? "success" : "failure", killQuerySession);
        // Note: do not cancel any user actions here; user actions actually interact with kills.
        KillQueryContext killCtx = killQueryInProgress.get(killQuerySession);
        if (killCtx == null) {
            LOG.error("Internal error - cannot find the context for killing {}", killQuerySession);
            continue;
        }
        killCtx.handleKillQueryCallback(!killResult);
    }
    e.killQueryResults.clear();

    // 2. Handle sessions that are being destroyed by users. Destroy implies return.
    for (WmTezSession sessionToDestroy : e.toDestroy) {
        if (e.toReturn.remove(sessionToDestroy)) {
            LOG.warn("The session was both destroyed and returned by the user; destroying");
        }
        LOG.info("Destroying {}", sessionToDestroy);
        RemoveSessionResult rr = handleReturnedInUseSessionOnMasterThread(e, sessionToDestroy,
                poolsToRedistribute, false);
        if (rr == RemoveSessionResult.OK || rr == RemoveSessionResult.NOT_FOUND) {
            // Restart even if there's an internal error.
            syncWork.toRestartInUse.add(sessionToDestroy);
        }
    }
    e.toDestroy.clear();

    // 3. Now handle actual returns. Sessions may be returned to the pool or may trigger expires.
    for (WmTezSession sessionToReturn : e.toReturn) {
        LOG.info("Returning {}", sessionToReturn);
        RemoveSessionResult rr = handleReturnedInUseSessionOnMasterThread(e, sessionToReturn,
                poolsToRedistribute, true);
        switch (rr) {
        case OK:
            WmEvent wmEvent = new WmEvent(WmEvent.EventType.RETURN);
            boolean wasReturned = tezAmPool.returnSessionAsync(sessionToReturn);
            if (!wasReturned) {
                syncWork.toDestroyNoRestart.add(sessionToReturn);
            } else {
                if (sessionToReturn.getWmContext() != null
                        && sessionToReturn.getWmContext().isQueryCompleted()) {
                    sessionToReturn.resolveReturnFuture();
                }
                wmEvent.endEvent(sessionToReturn);
            }
            break;
        case NOT_FOUND:
            syncWork.toRestartInUse.add(sessionToReturn); // Restart if there's an internal error.
            break;
        case IGNORE:
            break;
        default:
            throw new AssertionError("Unknown state " + rr);
        }
    }
    e.toReturn.clear();

    // 4. Reopen is essentially just destroy + get a new session for a session in use.
    for (Map.Entry<WmTezSession, SettableFuture<WmTezSession>> entry : e.toReopen.entrySet()) {
        LOG.info("Reopening {}", entry.getKey());
        handeReopenRequestOnMasterThread(e, entry.getKey(), entry.getValue(), poolsToRedistribute, syncWork);
    }
    e.toReopen.clear();

    // 5. All the sessions in use that were not destroyed or returned with a failed update now die.
    for (Map.Entry<WmTezSession, Integer> entry : e.updateErrors.entrySet()) {
        WmTezSession sessionWithUpdateError = entry.getKey();
        int failedEndpointVersion = entry.getValue();
        LOG.info("Update failed for {}", sessionWithUpdateError);
        handleUpdateErrorOnMasterThread(sessionWithUpdateError, failedEndpointVersion, e.toReuse, syncWork,
                poolsToRedistribute);
    }
    e.updateErrors.clear();

    // 6. Now apply a resource plan if any. This is expected to be pretty rare.
    boolean hasRequeues = false;
    if (e.resourcePlanToApply != null || e.doClearResourcePlan) {
        LOG.info("Applying new resource plan");
        int getReqCount = e.getRequests.size();
        applyNewResourcePlanOnMasterThread(e, syncWork, poolsToRedistribute);
        hasRequeues = getReqCount != e.getRequests.size();
    }
    e.resourcePlanToApply = null;
    e.doClearResourcePlan = false;

    // 7. Handle any move session requests. The way move session works right now is:
    // a) sessions get moved to the destination pool if there is capacity in the destination pool;
    // b) if there is no capacity in the destination pool, the session gets killed (since we cannot pause a query).
    // TODO: in the future, killing could be delayed until the point where a session is actually required.
    // We could consider delaying the move (when the destination pool is full) until there is a claim in the
    // src pool. Maybe change the command to support ... DELAYED MOVE TO etl ..., which would run under the src
    // cluster fraction as long as possible.
    Map<WmTezSession, WmEvent> recordMoveEvents = new HashMap<>();
    for (MoveSession moveSession : e.moveSessions) {
        handleMoveSessionOnMasterThread(moveSession, syncWork, poolsToRedistribute, e.toReuse,
                recordMoveEvents);
    }
    e.moveSessions.clear();

    // 8. Handle all the get/reuse requests. We won't actually give out anything here, but merely
    //    map all the requests and place them in an appropriate order in the pool queues. The only
    //    exception is reuse without queue contention, which can be granted immediately. If we can't
    //    reuse the session immediately, we convert the reuse to a normal get, because we want
    //    query-level fairness and don't want the get in the queue to hold up a session.
    GetRequest req;
    while ((req = e.getRequests.pollFirst()) != null) {
        LOG.info("Processing a new get request from " + req.mappingInput);
        queueGetRequestOnMasterThread(req, poolsToRedistribute, syncWork);
    }
    e.toReuse.clear();

    // 9. Resolve all the kill query requests in flight. Nothing below can affect them.
    Iterator<KillQueryContext> iter = killQueryInProgress.values().iterator();
    while (iter.hasNext()) {
        KillQueryContext ctx = iter.next();
        KillQueryResult kr = ctx.process();
        switch (kr) {
        case IN_PROGRESS:
            continue; // Either the user or the kill is not done yet.
        case OK: {
            iter.remove();
            LOG.debug("Kill query succeeded; returning to the pool: {}", ctx.session);
            ctx.killSessionFuture.set(true);
            WmEvent wmEvent = new WmEvent(WmEvent.EventType.RETURN);
            if (!tezAmPool.returnSessionAsync(ctx.session)) {
                syncWork.toDestroyNoRestart.add(ctx.session);
            } else {
                if (ctx.session.getWmContext() != null && ctx.session.getWmContext().isQueryCompleted()) {
                    ctx.session.resolveReturnFuture();
                }
                wmEvent.endEvent(ctx.session);
            }
            break;
        }
        case RESTART_REQUIRED: {
            iter.remove();
            ctx.killSessionFuture.set(true);
            LOG.debug("Kill query failed; restarting: {}", ctx.session);
            // Note: we assume here the session, before we resolve killQuery result here, is still
            //       "in use". That is because all the user ops above like return, reopen, etc.
            //       don't actually return/reopen/... when kill query is in progress.
            syncWork.toRestartInUse.add(ctx.session);
            break;
        }
        default:
            throw new AssertionError("Unknown state " + kr);
        }
    }

    // 10. If there was a cluster state change, make sure we redistribute all the pools.
    if (e.hasClusterStateChanged) {
        LOG.info("Processing a cluster state change");
        poolsToRedistribute.addAll(pools.keySet());
        e.hasClusterStateChanged = false;
    }

    // 11. Finally, for all the pools that have changes, promote queued queries and rebalance.
    for (String poolName : poolsToRedistribute) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Processing changes for pool " + poolName + ": " + pools.get(poolName));
        }
        processPoolChangesOnMasterThread(poolName, hasRequeues, syncWork);
    }

    // 12. Save state for future iterations.
    for (KillQueryContext killCtx : syncWork.toKillQuery.values()) {
        if (killQueryInProgress.put(killCtx.session, killCtx) != null) {
            LOG.error("One query killed several times - internal error {}", killCtx.session);
        }
    }

    // 13. To record move events, we need the cluster fraction updates that happen in step 11.
    for (Map.Entry<WmTezSession, WmEvent> entry : recordMoveEvents.entrySet()) {
        entry.getValue().endEvent(entry.getKey());
    }

    // 14. Give our final state to UI/API requests if any.
    if (e.dumpStateFuture != null) {
        List<String> result = new ArrayList<>();
        result.add("RESOURCE PLAN " + rpName + "; default pool " + defaultPool);
        for (PoolState ps : pools.values()) {
            dumpPoolState(ps, result);
        }
        e.dumpStateFuture.set(result);
        e.dumpStateFuture = null;
    }

    // 15. Notify tests and global async ops.
    for (SettableFuture<Boolean> testEvent : e.testEvents) {
        LOG.info("Triggering test event " + System.identityHashCode(testEvent));
        testEvent.set(null);
    }
    e.testEvents.clear();

    if (e.applyRpFuture != null) {
        e.applyRpFuture.set(true);
        e.applyRpFuture = null;
    }
}
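Step 15 logs System.identityHashCode(testEvent) because a future has no descriptive toString(); the identity hash gives each pending event a stable, log-friendly ID. The same idiom in isolation, with the JDK's CompletableFuture standing in for Guava's SettableFuture:

import java.util.concurrent.CompletableFuture;

public class EventLogDemo {
    public static void main(String[] args) {
        CompletableFuture<Boolean> testEvent = new CompletableFuture<>();
        // Tag the otherwise-anonymous future in the log output.
        System.out.println("Triggering test event " + System.identityHashCode(testEvent));
        testEvent.complete(true);
    }
}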

From source file:org.apache.geode.internal.cache.Oplog.java

public void replaceIncompatibleEntry(DiskRegionView dr, DiskEntry old, DiskEntry repl) {
    boolean useNextOplog = false;
    // No need to get the backup lock prior to synchronizing (correct lock order) since the
    // synchronized block does not attempt to get the backup lock (incorrect lock order)
    synchronized (this.lock) {
        if (getOplogSet().getChild() != this) {
            // Make sure to only call replaceIncompatibleEntry on the child, because
            // this.lock can only sync with the compaction thread on the child oplog.
            useNextOplog = true;
        } else {
            // This method is used in recovery only and will not be called by compaction.
            // It's only called before or after compaction. It will replace the DiskEntry
            // in the DiskRegion without modifying the DiskId (such as to a new oplogId),
            // and without changing the entry count in the oplog. While doing that,
            // this.lock will lock the current child to sync with the compaction thread.
            // If the replace thread got this.lock, DiskEntry "old" will not be removed
            // from the current oplog (maybe not the child). If the compaction thread got
            // this.lock, DiskEntry "old" should have been moved to the child oplog by the
            // time the replace thread processes it.

            // See #48032. A new region entry has been put into the region map, but we
            // also have to replace it in the oplog live entries that are used to write
            // the krf. If we don't, we will recover the wrong (old) value.
            getOrCreateDRI(dr).replaceLive(old, repl);
            if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                logger.trace(LogMarker.PERSIST_RECOVERY,
                        "replacing incompatible entry key = {} old = {} new = {} oldDiskId = {} new diskId = {} tag = {} in child oplog #{}",
                        old.getKey(), System.identityHashCode(old), System.identityHashCode(repl),
                        old.getDiskId(), repl.getDiskId(), old.getVersionStamp(), this.getOplogId());
            }
        }
    }
    if (useNextOplog) {
        if (LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER) {
            CacheObserverHolder.getInstance().afterSwitchingOplog();
        }
        Assert.assertTrue(getOplogSet().getChild() != this);
        getOplogSet().getChild().replaceIncompatibleEntry(dr, old, repl);
    }
}

From source file:byps.http.HHttpServlet.java

protected void doHtmlUpload(HttpServletRequest request, HttpServletResponse response) throws IOException {
    if (log.isDebugEnabled())
        log.debug("doHtmlUpload(");

    try {
        // NDC.push(hsess.getId());

        boolean isMultipart = ServletFileUpload.isMultipartContent(request);
        if (!isMultipart) {
            throw new IllegalStateException("File upload must be sent as multipart/form-data.");
        }

        // Create a factory for disk-based file items
        DiskFileItemFactory factory = new DiskFileItemFactory(HConstants.INCOMING_STREAM_BUFFER,
                getConfig().getTempDir());

        // Create a new file upload handler
        ServletFileUpload upload = new ServletFileUpload(factory);

        // Set overall request size constraint
        long maxSize = getHtmlUploadMaxSize();
        if (log.isDebugEnabled())
            log.debug("set max upload file size=" + maxSize);
        upload.setSizeMax(maxSize);

        // Parse the request
        @SuppressWarnings("unchecked")
        List<FileItem> items = upload.parseRequest(request);
        if (log.isDebugEnabled())
            log.debug("received #items=" + items.size());

        ArrayList<HFileUploadItem> uploadItems = new ArrayList<HFileUploadItem>();
        for (FileItem item : items) {

            String fieldName = item.getFieldName();
            if (log.isDebugEnabled())
                log.debug("fieldName=" + fieldName);
            String fileName = item.getName();
            if (log.isDebugEnabled())
                log.debug("fileName=" + fileName);
            boolean formField = item.isFormField();
            if (log.isDebugEnabled())
                log.debug("formField=" + formField);
            if (!formField && fileName.length() == 0)
                continue;
            long streamId = formField ? 0L
                    : (System.currentTimeMillis()
                            ^ ((((long) fileName.hashCode()) << 16L) | (long) System.identityHashCode(this))); // used as pseudo random number

            HFileUploadItem uploadItem = new HFileUploadItem(formField, fieldName, fileName,
                    item.getContentType(), item.getSize(), Long.toString(streamId));
            uploadItems.add(uploadItem);
            if (log.isDebugEnabled())
                log.debug("uploadItem=" + uploadItem);

            if (item.isFormField())
                continue;

            final BTargetId targetId = new BTargetId(getConfig().getMyServerId(), 0, streamId);
            getActiveMessages().addIncomingUploadStream(
                    new HFileUploadItemIncomingStream(item, targetId, getConfig().getTempDir()));

        }

        makeHtmlUploadResult(request, response, uploadItems);

    } catch (Throwable e) {
        if (log.isInfoEnabled())
            log.info("Failed to process message.", e);
        response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
        response.getWriter().print(e.toString());
        response.getWriter().close();
    } finally {
        // NDC.pop();
    }

    if (log.isDebugEnabled())
        log.debug(")doHtmlUpload");
}
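The streamId above folds System.identityHashCode(this) into a time- and filename-based value as a cheap pseudo-random number, as the inline comment notes. Identity hashes are not unique, so collisions remain possible; a sketch of a more collision-resistant alternative (our suggestion, not part of the byps API):

import java.util.concurrent.ThreadLocalRandom;

final class StreamIds {
    // Sketch: draw the stream id from a real RNG instead of mixing the clock,
    // the file name hash, and the identity hash. Zero stays reserved for form fields.
    static long nextStreamId() {
        long id;
        do {
            id = ThreadLocalRandom.current().nextLong();
        } while (id == 0L);
        return id;
    }
}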

From source file:org.apache.hadoop.hdfs.client.ShortCircuitCache.java

@Override
public String toString() {
    return "ShortCircuitCache(0x" + Integer.toHexString(System.identityHashCode(this)) + ")";
}

From source file:gdsc.smlm.ij.plugins.pcpalm.PCPALMMolecules.java

private void runSimulation(boolean resultsAvailable) {
    if (resultsAvailable && !showSimulationDialog())
        return;

    startLog();

    log("Simulation parameters");
    if (blinkingDistribution == 3) {
        log("  - Clusters = %d", nMolecules);
        log("  - Simulation size = %s um", Utils.rounded(simulationSize, 4));
        log("  - Molecules/cluster = %s", Utils.rounded(blinkingRate, 4));
        log("  - Blinking distribution = %s", BLINKING_DISTRIBUTION[blinkingDistribution]);
        log("  - p-Value = %s", Utils.rounded(p, 4));
    } else {
        log("  - Molecules = %d", nMolecules);
        log("  - Simulation size = %s um", Utils.rounded(simulationSize, 4));
        log("  - Blinking rate = %s", Utils.rounded(blinkingRate, 4));
        log("  - Blinking distribution = %s", BLINKING_DISTRIBUTION[blinkingDistribution]);
    }
    log("  - Average precision = %s nm", Utils.rounded(sigmaS, 4));
    log("  - Clusters simulation = " + CLUSTER_SIMULATION[clusterSimulation]);
    if (clusterSimulation > 0) {
        log("  - Cluster number = %s +/- %s", Utils.rounded(clusterNumber, 4),
                Utils.rounded(clusterNumberSD, 4));
        log("  - Cluster radius = %s nm", Utils.rounded(clusterRadius, 4));
    }

    final double nmPerPixel = 100;
    double width = simulationSize * 1000.0;
    // Allow a border of 3 x sigma for +/- precision
    //if (blinkingRate > 1)
    width -= 3 * sigmaS;
    RandomGenerator randomGenerator = new Well19937c(
            System.currentTimeMillis() + System.identityHashCode(this));
    RandomDataGenerator dataGenerator = new RandomDataGenerator(randomGenerator);
    UniformDistribution dist = new UniformDistribution(null, new double[] { width, width, 0 },
            randomGenerator.nextInt());

    molecules = new ArrayList<Molecule>(nMolecules);
    // Create some dummy results since the calibration is required for later analysis
    results = new MemoryPeakResults();
    results.setCalibration(new gdsc.smlm.results.Calibration(nmPerPixel, 1, 100));
    results.setSource(new NullSource("Molecule Simulation"));
    results.begin();
    int count = 0;

    // Generate a sequence of coordinates
    ArrayList<double[]> xyz = new ArrayList<double[]>((int) (nMolecules * 1.1));

    Statistics statsRadius = new Statistics();
    Statistics statsSize = new Statistics();
    String maskTitle = TITLE + " Cluster Mask";
    ByteProcessor bp = null;
    double maskScale = 0;

    // TODO - Add a fluctuations model to this.

    if (clusterSimulation > 0) {
        // Simulate clusters.

        // Note: In the Veatch et al. paper (Plos 1, e31457) correlation functions are built using circles
        // with small radii of 4-8 Arbitrary Units (AU) or large radii of 10-30 AU. A fluctuations model is
        // created at T = 1.075 Tc. It is not clear exactly how the particles are distributed.
        // It may be that a mask is created first using the model. The particles are placed on the mask using
        // a specified density. This simulation produces a figure to show either a damped cosine function
        // (circles) or an exponential (fluctuations). The number of particles in each circle may be randomly
        // determined just by density. The figure does not discuss the derivation of the cluster size 
        // statistic.
        // 
        // If this plugin simulation is run with a uniform distribution and blinking rate of 1 then the damped
        // cosine function is reproduced. The curve crosses g(r)=1 at a value equivalent to the average
        // distance to the centre-of-mass of each drawn cluster, not the input cluster radius parameter (which 
        // is a hard upper limit on the distance to centre).

        final int maskSize = lowResolutionImageSize;
        int[] mask = null;
        maskScale = width / maskSize; // scale is in nm/pixel

        ArrayList<double[]> clusterCentres = new ArrayList<double[]>();
        int totalSteps = 1 + (int) Math.ceil(nMolecules / clusterNumber);
        if (clusterSimulation == 2 || clusterSimulation == 3) {
            // Clusters are non-overlapping circles

            // Ensure the circles do not overlap by using an exclusion mask that accumulates 
            // out-of-bounds pixels by drawing the last cluster (plus some border) on an image. When no
            // more pixels are available then stop generating molecules.
            // This is done by cumulatively filling a mask and using the MaskDistribution to select 
            // a new point. This may be slow but it works.

            // TODO - Allow clusters of different sizes...

            mask = new int[maskSize * maskSize];
            Arrays.fill(mask, 255);
            MaskDistribution maskDistribution = new MaskDistribution(mask, maskSize, maskSize, 0, maskScale,
                    maskScale, randomGenerator);
            double[] centre;
            IJ.showStatus("Computing clusters mask");
            int roiRadius = (int) Math.round((clusterRadius * 2) / maskScale);

            if (clusterSimulation == 3) {
                // Generate a mask of circles then sample from that.
                // If we want to fill the mask completely then adjust the total steps to be the number of 
                // circles that can fit inside the mask.
                totalSteps = (int) (maskSize * maskSize / (Math.PI * Math.pow(clusterRadius / maskScale, 2)));
            }

            while ((centre = maskDistribution.next()) != null && clusterCentres.size() < totalSteps) {
                IJ.showProgress(clusterCentres.size(), totalSteps);
                // The mask returns the coordinates with the centre of the image at 0,0
                centre[0] += width / 2;
                centre[1] += width / 2;
                clusterCentres.add(centre);

                // Fill in the mask around the centre to exclude any more circles that could overlap
                double cx = centre[0] / maskScale;
                double cy = centre[1] / maskScale;
                fillMask(mask, maskSize, (int) cx, (int) cy, roiRadius, 0);
                //log("[%.1f,%.1f] @ [%.1f,%.1f]", centre[0], centre[1], cx, cy);
                //Utils.display("Mask", new ColorProcessor(maskSize, maskSize, mask));
                try {
                    maskDistribution = new MaskDistribution(mask, maskSize, maskSize, 0, maskScale, maskScale,
                            randomGenerator);
                } catch (IllegalArgumentException e) {
                    // This can happen when there are no more non-zero pixels
                    log("WARNING: No more room for clusters on the mask area (created %d of estimated %d)",
                            clusterCentres.size(), totalSteps);
                    break;
                }
            }
            IJ.showProgress(1);
            IJ.showStatus("");
        } else {
            // Clusters are overlapping circles

            // Pick centres randomly from the distribution 
            while (clusterCentres.size() < totalSteps)
                clusterCentres.add(dist.next());
        }

        if (showClusterMask || clusterSimulation == 3) {
            // Show the mask for the clusters
            if (mask == null)
                mask = new int[maskSize * maskSize];
            else
                Arrays.fill(mask, 0);
            int roiRadius = (int) Math.round((clusterRadius) / maskScale);
            for (double[] c : clusterCentres) {
                double cx = c[0] / maskScale;
                double cy = c[1] / maskScale;
                fillMask(mask, maskSize, (int) cx, (int) cy, roiRadius, 1);
            }

            if (clusterSimulation == 3) {
                // We have the mask. Now pick points at random from the mask.
                MaskDistribution maskDistribution = new MaskDistribution(mask, maskSize, maskSize, 0, maskScale,
                        maskScale, randomGenerator);

                // Allocate each molecule position to a parent circle so defining clusters.
                int[][] clusters = new int[clusterCentres.size()][];
                int[] clusterSize = new int[clusters.length];

                for (int i = 0; i < nMolecules; i++) {
                    double[] centre = maskDistribution.next();
                    // The mask returns the coordinates with the centre of the image at 0,0
                    centre[0] += width / 2;
                    centre[1] += width / 2;
                    xyz.add(centre);

                    // Output statistics on cluster size and number.
                    // TODO - Finding the closest cluster could be done better than an all-vs-all comparison
                    double max = distance2(centre, clusterCentres.get(0));
                    int cluster = 0;
                    for (int j = 1; j < clusterCentres.size(); j++) {
                        double d2 = distance2(centre, clusterCentres.get(j));
                        if (d2 < max) {
                            max = d2;
                            cluster = j;
                        }
                    }

                    // Assign point i to cluster
                    centre[2] = cluster;

                    if (clusterSize[cluster] == 0) {
                        clusters[cluster] = new int[10];
                    }
                    if (clusters[cluster].length <= clusterSize[cluster]) {
                        clusters[cluster] = Arrays.copyOf(clusters[cluster],
                                (int) (clusters[cluster].length * 1.5));
                    }
                    clusters[cluster][clusterSize[cluster]++] = i;
                }

                // Generate real cluster size statistics
                for (int j = 0; j < clusterSize.length; j++) {
                    final int size = clusterSize[j];
                    if (size == 0)
                        continue;

                    statsSize.add(size);

                    if (size == 1) {
                        statsRadius.add(0);
                        continue;
                    }

                    // Find centre of cluster and add the distance to each point
                    double[] com = new double[2];
                    for (int n = 0; n < size; n++) {
                        double[] xy = xyz.get(clusters[j][n]);
                        for (int k = 0; k < 2; k++)
                            com[k] += xy[k];
                    }
                    for (int k = 0; k < 2; k++)
                        com[k] /= size;
                    for (int n = 0; n < size; n++) {
                        double dx = xyz.get(clusters[j][n])[0] - com[0];
                        double dy = xyz.get(clusters[j][n])[1] - com[1];
                        statsRadius.add(Math.sqrt(dx * dx + dy * dy));
                    }
                }
            }

            if (showClusterMask) {
                bp = new ByteProcessor(maskSize, maskSize);
                for (int i = 0; i < mask.length; i++)
                    if (mask[i] != 0)
                        bp.set(i, 128);
                Utils.display(maskTitle, bp);
            }
        }

        // Use the simulated cluster centres to create clusters of the desired size
        if (clusterSimulation == 1 || clusterSimulation == 2) {
            for (double[] clusterCentre : clusterCentres) {
                int clusterN = (int) Math.round(
                        (clusterNumberSD > 0) ? dataGenerator.nextGaussian(clusterNumber, clusterNumberSD)
                                : clusterNumber);
                if (clusterN < 1)
                    continue;
                //double[] clusterCentre = dist.next();
                if (clusterN == 1) {
                    // No need for a cluster around a point
                    xyz.add(clusterCentre);
                    statsRadius.add(0);
                    statsSize.add(1);
                } else {
                    // Generate N random points within a circle of the chosen cluster radius.
                    // Locate the centre-of-mass and the average distance to the centre.
                    double[] com = new double[3];
                    int j = 0;
                    while (j < clusterN) {
                        // Generate a random point within a circle uniformly
                        // http://stackoverflow.com/questions/5837572/generate-a-random-point-within-a-circle-uniformly
                        double t = 2.0 * Math.PI * randomGenerator.nextDouble();
                        double u = randomGenerator.nextDouble() + randomGenerator.nextDouble();
                        double r = clusterRadius * ((u > 1) ? 2 - u : u);
                        double x = r * Math.cos(t);
                        double y = r * Math.sin(t);
                        double[] xy = new double[] { clusterCentre[0] + x, clusterCentre[1] + y };
                        xyz.add(xy);
                        for (int k = 0; k < 2; k++)
                            com[k] += xy[k];
                        j++;
                    }
                    // Add the distance of the points from the centre of the cluster.
                    // Note this does not account for the movement due to precision.
                    statsSize.add(j);
                    if (j == 1) {
                        statsRadius.add(0);
                    } else {
                        for (int k = 0; k < 2; k++)
                            com[k] /= j;
                        while (j > 0) {
                            double dx = xyz.get(xyz.size() - j)[0] - com[0];
                            double dy = xyz.get(xyz.size() - j)[1] - com[1];
                            statsRadius.add(Math.sqrt(dx * dx + dy * dy));
                            j--;
                        }
                    }
                }
            }
        }
    } else {
        // Random distribution
        for (int i = 0; i < nMolecules; i++)
            xyz.add(dist.next());
    }

    // The Gaussian sigma should be applied so the overall distance from the centre
    // ( sqrt(x^2+y^2) ) has a standard deviation of sigmaS?
    final double sigma1D = sigmaS / Math.sqrt(2);

    // Show optional histograms
    StoredDataStatistics intraDistances = null;
    StoredDataStatistics blinks = null;
    if (showHistograms) {
        int capacity = (int) (xyz.size() * blinkingRate);
        intraDistances = new StoredDataStatistics(capacity);
        blinks = new StoredDataStatistics(capacity);
    }

    Statistics statsSigma = new Statistics();
    for (int i = 0; i < xyz.size(); i++) {
        int nOccurrences = getBlinks(dataGenerator, blinkingRate);
        if (showHistograms)
            blinks.add(nOccurrences);

        final int size = molecules.size();

        // Get coordinates in nm
        final double[] moleculeXyz = xyz.get(i);

        if (bp != null && nOccurrences > 0) {
            bp.putPixel((int) Math.round(moleculeXyz[0] / maskScale),
                    (int) Math.round(moleculeXyz[1] / maskScale), 255);
        }

        while (nOccurrences-- > 0) {
            final double[] localisationXy = Arrays.copyOf(moleculeXyz, 2);
            // Add random precision
            if (sigma1D > 0) {
                final double dx = dataGenerator.nextGaussian(0, sigma1D);
                final double dy = dataGenerator.nextGaussian(0, sigma1D);
                localisationXy[0] += dx;
                localisationXy[1] += dy;
                if (!dist.isWithinXY(localisationXy))
                    continue;
                // Calculate mean-squared displacement
                statsSigma.add(dx * dx + dy * dy);
            }
            final double x = localisationXy[0];
            final double y = localisationXy[1];
            molecules.add(new Molecule(x, y, i, 1));

            // Store in pixels
            float[] params = new float[7];
            params[Gaussian2DFunction.X_POSITION] = (float) (x / nmPerPixel);
            params[Gaussian2DFunction.Y_POSITION] = (float) (y / nmPerPixel);
            results.add(i + 1, (int) x, (int) y, 0, 0, 0, params, null);
        }

        if (molecules.size() > size) {
            count++;
            if (showHistograms) {
                int newCount = molecules.size() - size;
                if (newCount == 1) {
                    // No intra-molecule distances
                    //intraDistances.add(0);
                    continue;
                }

                // Get the distance matrix between these molecules
                double[][] matrix = new double[newCount][newCount];
                for (int ii = size, x = 0; ii < molecules.size(); ii++, x++) {
                    for (int jj = size + 1, y = 1; jj < molecules.size(); jj++, y++) {
                        final double d2 = molecules.get(ii).distance2(molecules.get(jj));
                        matrix[x][y] = matrix[y][x] = d2;
                    }
                }

                // Get the maximum distance for particle linkage clustering of this molecule
                double max = 0;
                for (int x = 0; x < newCount; x++) {
                    // Compare to all-other molecules and get the minimum distance 
                    // needed to join at least one
                    double linkDistance = Double.POSITIVE_INFINITY;
                    for (int y = 0; y < newCount; y++) {
                        if (x == y)
                            continue;
                        if (matrix[x][y] < linkDistance)
                            linkDistance = matrix[x][y];
                    }
                    // Check if this is larger 
                    if (max < linkDistance)
                        max = linkDistance;
                }
                intraDistances.add(Math.sqrt(max));
            }
        }
    }
    results.end();

    if (bp != null)
        Utils.display(maskTitle, bp);

    // Used for debugging
    //System.out.printf("  * Molecules = %d (%d activated)\n", xyz.size(), count);
    //if (clusterSimulation > 0)
    //   System.out.printf("  * Cluster number = %s +/- %s. Radius = %s +/- %s\n",
    //         Utils.rounded(statsSize.getMean(), 4), Utils.rounded(statsSize.getStandardDeviation(), 4),
    //         Utils.rounded(statsRadius.getMean(), 4), Utils.rounded(statsRadius.getStandardDeviation(), 4));

    log("Simulation results");
    log("  * Molecules = %d (%d activated)", xyz.size(), count);
    log("  * Blinking rate = %s", Utils.rounded((double) molecules.size() / xyz.size(), 4));
    log("  * Precision (Mean-displacement) = %s nm",
            (statsSigma.getN() > 0) ? Utils.rounded(Math.sqrt(statsSigma.getMean()), 4) : "0");
    if (showHistograms) {
        if (intraDistances.getN() == 0) {
            log("  * Mean Intra-Molecule particle linkage distance = 0 nm");
            log("  * Fraction of inter-molecule particle linkage @ 0 nm = 0 %%");
        } else {
            plot(blinks, "Blinks/Molecule", true);
            double[][] intraHist = plot(intraDistances, "Intra-molecule particle linkage distance", false);

            // Determine 95th and 99th percentile
            int p99 = intraHist[0].length - 1;
            double limit1 = 0.99 * intraHist[1][p99];
            double limit2 = 0.95 * intraHist[1][p99];
            while (intraHist[1][p99] > limit1 && p99 > 0)
                p99--;
            int p95 = p99;
            while (intraHist[1][p95] > limit2 && p95 > 0)
                p95--;

            log("  * Mean Intra-Molecule particle linkage distance = %s nm (95%% = %s, 99%% = %s, 100%% = %s)",
                    Utils.rounded(intraDistances.getMean(), 4), Utils.rounded(intraHist[0][p95], 4),
                    Utils.rounded(intraHist[0][p99], 4),
                    Utils.rounded(intraHist[0][intraHist[0].length - 1], 4));

            if (distanceAnalysis) {
                performDistanceAnalysis(intraHist, p99);
            }
        }
    }
    if (clusterSimulation > 0) {
        log("  * Cluster number = %s +/- %s", Utils.rounded(statsSize.getMean(), 4),
                Utils.rounded(statsSize.getStandardDeviation(), 4));
        log("  * Cluster radius = %s +/- %s nm (mean distance to centre-of-mass)",
                Utils.rounded(statsRadius.getMean(), 4), Utils.rounded(statsRadius.getStandardDeviation(), 4));
    }
}
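Note how the simulation seeds its Well19937c generator with System.currentTimeMillis() + System.identityHashCode(this), so two plugin instances constructed in the same millisecond still get (almost certainly) different seeds. The idiom in isolation, with java.util.Random standing in for the Commons Math generator:

import java.util.Random;

class Simulator {
    // Per-instance seed: differs across instances even when they are
    // constructed within the same clock millisecond.
    private final Random rng = new Random(System.currentTimeMillis() + System.identityHashCode(this));
}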

From source file:org.myrian.persistence.Session.java

public static String str(Object obj) {
    if (obj == null) {
        return "null";
    } else {
        Class klass = obj.getClass();
        if (String.class.isAssignableFrom(klass) || Number.class.isAssignableFrom(klass)
                || Event.class.isAssignableFrom(klass) || Property.class.isAssignableFrom(klass)
                || ObjectType.class.isAssignableFrom(klass) || ObjectMap.class.isAssignableFrom(klass)) {
            return obj.toString();
        } else {
            return klass + "@" + Integer.toHexString(System.identityHashCode(obj));
        }
    }
}
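One subtlety: the fallback branch concatenates the Class object itself, so the result reads "class com.example.Foo@1a2b" rather than the default Object.toString() format. A sketch that reproduces the default format exactly, without invoking a possibly overridden or expensive toString() (defaultToString is our name):

// Sketch: the exact string Object.toString() would produce for any object.
final class Identities {
    static String defaultToString(Object obj) {
        return obj.getClass().getName() + "@" + Integer.toHexString(System.identityHashCode(obj));
    }
}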

From source file:org.openmrs.module.ModuleClassLoader.java

/**
 * @see java.lang.Object#toString()
 */
@Override
public String toString() {
    return "{ModuleClassLoader: uid=" + System.identityHashCode(this) + "; " + module + "}";
}