Example usage for java.lang.String.intern()

List of usage examples for java.lang.String.intern()

Introduction

On this page you can find usage examples for java.lang.String.intern().

Prototype

public native String intern();

Document

Returns a canonical representation for the string object.
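
The snippet below is a minimal, self-contained sketch of what that canonical representation means in practice: two distinct String objects with equal contents resolve to the same instance once interned, so they can be compared by reference. The class name and values are illustrative only.

public class InternDemo {
    public static void main(String[] args) {
        // Constructed at runtime, so these are two distinct String objects.
        String a = new String("example");
        String b = new String("example");

        System.out.println(a == b);                   // false: different instances
        System.out.println(a.intern() == b.intern()); // true: same canonical instance
        System.out.println(a.intern() == "example");  // true: string literals are already interned
    }
}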

Usage

From source file:org.wso2.carbon.registry.core.caching.CacheBackedRegistry.java

private GhostResource<Resource> getGhostCollectionFromCache(String path, int start, int pageSize)
        throws RegistryException {
    Collection collection;

    GhostResource<Resource> ghostResource;

    RegistryCacheKey registryCacheKey = getRegistryCacheKey(registry,
            path + ";start=" + start + ";pageSize=" + pageSize);

    Cache<RegistryCacheKey, GhostResource> cache = getCache();
    if (!cache.containsKey(registryCacheKey)) {
        synchronized (path.intern()) {
            // Check the cache again to verify that no other thread has updated it in the meantime.
            if (!cache.containsKey(registryCacheKey)) {
                collection = registry.get(path, start, pageSize);
                ghostResource = new GhostResource<Resource>(collection);
                if (collection.getProperty(RegistryConstants.REGISTRY_LINK) == null) {
                    cache.put(registryCacheKey, ghostResource);
                }
            } else {
                ghostResource = (GhostResource<Resource>) cache.get(registryCacheKey);
            }
        }
    } else {
        ghostResource = (GhostResource<Resource>) cache.get(registryCacheKey);
    }

    return ghostResource;
}
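
Synchronizing on path.intern() works here because intern() always returns the single canonical String instance for a given character sequence, so every thread that builds an equal path acquires the same monitor. Below is a stripped-down sketch of that double-checked pattern; the InternKeyedCache class, its cache field, and loadExpensively() are placeholders invented for illustration, not part of the WSO2 code.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

class InternKeyedCache {
    private final ConcurrentMap<String, Object> cache = new ConcurrentHashMap<>();

    Object getOrLoad(String key) {
        Object value = cache.get(key);
        if (value == null) {
            // Threads interning equal keys lock on the same canonical instance.
            synchronized (key.intern()) {
                value = cache.get(key); // re-check inside the lock
                if (value == null) {
                    value = loadExpensively(key);
                    cache.put(key, value);
                }
            }
        }
        return value;
    }

    // Stand-in for an expensive load such as registry.get(path, start, pageSize).
    private Object loadExpensively(String key) {
        return new Object();
    }
}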

From source file:com.thoughtworks.go.server.service.ScheduleService.java

private String mutexForPipeline(String pipelineName) {
    String s = String.format("%s_forPipeline_%s", getClass().getName(), pipelineName);
    return s.intern(); // interned because we synchronize on it
}
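
The returned string is meant to be used as a monitor. A hedged sketch of how a caller might use it follows; schedulePipeline() and its body are hypothetical and not taken from ScheduleService:

public void schedulePipeline(String pipelineName) {
    // mutexForPipeline() returns an interned string, so every caller passing an
    // equal pipelineName synchronizes on the exact same object.
    synchronized (mutexForPipeline(pipelineName)) {
        // ... do the per-pipeline scheduling work; only one thread per pipeline runs this ...
    }
}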

From source file:dk.netarkivet.harvester.indexserver.distribute.IndexRequestServer.java

/**
 * Method that handles the processing of an IndexRequestMessage. Returns the requested index immediately if it is
 * already available; otherwise proceeds to generate it. Must be run in its own thread,
 * because it blocks while the index is generated.
 *
 * @param irMsg A message requesting an index
 * @see #visit(IndexRequestMessage)
 */
private void doProcessIndexRequestMessage(final IndexRequestMessage irMsg) {
    final boolean mustReturnIndex = irMsg.mustReturnIndex();
    try {
        checkMessage(irMsg);
        RequestType type = irMsg.getRequestType();
        Set<Long> jobIDs = irMsg.getRequestedJobs();

        if (log.isInfoEnabled()) {
            log.info("Request received for an index of type '{}' for the {} jobs [{}]", type, jobIDs.size(),
                    StringUtils.conjoin(",", jobIDs));
        }
        FileBasedCache<Set<Long>> handler = handlers.get(type);

        // Here we need to make sure that we don't accidentally process more than
        // one message at a time before the whole process is over
        List<Long> sortedList = new ArrayList<Long>(jobIDs);
        String allIDsString = StringUtils.conjoin("-", sortedList);
        String checksum = ChecksumCalculator.calculateMd5(allIDsString.getBytes());
        log.debug(
                "Waiting to enter the synchronization zone for the indexing job of size {} with checksum '{}'",
                jobIDs.size(), checksum);
        // Begin synchronization
        synchronized (checksum.intern()) {
            log.debug("The indexing job of size {} with checksum '{}' is now in the synchronization zone",
                    jobIDs.size(), checksum);
            Set<Long> foundIDs = handler.cache(jobIDs);
            irMsg.setFoundJobs(foundIDs);
            if (foundIDs.equals(jobIDs)) {
                if (log.isInfoEnabled()) {
                    log.info("Retrieved successfully index of type '{}' for the {} jobs [{}]", type,
                            jobIDs.size(), StringUtils.conjoin(",", jobIDs));
                }
                File cacheFile = handler.getCacheFile(jobIDs);
                if (mustReturnIndex) { // return index now!
                    packageResultFiles(irMsg, cacheFile);
                }
            } else if (satisfactoryTresholdReached(foundIDs, jobIDs)) {
                log.info(
                        "Data for full index w/ {} jobs not available. Only found data for {} jobs - "
                                + "but satisfactoryTreshold reached, so assuming presence of all data",
                        jobIDs.size(), foundIDs.size());
                // Make sure that the index of the data available is generated
                Set<Long> theFoundIDs = handler.cache(foundIDs);
                // theFoundIDs should be identical to foundIDs.
                // Let's make sure of that.
                Set<Long> diffSet = new HashSet<Long>(foundIDs);
                diffSet.removeAll(theFoundIDs);

                // Make a copy of the index available, and give it the name of
                // the index cache file wanted.
                File cacheFileWanted = handler.getCacheFile(jobIDs);
                File cacheFileCreated = handler.getCacheFile(foundIDs);

                log.info("Satisfactory threshold reached - copying index {} '{}' to full index: {}",
                        (cacheFileCreated.isDirectory() ? "dir" : "file"), cacheFileCreated.getAbsolutePath(),
                        cacheFileWanted.getAbsolutePath());
                if (cacheFileCreated.isDirectory()) {
                    // create destination cacheFileWanted, and
                    // copy all files in cacheFileCreated to cacheFileWanted.
                    cacheFileWanted.mkdirs();
                    FileUtils.copyDirectory(cacheFileCreated, cacheFileWanted);
                } else {
                    FileUtils.copyFile(cacheFileCreated, cacheFileWanted);
                }

                // TODO This delete operation is commented out because it is deemed too dangerous,
                // as the cache dir represented by cacheFileCreated may still be in use.

                // log.info("Deleting the temporary index "
                // + cacheFileCreated.getAbsolutePath());
                // FileUtils.removeRecursively(cacheFileCreated);
                log.info("We keep the index '{}', as we don't know if anybody is using it",
                        cacheFileCreated.getAbsolutePath());

                // Information needed by recipient to store index in local cache
                irMsg.setFoundJobs(jobIDs);
                if (mustReturnIndex) { // return index now.
                    packageResultFiles(irMsg, cacheFileWanted);
                }
            } else {
                Set<Long> missingJobIds = new HashSet<Long>(jobIDs);
                missingJobIds.removeAll(foundIDs);
                log.warn("Failed generating index of type '{}' for the jobs [{}]. Missing data for jobs [{}].",
                        type, StringUtils.conjoin(",", jobIDs), StringUtils.conjoin(",", missingJobIds));
            }

        } // End of synchronization block
    } catch (Throwable t) {
        log.warn("Unable to generate index for jobs [" + StringUtils.conjoin(",", irMsg.getRequestedJobs())
                + "]", t);
        irMsg.setNotOk(t);
    } finally {
        // Remove job from currentJobs Set
        synchronized (currentJobs) {
            currentJobs.remove(irMsg.getID());
        }
        // delete stored message
        deleteStoredMessage(irMsg);
        String state = "failed";
        if (irMsg.isOk()) {
            state = "successful";
        }
        if (mustReturnIndex) {
            log.info("Sending {} reply for IndexRequestMessage back to sender '{}'.", state,
                    irMsg.getReplyTo());
            JMSConnectionFactory.getInstance().reply(irMsg);
        } else {
            log.info("Sending {} IndexReadyMessage to Scheduler for harvest {}", state, irMsg.getHarvestId());
            boolean isindexready = true;
            if (state.equalsIgnoreCase("failed")) {
                isindexready = false;
            }
            IndexReadyMessage irm = new IndexReadyMessage(irMsg.getHarvestId(), isindexready,
                    irMsg.getReplyTo(), Channels.getTheIndexServer());
            JMSConnectionFactory.getInstance().send(irm);
        }
    }
}

From source file:com.github.jsonj.JsonObject.java

/**
 * By default, the hash code is calculated recursively, which can be rather expensive. Calling this method allows
 * you to specify a special field that will be used for calculating this object's hashcode. In case the field value
 * is null it will fall back to recursive behavior.
 *
 * @param fieldName
 *            name of the field value that should be used for calculating the hash code
 */
public void useIdHashCodeStrategy(String fieldName) {
    idField = fieldName.intern();
}
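
Interning the field name keeps a single copy of it around and can make later comparisons cheap. One plausible shape for a hashCode() that uses this idField is sketched below; it is an assumption about the strategy the Javadoc describes, not the actual jsonj implementation, and recursiveHashCode() is a hypothetical stand-in for the default recursive calculation:

@Override
public int hashCode() {
    if (idField != null) {
        Object idValue = get(idField);  // look up the designated id field
        if (idValue != null) {
            return idValue.hashCode();  // cheap: hash only the id value
        }
    }
    return recursiveHashCode();         // hypothetical fallback to the expensive recursive hash
}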

From source file:com.l2jfree.gameserver.gameobjects.templates.L2NpcTemplate.java

/**
 * @param factionId the factionId to set
 */
public void setFactionId(String factionId) {
    if (factionId == null) {
        _factionId = null;
        return;
    }
    _factionId = factionId.intern();
}
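
Because every template with the same faction name ends up holding the same interned instance, code that groups NPCs by faction can compare the ids by reference. The method below is an illustrative sketch, not part of L2NpcTemplate:

public boolean isSameFactionAs(L2NpcTemplate other) {
    // Both ids are interned, so reference equality is sufficient (and null-safe here).
    return _factionId != null && other != null && _factionId == other._factionId;
}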

From source file:com.idylwood.yahoo.YahooFinance.java

DivSplitTable HistoricalDivSplits(String symbol) throws IOException {
    DivSplitTable ret;
    // TODO maybe this lock needs to be more sophisticated.
    synchronized (mDivSplits) {
        ret = mDivSplits.get(symbol);
        // TODO make this more sophisticated. As it is, it will re-download the whole table every day.
        if (null == ret || System.currentTimeMillis() - ret.dateAccessed > TwentyFourHours) {
            ret = DownloadHistoricalDivSplits(symbol);
            // intern it in case calling code is smart
            mDivSplits.put(symbol.intern(), ret);
        }
    }
    return ret;
}

From source file:org.wso2.andes.kernel.slot.SlotManager.java

/**
 * Get an ordered set of existing, assigned slots that overlap with the input slot range.
 *
 * @param queueName  name of destination queue
 * @param startMsgID start message ID of input slot
 * @param endMsgID   end message ID of input slot
 * @return TreeSet<Slot>
 */
private TreeSet<Slot> getOverlappedAssignedSlots(String queueName, long startMsgID, long endMsgID) {
    TreeSet<Slot> overlappedSlots = new TreeSet<Slot>();

    // Sweep all assigned slots (via slotAssignmentMap) to find overlaps,
    // since that map is optimized for node- and queue-wise iteration.
    if (AndesContext.getInstance().isClusteringEnabled()) {

        // The requirement here is to clear slot associations for the queue on all nodes.
        List<String> nodeIDs = HazelcastAgent.getInstance().getMembersNodeIDs();

        for (String nodeID : nodeIDs) {
            String lockKey = nodeID + SlotManager.class;

            TreeSet<Slot> overlappingSlotsOnNode = new TreeSet<Slot>();

            synchronized (lockKey.intern()) {
                HashmapStringTreeSetWrapper wrapper = slotAssignmentMap.get(nodeID);

                if (!overLappedSlotMap.containsKey(nodeID)) {
                    overLappedSlotMap.put(nodeID, new HashmapStringTreeSetWrapper());
                }
                HashmapStringTreeSetWrapper olWrapper = overLappedSlotMap.get(nodeID);

                HashMap<String, TreeSet<Slot>> olSlotMap = olWrapper.getStringListHashMap();

                if (!olSlotMap.containsKey(queueName)) {
                    olSlotMap.put(queueName, new TreeSet<Slot>());
                    olWrapper.setStringListHashMap(olSlotMap);
                    overLappedSlotMap.set(nodeID, olWrapper);
                }

                if (wrapper != null) {
                    HashMap<String, TreeSet<Slot>> queueToSlotMap = wrapper.getStringListHashMap();
                    if (queueToSlotMap != null) {
                        TreeSet<Slot> slotListForQueueOnNode = queueToSlotMap.get(queueName);
                        if (null != slotListForQueueOnNode) {
                            for (Slot slot : slotListForQueueOnNode) {
                                if (endMsgID < slot.getStartMessageId())
                                    continue; // skip this one, it's below our range
                                if (startMsgID > slot.getEndMessageId())
                                    continue; // skip this one, it's above our range
                                slot.setAnOverlappingSlot(true);
                                if (log.isDebugEnabled()) {
                                    log.debug("Marked already assigned slot as an overlapping" + " slot. Slot= "
                                            + slot);
                                }
                                overlappingSlotsOnNode.add(slot);

                                if (log.isDebugEnabled()) {
                                    log.debug("Found an overlapping slot : " + slot);
                                }

                                //Add to global overlappedSlotMap
                                olSlotMap.get(queueName).remove(slot);
                                olSlotMap.get(queueName).add(slot);

                            }
                        }
                    }
                    wrapper.setStringListHashMap(queueToSlotMap);
                    slotAssignmentMap.set(nodeID, wrapper);
                }

                // Add all marked slots collected into the olSlot to global overlappedSlotsMap.
                olWrapper.setStringListHashMap(olSlotMap);
                overLappedSlotMap.set(nodeID, olWrapper);

                // Add to return collection
                overlappedSlots.addAll(overlappingSlotsOnNode);
            }
        }
    }

    return overlappedSlots;
}

From source file:org.paxle.filter.robots.impl.RobotsTxtManager.java

private RobotsTxt getRobotsTxt(URI baseUri) throws IOException, URISyntaxException {
    String hostPort = this.getHostPort(baseUri);

    synchronized (hostPort.intern()) {
        RobotsTxt robotsTxt = null;

        // trying to get the robots.txt from cache
        robotsTxt = this.getFromCache(hostPort);

        // trying to get the robots.txt from file
        if (robotsTxt == null) {
            try {
                robotsTxt = this.loader.read(hostPort);
            } catch (Exception e) {
                this.logger.error(String.format(
                        "Unexpected '%s' while trying to load robots.txt file for domain '%s' from DB.",
                        e.getClass().getName(), baseUri.toASCIIString()), e);
            }

            if (robotsTxt != null) {
                this.putIntoCache(hostPort, robotsTxt);
            }
        }

        // trying to download the robots.txt
        boolean newDomain = robotsTxt == null;
        if (newDomain || (System.currentTimeMillis() - robotsTxt.getLoadedDate().getTime() > robotsTxt
                .getReloadInterval())) {
            robotsTxt = this.getFromWeb(URI.create(baseUri.toASCIIString() + "/robots.txt"));
            this.putIntoCache(hostPort, robotsTxt);
            this.loader.write(robotsTxt);
        }

        return robotsTxt;
    }
}

From source file:org.transitime.utils.csv.CsvBase.java

/**
 * For reading values from a CSVRecord. If the column was not defined then
 * CSVRecord.get() throws an exception. Therefore, for optional CSV columns it is
 * better to use this method so that no exception is thrown. That way processing
 * can continue and all errors for the data will be logged, which is better than
 * just logging the first error and then quitting.
 * <p>
 * Also, if the value is an empty string then it is converted to null for
 * consistency. The resulting string is also trimmed since some agencies leave in
 * spaces.
 * 
 * @param record
 *            The data for the row in the CSV file
 * @param name
 *            The name of the column in the CSV file
 * @param required
 *            Whether this value is required. If required and the value is
 *            not set then an error is logged and null is returned.
 * @return The value, or null if it was not defined
 */
private String getValue(CSVRecord record, String name, boolean required) {
    // If the column is not defined in the file then return null.
    // After all, the item is optional so it is fine for it to
    // not be in the file.
    if (!record.isSet(name)) {
        if (required) {
            logger.error("Column {} not defined in file \"{}\" yet it is required", name, getFileName());
        }
        return null;
    }

    // Get the value. First trim whitespace so that
    // value will be consistent. Sometimes agencies will mistakenly have
    // some whitespace in the columns.
    String value = record.get(name).trim();

    // Return the value. But if the value is empty string
    // convert to null for consistency.
    if (value.isEmpty()) {
        if (required) {
            logger.error(
                    "For file \"{}\" line number {} for column {} value was not set " + "yet it is required",
                    getFileName(), lineNumber, name);
        }
        return null;
    } else {
        // Successfully got value so return intern() version of it. Using 
        // intern() because many strings are duplicates such as headsign
        // or shape info. By sharing the strings we can save a huge amount
        // of memory.
        return value.intern();
    }
}
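
A minimal illustration of the memory effect described in the comment above: each row of a CSV file yields a fresh String object even when the text repeats, but after intern() every repeated value points at one shared instance. The rows array below is made-up sample data.

String[] rows = { "Downtown via Main St", "Downtown via Main St", "Downtown via Main St" };

String first = null;
for (String row : rows) {
    String headsign = new String(row).intern(); // simulate a freshly parsed value, then intern it
    if (first == null) {
        first = headsign;
    } else {
        System.out.println(first == headsign); // true: all rows share one String instance
    }
}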

From source file:com.thoughtworks.go.server.service.ScheduleService.java

private String mutexForStageInstance(String pipelineName, Integer pipelineCounter, String stageName,
        String stageCounter) {
    String s = String.format("%s_forStageInstance_%s_%s_%s_%s", getClass().getName(), pipelineName,
            pipelineCounter, stageName, stageCounter);
    return s.intern(); // interned because we synchronize on it
}