Example usage for com.google.common.base Predicates equalTo

Introduction

This page collects example usages of com.google.common.base.Predicates.equalTo drawn from open-source projects.

Prototype

public static <T> Predicate<T> equalTo(@Nullable T target) 

Document

Returns a predicate that evaluates to true if the object being tested equals() the given target, or if both are null.
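
A minimal, self-contained sketch of that null-safe equality behavior (the class name EqualToDemo is just for illustration):

import com.google.common.base.Predicate;
import com.google.common.base.Predicates;

public class EqualToDemo {
    public static void main(String[] args) {
        // Matches any String that equals() "foo".
        Predicate<String> isFoo = Predicates.equalTo("foo");
        System.out.println(isFoo.apply("foo")); // true
        System.out.println(isFoo.apply("bar")); // false

        // equalTo(null) matches only null; testing null does not throw.
        Predicate<String> isNull = Predicates.<String>equalTo(null);
        System.out.println(isNull.apply(null));  // true
        System.out.println(isNull.apply("baz")); // false
    }
}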

Usage

From source file:com.android.build.gradle.tasks.PackageAndroidArtifact.java

/**
 * Obtains all changed inputs of a given input set. Given a set of files mapped to their
 * changed status, this method returns a list of changes computed as follows:
 *
 * <ol>
 *     <li>Changed inputs are split into deleted and non-deleted inputs. This separation is
 *     needed because deleted inputs may no longer be mappable to any {@link InputSet} just
 *     by looking at the file path, without using {@link KnownFilesSaveData}.
 *     <li>Deleted inputs are filtered through {@link KnownFilesSaveData} to get only those
 *     whose input set matches {@code inputSet}.
 *     <li>Non-deleted inputs are processed through
 *     {@link IncrementalRelativeFileSets#makeFromBaseFiles(Collection, Map, FileCacheByPath)}
 *     to obtain the incremental file changes.
 *     <li>The results of processing the deleted and non-deleted inputs are merged and returned.
 * </ol>
 *
 * @param changedInputs all changed inputs
 * @param saveData the save data with all input sets from last run
 * @param inputSet the input set to filter
 * @param baseFiles the base files of the input set
 * @param cacheByPath where to cache files
 * @return the status of all relative files in the input set
 */
@NonNull
private ImmutableMap<RelativeFile, FileStatus> getChangedInputs(@NonNull Map<File, FileStatus> changedInputs,
        @NonNull KnownFilesSaveData saveData, @NonNull InputSet inputSet, @NonNull Collection<File> baseFiles,
        @NonNull FileCacheByPath cacheByPath) throws IOException {

    /*
     * Figure out changes to deleted files.
     */
    Set<File> deletedFiles = Maps.filterValues(changedInputs, Predicates.equalTo(FileStatus.REMOVED)).keySet();
    Set<RelativeFile> deletedRelativeFiles = saveData.find(deletedFiles, inputSet);

    /*
     * Figure out changes to non-deleted files.
     */
    Map<File, FileStatus> nonDeletedFiles = Maps.filterValues(changedInputs,
            Predicates.not(Predicates.equalTo(FileStatus.REMOVED)));
    Map<RelativeFile, FileStatus> nonDeletedRelativeFiles = IncrementalRelativeFileSets
            .makeFromBaseFiles(baseFiles, nonDeletedFiles, cacheByPath);

    /*
     * Merge everything.
     */
    return new ImmutableMap.Builder<RelativeFile, FileStatus>()
            .putAll(Maps.asMap(deletedRelativeFiles, Functions.constant(FileStatus.REMOVED)))
            .putAll(nonDeletedRelativeFiles).build();
}
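
The core move above is splitting a map of per-file statuses into deleted and non-deleted entries with Maps.filterValues. A minimal sketch of that pattern in isolation, assuming a simple FileStatus enum in place of the Android Gradle plugin types:

import java.io.File;
import java.util.Map;
import java.util.Set;

import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;

public class ChangedInputsSketch {
    enum FileStatus { NEW, CHANGED, REMOVED }

    public static void main(String[] args) {
        Map<File, FileStatus> changedInputs = ImmutableMap.of(
                new File("a.jar"), FileStatus.REMOVED,
                new File("b.jar"), FileStatus.CHANGED,
                new File("c.jar"), FileStatus.NEW);

        // Keys whose value equals REMOVED ...
        Set<File> deleted = Maps.filterValues(
                changedInputs, Predicates.equalTo(FileStatus.REMOVED)).keySet();

        // ... and the complementary view: everything that was not removed.
        Map<File, FileStatus> nonDeleted = Maps.filterValues(
                changedInputs, Predicates.not(Predicates.equalTo(FileStatus.REMOVED)));

        System.out.println(deleted);    // [a.jar]
        System.out.println(nonDeleted); // {b.jar=CHANGED, c.jar=NEW}
    }
}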

From source file:org.commoncrawl.service.parser.ec2.EC2ParserMaster.java

public void scanForCompletions() throws IOException {
    AmazonS3Client s3Client = new AmazonS3Client(new BasicAWSCredentials(s3AccessKeyId, s3SecretKey));

    ObjectListing response = s3Client.listObjects(new ListObjectsRequest().withBucketName("aws-publicdatasets")
            .withPrefix(CC_BUCKET_ROOT + CC_PARSER_INTERMEDIATE));

    do {

        LOG.info("Response Key Count:" + response.getObjectSummaries().size());

        for (S3ObjectSummary entry : response.getObjectSummaries()) {
            Matcher matcher = doneFilePattern.matcher(entry.getKey());
            if (matcher.matches()) {
                ParseCandidate candidate = ParseCandidate.candidateFromBucketEntry(entry.getKey());
                if (candidate == null) {
                    LOG.error("Failed to Parse Candidate for:" + entry.getKey());
                } else {
                    long partialTimestamp = Long.parseLong(matcher.group(2));
                    long position = Long.parseLong(matcher.group(3));
                    LOG.info("Found completion for Log:" + candidate._crawlLogName + " TS:" + partialTimestamp
                            + " Pos:" + position);
                    candidate._lastValidPos = position;

                    // ok lookup existing entry if present ... 
                    ParseCandidate existingCandidate = Iterables.find(_candidates.get(candidate._timestamp),
                            Predicates.equalTo(candidate));
                    // if existing candidate found 
                    if (existingCandidate != null) {
                        LOG.info("Found existing candidate with last pos:" + existingCandidate._lastValidPos);
                        if (candidate._lastValidPos > existingCandidate._lastValidPos) {
                            existingCandidate._lastValidPos = candidate._lastValidPos;
                            if (candidate._lastValidPos == candidate._size) {
                                LOG.info("Found last pos == size for candidate:" + candidate._crawlLogName
                                        + ".REMOVING FROM ACTIVE - MOVING TO COMPLETE");
                                _candidates.remove(candidate._timestamp, candidate);
                                _complete.add(candidate._crawlLogName);
                            }
                        }
                    } else {
                        LOG.info("Skipping Completion for CrawlLog:" + candidate._crawlLogName
                                + " because existing candidate was not found.");
                    }
                }
            }
        }
        if (response.isTruncated()) {
            response = s3Client.listNextBatchOfObjects(response);
        } else {
            break;
        }
    } while (true);
}
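
Iterables.find here looks up an already-known candidate that equals() the freshly parsed one. When reusing this pattern, note that the two-argument Iterables.find throws NoSuchElementException if nothing matches, whereas the three-argument overload returns a supplied default instead, which is the form a null check like the one above pairs with. A small sketch of both forms (list contents are made up):

import java.util.List;

import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;

public class FindEqualSketch {
    public static void main(String[] args) {
        List<String> names = ImmutableList.of("alpha", "beta");

        // Two-argument find: throws NoSuchElementException when no element matches.
        String hit = Iterables.find(names, Predicates.equalTo("beta"));
        System.out.println(hit); // beta

        // Three-argument find: returns the supplied default (here null) on no match,
        // so the caller can test for absence with a null check.
        String miss = Iterables.find(names, Predicates.equalTo("gamma"), null);
        System.out.println(miss); // null
    }
}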

From source file:jetbrains.jetpad.hybrid.BaseHybridSynchronizer.java

private CellTrait placeholderTextEditing() {
    return TextEditing.validTextEditing(Predicates.equalTo(""));
}

From source file:org.trancecode.xproc.PipelineParser.java

private static Step addImplicitInputPort(final Step step) {
    if (STEPS_WITH_IMPLICIT_INPUT_PORT.contains(step.getType())) {
        final Iterable<Port> inputPorts = Iterables.filter(step.getInputPorts(false), Predicates.not(
                Predicates.compose(Predicates.equalTo(XProcPorts.XPATH_CONTEXT), PortFunctions.getPortName())));
        if (Iterables.isEmpty(inputPorts)) {
            final Port port = Port.newInputPort(step.getName(), XProcPorts.SOURCE, step.getLocation())
                    .setPrimary(true);
            LOG.trace("  add implicit input port: {}", port);
            return step.declarePort(port);
        }
    }

    return step;
}
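
Here equalTo is lifted from port names to Port objects with Predicates.compose, then negated, so the filter keeps every input port whose name is not the XPath context port. The same composition reduced to a self-contained sketch; the Port class and GET_NAME function below are hypothetical stand-ins for the real Port and PortFunctions.getPortName():

import java.util.List;

import com.google.common.base.Function;
import com.google.common.base.Predicate;
import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;

public class ComposeEqualToSketch {
    static class Port {
        final String name;
        Port(String name) { this.name = name; }
        @Override public String toString() { return name; }
    }

    // Hypothetical name extractor, standing in for PortFunctions.getPortName().
    static final Function<Port, String> GET_NAME = new Function<Port, String>() {
        @Override public String apply(Port port) { return port.name; }
    };

    public static void main(String[] args) {
        List<Port> ports = ImmutableList.of(
                new Port("source"), new Port("xpath-context"), new Port("parameters"));

        // Keep every port whose extracted name is not equal to "xpath-context".
        Predicate<Port> notXpathContext = Predicates.not(
                Predicates.compose(Predicates.equalTo("xpath-context"), GET_NAME));

        System.out.println(ImmutableList.copyOf(Iterables.filter(ports, notXpathContext)));
        // prints: [source, parameters]
    }
}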

From source file:brooklyn.entity.nosql.couchbase.CouchbaseClusterImpl.java

public void createBucket(final Entity primaryNode, final String bucketName, final String bucketType,
        final Integer bucketPort, final Integer bucketRamSize, final Integer bucketReplica) {
    DynamicTasks.queueIfPossible(
            TaskBuilder.<Void>builder().name("Creating bucket " + bucketName).body(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    DependentConfiguration.waitInTaskForAttributeReady(CouchbaseClusterImpl.this,
                            CouchbaseCluster.BUCKET_CREATION_IN_PROGRESS, Predicates.equalTo(false));
                    if (CouchbaseClusterImpl.this.resetBucketCreation.get() != null) {
                        CouchbaseClusterImpl.this.resetBucketCreation.get().stop();
                    }
                    setAttribute(CouchbaseCluster.BUCKET_CREATION_IN_PROGRESS, true);
                    HostAndPort hostAndPort = BrooklynAccessUtils.getBrooklynAccessibleAddress(primaryNode,
                            primaryNode.getAttribute(CouchbaseNode.COUCHBASE_WEB_ADMIN_PORT));

                    CouchbaseClusterImpl.this.resetBucketCreation.set(HttpFeed.builder()
                            .entity(CouchbaseClusterImpl.this).period(500, TimeUnit.MILLISECONDS)
                            .baseUri(String.format("http://%s/pools/default/buckets/%s", hostAndPort,
                                    bucketName))
                            .credentials(primaryNode.getConfig(CouchbaseNode.COUCHBASE_ADMIN_USERNAME),
                                    primaryNode.getConfig(CouchbaseNode.COUCHBASE_ADMIN_PASSWORD))
                            .poll(new HttpPollConfig<Boolean>(BUCKET_CREATION_IN_PROGRESS)
                                    .onSuccess(Functionals.chain(HttpValueFunctions.jsonContents(),
                                            JsonFunctions.walkN("nodes"), new Function<JsonElement, Boolean>() {
                                                @Override
                                                public Boolean apply(JsonElement input) {
                                                    // Wait until bucket has been created on all nodes and the couchApiBase element has been published (indicating that the bucket is useable)
                                                    JsonArray servers = input.getAsJsonArray();
                                                    if (servers.size() != CouchbaseClusterImpl.this.getMembers()
                                                            .size()) {
                                                        return true;
                                                    }
                                                    for (JsonElement server : servers) {
                                                        Object api = server.getAsJsonObject()
                                                                .get("couchApiBase");
                                                        if (api == null
                                                                || Strings.isEmpty(String.valueOf(api))) {
                                                            return true;
                                                        }
                                                    }
                                                    return false;
                                                }
                                            }))
                                    .onFailureOrException(new Function<Object, Boolean>() {
                                        @Override
                                        public Boolean apply(Object input) {
                                            if (input instanceof brooklyn.util.http.HttpToolResponse) {
                                                if (((brooklyn.util.http.HttpToolResponse) input)
                                                        .getResponseCode() == 404) {
                                                    return true;
                                                }
                                            }
                                            if (input instanceof Throwable)
                                                Exceptions.propagate((Throwable) input);
                                            throw new IllegalStateException(
                                                    "Unexpected response when creating bucket:" + input);
                                        }
                                    }))
                            .build());

                    // TODO: Bail out if bucket creation fails, to allow next bucket to proceed
                    Entities.invokeEffectorWithArgs(CouchbaseClusterImpl.this, primaryNode,
                            CouchbaseNode.BUCKET_CREATE, bucketName, bucketType, bucketPort, bucketRamSize,
                            bucketReplica);
                    DependentConfiguration.waitInTaskForAttributeReady(CouchbaseClusterImpl.this,
                            CouchbaseCluster.BUCKET_CREATION_IN_PROGRESS, Predicates.equalTo(false));
                    if (CouchbaseClusterImpl.this.resetBucketCreation.get() != null) {
                        CouchbaseClusterImpl.this.resetBucketCreation.get().stop();
                    }
                    return null;
                }
            }).build()).orSubmitAndBlock();
}
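
In this snippet (and the org.apache.brooklyn variant below), Predicates.equalTo(false) is passed to Brooklyn's waitInTaskForAttributeReady, which blocks until the BUCKET_CREATION_IN_PROGRESS sensor satisfies the predicate. Stripped of the Brooklyn machinery, the waiting pattern boils down to polling a value until the predicate accepts it; a rough, illustrative sketch with made-up names and timings:

import java.util.concurrent.atomic.AtomicBoolean;

import com.google.common.base.Predicate;
import com.google.common.base.Predicates;
import com.google.common.base.Supplier;

public class WaitForValueSketch {
    /** Polls the supplier until the predicate accepts its value. */
    static <T> T waitFor(Supplier<T> supplier, Predicate<? super T> ready) throws InterruptedException {
        while (true) {
            T value = supplier.get();
            if (ready.apply(value)) {
                return value;
            }
            Thread.sleep(100);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        final AtomicBoolean inProgress = new AtomicBoolean(true);

        // Simulate another task clearing the in-progress flag after half a second.
        new Thread(new Runnable() {
            @Override
            public void run() {
                try { Thread.sleep(500); } catch (InterruptedException ignored) { }
                inProgress.set(false);
            }
        }).start();

        // Block until the flag equals false, analogous to
        // waitInTaskForAttributeReady(..., BUCKET_CREATION_IN_PROGRESS, Predicates.equalTo(false)).
        waitFor(new Supplier<Boolean>() {
            @Override
            public Boolean get() { return inProgress.get(); }
        }, Predicates.equalTo(false));

        System.out.println("bucket creation no longer in progress");
    }
}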

From source file:com.palantir.atlasdb.keyvalue.cassandra.CassandraKeyValueService.java

@Override
public Map<Cell, Value> get(String tableName, Map<Cell, Long> timestampByCell) {
    if (timestampByCell.isEmpty()) {
        log.info("Attempted get on '{}' table with empty cells", tableName);
        return ImmutableMap.of();
    }

    try {
        Long firstTs = timestampByCell.values().iterator().next();
        if (Iterables.all(timestampByCell.values(), Predicates.equalTo(firstTs))) {
            StartTsResultsCollector collector = new StartTsResultsCollector(firstTs);
            loadWithTs(tableName, timestampByCell.keySet(), firstTs, false, collector, readConsistency);
            return collector.collectedResults;
        }

        SetMultimap<Long, Cell> cellsByTs = Multimaps.invertFrom(Multimaps.forMap(timestampByCell),
                HashMultimap.<Long, Cell>create());
        Builder<Cell, Value> builder = ImmutableMap.builder();
        for (long ts : cellsByTs.keySet()) {
            StartTsResultsCollector collector = new StartTsResultsCollector(ts);
            loadWithTs(tableName, cellsByTs.get(ts), ts, false, collector, readConsistency);
            builder.putAll(collector.collectedResults);
        }
        return builder.build();
    } catch (Exception e) {
        throw Throwables.throwUncheckedException(e);
    }
}
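
The Iterables.all call above detects the common case where every cell is requested at the same timestamp, so a single bulk load suffices instead of one load per distinct timestamp. The check in isolation (cell names and timestamps are made up):

import java.util.Map;

import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterables;

public class AllEqualSketch {
    public static void main(String[] args) {
        Map<String, Long> timestampByCell = ImmutableMap.of("cellA", 10L, "cellB", 10L, "cellC", 10L);

        Long firstTs = timestampByCell.values().iterator().next();

        // True only if every requested timestamp equals the first one.
        boolean uniformTimestamp =
                Iterables.all(timestampByCell.values(), Predicates.equalTo(firstTs));

        System.out.println(uniformTimestamp); // true
    }
}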

From source file:org.apache.brooklyn.entity.nosql.couchbase.CouchbaseClusterImpl.java

public void createBucket(final Entity primaryNode, final String bucketName, final String bucketType,
        final Integer bucketPort, final Integer bucketRamSize, final Integer bucketReplica) {
    DynamicTasks.queueIfPossible(
            TaskBuilder.<Void>builder().displayName("Creating bucket " + bucketName).body(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    DependentConfiguration.waitInTaskForAttributeReady(CouchbaseClusterImpl.this,
                            CouchbaseCluster.BUCKET_CREATION_IN_PROGRESS, Predicates.equalTo(false));
                    if (CouchbaseClusterImpl.this.resetBucketCreation.get() != null) {
                        CouchbaseClusterImpl.this.resetBucketCreation.get().stop();
                    }
                    sensors().set(CouchbaseCluster.BUCKET_CREATION_IN_PROGRESS, true);
                    HostAndPort hostAndPort = BrooklynAccessUtils.getBrooklynAccessibleAddress(primaryNode,
                            primaryNode.getAttribute(CouchbaseNode.COUCHBASE_WEB_ADMIN_PORT));

                    CouchbaseClusterImpl.this.resetBucketCreation.set(HttpFeed.builder()
                            .entity(CouchbaseClusterImpl.this).period(500, TimeUnit.MILLISECONDS)
                            .baseUri(String.format("http://%s/pools/default/buckets/%s", hostAndPort,
                                    bucketName))
                            .credentials(primaryNode.getConfig(CouchbaseNode.COUCHBASE_ADMIN_USERNAME),
                                    primaryNode.getConfig(CouchbaseNode.COUCHBASE_ADMIN_PASSWORD))
                            .poll(new HttpPollConfig<Boolean>(BUCKET_CREATION_IN_PROGRESS)
                                    .onSuccess(Functionals.chain(HttpValueFunctions.jsonContents(),
                                            JsonFunctions.walkN("nodes"), new Function<JsonElement, Boolean>() {
                                                @Override
                                                public Boolean apply(JsonElement input) {
                                                    // Wait until bucket has been created on all nodes and the couchApiBase element has been published (indicating that the bucket is useable)
                                                    JsonArray servers = input.getAsJsonArray();
                                                    if (servers.size() != CouchbaseClusterImpl.this.getMembers()
                                                            .size()) {
                                                        return true;
                                                    }
                                                    for (JsonElement server : servers) {
                                                        Object api = server.getAsJsonObject()
                                                                .get("couchApiBase");
                                                        if (api == null
                                                                || Strings.isEmpty(String.valueOf(api))) {
                                                            return true;
                                                        }
                                                    }
                                                    return false;
                                                }
                                            }))
                                    .onFailureOrException(new Function<Object, Boolean>() {
                                        @Override
                                        public Boolean apply(Object input) {
                                            if (input instanceof HttpToolResponse) {
                                                if (((HttpToolResponse) input).getResponseCode() == 404) {
                                                    return true;
                                                }
                                            }
                                            if (input instanceof Throwable)
                                                Exceptions.propagate((Throwable) input);
                                            throw new IllegalStateException(
                                                    "Unexpected response when creating bucket:" + input);
                                        }
                                    }))
                            .build());

                    // TODO: Bail out if bucket creation fails, to allow next bucket to proceed
                    Entities.invokeEffectorWithArgs(CouchbaseClusterImpl.this, primaryNode,
                            CouchbaseNode.BUCKET_CREATE, bucketName, bucketType, bucketPort, bucketRamSize,
                            bucketReplica);
                    DependentConfiguration.waitInTaskForAttributeReady(CouchbaseClusterImpl.this,
                            CouchbaseCluster.BUCKET_CREATION_IN_PROGRESS, Predicates.equalTo(false));
                    if (CouchbaseClusterImpl.this.resetBucketCreation.get() != null) {
                        CouchbaseClusterImpl.this.resetBucketCreation.get().stop();
                    }
                    return null;
                }
            }).build()).orSubmitAndBlock();
}

From source file:org.apache.brooklyn.entity.software.base.AbstractSoftwareProcessSshDriver.java

/**
 * Sets up a {@link ScriptHelper} to generate a script that controls the given phase
 * (<em>check-running</em>, <em>launching</em> etc.) including default header and
 * footer commands.
 * <p>
 * Supported flags:
 * <ul>
 * <li><strong>usePidFile</strong> - <em>true</em> or <em>filename</em> to save and retrieve the PID
 * <li><strong>processOwner</strong> - <em>username</em> that owns the running process
 * <li><strong>nonStandardLayout</strong> - <em>true</em> to omit all default commands
 * <li><strong>installIncomplete</strong> - <em>true</em> to prevent marking complete
 * <li><strong>debug</strong> - <em>true</em> to enable shell debug output
 * </ul>
 *
 * @param flags a {@link Map} of flags to control script generation
 * @param phase the phase to create the ScriptHelper for
 *
 * @see #newScript(String)
 * @see #USE_PID_FILE
 * @see #PROCESS_OWNER
 * @see #NON_STANDARD_LAYOUT
 * @see #INSTALL_INCOMPLETE
 * @see #DEBUG
 */
protected ScriptHelper newScript(Map<String, ?> flags, String phase) {
    if (!Entities.isManaged(getEntity()))
        throw new IllegalStateException(
                getEntity() + " is no longer managed; cannot create script to run here (" + phase + ")");

    if (!Iterables.all(flags.keySet(), StringPredicates.equalToAny(VALID_FLAGS))) {
        throw new IllegalArgumentException("Invalid flags passed: " + flags);
    }

    ScriptHelper s = new ScriptHelper(this, phase + " " + elvis(entity, this));
    if (!groovyTruth(flags.get(NON_STANDARD_LAYOUT))) {
        if (groovyTruth(flags.get(DEBUG))) {
            s.header.prepend("set -x");
        }
        if (INSTALLING.equals(phase)) {
            // mutexId should be global because otherwise package managers will contend with each other
            s.useMutex(getLocation(), "installation lock at host", "installing " + elvis(entity, this));
            s.header.append("export INSTALL_DIR=\"" + getInstallDir() + "\"", "mkdir -p $INSTALL_DIR",
                    "cd $INSTALL_DIR", "test -f BROOKLYN && exit 0");

            if (!groovyTruth(flags.get(INSTALL_INCOMPLETE))) {
                s.footer.append("date > $INSTALL_DIR/BROOKLYN");
            }
            // don't set vars during install phase, prevent dependency resolution
            s.environmentVariablesReset();
        }
        if (ImmutableSet.of(CUSTOMIZING, LAUNCHING, CHECK_RUNNING, STOPPING, KILLING, RESTARTING)
                .contains(phase)) {
            s.header.append("export RUN_DIR=\"" + getRunDir() + "\"", "mkdir -p $RUN_DIR", "cd $RUN_DIR");
        }
    }

    if (ImmutableSet.of(LAUNCHING, RESTARTING).contains(phase)) {
        s.failIfBodyEmpty();
    }
    if (ImmutableSet.of(STOPPING, KILLING).contains(phase)) {
        // stopping and killing allowed to have empty body if pid file set
        if (!groovyTruth(flags.get(USE_PID_FILE)))
            s.failIfBodyEmpty();
    }
    if (ImmutableSet.of(INSTALLING, LAUNCHING).contains(phase)) {
        s.updateTaskAndFailOnNonZeroResultCode();
    }
    if (phase.equalsIgnoreCase(CHECK_RUNNING)) {
        s.setInessential();
        s.setTransient();
        s.setFlag(SshTool.PROP_CONNECT_TIMEOUT, Duration.TEN_SECONDS.toMilliseconds());
        s.setFlag(SshTool.PROP_SESSION_TIMEOUT, Duration.THIRTY_SECONDS.toMilliseconds());
        s.setFlag(SshTool.PROP_SSH_TRIES, 1);
    }

    if (groovyTruth(flags.get(USE_PID_FILE))) {
        Object usePidFile = flags.get(USE_PID_FILE);
        String pidFile = (usePidFile instanceof CharSequence ? usePidFile
                : Os.mergePathsUnix(getRunDir(), PID_FILENAME)).toString();
        String processOwner = (String) flags.get(PROCESS_OWNER);
        if (LAUNCHING.equals(phase)) {
            entity.sensors().set(SoftwareProcess.PID_FILE, pidFile);
            s.footer.prepend("echo $! > " + pidFile);
        } else if (CHECK_RUNNING.equals(phase)) {
            // old method, for supplied service, or entity.id
            // "ps aux | grep ${service} | grep \$(cat ${pidFile}) > /dev/null"
            // new way, preferred?
            if (processOwner != null) {
                s.body.append(BashCommands.sudoAsUser(processOwner, "test -f " + pidFile) + " || exit 1",
                        "ps -p $(" + BashCommands.sudoAsUser(processOwner, "cat " + pidFile) + ")");
            } else {
                s.body.append("test -f " + pidFile + " || exit 1", "ps -p `cat " + pidFile + "`");
            }
            // no pid, not running; 1 is not running
            s.requireResultCode(Predicates.or(Predicates.equalTo(0), Predicates.equalTo(1)));
        } else if (STOPPING.equals(phase)) {
            if (processOwner != null) {
                s.body.append("export PID=$(" + BashCommands.sudoAsUser(processOwner, "cat " + pidFile) + ")",
                        "test -n \"$PID\" || exit 0", BashCommands.sudoAsUser(processOwner, "kill $PID"),
                        BashCommands.sudoAsUser(processOwner, "kill -9 $PID"),
                        BashCommands.sudoAsUser(processOwner, "rm -f " + pidFile));
            } else {
                s.body.append("export PID=$(cat " + pidFile + ")", "test -n \"$PID\" || exit 0", "kill $PID",
                        "kill -9 $PID", "rm -f " + pidFile);
            }
        } else if (KILLING.equals(phase)) {
            if (processOwner != null) {
                s.body.append("export PID=$(" + BashCommands.sudoAsUser(processOwner, "cat " + pidFile) + ")",
                        "test -n \"$PID\" || exit 0", BashCommands.sudoAsUser(processOwner, "kill -9 $PID"),
                        BashCommands.sudoAsUser(processOwner, "rm -f " + pidFile));
            } else {
                s.body.append("export PID=$(cat " + pidFile + ")", "test -n \"$PID\" || exit 0", "kill -9 $PID",
                        "rm -f " + pidFile);
            }
        } else if (RESTARTING.equals(phase)) {
            if (processOwner != null) {
                s.footer.prepend(BashCommands.sudoAsUser(processOwner, "test -f " + pidFile) + " || exit 1",
                        "ps -p $(" + BashCommands.sudoAsUser(processOwner, "cat " + pidFile) + ") || exit 1");
            } else {
                s.footer.prepend("test -f " + pidFile + " || exit 1", "ps -p $(cat " + pidFile + ") || exit 1");
            }
            // no pid, not running; no process; can't restart, 1 is not running
        } else {
            log.warn(USE_PID_FILE + ": script option not valid for " + s.summary);
        }
    }

    return s;
}
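
In the check-running branch, exit codes 0 and 1 are both acceptable script results (1 simply means the process is not running), expressed by joining two equalTo predicates with Predicates.or. The same disjunction on its own:

import com.google.common.base.Predicate;
import com.google.common.base.Predicates;

public class AllowedExitCodesSketch {
    public static void main(String[] args) {
        // 0 = running, 1 = not running; anything else signals a script failure.
        Predicate<Integer> allowed =
                Predicates.or(Predicates.equalTo(0), Predicates.equalTo(1));

        System.out.println(allowed.apply(0));   // true
        System.out.println(allowed.apply(1));   // true
        System.out.println(allowed.apply(127)); // false
    }
}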

From source file:com.google.devtools.j2cpp.translate.DeadCodeEliminator.java

/**
 * Determines whether a type is visible in the scope of the specified context.
 */
private boolean inScope(ITypeBinding type, ITypeBinding context) {
    return context.equals(type) || context.getSuperclass() != null && inScope(type, context.getSuperclass())
            || Iterables.any(Arrays.asList(context.getDeclaredTypes()), Predicates.equalTo(type));
}
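
Iterables.any combined with equalTo acts as a contains()-style membership test over the declared types. In isolation (type names are made up):

import java.util.Arrays;
import java.util.List;

import com.google.common.base.Predicates;
import com.google.common.collect.Iterables;

public class AnyEqualSketch {
    public static void main(String[] args) {
        List<String> declaredTypes = Arrays.asList("Inner", "Helper");

        // True if any element equals() the target, mirroring the declared-types test above.
        System.out.println(Iterables.any(declaredTypes, Predicates.equalTo("Helper"))); // true
        System.out.println(Iterables.any(declaredTypes, Predicates.equalTo("Other")));  // false
    }
}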

From source file:brooklyn.catalog.internal.BasicBrooklynCatalog.java

@Deprecated
public CatalogItem<?, ?> getCatalogItemForType(String typeName) {
    final CatalogItem<?, ?> resultI;
    final BrooklynCatalog catalog = mgmt.getCatalog();
    if (CatalogUtils.looksLikeVersionedId(typeName)) {
        //All catalog identifiers of the form xxxx:yyyy are composed of symbolicName+version.
        //No javaType is allowed as part of the identifier.
        resultI = CatalogUtils.getCatalogItemOptionalVersion(mgmt, typeName);
    } else {
        //Usually for catalog items with javaType (that is items from catalog.xml)
        //the symbolicName and javaType match because symbolicName (was ID)
        //is not specified explicitly. But could be the case that there is an item
        //whose symbolicName is explicitly set to be different from the javaType.
        //Note that in the XML the attribute is called registeredTypeName.
        Iterable<CatalogItem<Object, Object>> resultL = catalog
                .getCatalogItems(CatalogPredicates.javaType(Predicates.equalTo(typeName)));
        if (!Iterables.isEmpty(resultL)) {
            //Push newer versions in front of the list (not that there should
            //be more than one considering the items are coming from catalog.xml).
            resultI = sortVersionsDesc(resultL).iterator().next();
            if (log.isDebugEnabled() && Iterables.size(resultL) > 1) {
                log.debug("Found " + Iterables.size(resultL) + " matches in catalog for type " + typeName
                        + "; returning the result with preferred version, " + resultI);
            }
        } else {
            //As a last resort try searching for items with the same symbolicName supposedly
            //different from the javaType.
            resultI = catalog.getCatalogItem(typeName, BrooklynCatalog.DEFAULT_VERSION);
            if (resultI != null) {
                if (resultI.getJavaType() == null) {
                    throw new NoSuchElementException("Unable to find catalog item for type " + typeName
                            + ". There is an existing catalog item with ID " + resultI.getId()
                            + " but it doesn't define a class type.");
                }
            }
        }
    }
    return resultI;
}