List of usage examples for com.google.common.base Predicates equalTo
public static <T> Predicate<T> equalTo(@Nullable T target)
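equalTo(target) returns a predicate that evaluates to true for any input that equals() the target (or when both the target and the input are null). A minimal, self-contained sketch of typical use, with illustrative values that are not taken from the project examples below:

import com.google.common.base.Predicate;
import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;

public class EqualToExample {
    public static void main(String[] args) {
        Predicate<String> isFoo = Predicates.equalTo("foo");
        System.out.println(isFoo.apply("foo")); // true
        System.out.println(isFoo.apply("bar")); // false
        System.out.println(isFoo.apply(null));  // false; only Predicates.equalTo(null) matches null

        // Typically combined with the collection utilities, as in the examples below.
        Iterable<String> foos = Iterables.filter(ImmutableList.of("foo", "bar", "foo"), isFoo);
        System.out.println(ImmutableList.copyOf(foos)); // [foo, foo]
    }
}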
From source file:clocker.mesos.entity.MesosClusterImpl.java
public List<String> scanFrameworks(JsonArray frameworks) {
    List<String> frameworkNames = MutableList.<String>of();
    for (int i = 0; i < frameworks.size(); i++) {
        JsonObject task = frameworks.get(i).getAsJsonObject();
        String id = task.get("id").getAsString();
        JsonElement pidObj = task.get("pid");
        String pid = null;
        if (pidObj != null && !pidObj.isJsonNull()) {
            pid = pidObj.getAsString();
        }
        String name = task.get("name").getAsString();
        String url = task.get("webui_url").getAsString();
        frameworkNames.add(name);
        Optional<Entity> entity = Iterables.tryFind(sensors().get(MESOS_FRAMEWORKS).getMembers(),
                Predicates.compose(Predicates.equalTo(id), EntityFunctions.attribute(MesosFramework.FRAMEWORK_ID)));
        if (entity.isPresent())
            continue;
        EntitySpec<? extends MesosFramework> frameworkSpec = EntitySpec
                .create(FRAMEWORKS.containsKey(name) ? FRAMEWORKS.get(name) : EntitySpec.create(MesosFramework.class))
                .configure(MesosFramework.FRAMEWORK_ID, id)
                .configure(MesosFramework.FRAMEWORK_PID, pid)
                .configure(MesosFramework.FRAMEWORK_NAME, name)
                .configure(MesosFramework.FRAMEWORK_URL, url)
                .configure(MesosFramework.MESOS_CLUSTER, this)
                .displayName(String.format("%s Framework", Strings.toInitialCapOnly(name)));
        MesosFramework added = sensors().get(MESOS_FRAMEWORKS).addMemberChild(frameworkSpec);
        added.start(ImmutableList.<Location>of());
    }
    return frameworkNames;
}
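The example above, like several others on this page, combines equalTo with Predicates.compose so that the comparison is applied to an extracted attribute rather than to the element itself. A stripped-down sketch of that pattern, assuming an illustrative Task class and target id rather than the Brooklyn entity APIs:

import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.base.Predicate;
import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;

public class ComposeEqualToExample {

    static class Task {
        final String id;
        Task(String id) { this.id = id; }
    }

    public static void main(String[] args) {
        // Extract the attribute to compare against.
        Function<Task, String> toId = new Function<Task, String>() {
            @Override
            public String apply(Task task) {
                return task.id;
            }
        };

        // compose(equalTo("t-2"), toId) is true for any task whose extracted id equals "t-2".
        Predicate<Task> hasTargetId = Predicates.compose(Predicates.equalTo("t-2"), toId);
        Optional<Task> match = Iterables.tryFind(
                ImmutableList.of(new Task("t-1"), new Task("t-2")), hasTargetId);
        System.out.println(match.isPresent()); // true
    }
}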
From source file:edu.harvard.med.iccbl.screensaver.policy.IccblEntityViewPolicy.java
private Set<Screen> findMutualScreens() {
    if (_mutualScreens == null) {
        _mutualScreens = Sets.newHashSet();
        if (getScreensaverUser().getScreensaverUserRoles()
                .contains(ScreensaverUserRole.SM_DSL_LEVEL1_MUTUAL_SCREENS)) {
            _mutualScreens.addAll(findOthersLevel1AndLevel2Screens(ScreenType.SMALL_MOLECULE));
        }
        if (getScreensaverUser().getScreensaverUserRoles()
                .contains(ScreensaverUserRole.RNAI_DSL_LEVEL1_MUTUAL_SCREENS)) {
            _mutualScreens.addAll(findOthersLevel1AndLevel2Screens(ScreenType.RNAI));
        }
        // filter out the level 2 screens, since we've called findOthersLevel1AndLevel2Screens() for code reuse,
        // even though it returns a superset of screens that we need in this method
        _mutualScreens = Sets.newHashSet(Iterables.filter(_mutualScreens, Predicates.compose(
                Predicates.equalTo(ScreenDataSharingLevel.MUTUAL_SCREENS), Screen.ToDataSharingLevel)));
        if (log.isDebugEnabled()) {
            log.debug("other's mutually shared screens: "
                    + Joiner.on(", ").join(Iterables.transform(_mutualScreens, Screen.ToNameFunction)));
        }
    }
    return _mutualScreens;
}
From source file:org.apache.aurora.scheduler.thrift.ReadOnlySchedulerImpl.java
private Map<IJobKey, IJobConfiguration> getJobs(Optional<String> ownerRole,
        Multimap<IJobKey, IScheduledTask> tasks) {

    // We need to synthesize the JobConfiguration from the current tasks because the
    // ImmediateJobManager doesn't store jobs directly and ImmediateJobManager#getJobs always
    // returns an empty Collection.
    Map<IJobKey, IJobConfiguration> jobs = Maps.newHashMap();
    jobs.putAll(Maps.transformEntries(tasks.asMap(), (jobKey, tasks1) -> {
        // Pick the latest transitioned task for each immediate job since the job can be in the
        // middle of an update or some shards have been selectively created.
        TaskConfig mostRecentTaskConfig =
                Tasks.getLatestActiveTask(tasks1).getAssignedTask().getTask().newBuilder();

        return IJobConfiguration.build(new JobConfiguration()
                .setKey(jobKey.newBuilder())
                .setOwner(mostRecentTaskConfig.getOwner())
                .setTaskConfig(mostRecentTaskConfig)
                .setInstanceCount(tasks1.size()));
    }));

    // Get cron jobs directly from the manager. Do this after querying the task store so the real
    // template JobConfiguration for a cron job will overwrite the synthesized one that could have
    // been created above.
    Predicate<IJobConfiguration> configFilter = ownerRole.isPresent()
            ? Predicates.compose(Predicates.equalTo(ownerRole.get()), JobKeys::getRole)
            : Predicates.alwaysTrue();
    jobs.putAll(Maps.uniqueIndex(
            FluentIterable.from(Storage.Util.fetchCronJobs(storage)).filter(configFilter),
            IJobConfiguration::getKey));

    return jobs;
}
From source file:clocker.mesos.entity.MesosClusterImpl.java
public List<String> scanSlaves(JsonArray slaves) throws UnknownHostException {
    List<String> slaveIds = MutableList.<String>of();
    for (int i = 0; i < slaves.size(); i++) {
        JsonObject slave = slaves.get(i).getAsJsonObject();
        boolean active = slave.get("active").getAsBoolean();
        String id = slave.get("id").getAsString();
        String hostname = slave.get("hostname").getAsString();
        Double registered = slave.get("registered_time").getAsDouble();
        Group group = sensors().get(MESOS_SLAVES);
        Optional<Entity> entity = Iterables.tryFind(group.getMembers(),
                Predicates.compose(Predicates.equalTo(id), EntityFunctions.attribute(MesosSlave.MESOS_SLAVE_ID)));
        if (entity.isPresent()) {
            Entity found = entity.get();
            found.sensors().set(MesosSlave.SLAVE_ACTIVE, active);
            if (!active) {
                Lifecycle state = found.sensors().get(Attributes.SERVICE_STATE_ACTUAL);
                if (Lifecycle.ON_FIRE.equals(state) || Lifecycle.STARTING.equals(state)) {
                    continue;
                } else if (Lifecycle.STOPPING.equals(state) || Lifecycle.STOPPED.equals(state)) {
                    group.removeMember(found);
                    group.removeChild(found);
                    Entities.unmanage(found);
                } else {
                    ServiceStateLogic.setExpectedState(found, Lifecycle.STOPPING);
                }
            }
        } else if (active) {
            LocationSpec<SshMachineLocation> spec = LocationSpec.create(SshMachineLocation.class)
                    .configure(SshMachineLocation.SSH_HOST, hostname)
                    .configure("address", InetAddress.getByName(hostname))
                    .displayName(hostname);
            if (config().get(MESOS_SLAVE_ACCESSIBLE)) {
                spec.configure(CloudLocationConfig.WAIT_FOR_SSHABLE, "true")
                        .configure(SshMachineLocation.DETECT_MACHINE_DETAILS, true)
                        .configure(SshMachineLocation.SSH_PORT, config().get(MesosSlave.SLAVE_SSH_PORT))
                        .configure(LocationConfigKeys.USER, config().get(MesosSlave.SLAVE_SSH_USER))
                        .configure(LocationConfigKeys.PASSWORD, config().get(MesosSlave.SLAVE_SSH_PASSWORD))
                        .configure(SshTool.PROP_PASSWORD, config().get(MesosSlave.SLAVE_SSH_PASSWORD))
                        .configure(SshTool.PROP_PORT, config().get(MesosSlave.SLAVE_SSH_PORT))
                        .configure(LocationConfigKeys.PRIVATE_KEY_DATA, config().get(MesosSlave.SLAVE_SSH_PRIVATE_KEY_DATA))
                        .configure(LocationConfigKeys.PRIVATE_KEY_FILE, config().get(MesosSlave.SLAVE_SSH_PRIVATE_KEY_FILE));
            } else {
                spec.configure(CloudLocationConfig.WAIT_FOR_SSHABLE, "false")
                        .configure(SshMachineLocation.DETECT_MACHINE_DETAILS, false);
            }
            SshMachineLocation machine = getManagementContext().getLocationManager().createLocation(spec);

            // Setup port forwarding
            MarathonPortForwarder portForwarder = new MarathonPortForwarder();
            portForwarder.setManagementContext(getManagementContext());

            EntitySpec<MesosSlave> slaveSpec = EntitySpec.create(MesosSlave.class)
                    .configure(MesosSlave.MESOS_SLAVE_ID, id)
                    .configure(MesosSlave.REGISTERED_AT, registered.longValue())
                    .configure(MesosSlave.MESOS_CLUSTER, this)
                    .displayName("Mesos Slave (" + hostname + ")");
            MesosSlave added = sensors().get(MESOS_SLAVES).addMemberChild(slaveSpec);
            added.sensors().set(MesosSlave.SLAVE_ACTIVE, active);
            added.sensors().set(MesosSlave.HOSTNAME, hostname);
            added.sensors().set(MesosSlave.ADDRESS, hostname);
            added.start(ImmutableList.of(machine));
            portForwarder.init(hostname, this);

            // Setup subnet tier
            SubnetTier subnetTier = added.addChild(EntitySpec.create(SubnetTier.class)
                    .configure(SubnetTier.PORT_FORWARDER, portForwarder)
                    .configure(SubnetTier.SUBNET_CIDR, Cidr.UNIVERSAL));
            Entities.start(subnetTier, ImmutableList.of(machine));
            added.sensors().set(MesosSlave.SUBNET_TIER, subnetTier);
        }
        if (active)
            slaveIds.add(id);
    }
    return slaveIds;
}
From source file:org.eclipse.xtext.util.formallang.PdaUtil.java
public <S, P> List<S> shortestPathTo(Pda<S, P> pda, Iterator<P> stack, S match) {
    return shortestPathTo(pda, pda.getStart(), stack, Predicates.equalTo(match), Predicates.<S>alwaysTrue());
}
From source file:com.android.builder.internal.packaging.DexIncrementalRenameManager.java
/**
 * Updates the state of the manager with file changes.
 *
 * @param files the files that have changed
 * @return the changes in the packaged files
 * @throws IOException failed to process the changes
 */
@NonNull
Set<PackagedFileUpdate> update(@NonNull ImmutableMap<RelativeFile, FileStatus> files) throws IOException {
    /*
     * This describes the algorithm to update the files. This algorithm:
     * - (1) Generates the minimal number of PackagedFileUpdates
     * - (2) Ensures that the data that results from making the updates does not contain any
     *   gaps in the dex sequences as defined by DexFileNameSupplier.
     * - (3) If at least one of the input files is "classes.dex", that input file will be
     *   mapped to "classes.dex".
     *
     * To explain the algorithm, we describe all steps and follow 3 different scenarios, whose
     * initial conditions are:
     * == Scenario S1 ==
     * - mNameMap = { FileA -> classes.dex, FileB -> classes2.dex, FileC -> classes3.dex }
     * - files = { FileA: removed, FileB: removed, FileC: updated, FileD: new }
     * == Scenario S2 ==
     * - mNameMap = { FileA -> classes.dex, FileB -> classes3.dex, FileC -> classes3.dex }
     * - files = { FileB: removed, FileC: updated, FileD: new, FileE: new }
     * == Scenario S3 ==
     * - mNameMap = { FileA -> classes.dex, FileB -> classes2.dex }
     * - files = { classes.dex: new, FileB: updated }
     *
     *
     * 1. We start by getting all names in the order defined by the DexFileNameSupplier and
     * put all names that are in the map in "nameList".
     *
     * == Scenario 1 ==
     * - mNameMap = { FileA -> classes.dex, FileB -> classes2.dex, FileC -> classes3.dex }
     * - files = { FileA: removed, FileB: removed, FileC: updated, FileD: new }
     * - nameList = [ classes.dex, classes2.dex, classes3.dex ]
     * == Scenario 2 ==
     * - mNameMap = { FileA -> classes.dex, FileB -> classes2.dex, FileC -> classes3.dex }
     * - files = { FileB: removed, FileC: updated, FileD: new, FileE: new }
     * - nameList = [ classes.dex, classes2.dex, classes3.dex ]
     * == Scenario S3 ==
     * - mNameMap = { FileA -> classes.dex, FileB -> classes2.dex }
     * - files = { classes.dex: new, FileB: updated }
     * - nameList = [ classes.dex, classes2.dex ]
     *
     *
     * 2. For every deleted file in the set, we remove it from the name map and keep its
     * name in "deletedNames". Put the file/name map in "deletedFiles".
     *
     * == Scenario 1 ==
     * - mNameMap = { FileC -> classes3.dex }
     * - files = { FileA: removed, FileB: removed, FileC: updated, FileD: new }
     * - nameList = [ classes.dex, classes2.dex, classes3.dex ]
     * - deletedNames = [ classes.dex, classes2.dex ]
     * - deletedFiles = { classes.dex -> FileA, classes2 -> FileB }
     * == Scenario 2 ==
     * - mNameMap = { FileA -> classes.dex, FileC -> classes3.dex }
     * - files = { FileB: removed, FileC: updated, FileD: new, FileE: new }
     * - nameList = [ classes.dex, classes2.dex, classes3.dex ]
     * - deletedNames = [ classes2.dex ]
     * - deletedFiles = { classes2 -> FileB }
     * == Scenario S3 ==
     * - mNameMap = { FileA -> classes.dex, FileB -> classes2.dex }
     * - files = { classes.dex: new, FileB: updated }
     * - nameList = [ classes.dex, classes2.dex ]
     * - deletedNames = []
     * - deletedFiles = {}
     *
     *
     * 3. For every added file in the set, we add it to newFiles. If any of the new files is
     * named "classes.dex" it is added to the beginning of newFiles and the addingClassesDex
     * is set to true.
     *
     * == Scenario 1 ==
     * - mNameMap = { FileC -> classes3.dex }
     * - files = { FileA: removed, FileB: removed, FileC: updated, FileD: new }
     * - nameList = [ classes.dex, classes2.dex, classes3.dex ]
     * - deletedNames = [ classes.dex, classes2.dex ]
     * - deletedFiles = { classes.dex -> FileA, classes2 -> FileB }
     * - newFiles = [ FileD ]
     * - addingClassesDex = false
     * == Scenario 2 ==
     * - mNameMap = { FileA -> classes.dex, FileC -> classes3.dex }
     * - files = { FileB: removed, FileC: updated, FileD: new, FileE: new }
     * - nameList = [ classes.dex, classes2.dex, classes3.dex ]
     * - deletedNames = [ classes2.dex ]
     * - deletedFiles = { classes2 -> FileB }
     * - newFiles = [ FileD, FileE ]
     * - addingClassesDex = false
     * == Scenario S3 ==
     * - mNameMap = { FileA -> classes.dex, FileB -> classes2.dex }
     * - files = { classes.dex: new, FileB: updated }
     * - nameList = [ classes.dex, classes2.dex ]
     * - deletedNames = []
     * - deletedFiles = {}
     * - newFiles = [ classes.dex ]
     * - addingClassesDex = true
     *
     *
     * 4. If addingClassesDex is true, mNameMap contains a mapping for classes.dex and the file
     * it is mapped from is not classes.dex, remove it from the mapping and add it to
     * newFiles. Also, add "classes.dex" to "deletedNames".
     *
     * == Scenario 1 ==
     * - mNameMap = { FileC -> classes3.dex }
     * - files = { FileA: removed, FileB: removed, FileC: updated, FileD: new }
     * - nameList = [ classes.dex, classes2.dex, classes3.dex ]
     * - deletedNames = [ classes.dex, classes2.dex ]
     * - deletedFiles = { classes.dex -> FileA, classes2 -> FileB }
     * - newFiles = [ FileD ]
     * - addingClassesDex = false
     * == Scenario 2 ==
     * - mNameMap = { FileA -> classes.dex, FileC -> classes3.dex }
     * - files = { FileB: removed, FileC: updated, FileD: new, FileE: new }
     * - nameList = [ classes.dex, classes2.dex, classes3.dex ]
     * - deletedNames = [ classes2.dex ]
     * - deletedFiles = { classes2 -> FileB }
     * - newFiles = []
     * - addingClassesDex = false
     * == Scenario S3 ==
     * - mNameMap = { FileB -> classes2.dex }
     * - files = { classes.dex: new, FileB: updated }
     * - nameList = [ classes.dex, classes2.dex ]
     * - deletedNames = [ classes.dex ]
     * - deletedFiles = {}
     * - newFiles = [ classes.dex, FileA ]
     * - addingClassesDex = true
     *
     *
     * 5. For every added file in the set, we add it to the name map using names from
     * "deletedNames", if possible. If a name is used from "deletedNames", we remove it from
     * "deletedNames" and add it to "updatedNames". If no name is available in "deletedNames",
     * we fetch a new name and add it to "addedNames". If we need to fetch new names, we also
     * add them to "nameList". If we remove entries from "deletedNames", we also remove it
     * from "deletedFiles".
     *
     * == Scenario 1 ==
     * - mNameMap = { FileC -> classes3.dex, FileD -> classes.dex }
     * - files = { FileA: removed, FileB: removed, FileC: updated, FileD: new }
     * - nameList = [ classes.dex, classes2.dex, classes3.dex ]
     * - deletedNames = [ classes2.dex ]
     * - deletedFiles = { classes2 -> FileB }
     * - newFiles = [ FileD ]
     * - addingClassesDex = false
     * - updatedNames = { classes.dex }
     * - addedNames = {}
     * == Scenario 2 ==
     * - mNameMap = { FileA -> classes.dex, FileC -> classes3.dex, FileD -> classes2.dex,
     *   FileE -> classes4.dex }
     * - files = { FileB: removed, FileC: updated, FileD -> new, FileE: new }
     * - nameList = [ classes.dex, classes2.dex, classes3.dex, classes4.dex ]
     * - deletedNames = []
     * - deletedFiles = {}
     * - newFiles = []
     * - addingClassesDex = false
     * - updatedNames = { classes2.dex }
     * - addedNames = { classes4.dex }
     * == Scenario S3 ==
     * - mNameMap = { FileB -> classes2.dex, classes.dex -> classes.dex,
     *   FileA -> classes3.dex }
     * - files = { classes.dex: new, FileB: updated }
     * - nameList = [ classes.dex, classes2.dex, classes3.dex ]
     * - deletedNames = []
     * - deletedFiles = {}
     * - newFiles = [ classes.dex, FileA ]
     * - addingClassesDex = true
     * - updatedNames = { classes.dex }
     * - addedNames = { classes3.dex }
     *
     *
     * 6. For every updated file in the set, we search for it in the name map
     * and add it to "updatedNames".
     *
     * == Scenario 1 ==
     * - mNameMap = { FileC -> classes3.dex, FileD -> classes.dex }
     * - files = { FileA: removed, FileB: removed, FileC: updated, FileD: new }
     * - nameList = [ classes.dex, classes2.dex, classes3.dex ]
     * - deletedNames = [ classes2.dex ]
     * - deletedFiles = { classes2 -> FileB }
     * - newFiles = [ FileD ]
     * - addingClassesDex = false
     * - updatedNames = { classes.dex, classes3.dex }
     * - addedNames = {}
     * == Scenario 2 ==
     * - mNameMap = { FileA -> classes.dex, FileC -> classes3.dex, FileD -> classes2.dex,
     *   FileE -> classes4.dex }
     * - files = { FileB: removed, FileC: updated, FileD: new, FileE: new }
     * - nameList = [ classes.dex, classes2.dex, classes3.dex, classes4.dex ]
     * - deletedNames = []
     * - deletedFiles = {}
     * - newFiles = []
     * - addingClassesDex = false
     * - updatedNames = { classes2.dex, classes3.dex }
     * - addedNames = { classes4.dex }
     * == Scenario S3 ==
     * - mNameMap = { FileB -> classes2.dex, classes.dex -> classes.dex,
     *   FileA -> classes3.dex }
     * - files = { classes.dex: new, FileB: updated }
     * - nameList = [ classes.dex, classes2.dex, classes3.dex ]
     * - deletedNames = []
     * - deletedFiles = {}
     * - newFiles = [ classes.dex, FileA ]
     * - addingClassesDex = true
     * - updatedNames = { classes.dex }
     * - addedNames = { classes3.dex }
     *
     *
     * 7. Do one of the following:
     * 7.1. If "deletedNames" is empty, we end step 5.
     * 7.2. If the last item of "deletedNames" matches the last name in "nameList", we move it
     * to "finalDeletedNames". We also remove the last name in "nameList". Restart step 5.
     * 7.3. Do the following:
     * - Move the last entry in "nameList" to "finallyDeletedNames" and copy the corresponding
     *   entry from mNameMap to deletedFiles.
     * - Rename the name of the file in "mNameMap" corresponding to the moved item of
     *   "nameList" to the first position of "deletedNames".
     * - Move the name in the first position of "deletedNames" to "updatedNames".
     * - If the last item from "nameList" that was removed existed in "updatedNames", remove it
     *   from "updatedNames".
     * - Restart step 7.
     *
     * == Scenario 1 ==
     * (after executing 7.3 and then 7.1):
     * - mNameMap = { FileC -> classes2.dex, FileD -> classes.dex }
     * - files = { FileA: removed, FileB: removed, FileC: updated, FileD: new }
     * - nameList = [ classes.dex, classes2.dex, classes3.dex ]
     * - deletedNames = []
     * - deletedFiles = { classes2 -> FileB, classes3.dex -> FileC }
     * - newFiles = [ FileD ]
     * - addingClassesDex = false
     * - updatedNames = { classes.dex, classes2.dex }
     * - addedNames = {}
     * - finallyDeletedNames = { classes3.dex }
     * == Scenario 2 ==
     * (after executing 7.1):
     * - mNameMap = { FileA -> classes.dex, FileC -> classes3.dex, FileD -> classes2.dex,
     *   FileE -> classes4.dex }
     * - files = { FileB: removed, FileC: updated, FileD: new, FileE: new }
     * - nameList = [ classes.dex, classes2.dex, classes3.dex, classes4.dex ]
     * - deletedNames = []
     * - deletedFiles = {}
     * - newFiles = []
     * - addingClassesDex = false
     * - updatedNames = { classes2.dex, classes3.dex }
     * - addedNames = { classes4.dex }
     * - finallyDeletedNames = {}
     * == Scenario S3 ==
     * (after executing 7.1):
     * - mNameMap = { FileB -> classes2.dex, classes.dex -> classes.dex,
     *   FileA -> classes2.dex }
     * - files = { classes.dex: new, FileB: updated }
     * - nameList = [ classes.dex, classes2.dex ]
     * - deletedNames = []
     * - deletedFiles = {}
     * - newFiles = [ classes.dex, FileA ]
     * - addingClassesDex = true
     * - updatedNames = { classes.dex }
     * - addedNames = { classes3.dex }
     *
     * 8. Build the final list with the changes defined in "addedNames", "updatedNames" and
     * "finallyDeletedNames".
     */

    /*
     * Step 1.
     */
    Deque<String> nameList = Lists.newLinkedList();
    DexFileNameSupplier nameSupplier = new DexFileNameSupplier();
    for (int i = 0; i < mNameMap.size(); i++) {
        String nextName = nameSupplier.get();
        nameList.add(nextName);
        Verify.verify(mNameMap.containsValue(nextName),
                "mNameMap does not contain '" + nextName + "', but has a total of " + mNameMap.size()
                        + " entries {mNameMap = " + mNameMap + "}");
    }

    /*
     * Step 2.
     *
     * Make sure that classes.dex, if it was removed, is the first in the deletedNames.
     */
    Deque<String> deletedNames = Lists.newLinkedList();
    Map<String, RelativeFile> deletedFiles = Maps.newHashMap();
    for (RelativeFile deletedRf : Maps.filterValues(files, Predicates.equalTo(FileStatus.REMOVED)).keySet()) {
        String deletedName = mNameMap.get(deletedRf);
        if (deletedName == null) {
            throw new IOException("Incremental update refers to relative file '" + deletedRf
                    + "' as deleted, but this file is not known.");
        }

        if (deletedName.equals(SdkConstants.FN_APK_CLASSES_DEX)) {
            deletedNames.addFirst(deletedName);
        } else {
            deletedNames.add(deletedName);
        }

        deletedFiles.put(deletedName, deletedRf);
        mNameMap.remove(deletedRf);
    }

    /*
     * Step 3.
     */
    AtomicBoolean addingClassesDex = new AtomicBoolean(false);
    Deque<RelativeFile> addedFiles = Lists.newLinkedList(
            Maps.filterValues(files, Predicates.equalTo(FileStatus.NEW)).keySet().stream().peek(rf -> {
                if (getOsIndependentFileName(rf).equals(SdkConstants.FN_APK_CLASSES_DEX)) {
                    addingClassesDex.set(true);
                }
            }).sorted(new DexNameComparator()).collect(Collectors.toList()));

    /*
     * Step 4.
     */
    if (addingClassesDex.get()) {
        RelativeFile mappingToClassesDex = mNameMap.inverse().get(SdkConstants.FN_APK_CLASSES_DEX);
        if (mappingToClassesDex != null) {
            if (!getOsIndependentFileName(mappingToClassesDex).equals(SdkConstants.FN_APK_CLASSES_DEX)) {
                /*
                 * If we get here it is because we're adding a file named "classes.dex" and the
                 * current file that maps to "classes.dex" is not named "classes.dex". We
                 * prefer having "classes.dex" mapping to "classes.dex".
                 */
                mNameMap.remove(mappingToClassesDex);
                addedFiles.add(mappingToClassesDex);
                deletedNames.add(SdkConstants.FN_APK_CLASSES_DEX);
            }
        }
    }

    /*
     * Step 5.
     */
    Set<String> addedNames = Sets.newHashSet();
    Set<String> updatedNames = Sets.newHashSet();
    Iterator<String> deletedNamesIterator = deletedNames.iterator();
    for (RelativeFile addedRf : addedFiles) {
        if (deletedNamesIterator.hasNext()) {
            String toUse = deletedNamesIterator.next();
            deletedNamesIterator.remove();
            deletedFiles.remove(toUse);
            updatedNames.add(toUse);
            mNameMap.put(addedRf, toUse);
        } else {
            String addedName = nameSupplier.get();
            addedNames.add(addedName);
            nameList.add(addedName);
            mNameMap.put(addedRf, addedName);
        }
    }

    /*
     * Step 6.
     */
    for (RelativeFile updatedRf : Maps.filterValues(files, Predicates.equalTo(FileStatus.CHANGED)).keySet()) {
        String updatedName = mNameMap.get(updatedRf);
        if (updatedName == null) {
            throw new IOException("Incremental update refers to relative file '" + updatedRf
                    + "' as updated, but this file is not known.");
        }

        updatedNames.add(updatedName);
    }

    /*
     * Step 7.
     */
    Set<String> finallyDeletedNames = Sets.newHashSet();
    while (true) {
        /*
         * Step 7.1.
         */
        if (deletedNames.isEmpty()) {
            break;
        }

        /*
         * Step 7.2.
         */
        if (deletedNames.getLast().equals(nameList.getLast())) {
            nameList.removeLast();
            finallyDeletedNames.add(deletedNames.removeLast());
            continue;
        }

        /*
         * Step 7.3.
         */
        String lastInNames = nameList.removeLast();
        String firstInDeleted = deletedNames.remove();

        finallyDeletedNames.add(lastInNames);
        updatedNames.remove(lastInNames);
        updatedNames.add(firstInDeleted);

        RelativeFile file = mNameMap.inverse().get(lastInNames);
        Verify.verifyNotNull(file, "file == null");
        mNameMap.put(file, firstInDeleted);
        deletedFiles.put(lastInNames, file);
    }

    /*
     * Step 8.
     */
    Set<PackagedFileUpdate> updates = Sets.newHashSet();
    for (String addedName : addedNames) {
        RelativeFile file = Verify.verifyNotNull(mNameMap.inverse().get(addedName));
        updates.add(new PackagedFileUpdate(file, addedName, FileStatus.NEW));
    }

    for (String updatedName : updatedNames) {
        RelativeFile file = Verify.verifyNotNull(mNameMap.inverse().get(updatedName));
        updates.add(new PackagedFileUpdate(file, updatedName, FileStatus.CHANGED));
    }

    for (String deletedName : finallyDeletedNames) {
        RelativeFile file = Verify.verifyNotNull(deletedFiles.get(deletedName));
        updates.add(new PackagedFileUpdate(file, deletedName, FileStatus.REMOVED));
    }

    /*
     * Phew! We're done! Yey!
     */
    return updates;
}
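The update() method above calls Maps.filterValues with Predicates.equalTo(FileStatus.REMOVED), NEW and CHANGED to split the incoming map by status. The same idea in isolation, using an illustrative Status enum and file names instead of the real RelativeFile/FileStatus types:

import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import java.util.Map;
import java.util.Set;

public class FilterValuesEqualToExample {

    enum Status { NEW, CHANGED, REMOVED }

    public static void main(String[] args) {
        Map<String, Status> files = ImmutableMap.of(
                "classes.dex", Status.REMOVED,
                "classes2.dex", Status.CHANGED,
                "classes3.dex", Status.REMOVED);

        // Keep only the entries whose value equals REMOVED, then take the keys.
        Set<String> removed = Maps.filterValues(files, Predicates.equalTo(Status.REMOVED)).keySet();
        System.out.println(removed); // [classes.dex, classes3.dex]
    }
}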
From source file:org.eclipse.xtext.util.formallang.PdaUtil.java
public <S, P> List<S> shortestPathToFinalState(Pda<S, P> pda, Iterator<P> stack) {
    return shortestPathTo(pda, pda.getStart(), stack, Predicates.equalTo(pda.getStop()), Predicates.<S>alwaysTrue());
}
From source file:org.eclipse.sirius.diagram.sequence.ui.tool.internal.edit.validator.ISEComplexMoveValidator.java
private ISequenceEvent getRemoteEnd(Option<Message> message, Option<Lifeline> lifeline, Range insertionRange) {
    ISequenceEvent remoteEnd = null;
    if (lifeline.some()) {
        EventFinder remoteSrcFinder = new EventFinder(lifeline.get());
        remoteSrcFinder.setEventsToIgnore(Predicates.equalTo((ISequenceEvent) message.get()));
        remoteSrcFinder.setVerticalRangefunction(rangeFunction);
        remoteSrcFinder.setExpansionZone(expansionZone);
        remoteSrcFinder.setReconnection(true);
        remoteEnd = remoteSrcFinder.findMostSpecificEvent(insertionRange);
    }
    return remoteEnd;
}
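Here equalTo wraps the single event that the finder should skip. Outside the Sirius EventFinder API, the same exclusion can be written by negating the predicate; a small sketch with illustrative values:

import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;

public class IgnoreOneElementExample {
    public static void main(String[] args) {
        String toIgnore = "b";
        // not(equalTo(toIgnore)) keeps every element except the one to ignore.
        Iterable<String> others = Iterables.filter(
                ImmutableList.of("a", "b", "c"),
                Predicates.not(Predicates.equalTo(toIgnore)));
        System.out.println(ImmutableList.copyOf(others)); // [a, c]
    }
}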
From source file:jetbrains.jetpad.hybrid.HybridSynchronizer.java
private TextCell createPlaceholder() {
    TextCell result = new TextCell();
    result.addTrait(new DerivedCellTrait() {
        @Override
        protected CellTrait getBase(Cell cell) {
            return TextEditing.validTextEditing(Predicates.equalTo(""));
        }

        @Override
        public Object get(Cell cell, CellTraitPropertySpec<?> spec) {
            if (spec == TextEditing.EAGER_COMPLETION)
                return true;

            if (spec == Completion.COMPLETION)
                return tokenCompletion().placeholderCompletion();

            return super.get(cell, spec);
        }
    });
    return result;
}
From source file:org.eclipse.xtext.util.formallang.PdaUtil.java
public <S, P> List<S> shortestStackpruningPathTo(Pda<S, P> pda, Iterator<P> stack, S matches) {
    return shortestStackpruningPathTo(pda, pda.getStart(), stack, Predicates.equalTo(matches), Predicates.<S>alwaysTrue());
}
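In the three PdaUtil methods on this page, equalTo(match) and equalTo(pda.getStop()) turn a concrete target state into the goal predicate expected by the path search. A detached illustration of equalTo as a goal test, using a plain list instead of the Pda machinery:

import com.google.common.base.Predicate;
import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import java.util.List;

public class GoalPredicateExample {
    public static void main(String[] args) {
        List<String> states = ImmutableList.of("START", "A", "B", "STOP");
        // equalTo turns a concrete target state into the Predicate form that
        // search utilities (such as PdaUtil.shortestPathTo) accept.
        Predicate<String> isGoal = Predicates.equalTo("STOP");
        System.out.println(Iterables.indexOf(states, isGoal)); // 3
    }
}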