List of usage examples for com.google.common.collect TreeMultimap create
public static <K extends Comparable, V extends Comparable> TreeMultimap<K, V> create()
From source file: org.sonar.server.permission.ws.TemplateGroupsAction.java
private static WsPermissions.WsGroupsResponse buildResponse(List<GroupDto> groups, List<PermissionTemplateGroupDto> groupPermissions, Paging paging) { Multimap<Long, String> permissionsByGroupId = TreeMultimap.create(); groupPermissions.forEach(groupPermission -> permissionsByGroupId.put(groupPermission.getGroupId(), groupPermission.getPermission())); WsPermissions.WsGroupsResponse.Builder response = WsPermissions.WsGroupsResponse.newBuilder(); groups.forEach(group -> {//from ww w .ja v a2s . c om WsPermissions.Group.Builder wsGroup = response.addGroupsBuilder().setName(group.getName()); if (group.getId() != 0L) { wsGroup.setId(String.valueOf(group.getId())); } if (group.getDescription() != null) { wsGroup.setDescription(group.getDescription()); } wsGroup.addAllPermissions(permissionsByGroupId.get(group.getId())); }); response.getPagingBuilder().setPageIndex(paging.pageIndex()).setPageSize(paging.pageSize()) .setTotal(paging.total()); return response.build(); }
From source file: com.google.cloud.pubsub.SubscriberConnection.java
/**
 * Creates a subscriber connection bound to a single subscription.
 *
 * @param subscription name of the subscription to pull from
 * @param credentials credentials used to authenticate the stream
 * @param receiver callback invoked for received messages
 * @param ackExpirationPadding safety margin applied ahead of ack expiration
 * @param channel gRPC channel used for the connection
 * @param flowController flow control for outstanding messages
 * @param executor executor for scheduled work (alarms, extensions)
 */
public SubscriberConnection(String subscription, Credentials credentials, MessageReceiver receiver,
        Duration ackExpirationPadding, Channel channel, FlowController flowController,
        ScheduledExecutorService executor) {
    // Collaborators supplied by the caller.
    this.subscription = subscription;
    this.credentials = credentials;
    this.receiver = receiver;
    this.ackExpirationPadding = ackExpirationPadding;
    this.channel = channel;
    this.flowController = flowController;
    this.executor = executor;

    // Never start below the initial deadline; saturatedCast guards against
    // overflow for very large padding values.
    streamAckDeadlineSeconds = Math.max(INITIAL_ACK_DEADLINE_SECONDS,
            Ints.saturatedCast(ackExpirationPadding.getStandardSeconds()));

    // Mutable bookkeeping for outstanding messages and pending (n)acks.
    outstandingAckHandlers = TreeMultimap.create();
    pendingAcks = new HashSet<>(MAX_PER_REQUEST_CHANGES * 2);
    pendingNacks = new HashSet<>(MAX_PER_REQUEST_CHANGES * 2);

    // 601 buckets of 1s resolution from 0s to MAX_ACK_DEADLINE_SECONDS
    ackLatencyDistribution = new Distribution(MAX_ACK_DEADLINE_SECONDS + 1);

    alarmsLock = new ReentrantLock();
    // Sentinel "never" value; a real alarm time is set once work arrives.
    nextAckDeadlineExtensionAlarmTime = new Instant(Long.MAX_VALUE);
    messagesWaiter = new MessagesWaiter();
}
From source file: google.registry.dns.ReadDnsQueueAction.java
/** Leases all tasks from the pull queue and creates per-tld update actions for them. */
@Override
public void run() {
    // TLDs this instance is responsible for; tasks for other TLDs keep their lease.
    Set<String> tldsOfInterest = getTlds();
    List<TaskHandle> tasks = dnsQueue.leaseTasks(writeLockTimeout);
    if (tasks.isEmpty()) {
        return;
    }
    logger.infofmt("leased %d tasks", tasks.size());
    // Normally, all tasks will be deleted from the pull queue. But some might have to remain if
    // we are not interested in the associated TLD, or if the TLD is paused. Remember which these
    // are.
    Set<TaskHandle> tasksToKeep = new HashSet<>();
    // The paused TLDs for which we found at least one refresh request.
    Set<String> pausedTlds = new HashSet<>();
    // Create a sorted multimap into which we will insert the refresh items, so that the items for
    // each TLD will be grouped together, and domains and hosts will be grouped within a TLD. The
    // grouping and ordering of domains and hosts is not technically necessary, but a predictable
    // ordering makes it possible to write detailed tests.
    SortedSetMultimap<String, RefreshItem> refreshItemMultimap = TreeMultimap.create();
    // Read all tasks on the DNS pull queue and load them into the refresh item multimap.
    for (TaskHandle task : tasks) {
        try {
            Map<String, String> params = ImmutableMap.copyOf(task.extractParams());
            String tld = params.get(RequestParameters.PARAM_TLD);
            if (tld == null) {
                // Malformed task: not added to tasksToKeep, so it will be deleted below.
                logger.severe("discarding invalid DNS refresh request; no TLD specified");
            } else if (!tldsOfInterest.contains(tld)) {
                tasksToKeep.add(task);
            } else if (Registry.get(tld).getDnsPaused()) {
                tasksToKeep.add(task);
                pausedTlds.add(tld);
            } else {
                String typeString = params.get(DNS_TARGET_TYPE_PARAM);
                String name = params.get(DNS_TARGET_NAME_PARAM);
                TargetType type = TargetType.valueOf(typeString);
                switch (type) {
                    case DOMAIN:
                    case HOST:
                        refreshItemMultimap.put(tld, RefreshItem.create(type, name));
                        break;
                    default:
                        logger.severefmt("discarding DNS refresh request of type %s", typeString);
                        break;
                }
            }
        } catch (RuntimeException | UnsupportedEncodingException e) {
            // Unparseable payloads are logged and dropped rather than poisoning the queue.
            logger.severefmt(e, "discarding invalid DNS refresh request (task %s)", task);
        }
    }
    if (!pausedTlds.isEmpty()) {
        logger.infofmt("the dns-pull queue is paused for tlds: %s", pausedTlds);
    }
    // Loop through the multimap by TLD and generate refresh tasks for the hosts and domains.
    for (Map.Entry<String, Collection<RefreshItem>> tldRefreshItemsEntry : refreshItemMultimap.asMap()
            .entrySet()) {
        for (List<RefreshItem> chunk : Iterables.partition(tldRefreshItemsEntry.getValue(),
                tldUpdateBatchSize)) {
            // Optional random countdown spreads the push tasks out over the jitter window.
            TaskOptions options = withUrl(PublishDnsUpdatesAction.PATH).countdownMillis(
                    jitterSeconds.isPresent()
                            ? random.nextInt((int) SECONDS.toMillis(jitterSeconds.get()))
                            : 0)
                    .param(RequestParameters.PARAM_TLD, tldRefreshItemsEntry.getKey());
            for (RefreshItem refreshItem : chunk) {
                options.param((refreshItem.type() == TargetType.HOST)
                        ? PublishDnsUpdatesAction.HOSTS_PARAM
                        : PublishDnsUpdatesAction.DOMAINS_PARAM, refreshItem.name());
            }
            taskEnqueuer.enqueue(dnsPublishPushQueue, options);
        }
    }
    Set<TaskHandle> tasksToDelete = difference(ImmutableSet.copyOf(tasks), tasksToKeep);
    // In keepTasks mode, never delete any tasks.
    if (keepTasks) {
        logger.infofmt("would have deleted %d tasks", tasksToDelete.size());
        // Release every lease so all tasks become available again immediately.
        for (TaskHandle task : tasks) {
            dnsQueue.dropTaskLease(task);
        }
        // Otherwise, either delete or drop the lease of each task.
    } else {
        logger.infofmt("deleting %d tasks", tasksToDelete.size());
        dnsQueue.deleteTasks(ImmutableList.copyOf(tasksToDelete));
        logger.infofmt("dropping %d tasks", tasksToKeep.size());
        for (TaskHandle task : tasksToKeep) {
            dnsQueue.dropTaskLease(task);
        }
        logger.infofmt("done");
    }
}
From source file: org.sonar.server.permission.ws.template.TemplateGroupsAction.java
private static WsPermissions.WsGroupsResponse buildResponse(List<GroupDto> groups, List<PermissionTemplateGroupDto> groupPermissions, Paging paging) { Multimap<Integer, String> permissionsByGroupId = TreeMultimap.create(); groupPermissions.forEach(groupPermission -> permissionsByGroupId.put(groupPermission.getGroupId(), groupPermission.getPermission())); WsPermissions.WsGroupsResponse.Builder response = WsPermissions.WsGroupsResponse.newBuilder(); groups.forEach(group -> {//from www. j a v a 2s .c o m WsPermissions.Group.Builder wsGroup = response.addGroupsBuilder().setName(group.getName()); if (group.getId() != 0) { wsGroup.setId(String.valueOf(group.getId())); } setNullable(group.getDescription(), wsGroup::setDescription); wsGroup.addAllPermissions(permissionsByGroupId.get(group.getId())); }); response.getPagingBuilder().setPageIndex(paging.pageIndex()).setPageSize(paging.pageSize()) .setTotal(paging.total()); return response.build(); }
From source file: com.music.web.websocket.Game.java
private void calculateResults() { TreeMultimap<Integer, String> rankings = TreeMultimap.create(); for (Player player : players.values()) { int score = 0; List<Answer> playerAnswers = new ArrayList<>(); //cannot simply copy the values() of player.getAnswers(), because it is an unordered map (as it needs to be concurrent) for (Piece piece : pieces) { Answer answer = player.getAnswers().get(piece.getId()); if (answer.getTempo() > -1) { int diff = Math.abs(answer.getTempo() - piece.getTempo()); if (diff < 3) { score += 15;//from w w w .j a v a 2 s.c o m } else { score += 5 / Math.log10(diff); } } if (answer.getMainInstrument() == piece.getMainInstrument()) { score += 10; } if (answer.getMetreNumerator() == piece.getMetreNumerator() && answer.getMetreDenominator() == piece.getMetreDenominator()) { score += 10; } playerAnswers.add(answer); } results.getScores().put(player.getName(), score); rankings.put(score, player.getSession().getId()); } // the ordered player ids results.setRanking(new ArrayList<>(rankings.values())); Collections.reverse(results.getRanking()); }
From source file: com.google.publicalerts.cap.validator.ValidationResult.java
/**
 * Groups the accumulated validation messages by the source line they refer to.
 *
 * @return a sorted multimap of line number to the validation messages for that line
 */
public Multimap<Integer, ValidationMessage> getByLineValidationMessages() {
    // TreeMultimap orders by line number, so callers can render messages in file order.
    Multimap<Integer, ValidationMessage> byLine = TreeMultimap.create();
    for (ValidationMessage message : validationMessages) {
        byLine.put(message.getLineNumber(), message);
    }
    return byLine;
}
From source file: org.cinchapi.concourse.server.storage.db.SecondaryRecord.java
/**
 * Explore this record and return a mapping from PrimaryKey to the Values
 * that cause the corresponding records to satisfy {@code operator} in
 * relation to the specified {@code values} (and at the specified
 * {@code timestamp} if {@code historical} is {@code true}).
 *
 * @param historical - if {@code true} query the history, otherwise query
 *            the current state
 * @param timestamp - this value is ignored if {@code historical} is
 *            {@code false}, otherwise this value is the historical
 *            timestamp at which to query the field
 * @param operator the comparison/matching operator to apply
 * @param values the operand(s); {@code BETWEEN} requires a second element
 * @return the relevant data that causes the matching records to satisfy the
 *         criteria
 */
private Map<PrimaryKey, Set<Value>> explore(boolean historical, long timestamp, Operator operator,
        Value... values) { /* Authorized */
    read.lock();
    try {
        SetMultimap<PrimaryKey, Value> data = TreeMultimap.create();
        Value value = values[0];
        // For present-state range queries the key set is navigable, so we can slice it
        // directly with tailSet/headSet/subSet; historical keys are unsorted, so each
        // branch must also filter explicitly when {@code historical} is true.
        if (operator == Operator.EQUALS) {
            addMatchingRecords(data, value, historical, timestamp);
        } else if (operator == Operator.NOT_EQUALS) {
            for (Value stored : historical ? history.keySet() : present.keySet()) {
                if (!value.equals(stored)) {
                    addMatchingRecords(data, stored, historical, timestamp);
                }
            }
        } else if (operator == Operator.GREATER_THAN) {
            for (Value stored : historical ? history.keySet()
                    : ((NavigableSet<Value>) present.keySet()).tailSet(value, false)) {
                if (!historical || stored.compareTo(value) > 0) {
                    addMatchingRecords(data, stored, historical, timestamp);
                }
            }
        } else if (operator == Operator.GREATER_THAN_OR_EQUALS) {
            for (Value stored : historical ? history.keySet()
                    : ((NavigableSet<Value>) present.keySet()).tailSet(value, true)) {
                if (!historical || stored.compareTo(value) >= 0) {
                    addMatchingRecords(data, stored, historical, timestamp);
                }
            }
        } else if (operator == Operator.LESS_THAN) {
            for (Value stored : historical ? history.keySet()
                    : ((NavigableSet<Value>) present.keySet()).headSet(value, false)) {
                if (!historical || stored.compareTo(value) < 0) {
                    addMatchingRecords(data, stored, historical, timestamp);
                }
            }
        } else if (operator == Operator.LESS_THAN_OR_EQUALS) {
            for (Value stored : historical ? history.keySet()
                    : ((NavigableSet<Value>) present.keySet()).headSet(value, true)) {
                if (!historical || stored.compareTo(value) <= 0) {
                    addMatchingRecords(data, stored, historical, timestamp);
                }
            }
        } else if (operator == Operator.BETWEEN) {
            Preconditions.checkArgument(values.length > 1);
            Value value2 = values[1];
            // BETWEEN is inclusive of the lower bound and exclusive of the upper bound.
            for (Value stored : historical ? history.keySet()
                    : ((NavigableSet<Value>) present.keySet()).subSet(value, true, value2, false)) {
                if (!historical || (stored.compareTo(value) >= 0 && stored.compareTo(value2) < 0)) {
                    addMatchingRecords(data, stored, historical, timestamp);
                }
            }
        } else if (operator == Operator.REGEX) {
            Pattern p = Pattern.compile(value.getObject().toString());
            for (Value stored : historical ? history.keySet() : present.keySet()) {
                Matcher m = p.matcher(stored.getObject().toString());
                if (m.matches()) {
                    addMatchingRecords(data, stored, historical, timestamp);
                }
            }
        } else if (operator == Operator.NOT_REGEX) {
            Pattern p = Pattern.compile(value.getObject().toString());
            for (Value stored : historical ? history.keySet() : present.keySet()) {
                Matcher m = p.matcher(stored.getObject().toString());
                if (!m.matches()) {
                    addMatchingRecords(data, stored, historical, timestamp);
                }
            }
        } else {
            throw new UnsupportedOperationException();
        }
        return Multimaps.asMap(data);
    } finally {
        read.unlock();
    }
}

/**
 * Maps every record indexed under {@code stored} (at {@code timestamp} when
 * {@code historical} is {@code true}) to {@code stored} in {@code data}.
 * Extracted to remove the loop that was previously duplicated in every
 * operator branch of {@code explore}.
 */
private void addMatchingRecords(SetMultimap<PrimaryKey, Value> data, Value stored, boolean historical,
        long timestamp) {
    for (PrimaryKey record : historical ? get(stored, timestamp) : get(stored)) {
        data.put(record, stored);
    }
}
From source file: org.sonar.server.permission.ws.template.TemplateUsersAction.java
/**
 * Builds the protobuf WS response for a page of users, attaching to each user the
 * permissions granted to them on the template.
 *
 * @param users page of users to render
 * @param permissionTemplateUsers template permissions, matched to users by user id
 * @param paging paging information echoed back in the response
 */
private static WsPermissions.UsersWsResponse buildResponse(List<UserDto> users,
        List<PermissionTemplateUserDto> permissionTemplateUsers, Paging paging) {
    // TreeMultimap keeps each user's permissions sorted and de-duplicated.
    Multimap<Integer, String> permissionsByUser = TreeMultimap.create();
    for (PermissionTemplateUserDto userPermission : permissionTemplateUsers) {
        permissionsByUser.put(userPermission.getUserId(), userPermission.getPermission());
    }

    UsersWsResponse.Builder responseBuilder = UsersWsResponse.newBuilder();
    for (UserDto user : users) {
        WsPermissions.User.Builder userResponse = responseBuilder.addUsersBuilder()
                .setLogin(user.getLogin())
                .addAllPermissions(permissionsByUser.get(user.getId()));
        setNullable(user.getEmail(), userResponse::setEmail);
        setNullable(user.getName(), userResponse::setName);
    }

    responseBuilder.getPagingBuilder()
            .setPageIndex(paging.pageIndex())
            .setPageSize(paging.pageSize())
            .setTotal(paging.total())
            .build();
    return responseBuilder.build();
}
From source file: org.sonar.server.permission.ws.GroupsAction.java
private static WsGroupsResponse buildResponse(List<GroupDto> groups, List<GroupPermissionDto> groupPermissions, Paging paging) {//ww w. ja v a2s . c o m Multimap<Integer, String> permissionsByGroupId = TreeMultimap.create(); groupPermissions.forEach(groupPermission -> permissionsByGroupId.put(groupPermission.getGroupId(), groupPermission.getRole())); WsGroupsResponse.Builder response = WsGroupsResponse.newBuilder(); groups.forEach(group -> { Group.Builder wsGroup = response.addGroupsBuilder().setName(group.getName()); if (group.getId() != 0) { wsGroup.setId(String.valueOf(group.getId())); } setNullable(group.getDescription(), wsGroup::setDescription); wsGroup.addAllPermissions(permissionsByGroupId.get(group.getId())); }); response.getPagingBuilder().setPageIndex(paging.pageIndex()).setPageSize(paging.pageSize()) .setTotal(paging.total()); return response.build(); }
From source file: sadl.modellearner.rtiplus.SearchingPDRTALearner.java
/**
 * Greedy RTI+ main loop: repeatedly selects the most visited transition and either
 * splits an interval (per the distribution check), applies the best-scoring
 * split/merge refinement found by bounded search, or colors the target state red,
 * until no candidate transition remains.
 *
 * @param a the PDRTA being refined in place
 * @param sc the red/blue state coloring for {@code a}
 */
private void greedyRTIplus(PDRTA a, StateColoring sc) {
    // The pre-exit shortcut only applies with OR semantics and no distribution check.
    final boolean preExit = (bOp[2] instanceof OrOperator)
            && distrCheckType.equals(DistributionCheckType.DISABLED);
    if (preExit) {
        logger.info("Pre-Exiting algorithm when number of tails falls below minData");
    }
    int counter = 0;
    Transition t;
    // NOTE(review): the log message above says the algorithm exits when the tail count
    // falls BELOW minData, but this condition stops the loop once the count is >= minData —
    // verify the comparison direction against the upstream implementation.
    while ((t = getMostVisitedTrans(a, sc)) != null
            && !(preExit && t.in.getTails().size() >= PDRTA.getMinData())) {
        if (directory != null) {
            draw(a, true, directory, counter);
        }
        logger.debug("Automaton contains {} states and {} transitions", a.getStateCount(), a.getSize());
        logger.debug("Found most visited transition {} containing {} tails", t.toString(),
                t.in.getTails().size());
        counter++;
        if (!distrCheckType.equals(DistributionCheckType.DISABLED)) {
            logger.debug("Checking data distribution");
            // IDA: intervals whose tail distribution warrants splitting.
            final List<Interval> idaIns = checkDistribution(t.source, t.symAlphIdx, distrCheckType, sc);
            if (idaIns.size() > 0) {
                logger.debug("#{} DO: Split interval due to IDA into {} intervals", counter, idaIns.size());
                // TODO Printing the intervals may be to expensive just for logging
                final StringBuilder sb = new StringBuilder();
                for (final Interval in : idaIns) {
                    sb.append(" ");
                    sb.append(in.toString());
                }
                logger.trace("Resulting intervals are:{}", sb.toString());
                continue;
            } else {
                logger.debug("No splits because of data distributuion were perfomed in: {}", t.in.toString());
                if (bOp[2] instanceof OrOperator && t.in.getTails().size() < PDRTA.getMinData()) {
                    // Shortcut for skipping merges and splits when OR is selected
                    if (mainModel == a) {
                        logger.debug("#{} DO: Color state {} red", counter, t.target.getIndex());
                    }
                    sc.setRed(t.target);
                    continue;
                }
            }
        }
        logger.debug("Testing splits");
        final NavigableSet<Refinement> splits = getSplitRefs(t, sc).descendingSet();
        logger.debug("Found {} possible splits", splits.size());
        logger.debug("Testing merges");
        final NavigableSet<Refinement> merges = getMergeRefs(t, sc).descendingSet();
        logger.debug("Found {} possible merges", merges.size());
        logger.debug("Calculating sizes for splits");
        // Candidate refinements keyed by model score; a lower key means a better model.
        final TreeMultimap<Double, Refinement> all = TreeMultimap.create();
        int c = 0;
        for (final Refinement r : splits) {
            // Bound the search effort per iteration.
            if (c >= maxSplitsToSearch) {
                break;
            }
            // Score each candidate on a deep copy so the working automaton stays untouched.
            final PDRTA copy = new PDRTA(a);
            final StateColoring cColoring = new StateColoring(sc, copy);
            final Refinement cR = new Refinement(copy, r, cColoring);
            cR.refine();
            complete(copy, cColoring);
            // TODO Create algo param for selecting between AIC and size
            // final double modelScore = copy.getSize();
            final double modelScore = calcAIC(copy);
            all.put(modelScore, r);
            c++;
        }
        logger.debug("Calculating sizes for merges");
        c = 0;
        for (final Refinement r : merges) {
            if (c >= maxMergesToSearch) {
                break;
            }
            final PDRTA copy = new PDRTA(a);
            final StateColoring cColoring = new StateColoring(sc, copy);
            final Refinement cR = new Refinement(copy, r, cColoring);
            cR.refine();
            complete(copy, cColoring);
            // TODO Create algo param for selecting between AIC and size
            // final double modelScore = copy.getSize();
            final double modelScore = calcAIC(copy);
            all.put(modelScore, r);
            c++;
        }
        assert (all.size() <= (maxMergesToSearch + maxSplitsToSearch));
        if (!all.isEmpty()) {
            // Apply the best-scoring refinement; ties are broken by the largest refinement.
            final double minSize = all.keySet().first();
            final Refinement r = all.get(minSize).last();
            logger.debug("#{} DO: {} quality={}", counter, r.toString(), minSize);
            r.refine();
        } else {
            // No viable refinement: promote the target state to red.
            logger.debug("#{} DO: Color state {} red", counter, t.target.getIndex());
            sc.setRed(t.target);
        }
        if (Settings.isDebug()) {
            a.checkConsistency();
        }
    }
    a.checkConsistency();
    assert (a.getStateCount() == sc.getNumRedStates());
    if (directory != null) {
        draw(a, true, directory, counter);
    }
}