List of usage examples for org.apache.commons.lang3.tuple.Pair#getValue()
@Override
public R getValue()
Gets the value from this pair.
This method implements the Map.Entry interface, returning the right element as the value.
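Because Pair implements Map.Entry, getValue() returns the same element as getRight(). A minimal standalone sketch (the key/value strings are arbitrary):

import org.apache.commons.lang3.tuple.Pair;
import java.util.Map;

public class PairGetValueDemo {
    public static void main(String[] args) {
        Pair<String, String> pair = Pair.of("language", "Java");

        System.out.println(pair.getValue()); // Java
        System.out.println(pair.getRight()); // Java (same element)

        // A Pair can be handed to any API that expects a Map.Entry.
        Map.Entry<String, String> entry = pair;
        System.out.println(entry.getKey() + "=" + entry.getValue()); // language=Java
    }
}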
From source file:io.pravega.controller.store.stream.tables.TableHelper.java
/**
 * Find history record from the event when the given segment was sealed.
 * If segment is never sealed this method returns an empty list.
 * If segment is yet to be created, this method still returns an empty list.
 * <p>
 * Find index that corresponds to segment start event.
 * Perform binary search on index+history records to find segment seal event.
 * <p>
 * If index table is not up to date we may have two cases:
 * 1. Segment create time > highest event time in index
 * 2. Segment seal time > highest event time in index
 * <p>
 * For 1 we can't have any searches in index and will need to fall through
 * History table starting from last indexed record.
 * <p>
 * For 2, fall through History Table starting from last indexed record
 * to find segment sealed event in history table.
 *
 * @param segment      segment
 * @param indexTable   index table
 * @param historyTable history table
 * @return list of successor segment numbers, or an empty list if the segment was never sealed
 */
public static List<Integer> findSegmentSuccessorCandidates(final Segment segment, final byte[] indexTable,
        final byte[] historyTable) {
    // fetch segment start time from segment Is
    // fetch last index Ic
    // fetch record corresponding to Ic. If segment present in that history record, fall through history table
    // else perform binary searchIndex
    // Note: if segment is present at Ic, we will fall through in the history table one record at a time
    Pair<Integer, Optional<IndexRecord>> search = IndexRecord.search(segment.getStart(), indexTable);
    final Optional<IndexRecord> recordOpt = search.getValue();
    final int startingOffset = recordOpt.isPresent() ? recordOpt.get().getHistoryOffset() : 0;

    final Optional<HistoryRecord> historyRecordOpt = findSegmentCreatedEvent(startingOffset, segment, historyTable);

    // segment information not in history table
    if (!historyRecordOpt.isPresent()) {
        return new ArrayList<>();
    }

    final int lower = search.getKey() / IndexRecord.INDEX_RECORD_SIZE;
    final int upper = (indexTable.length - IndexRecord.INDEX_RECORD_SIZE) / IndexRecord.INDEX_RECORD_SIZE;

    // index table may be stale, whereby we may not find segment.start to match an entry in the index table
    final Optional<IndexRecord> indexRecord = IndexRecord.readLatestRecord(indexTable);
    // if nothing is indexed read the first record in history table, hence offset = 0
    final int lastIndexedRecordOffset = indexRecord.isPresent() ? indexRecord.get().getHistoryOffset() : 0;

    final Optional<HistoryRecord> lastIndexedRecord = HistoryRecord.readRecord(historyTable,
            lastIndexedRecordOffset, false);

    // if segment is present in history table but its offset is greater than last indexed record,
    // we can't do anything on index table, fall through. OR
    // if segment exists at the last indexed record in history table, fall through,
    // no binary search possible on index
    if (lastIndexedRecord.get().getScaleTime() < historyRecordOpt.get().getScaleTime()
            || lastIndexedRecord.get().getSegments().contains(segment.getNumber())) {
        // segment was sealed after the last index entry
        HistoryRecord startPoint = lastIndexedRecord.get().getScaleTime() < historyRecordOpt.get().getScaleTime()
                ? historyRecordOpt.get() : lastIndexedRecord.get();
        Optional<HistoryRecord> next = HistoryRecord.fetchNext(startPoint, historyTable, false);
        while (next.isPresent() && next.get().getSegments().contains(segment.getNumber())) {
            startPoint = next.get();
            next = HistoryRecord.fetchNext(startPoint, historyTable, false);
        }
        if (next.isPresent()) {
            return next.get().getSegments();
        } else {
            // we have reached end of history table which means segment was never sealed
            return new ArrayList<>();
        }
    } else {
        // segment is definitely sealed and segment sealed event is also present in index table
        // we should be able to find it by doing binary search on Index table
        final Optional<HistoryRecord> record = findSegmentSealedEvent(lower, upper, segment.getNumber(),
                indexTable, historyTable);
        return record.isPresent() ? record.get().getSegments() : new ArrayList<>();
    }
}
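IndexRecord.search above packs two results into one Pair: the matched index offset (the key) and the matched record, if any (the value). A standalone sketch of the same return-two-values idiom, with a hypothetical table of strings in place of the Pravega types:

import org.apache.commons.lang3.tuple.Pair;
import java.util.Optional;

public class SearchResultPair {
    // Returns the position reached plus the match, if one was found.
    static Pair<Integer, Optional<String>> search(String[] table, String target) {
        for (int i = 0; i < table.length; i++) {
            if (table[i].equals(target)) {
                return Pair.of(i, Optional.of(table[i]));
            }
        }
        return Pair.of(table.length, Optional.empty());
    }

    public static void main(String[] args) {
        Pair<Integer, Optional<String>> result = search(new String[] {"a", "b", "c"}, "b");
        System.out.println(result.getKey());               // 1
        System.out.println(result.getValue().isPresent()); // true
    }
}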
From source file:gndata.app.ui.util.converter.JenaPropertyClassStringConverter.java
@Override
public String toString(Pair<ObjectProperty, OntClass> pcPair) {
    return pcPair == null ? null
            : pcPair.getValue().getLocalName() + " (link by " + pcPair.getKey().getLocalName() + ")";
}
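A standalone sketch of feeding such a converter-style format, assuming a current Apache Jena (org.apache.jena packages) on the classpath; the older project may use the com.hp.hpl.jena packages, and the property/class URIs here are made up:

import org.apache.commons.lang3.tuple.Pair;
import org.apache.jena.ontology.ObjectProperty;
import org.apache.jena.ontology.OntClass;
import org.apache.jena.ontology.OntModel;
import org.apache.jena.rdf.model.ModelFactory;

public class PairToLabel {
    public static void main(String[] args) {
        OntModel model = ModelFactory.createOntologyModel();
        ObjectProperty hasFriend = model.createObjectProperty("http://example.org/hasFriend");
        OntClass person = model.createClass("http://example.org/Person");

        Pair<ObjectProperty, OntClass> pcPair = Pair.of(hasFriend, person);
        // Same formatting as the converter above: value (the class) first, key (the property) second.
        System.out.println(pcPair.getValue().getLocalName()
                + " (link by " + pcPair.getKey().getLocalName() + ")"); // Person (link by hasFriend)
    }
}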
From source file:com.twitter.distributedlog.service.balancer.CountBasedStreamChooser.java
@Override
public int compare(Pair<SocketAddress, LinkedList<String>> o1, Pair<SocketAddress, LinkedList<String>> o2) {
    return o2.getValue().size() - o1.getValue().size();
}
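A standalone sketch of the same ordering, descending by the size of each pair's value list, with made-up host names and stream lists standing in for SocketAddress and the real streams:

import org.apache.commons.lang3.tuple.Pair;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class PairValueSort {
    public static void main(String[] args) {
        List<Pair<String, List<String>>> byHost = new ArrayList<>();
        byHost.add(Pair.of("host-a", Arrays.asList("s1")));
        byHost.add(Pair.of("host-b", Arrays.asList("s1", "s2", "s3")));

        // Descending by number of streams, mirroring the comparator above.
        byHost.sort((o1, o2) -> o2.getValue().size() - o1.getValue().size());
        System.out.println(byHost.get(0).getKey()); // host-b
    }
}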
From source file:com.vmware.photon.controller.common.dcp.ServiceHostUtils.java
public static void waitForNodeGroupConvergence(ServiceHost localHost,
        Collection<Pair<String, Integer>> remoteHostIpAndPortPairs, String nodeGroupPath, int maxRetries,
        int retryInterval) throws Throwable {
    checkArgument(localHost != null, "localHost cannot be null");
    checkArgument(remoteHostIpAndPortPairs != null, "remoteHostIpAndPortPairs cannot be null");
    checkArgument(!Strings.isNullOrEmpty(nodeGroupPath), "nodeGroupPath cannot be null or empty");
    checkArgument(maxRetries > 0, "maxRetries must be > 0");

    if (remoteHostIpAndPortPairs.size() == 1) {
        // nothing to synchronize if we only have one host
        return;
    }

    for (Pair<String, Integer> remoteHostIpAndPortPair : remoteHostIpAndPortPairs) {
        int checkRetries = maxRetries;
        int checksToConvergence = REQUIRED_STABLE_STATE_COUNT;
        while (checkRetries > 0 && checksToConvergence > 0) {
            // update retry count and sleep
            checkRetries--;
            Thread.sleep(retryInterval * checksToConvergence);

            // check the host response
            NodeGroupService.NodeGroupState response = getNodeGroupState(localHost,
                    remoteHostIpAndPortPair.getKey(), remoteHostIpAndPortPair.getValue(), nodeGroupPath);
            if (response.nodes.size() < remoteHostIpAndPortPairs.size()) {
                continue;
            }

            // check host status
            checksToConvergence--;
            for (NodeState nodeState : response.nodes.values()) {
                if (nodeState.status != NodeState.NodeStatus.AVAILABLE) {
                    checksToConvergence = REQUIRED_STABLE_STATE_COUNT;
                    break;
                    // Note that we are not breaking from the above while loop where checksToConvergence is done
                    // This is because the nodes might switch between AVAILABLE and SYNCHRONIZING as the other nodes join
                }
            }
        }

        if (checkRetries == 0) {
            throw new TimeoutException("nodes did not converge");
        }
    }
}
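The remote hosts are passed in as plain Commons Lang pairs of IP and port. A standalone sketch of building and reading such a collection (the addresses and ports are made up):

import org.apache.commons.lang3.tuple.Pair;
import java.util.Arrays;
import java.util.Collection;

public class HostPortPairs {
    public static void main(String[] args) {
        // Hypothetical addresses; the real values come from the test environment.
        Collection<Pair<String, Integer>> remoteHosts = Arrays.asList(
                Pair.of("192.168.0.10", 19000),
                Pair.of("192.168.0.11", 19000));

        for (Pair<String, Integer> hostAndPort : remoteHosts) {
            // getKey() is the IP, getValue() is the port, exactly as the helper above reads them.
            System.out.println(hostAndPort.getKey() + ":" + hostAndPort.getValue());
        }
    }
}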
From source file:com.kantenkugel.kanzebot.api.command.CommandGroup.java
@Override
public boolean handleGuild(TextChannel channel, User author, Message fullMessage, String args,
        Object[] customArgs) {
    if (args.length() == 0)
        return false;
    String[] split = args.split("\\s+", 2);
    Pair<Command, ArgParser> sub = subCommands.get(split[0]);
    if (sub != null) {
        if (sub.getValue() != null) {
            ArgParser.ParserResult parserResult = sub.getValue().parseArgs(channel.getJDA(), channel, args);
            if (parserResult.getError() != null) {
                MessageUtil.sendMessage(channel,
                        parserResult.getError() + "\nUsage:\n" + sub.getKey().getUsage());
                return true;
            }
            customArgs = parserResult.getArgs();
        }
        return sub.getKey().handleGuild(channel, author, fullMessage, args, customArgs);
    } else {
        return false;
    }
}
From source file:com.kantenkugel.kanzebot.api.command.CommandGroup.java
@Override
public boolean handlePrivate(PrivateChannel channel, User author, Message fullMessage, String args,
        Object[] customArgs) {
    if (args.length() == 0)
        return false;
    String[] split = args.split("\\s+", 2);
    Pair<Command, ArgParser> sub = subCommands.get(split[0]);
    if (sub != null) {
        if (sub.getValue() != null) {
            ArgParser.ParserResult parserResult = sub.getValue().parseArgs(channel.getJDA(), null, args);
            if (parserResult.getError() != null) {
                MessageUtil.sendMessage(channel,
                        parserResult.getError() + "\nUsage:\n" + sub.getKey().getUsage());
                return true;
            }
            customArgs = parserResult.getArgs();
        }
        return sub.getKey().handlePrivate(channel, author, fullMessage, args, customArgs);
    } else {
        return false;
    }
}
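Both handlers above look up a Pair keyed by the sub-command name: the key is the Command to dispatch to, the value an optional ArgParser. A standalone sketch of that lookup pattern, with plain strings standing in for the Command and ArgParser types (the sub-command names are hypothetical):

import org.apache.commons.lang3.tuple.Pair;
import java.util.HashMap;
import java.util.Map;

public class SubCommandLookup {
    public static void main(String[] args) {
        // The value may be null when a sub-command has no dedicated argument parser.
        Map<String, Pair<String, String>> subCommands = new HashMap<>();
        subCommands.put("ban", Pair.of("BanCommand", "BanArgParser"));
        subCommands.put("ping", Pair.of("PingCommand", null));

        Pair<String, String> sub = subCommands.get("ping");
        if (sub != null) {
            if (sub.getValue() != null) {
                System.out.println("parse args with " + sub.getValue());
            }
            System.out.println("dispatch to " + sub.getKey()); // dispatch to PingCommand
        }
    }
}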
From source file:au.gov.ga.earthsci.bookmark.properties.layer.LayersProperty.java
/**
 * Add additional layer state to this property.
 *
 * @param id
 *            The id of the layer
 * @param keyandValues
 *            Key/value pairs of layer state (e.g. opacity) to store for the layer
 */
public void addLayer(String id, Pair<String, String>... keyandValues) {
    if (!layerStateInfo.containsKey(id)) {
        layerStateInfo.put(id, new ConcurrentHashMap<String, String>());
    }
    for (Pair<String, String> pair : keyandValues) {
        layerStateInfo.get(id).put(pair.getKey(), pair.getValue());
    }
}
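A standalone reworking of the same varargs pattern, storing each pair's key/value into a per-layer state map; the layer id and state keys here are hypothetical:

import org.apache.commons.lang3.tuple.Pair;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class LayerStateDemo {
    private static final Map<String, Map<String, String>> layerStateInfo = new ConcurrentHashMap<>();

    @SafeVarargs
    static void addLayer(String id, Pair<String, String>... keyAndValues) {
        if (!layerStateInfo.containsKey(id)) {
            layerStateInfo.put(id, new ConcurrentHashMap<String, String>());
        }
        for (Pair<String, String> pair : keyAndValues) {
            layerStateInfo.get(id).put(pair.getKey(), pair.getValue());
        }
    }

    public static void main(String[] args) {
        addLayer("earthsci.layer.roads", Pair.of("opacity", "0.75"), Pair.of("enabled", "true"));
        // Prints the stored state, e.g. {earthsci.layer.roads={opacity=0.75, enabled=true}}
        System.out.println(layerStateInfo);
    }
}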
From source file:com.microsoft.tooling.msservices.serviceexplorer.azure.vm.VMServiceModule.java
@Override
protected void refresh(@NotNull EventStateHandle eventState) throws AzureCmdException {
    // remove all child nodes
    removeAllChildNodes();

    AzureManager azureManager = AzureManagerImpl.getManager(getProject());

    // load all VMs
    List<Subscription> subscriptionList = azureManager.getSubscriptionList();
    List<Pair<String, String>> failedSubscriptions = new ArrayList<>();

    for (Subscription subscription : subscriptionList) {
        try {
            List<VirtualMachine> virtualMachines = azureManager.getVirtualMachines(subscription.getId());
            for (VirtualMachine vm : virtualMachines) {
                addChildNode(new VMNode(this, vm));
            }

            if (eventState.isEventTriggered()) {
                return;
            }
        } catch (Exception ex) {
            failedSubscriptions.add(new ImmutablePair<>(subscription.getName(), ex.getMessage()));
            continue;
        }
    }

    if (!failedSubscriptions.isEmpty()) {
        StringBuilder errorMessage = new StringBuilder(
                "An error occurred when trying to load VMs for the subscriptions:\n\n");
        for (Pair error : failedSubscriptions) {
            errorMessage.append(error.getKey()).append(": ").append(error.getValue()).append("\n");
        }
        DefaultLoader.getUIHelper()
                .logError("An error occurred when trying to load VMs\n\n" + errorMessage.toString(), null);
    }
}
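Failures are collected as subscription-name/error-message pairs and later rendered with getKey()/getValue(). A standalone sketch of just that aggregation and formatting step, with hypothetical subscription names and errors:

import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import java.util.ArrayList;
import java.util.List;

public class FailureReport {
    public static void main(String[] args) {
        List<Pair<String, String>> failedSubscriptions = new ArrayList<>();
        failedSubscriptions.add(new ImmutablePair<>("Dev subscription", "403 Forbidden"));
        failedSubscriptions.add(new ImmutablePair<>("Prod subscription", "request timed out"));

        StringBuilder errorMessage = new StringBuilder("Failed subscriptions:\n");
        for (Pair<String, String> error : failedSubscriptions) {
            errorMessage.append(error.getKey()).append(": ").append(error.getValue()).append("\n");
        }
        System.out.print(errorMessage);
    }
}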
From source file:com.galenframework.speclang2.specs.SpecComponentProcessor.java
private Map<String, Object> processArguments(List<Pair<String, String>> unprocessedArguments) {
    Map<String, Object> arguments = new HashMap<>();

    for (Pair<String, String> textArgument : unprocessedArguments) {
        arguments.put(textArgument.getKey(), processArgumentValue(textArgument.getValue()));
    }

    return arguments;
}
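Because Pair exposes getKey()/getValue() like a Map.Entry, the same pair-list-to-map collection can also be written with a stream. A minimal sketch assuming the values need no further processing (the argument names are made up):

import org.apache.commons.lang3.tuple.Pair;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class ArgumentsToMap {
    public static void main(String[] args) {
        // Hypothetical component arguments, as name/value text pairs.
        List<Pair<String, String>> unprocessed = Arrays.asList(
                Pair.of("width", "100px"),
                Pair.of("title", "login form"));

        Map<String, String> arguments = unprocessed.stream()
                .collect(Collectors.toMap(Pair::getKey, Pair::getValue));
        System.out.println(arguments);
    }
}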
From source file:code.elix_x.excore.utils.net.packets.runnable.RunnableMessageHandler.java
@Override
public REPLY onMessage(REQ message, MessageContext ctx) {
    Pair<Runnable, REPLY> pair = run.apply(new ImmutablePair<REQ, MessageContext>(message, ctx));
    getThreadListener(ctx).addScheduledTask(pair.getKey());
    return pair.getValue();
}
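The handler's function returns a Pair whose key is work to schedule on the owning thread and whose value is the immediate reply. A generic standalone sketch of that split, with a String standing in for the REPLY type and a hypothetical message:

import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;

public class TaskAndReply {
    static Pair<Runnable, String> handle(String message) {
        Runnable deferredWork = () -> System.out.println("processing " + message + " on the main thread");
        String immediateReply = "ack:" + message;
        return new ImmutablePair<>(deferredWork, immediateReply);
    }

    public static void main(String[] args) {
        Pair<Runnable, String> pair = handle("hello");
        // The caller schedules the key and returns the value, as onMessage does above.
        pair.getKey().run();
        System.out.println(pair.getValue()); // ack:hello
    }
}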