List of usage examples for org.apache.commons.lang3.tuple Pair getKey
@Override public final L getKey()
Gets the key from this pair.
This method implements the Map.Entry interface, returning the left element as the key.
From source file:com.snaplogic.snaps.lunex.BaseService.java
private StringBuilder prepareJson(List<Pair<String, ExpressionProperty>> requestContent, Document document) { StringBuilder json = new StringBuilder(); StringBuilder subJson = new StringBuilder(); boolean isSubJsonRequired = false, isEmptyJson = true; if (requestContent != null) { if (resourceType.equals(CResource.NewOrder.toString()) || resourceType.equals(CResource.PreOrder.toString())) { subJson.append(QUOTE).append(ADDRESS).append(QUOTE).append(COLON).append(OPENTAG); isSubJsonRequired = true;// w w w. j a va2s.c o m } for (Pair<String, ExpressionProperty> paramPair : requestContent) { if (isSubJsonRequired && ADDRESS_JSONOBJ.contains(paramPair.getKey())) { subJson.append(getJsonSlice(paramPair, document)); isEmptyJson = false; } else { json.append(getJsonSlice(paramPair, document)); } } if (!isEmptyJson) { subJson.append(CLOSETAG).append(COMMA); } return new StringBuilder().append(OPENTAG).append(subJson).append(json.deleteCharAt(json.length() - 1)) .append(CLOSETAG); } return new StringBuilder(""); }
From source file:com.microsoft.azure.storage.blob.BlobEncryptionPolicy.java
/**
 * Set up the encryption context required for encrypting blobs.
 *
 * @param metadata
 *            Reference to blob metadata object that is used to set the encryption materials.
 * @param noPadding
 *            Value indicating if the padding mode should be set or not.
 * @return The Cipher to use to decrypt the blob.
 * @throws StorageException
 *             An exception representing any error which occurred during the operation.
 */
Cipher createAndSetEncryptionContext(Map<String, String> metadata, boolean noPadding) throws StorageException {
    Utility.assertNotNull("metadata", metadata);

    // The Key should be set on the policy for encryption. Otherwise, throw an error.
    if (this.keyWrapper == null) {
        throw new IllegalArgumentException(SR.KEY_MISSING);
    }

    try {
        // Generate a fresh 256-bit AES content key for this blob.
        KeyGenerator keyGen = KeyGenerator.getInstance("AES");
        keyGen.init(256);

        Cipher myAes;
        if (noPadding) {
            myAes = Cipher.getInstance("AES/CBC/NoPadding");
        } else {
            myAes = Cipher.getInstance("AES/CBC/PKCS5Padding");
        }

        SecretKey aesKey = keyGen.generateKey();
        myAes.init(Cipher.ENCRYPT_MODE, aesKey);

        BlobEncryptionData encryptionData = new BlobEncryptionData();
        encryptionData.setEncryptionAgent(new EncryptionAgent(
                Constants.EncryptionConstants.ENCRYPTION_PROTOCOL_V1, EncryptionAlgorithm.AES_CBC_256));

        // Wrap key. The pair's key is the wrapped key bytes; the value is presumably the
        // wrapping algorithm chosen by the wrapper (we pass null for algorithm) — matches
        // the WrappedContentKey(kid, key, value) constructor arguments below.
        Pair<byte[], String> encryptedKey = this.keyWrapper
                .wrapKeyAsync(aesKey.getEncoded(), null /* algorithm */).get();
        encryptionData.setWrappedContentKey(new WrappedContentKey(this.keyWrapper.getKid(),
                encryptedKey.getKey(), encryptedKey.getValue()));
        encryptionData.setContentEncryptionIV(myAes.getIV());

        // Persist the serialized encryption metadata on the blob so it can be decrypted later.
        metadata.put(Constants.EncryptionConstants.BLOB_ENCRYPTION_DATA, encryptionData.serialize());

        return myAes;
    } catch (Exception e) {
        throw StorageException.translateClientException(e);
    }
}
From source file:alfio.manager.AdminReservationManagerIntegrationTest.java
/**
 * Reserves all available seats into a brand-new (not yet existing) category and verifies that
 * the category is created, sized to the attendee count, and that special prices move from
 * PENDING to TAKEN when the reservation is confirmed.
 */
@Test
public void testReserveFromNewCategory() throws Exception {
    List<TicketCategoryModification> categories = Collections.singletonList(new TicketCategoryModification(null,
            "default", 1, new DateTimeModification(LocalDate.now(), LocalTime.now()),
            new DateTimeModification(LocalDate.now(), LocalTime.now()), DESCRIPTION, BigDecimal.TEN, false, "",
            true, null, null, null, null, null));
    Pair<Event, String> eventWithUsername = initEvent(categories, organizationRepository, userManager,
            eventManager, eventRepository);
    Event event = eventWithUsername.getKey();
    String username = eventWithUsername.getValue();
    DateTimeModification expiration = DateTimeModification.fromZonedDateTime(ZonedDateTime.now().plusDays(1));
    CustomerData customerData = new CustomerData("Integration", "Test", "integration-test@test.ch",
            "Billing Address", "en");
    // Category with a null id: forces the manager to create a new category for the reservation.
    Category category = new Category(null, "name", new BigDecimal("100.00"));
    int attendees = AVAILABLE_SEATS;
    List<TicketsInfo> ticketsInfoList = Collections
            .singletonList(new TicketsInfo(category, generateAttendees(attendees), true, false));
    AdminReservationModification modification = new AdminReservationModification(expiration, customerData,
            ticketsInfoList, "en", false, null);
    Result<Pair<TicketReservation, List<Ticket>>> result = adminReservationManager
            .createReservation(modification, event.getShortName(), username);
    assertTrue(result.isSuccess());
    Pair<TicketReservation, List<Ticket>> data = result.getData();
    List<Ticket> tickets = data.getRight();
    assertTrue(tickets.size() == attendees);
    assertNotNull(data.getLeft());
    int categoryId = tickets.get(0).getCategoryId();
    // NOTE(review): 'modified' is never used — consider removing it or asserting on it.
    Event modified = eventManager.getSingleEvent(event.getShortName(), username);
    assertEquals(attendees + 1, eventRepository.countExistingTickets(event.getId()).intValue());
    assertEquals(attendees,
            ticketRepository.findPendingTicketsInCategories(Collections.singletonList(categoryId)).size());
    TicketCategory categoryModified = ticketCategoryRepository.getByIdAndActive(categoryId, event.getId());
    assertEquals(categoryModified.getMaxTickets(), attendees);
    ticketCategoryRepository.findByEventId(event.getId())
            .forEach(tc -> assertTrue(specialPriceRepository.findAllByCategoryId(tc.getId()).stream()
                    .allMatch(sp -> sp.getStatus() == SpecialPrice.Status.PENDING)));
    adminReservationManager.confirmReservation(event.getShortName(), data.getLeft().getId(), username);
    ticketCategoryRepository.findByEventId(event.getId())
            .forEach(tc -> assertTrue(specialPriceRepository.findAllByCategoryId(tc.getId()).stream()
                    .allMatch(sp -> sp.getStatus() == SpecialPrice.Status.TAKEN)));
    assertFalse(ticketRepository.findAllReservationsConfirmedButNotAssigned(event.getId())
            .contains(data.getLeft().getId()));
}
From source file:com.samsung.sjs.backend.CBackend.java
/**
 * Returns a new ID for a new vtable, or the id of an existing
 * vtable if argument vt is identical to a previous vtable array.
 *
 * Vtables are memoized in {@code vtables_by_hash}, a map from {@code Arrays.hashCode(vt)} to a
 * set of (vtable array, id) pairs; pairs sharing a hash are disambiguated with
 * {@code Arrays.equals}.
 */
protected int memo_vtable(int[] vt) {
    int result = -1;
    int hash = Arrays.hashCode(vt);
    int oldsize = vtables_by_hash.size();
    int oldsetsize = -1;
    boolean collision = false;
    if (vtables_by_hash.containsKey(hash)) {
        Set<Pair<int[], Integer>> possible_matches = vtables_by_hash.get(hash);
        assert (possible_matches != null);
        // Linear scan of the bucket: content equality, not reference equality.
        for (Pair<int[], Integer> test : possible_matches) {
            if (Arrays.equals(test.getKey(), vt)) {
                collision = true;
                result = test.getValue();
            }
        }
        if (!collision) {
            // We hit an existing hash bucket, but don't match
            result = next_vtable_id++;
            Pair<int[], Integer> newpair = Pair.of(vt, result);
            oldsetsize = possible_matches.size();
            possible_matches.add(newpair);
            assert (possible_matches.size() > oldsetsize);
        }
    } else {
        // We don't match any existing bucket
        result = next_vtable_id++;
        Pair<int[], Integer> newpair = Pair.of(vt, result);
        Set<Pair<int[], Integer>> newset = new HashSet<Pair<int[], Integer>>();
        newset.add(newpair);
        vtables_by_hash.put(hash, newset);
        assert (vtables_by_hash.size() >= oldsize);
    }
    assert (result >= 0); // we initialize next_vtable_id to 0, so -1 is invalid
    return result;
}
From source file:alfio.manager.AdminReservationManagerIntegrationTest.java
/**
 * Shared driver for reservation tests against already-existing categories: creates an event
 * from {@code categories}, builds one TicketsInfo per existing category (consuming
 * {@code attendeesNr} in order), optionally pre-reserves {@code reservedTickets} tickets in the
 * first category, then performs the admin reservation.
 *
 * @return (event, username, reservation) on expected success; {@code null} when
 *         {@code expectSuccess} is false (after asserting the failure).
 */
private Triple<Event, String, TicketReservation> performExistingCategoryTest(
        List<TicketCategoryModification> categories, boolean bounded, List<Integer> attendeesNr,
        boolean addSeatsIfNotAvailable, boolean expectSuccess, int reservedTickets, int expectedEventSeats) {
    assertEquals("Test error: categories' size must be equal to attendees' size", categories.size(),
            attendeesNr.size());
    Pair<Event, String> eventWithUsername = initEvent(categories, organizationRepository, userManager,
            eventManager, eventRepository);
    Event event = eventWithUsername.getKey();
    String username = eventWithUsername.getValue();
    DateTimeModification expiration = DateTimeModification.fromZonedDateTime(ZonedDateTime.now().plusDays(1));
    CustomerData customerData = new CustomerData("Integration", "Test", "integration-test@test.ch",
            "Billing Address", "en");
    Iterator<Integer> attendeesIterator = attendeesNr.iterator();
    List<TicketCategory> existingCategories = ticketCategoryRepository.findByEventId(event.getId());
    List<Attendee> allAttendees = new ArrayList<>();
    // One TicketsInfo per existing category, pairing it with the next attendee count in order.
    List<TicketsInfo> ticketsInfoList = existingCategories.stream().map(existingCategory -> {
        Category category = new Category(existingCategory.getId(), existingCategory.getName(),
                existingCategory.getPrice());
        List<Attendee> attendees = generateAttendees(attendeesIterator.next());
        allAttendees.addAll(attendees);
        return new TicketsInfo(category, attendees, addSeatsIfNotAvailable, false);
    }).collect(toList());
    AdminReservationModification modification = new AdminReservationModification(expiration, customerData,
            ticketsInfoList, "en", false, null);
    if (reservedTickets > 0) {
        // Pre-occupy some tickets in the first category so the admin reservation competes for seats.
        TicketReservationModification trm = new TicketReservationModification();
        trm.setAmount(reservedTickets);
        trm.setTicketCategoryId(existingCategories.get(0).getId());
        TicketReservationWithOptionalCodeModification r = new TicketReservationWithOptionalCodeModification(trm,
                Optional.empty());
        ticketReservationManager.createTicketReservation(event, Collections.singletonList(r),
                Collections.emptyList(), DateUtils.addDays(new Date(), 1), Optional.empty(), Optional.empty(),
                Locale.ENGLISH, false);
    }
    Result<Pair<TicketReservation, List<Ticket>>> result = adminReservationManager
            .createReservation(modification, event.getShortName(), username);
    if (expectSuccess) {
        validateSuccess(bounded, attendeesNr, event, username, existingCategories, result, allAttendees,
                expectedEventSeats, reservedTickets);
    } else {
        assertFalse(result.isSuccess());
        return null;
    }
    return Triple.of(eventWithUsername.getLeft(), eventWithUsername.getRight(), result.getData().getKey());
}
From source file:alfio.manager.AdminReservationManagerIntegrationTest.java
/**
 * Reserves tickets in a mix of an existing category (referenced twice) and a new category in a
 * single admin reservation, then verifies ticket counts, reservation membership, and the
 * PENDING-to-TAKEN special-price transition on confirmation.
 */
@Test
public void testReserveMixed() throws Exception {
    List<TicketCategoryModification> categories = Collections.singletonList(new TicketCategoryModification(null,
            "default", 1, new DateTimeModification(LocalDate.now(), LocalTime.now()),
            new DateTimeModification(LocalDate.now(), LocalTime.now()), DESCRIPTION, BigDecimal.TEN, false, "",
            false, null, null, null, null, null));
    Pair<Event, String> eventWithUsername = initEvent(categories, organizationRepository, userManager,
            eventManager, eventRepository);
    Event event = eventWithUsername.getKey();
    String username = eventWithUsername.getValue();
    DateTimeModification expiration = DateTimeModification.fromZonedDateTime(ZonedDateTime.now().plusDays(1));
    CustomerData customerData = new CustomerData("Integration", "Test", "integration-test@test.ch",
            "Billing Address", "en");
    TicketCategory existingCategory = ticketCategoryRepository.findByEventId(event.getId()).get(0);
    Category resExistingCategory = new Category(existingCategory.getId(), "", existingCategory.getPrice());
    // Null id: the manager must create this category on the fly.
    Category resNewCategory = new Category(null, "name", new BigDecimal("100.00"));
    int attendees = 1;
    List<TicketsInfo> ticketsInfoList = Arrays.asList(
            new TicketsInfo(resExistingCategory, generateAttendees(attendees), false, false),
            new TicketsInfo(resNewCategory, generateAttendees(attendees), false, false),
            new TicketsInfo(resExistingCategory, generateAttendees(attendees), false, false));
    AdminReservationModification modification = new AdminReservationModification(expiration, customerData,
            ticketsInfoList, "en", false, null);
    Result<Pair<TicketReservation, List<Ticket>>> result = adminReservationManager
            .createReservation(modification, event.getShortName(), username);
    assertTrue(result.isSuccess());
    Pair<TicketReservation, List<Ticket>> data = result.getData();
    List<Ticket> tickets = data.getRight();
    assertTrue(tickets.size() == 3);
    assertNotNull(data.getLeft());
    assertTrue(tickets.stream().allMatch(t -> t.getTicketsReservationId().equals(data.getKey().getId())));
    int resExistingCategoryId = tickets.get(0).getCategoryId();
    int resNewCategoryId = tickets.get(2).getCategoryId();
    // NOTE(review): 'modified' is never used — consider removing it or asserting on it.
    Event modified = eventManager.getSingleEvent(event.getShortName(), username);
    assertEquals(AVAILABLE_SEATS, eventRepository.countExistingTickets(event.getId()).intValue());
    assertEquals(3, ticketRepository
            .findPendingTicketsInCategories(Arrays.asList(resExistingCategoryId, resNewCategoryId)).size());
    assertEquals(3, ticketRepository.findTicketsInReservation(data.getLeft().getId()).size());
    String reservationId = data.getLeft().getId();
    assertEquals(ticketRepository.findTicketsInReservation(reservationId).stream().findFirst().get().getId(),
            ticketRepository.findFirstTicketInReservation(reservationId).get().getId());
    ticketCategoryRepository.findByEventId(event.getId())
            .forEach(tc -> assertTrue(specialPriceRepository.findAllByCategoryId(tc.getId()).stream()
                    .allMatch(sp -> sp.getStatus() == SpecialPrice.Status.PENDING)));
    adminReservationManager.confirmReservation(event.getShortName(), data.getLeft().getId(), username);
    ticketCategoryRepository.findByEventId(event.getId())
            .forEach(tc -> assertTrue(specialPriceRepository.findAllByCategoryId(tc.getId()).stream()
                    .allMatch(sp -> sp.getStatus() == SpecialPrice.Status.TAKEN)));
    assertFalse(ticketRepository.findAllReservationsConfirmedButNotAssigned(event.getId())
            .contains(data.getLeft().getId()));
}
From source file:com.pgcraft.spectatorplus.guis.SpectatorsToolsGUI.java
/**
 * Rebuilds the spectators' tools inventory GUI: sizes the inventory according to which tools
 * are enabled, renders the speed-selection row, then lays out the remaining tools (no-clip,
 * night vision, diving suit, teleport-to-death-point) on the following row(s).
 */
@Override
protected void onUpdate() {
    Spectator spectator = SpectatorPlus.get().getPlayerData(getPlayer());

    /* ** ----- Size ----- ** */

    // We first need to know what is the size of the inventory
    // If a death location is registered for this player, and if every tool is
    // enabled, a line will have to be added.
    // That's why this is defined here, not below.
    // If the "tp to death" tool is disabled, the death location is not set. So it's useless to
    // check this here.
    Location deathPoint = spectator.getDeathLocation();

    int height = 0, offset = 0;
    if (Toggles.TOOLS_TOOLS_SPEED.get()) {
        height++;
        offset = 9; // speed tools occupy the first row, pushing the other tools down one row
    }
    if (Toggles.TOOLS_TOOLS_DIVINGSUIT.get() || Toggles.TOOLS_TOOLS_NIGHTVISION.get()
            || Toggles.TOOLS_TOOLS_NOCLIP.get()
            || (Toggles.TOOLS_TOOLS_TPTODEATH_ENABLED.get() && deathPoint != null))
        height++;
    if (Toggles.TOOLS_TOOLS_DIVINGSUIT.get() && Toggles.TOOLS_TOOLS_NIGHTVISION.get()
            && Toggles.TOOLS_TOOLS_NOCLIP.get() && Toggles.TOOLS_TOOLS_TPTODEATH_ENABLED.get()
            && deathPoint != null)
        height++;

    setSize(height * 9);
    setTitle(ChatColor.BLACK + "Spectators' tools");

    /* ** ----- Active tools & effects ----- ** */

    // Retrieves the current speed level, and the other enabled effects
    // 0 = no speed; 1 = speed I, etc.
    Integer speedLevel = 0;
    Boolean nightVisionActive = false;

    for (PotionEffect effect : getPlayer().getActivePotionEffects()) {
        if (effect.getType().equals(PotionEffectType.SPEED)) {
            speedLevel = effect.getAmplifier() + 1; // +1 because Speed I = amplifier 0.
        } else if (effect.getType().equals(PotionEffectType.NIGHT_VISION)) {
            nightVisionActive = true;
        }
    }

    Boolean divingSuitEquipped = false;
    if (getPlayer().getInventory().getBoots() != null
            && getPlayer().getInventory().getBoots().getType() == Material.DIAMOND_BOOTS) {
        divingSuitEquipped = true;
    }

    List<String> activeLore = Collections.singletonList("" + ChatColor.GRAY + ChatColor.ITALIC + "Active");

    /* ** ----- Speed tools ----- ** */

    if (Toggles.TOOLS_TOOLS_SPEED.get()) {
        // Normal speed
        ItemStack normalSpeed = GuiUtils.makeItem(Material.STRING, ChatColor.DARK_AQUA + "Normal speed",
                speedLevel == 0 ? activeLore : null);
        if (speedLevel == 0)
            GlowEffect.addGlow(normalSpeed);
        action("speed_0", 2, normalSpeed);

        // Speed I
        ItemStack speedI = GuiUtils.makeItem(Material.FEATHER, ChatColor.AQUA + "Speed I",
                speedLevel == 1 ? activeLore : null);
        if (speedLevel == 1)
            GlowEffect.addGlow(speedI);
        action("speed_1", 3, speedI);

        // Speed II (stack size mirrors the speed level for at-a-glance reading)
        ItemStack speedII = GuiUtils.makeItem(Material.FEATHER, ChatColor.AQUA + "Speed II",
                speedLevel == 2 ? activeLore : null);
        speedII.setAmount(2);
        if (speedLevel == 2)
            GlowEffect.addGlow(speedII);
        action("speed_2", 4, speedII);

        // Speed III
        ItemStack speedIII = GuiUtils.makeItem(Material.FEATHER, ChatColor.AQUA + "Speed III",
                speedLevel == 3 ? activeLore : null);
        speedIII.setAmount(3);
        if (speedLevel == 3)
            GlowEffect.addGlow(speedIII);
        action("speed_3", 5, speedIII);

        // Speed IV
        ItemStack speedIV = GuiUtils.makeItem(Material.FEATHER, ChatColor.AQUA + "Speed IV",
                speedLevel == 4 ? activeLore : null);
        speedIV.setAmount(4);
        if (speedLevel == 4)
            GlowEffect.addGlow(speedIV);
        action("speed_4", 6, speedIV);
    }

    /* ** ----- Lines 2 & 3: content ----- ** */

    List<Pair<String, ItemStack>> toolsOnLine2 = new ArrayList<>();

    // No-clip
    if (Toggles.TOOLS_TOOLS_NOCLIP.get()) {
        ItemStack noClip = GuiUtils.makeItem(Material.BARRIER, ChatColor.LIGHT_PURPLE + "No-clip mode",
                Arrays.asList(ChatColor.GRAY + "Allows you to go through all the blocks.", "",
                        ChatColor.GRAY + "You can also first-spectate a player",
                        ChatColor.GRAY + "by left-clicking on him",
                        ChatColor.DARK_GRAY + "Use Shift to quit the first-person",
                        ChatColor.DARK_GRAY + "spectator mode.", "",
                        ChatColor.DARK_GRAY + "" + ChatColor.ITALIC + "In this mode, open your inventory",
                        ChatColor.DARK_GRAY + "" + ChatColor.ITALIC + "to access controls!"));
        toolsOnLine2.add(Pair.of("noClip", noClip));
    }

    // Night vision
    if (Toggles.TOOLS_TOOLS_NIGHTVISION.get()) {
        ItemStack nightVision = GuiUtils.makeItem(
                nightVisionActive ? Material.EYE_OF_ENDER : Material.ENDER_PEARL,
                nightVisionActive ? ChatColor.DARK_PURPLE + "Disable night vision"
                        : ChatColor.GOLD + "Enable night vision");
        toolsOnLine2.add(Pair.of("nightVision", nightVision));
    }

    // Diving suit
    if (Toggles.TOOLS_TOOLS_DIVINGSUIT.get()) {
        ItemStack divingSuit = GuiUtils.makeItem(Material.DIAMOND_BOOTS, ChatColor.BLUE + "Diving suit",
                Collections.singletonList(ChatColor.GRAY + "Get a pair of Depth Strider III boots"));
        if (divingSuitEquipped) {
            ItemMeta meta = divingSuit.getItemMeta();
            List<String> lore = meta.getLore();
            lore.add(activeLore.get(0));
            meta.setLore(lore);
            divingSuit.setItemMeta(meta);
            GlowEffect.addGlow(divingSuit);
        }
        toolsOnLine2.add(Pair.of("divingSuit", divingSuit));
    }

    // Teleportation to the death point
    ItemStack tpToDeathPoint = null;
    if (Toggles.TOOLS_TOOLS_TPTODEATH_ENABLED.get() && deathPoint != null) {
        tpToDeathPoint = GuiUtils.makeItem(Material.NETHER_STAR, ChatColor.YELLOW + "Go to your death point",
                Toggles.TOOLS_TOOLS_TPTODEATH_DISPLAYCAUSE.get() && spectator.getLastDeathMessage() != null
                        ? Collections.singletonList(ChatColor.GRAY + spectator.getLastDeathMessage())
                        : null);
    }

    /* ** ----- Lines 2 & 3: display ----- ** */

    int lineSize = toolsOnLine2.size();

    if (lineSize == 0 && deathPoint != null) {
        action("deathPoint", offset + 4, tpToDeathPoint);
    } else if (lineSize == 1) {
        if (deathPoint != null) {
            final Pair<String, ItemStack> toolZero = toolsOnLine2.get(0);
            action(toolZero.getKey(), offset + 2, toolZero.getValue());
            action("deathPoint", offset + 6, tpToDeathPoint);
        } else {
            final Pair<String, ItemStack> toolZero = toolsOnLine2.get(0);
            action(toolZero.getKey(), offset + 4, toolZero.getValue());
        }
    } else if (lineSize == 2) {
        if (deathPoint != null) {
            final Pair<String, ItemStack> toolZero = toolsOnLine2.get(0);
            final Pair<String, ItemStack> toolOne = toolsOnLine2.get(1);
            action(toolZero.getKey(), offset + 2, toolZero.getValue());
            action(toolOne.getKey(), offset + 4, toolOne.getValue());
            action("deathPoint", offset + 6, tpToDeathPoint);
        } else {
            final Pair<String, ItemStack> toolZero = toolsOnLine2.get(0);
            final Pair<String, ItemStack> toolOne = toolsOnLine2.get(1);
            action(toolZero.getKey(), offset + 2, toolZero.getValue());
            action(toolOne.getKey(), offset + 6, toolOne.getValue());
        }
    } else if (lineSize == 3) {
        final Pair<String, ItemStack> toolZero = toolsOnLine2.get(0);
        final Pair<String, ItemStack> toolOne = toolsOnLine2.get(1);
        final Pair<String, ItemStack> toolTwo = toolsOnLine2.get(2);
        action(toolZero.getKey(), offset + 2, toolZero.getValue());
        action(toolOne.getKey(), offset + 4, toolOne.getValue());
        action(toolTwo.getKey(), offset + 6, toolTwo.getValue());
        if (deathPoint != null) {
            // NOTE(review): "deathPoint" is placed in the same slot (offset + 6) as toolTwo,
            // overwriting it — given the extra row added in the size computation above, the
            // intended slot is probably on the next row. Confirm against the GUI layout.
            action("deathPoint", offset + 6, tpToDeathPoint);
        }
    }
}
From source file:forge.deck.io.OldDeckParser.java
/**
 * Converts legacy .dck deck files into the new storage: custom-pool and constructed decks are
 * imported individually (asking the user once whether unconvertible decks may be deleted),
 * while limited (sealed) decks are paired up human/AI by name before conversion. Orphaned
 * sealed halves are reported at the end and optionally deleted.
 */
private void convertConstructedAndSealed() {
    boolean allowDeleteUnsupportedConstructed = false;
    // name -> (deck group, (human deck file, AI deck file)); case-insensitive to match pairs.
    final Map<String, Pair<DeckGroup, MutablePair<File, File>>> sealedDecks = new TreeMap<String, Pair<DeckGroup, MutablePair<File, File>>>(
            String.CASE_INSENSITIVE_ORDER);

    for (final File f : this.deckDir.listFiles(DeckStorage.DCK_FILE_FILTER)) {
        boolean importedOk = false;

        final List<String> fileLines = FileUtil.readFile(f);
        final Map<String, List<String>> sections = FileSection.parseSections(fileLines);
        final DeckFileHeader dh = DeckSerializer.readDeckMetadata(sections);
        String name = dh.getName();

        if (dh.isCustomPool()) {
            try {
                this.cube.add(DeckSerializer.fromSections(sections));
                importedOk = true;
            } catch (final NoSuchElementException ex) {
                // Ask only once; a positive answer applies to all later failures too.
                if (!allowDeleteUnsupportedConstructed) {
                    final String msg = String.format(
                            "Can not convert deck '%s' for some unsupported cards it contains. %n%s%n%nMay Forge delete all such decks?",
                            name, ex.getMessage());
                    allowDeleteUnsupportedConstructed = SOptionPane.showConfirmDialog(msg,
                            "Problem converting decks");
                }
            }
            if (importedOk || allowDeleteUnsupportedConstructed) {
                f.delete();
            }
            continue;
        }

        switch (dh.getDeckType()) {
        case Constructed:
            try {
                this.constructed.add(DeckSerializer.fromSections(sections));
                importedOk = true;
            } catch (final NoSuchElementException ex) {
                if (!allowDeleteUnsupportedConstructed) {
                    final String msg = String.format(
                            "Can not convert deck '%s' for some unsupported cards it contains. %n%s%n%nMay Forge delete all such decks?",
                            name, ex.getMessage());
                    allowDeleteUnsupportedConstructed = SOptionPane.showConfirmDialog(msg,
                            "Problem converting decks");
                }
            }
            if (importedOk || allowDeleteUnsupportedConstructed) {
                f.delete();
            }
            break;

        case Limited:
            // AI halves share the base name with an "AI_" prefix; strip it to pair them up.
            name = name.startsWith("AI_") ? name.replace("AI_", "") : name;
            Pair<DeckGroup, MutablePair<File, File>> stored = sealedDecks.get(name);
            if (null == stored) {
                stored = ImmutablePair.of(new DeckGroup(name), MutablePair.of((File) null, (File) null));
            }

            final Deck deck = DeckSerializer.fromSections(sections);
            if (dh.isIntendedForAi()) {
                stored.getLeft().addAiDeck(deck);
                stored.getRight().setRight(f);
            } else {
                stored.getLeft().setHumanDeck(deck);
                stored.getRight().setLeft(f);
            }

            if ((stored.getLeft().getHumanDeck() != null) && !stored.getLeft().getAiDecks().isEmpty()) {
                // have both parts of sealed deck, may convert
                this.sealed.add(stored.getLeft());
                stored.getRight().getLeft().delete();
                stored.getRight().getRight().delete();
                // there stay only orphans
                sealedDecks.remove(name);
            } else {
                sealedDecks.put(name, stored);
            }
            break;

        default:
            break;
        }
    }

    // advise to kill orphaned decks
    if (!sealedDecks.isEmpty()) {
        final StringBuilder sb = new StringBuilder();
        for (final Pair<DeckGroup, MutablePair<File, File>> s : sealedDecks.values()) {
            final String missingPart = s.getRight().getLeft() == null ? "human" : "computer";
            sb.append(String.format("Sealed deck '%s' has no matching '%s' deck.%n", s.getKey().getName(),
                    missingPart));
        }
        sb.append(System.getProperty("line.separator"));
        sb.append("May Forge delete these decks?");
        if (SOptionPane.showConfirmDialog(sb.toString(), "Some of your sealed decks are orphaned")) {
            for (final Pair<DeckGroup, MutablePair<File, File>> s : sealedDecks.values()) {
                if (s.getRight().getLeft() != null) {
                    s.getRight().getLeft().delete();
                }
                if (s.getRight().getRight() != null) {
                    s.getRight().getRight().delete();
                }
            }
        }
    }
}
From source file:com.epam.dlab.backendapi.core.commands.CommandParserMock.java
/**
 * Parse command line.
 *
 * @param cmd  command line.
 * @param uuid request id assigned to this command; also used to derive the mock
 *             instance/cluster/notebook identifiers.
 */
public void parse(String cmd, String uuid) {
    // Reset all parse state so the mock can be reused across multiple commands.
    command = null;
    action = null;
    resourceType = null;
    imageType = null;
    requestId = uuid;
    responsePath = null;
    name = null;
    json = null;
    envMap.clear();
    varMap.clear();
    otherArgs.clear();
    variables.clear();

    List<String> args = extractArgs(cmd);
    dockerCommand = args.contains("docker");

    int i = 0;
    String s;
    Pair<String, String> p;
    while (i < args.size()) {
        if ((s = getArgValue(args, i, "-v")) != null) {
            // Volume mapping: stored as container-path -> host-path (pair value keyed first).
            p = getPair("-v", s, ":");
            varMap.put(p.getValue(), p.getKey());
        } else if ((s = getArgValue(args, i, "-e")) != null) {
            // Environment variable: NAME=value.
            p = getPair("-e", s, "=");
            envMap.put(p.getKey(), p.getValue());
        } else if ((s = getArgValue(args, i, "docker")) != null || (s = getArgValue(args, i, "python")) != null) {
            command = s;
        } else if ((s = getArgValue(args, i, "--action")) != null) {
            action = s;
        } else if ((s = getArgValue(args, i, "--name")) != null) {
            name = s;
        } else if ((s = getArgValue(args, i, "echo")) != null) {
            if (s.equals("-e")) {
                // "echo -e" — the JSON payload is the next argument.
                if (i >= args.size()) {
                    throw new DlabException("Argument \"echo -e\" detected but not have value");
                }
                s = args.get(i);
                args.remove(i);
            }
            json = s;
        } else if ((s = getArgValue(args, i, "--result_path")) != null) {
            responsePath = s;
            varMap.put("/response", responsePath);
            // NOTE(review): only this branch and "echo -e" call args.remove(i) — confirm whether
            // getArgValue already consumes arguments in the other branches.
            args.remove(i);
        } else {
            i++;
        }
    }
    if (args.size() > 0) {
        otherArgs.addAll(args);
    }

    resourceType = envMap.get("conf_resource");
    if (isDockerCommand()) {
        // Strip the registry prefix and tag to leave the bare image type.
        imageType = getImageName(args);
        imageType = imageType.replace("docker.dlab-", "").replace(":latest", "");
    }
    responsePath = varMap.get("/response");

    variables.putAll(envMap);
    variables.putAll(getJsonVariables(json));
    variables.put("request_id", requestId);
    variables.put("instance_id", "i-" + requestId.replace("-", "").substring(0, 17));
    variables.put("cluster_id", "j-" + requestId.replace("-", "").substring(0, 13).toUpperCase());
    variables.put("notebook_id", requestId.replace("-", "").substring(17, 22));
}
From source file:com.linkedin.pinot.routing.builder.KafkaLowLevelConsumerRoutingTableBuilder.java
/**
 * Builds {@code routingTableCount} routing tables from the Helix external view for a low-level
 * Kafka consumer table, guaranteeing at most one CONSUMING segment per Kafka partition per
 * query (to avoid duplicate records) and spreading segments across replicas via weighted
 * random selection.
 */
@Override
public List<ServerToSegmentSetMap> computeRoutingTableFromExternalView(String tableName,
        ExternalView externalView, List<InstanceConfig> instanceConfigList) {
    // We build the routing table based off the external view here. What we want to do is to make
    // sure that we uphold the guarantees clients expect (no duplicate records, eventual
    // consistency) and spreading the load as equally as possible between the servers.
    //
    // Each Kafka partition contains a fraction of the data, so we need to make sure that we query
    // all partitions. Because in certain unlikely degenerate scenarios, we can consume overlapping
    // data until segments are flushed (at which point the overlapping data is discarded during the
    // reconciliation process with the controller), we need to ensure that the query that is sent
    // has only one partition in CONSUMING state in order to avoid duplicate records.
    //
    // Because we also want to spread the load as equally as possible between servers, we use a
    // weighted random replica selection that favors picking replicas with fewer segments assigned
    // to them, thus having an approximately equal distribution of load between servers.
    //
    // For example, given three replicas with 1, 2 and 3 segments assigned to each, the replica
    // with one segment should have a weight of 2, which is the maximum segment count minus the
    // segment count for that replica. Thus, each replica other than the replica(s) with the
    // maximum segment count should have a chance of getting a segment assigned to it. This
    // corresponds to alternative three below:
    //
    // Alternative 1 (weight is sum of segment counts - segment count in that replica):
    // (6 - 1) = 5 -> P(0.4166)
    // (6 - 2) = 4 -> P(0.3333)
    // (6 - 3) = 3 -> P(0.2500)
    //
    // Alternative 2 (weight is max of segment counts - segment count in that replica + 1):
    // (3 - 1) + 1 = 3 -> P(0.5000)
    // (3 - 2) + 1 = 2 -> P(0.3333)
    // (3 - 3) + 1 = 1 -> P(0.1666)
    //
    // Alternative 3 (weight is max of segment counts - segment count in that replica):
    // (3 - 1) = 2 -> P(0.6666)
    // (3 - 2) = 1 -> P(0.3333)
    // (3 - 3) = 0 -> P(0.0000)
    //
    // Of those three weighting alternatives, the third one has the smallest standard deviation of
    // the number of segments assigned per replica, so it corresponds to the weighting strategy
    // used for segment assignment. Empirical testing shows that for 20 segments and three
    // replicas, the standard deviation of each alternative is respectively 2.112, 1.496 and 0.853.
    //
    // This algorithm works as follows:
    // 1. Gather all segments and group them by Kafka partition, sorted by sequence number
    // 2. Ensure that for each partition, we have at most one partition in consuming state
    // 3. Sort all the segments to be used during assignment in ascending order of replicas
    // 4. For each segment to be used during assignment, pick a random replica, weighted by the
    //    number of segments assigned to each replica.

    // 1. Gather all segments and group them by Kafka partition, sorted by sequence number
    Map<String, SortedSet<SegmentName>> sortedSegmentsByKafkaPartition = new HashMap<String, SortedSet<SegmentName>>();
    for (String helixPartitionName : externalView.getPartitionSet()) {
        // Ignore segments that are not low level consumer segments
        if (!SegmentNameBuilder.Realtime.isRealtimeV2Name(helixPartitionName)) {
            continue;
        }

        final LLCSegmentName segmentName = new LLCSegmentName(helixPartitionName);
        String kafkaPartitionName = segmentName.getPartitionRange();
        SortedSet<SegmentName> segmentsForPartition = sortedSegmentsByKafkaPartition.get(kafkaPartitionName);

        // Create sorted set if necessary
        if (segmentsForPartition == null) {
            segmentsForPartition = new TreeSet<SegmentName>();
            sortedSegmentsByKafkaPartition.put(kafkaPartitionName, segmentsForPartition);
        }

        segmentsForPartition.add(segmentName);
    }

    // 2. Ensure that for each Kafka partition, we have at most one Helix partition (Pinot
    // segment) in consuming state
    Map<String, SegmentName> allowedSegmentInConsumingStateByKafkaPartition = new HashMap<String, SegmentName>();
    for (String kafkaPartition : sortedSegmentsByKafkaPartition.keySet()) {
        SortedSet<SegmentName> sortedSegmentsForKafkaPartition = sortedSegmentsByKafkaPartition
                .get(kafkaPartition);
        SegmentName lastAllowedSegmentInConsumingState = null;

        for (SegmentName segmentName : sortedSegmentsForKafkaPartition) {
            Map<String, String> helixPartitionState = externalView.getStateMap(segmentName.getSegmentName());
            boolean allInConsumingState = true;
            int replicasInConsumingState = 0;

            // Only keep the segment if all replicas have it in CONSUMING state
            for (String externalViewState : helixPartitionState.values()) {
                // Ignore ERROR state
                if (externalViewState.equalsIgnoreCase(
                        CommonConstants.Helix.StateModel.RealtimeSegmentOnlineOfflineStateModel.ERROR)) {
                    continue;
                }

                // Not all segments are in CONSUMING state, therefore don't consider the last
                // segment assignable to CONSUMING replicas
                if (externalViewState.equalsIgnoreCase(
                        CommonConstants.Helix.StateModel.RealtimeSegmentOnlineOfflineStateModel.ONLINE)) {
                    allInConsumingState = false;
                    break;
                }

                // Otherwise count the replica as being in CONSUMING state
                if (externalViewState.equalsIgnoreCase(
                        CommonConstants.Helix.StateModel.RealtimeSegmentOnlineOfflineStateModel.CONSUMING)) {
                    replicasInConsumingState++;
                }
            }

            // If all replicas have this segment in consuming state (and not all of them are in
            // ERROR state), then pick this segment to be the last allowed segment to be in
            // CONSUMING state
            if (allInConsumingState && 0 < replicasInConsumingState) {
                lastAllowedSegmentInConsumingState = segmentName;
                break;
            }
        }

        if (lastAllowedSegmentInConsumingState != null) {
            allowedSegmentInConsumingStateByKafkaPartition.put(kafkaPartition,
                    lastAllowedSegmentInConsumingState);
        }
    }

    // 3. Sort all the segments to be used during assignment in ascending order of replicas
    // PriorityQueue throws IllegalArgumentException when given a size of zero
    int segmentCount = Math.max(externalView.getPartitionSet().size(), 1);
    PriorityQueue<Pair<String, Set<String>>> segmentToReplicaSetQueue = new PriorityQueue<Pair<String, Set<String>>>(
            segmentCount, new Comparator<Pair<String, Set<String>>>() {
                @Override
                public int compare(Pair<String, Set<String>> firstPair, Pair<String, Set<String>> secondPair) {
                    return Integer.compare(firstPair.getRight().size(), secondPair.getRight().size());
                }
            });
    RoutingTableInstancePruner instancePruner = new RoutingTableInstancePruner(instanceConfigList);

    for (Map.Entry<String, SortedSet<SegmentName>> entry : sortedSegmentsByKafkaPartition.entrySet()) {
        String kafkaPartition = entry.getKey();
        SortedSet<SegmentName> segmentNames = entry.getValue();

        // The only segment name which is allowed to be in CONSUMING state or null
        SegmentName validConsumingSegment = allowedSegmentInConsumingStateByKafkaPartition.get(kafkaPartition);

        for (SegmentName segmentName : segmentNames) {
            Set<String> validReplicas = new HashSet<String>();
            Map<String, String> externalViewState = externalView.getStateMap(segmentName.getSegmentName());

            for (Map.Entry<String, String> instanceAndStateEntry : externalViewState.entrySet()) {
                String instance = instanceAndStateEntry.getKey();
                String state = instanceAndStateEntry.getValue();

                // Skip pruned replicas (shutting down or otherwise disabled)
                if (instancePruner.isInactive(instance)) {
                    continue;
                }

                // Replicas in ONLINE state are always allowed
                if (state.equalsIgnoreCase(
                        CommonConstants.Helix.StateModel.RealtimeSegmentOnlineOfflineStateModel.ONLINE)) {
                    validReplicas.add(instance);
                    continue;
                }

                // Replicas in CONSUMING state are only allowed on the last segment
                if (state.equalsIgnoreCase(
                        CommonConstants.Helix.StateModel.RealtimeSegmentOnlineOfflineStateModel.CONSUMING)
                        && segmentName.equals(validConsumingSegment)) {
                    validReplicas.add(instance);
                }
            }

            segmentToReplicaSetQueue
                    .add(new ImmutablePair<String, Set<String>>(segmentName.getSegmentName(), validReplicas));

            // If this segment is the segment allowed in CONSUMING state, don't process segments
            // after it in that Kafka partition
            if (segmentName.equals(validConsumingSegment)) {
                break;
            }
        }
    }

    // 4. For each segment to be used during assignment, pick a random replica, weighted by the
    // number of segments assigned to each replica.
    List<ServerToSegmentSetMap> routingTables = new ArrayList<ServerToSegmentSetMap>(routingTableCount);
    for (int i = 0; i < routingTableCount; ++i) {
        Map<String, Set<String>> instanceToSegmentSetMap = new HashMap<String, Set<String>>();
        PriorityQueue<Pair<String, Set<String>>> segmentToReplicaSetQueueCopy = new PriorityQueue<Pair<String, Set<String>>>(
                segmentToReplicaSetQueue);

        while (!segmentToReplicaSetQueueCopy.isEmpty()) {
            Pair<String, Set<String>> segmentAndValidReplicaSet = segmentToReplicaSetQueueCopy.poll();
            String segment = segmentAndValidReplicaSet.getKey();
            Set<String> validReplicaSet = segmentAndValidReplicaSet.getValue();

            String replica = pickWeightedRandomReplica(validReplicaSet, instanceToSegmentSetMap);
            if (replica != null) {
                Set<String> segmentsForInstance = instanceToSegmentSetMap.get(replica);

                if (segmentsForInstance == null) {
                    segmentsForInstance = new HashSet<String>();
                    instanceToSegmentSetMap.put(replica, segmentsForInstance);
                }

                segmentsForInstance.add(segment);
            }
        }

        routingTables.add(new ServerToSegmentSetMap(instanceToSegmentSetMap));
    }

    return routingTables;
}