Example usage for org.apache.commons.lang3.tuple Pair get

Introduction

On this page you can find usage examples for org.apache.commons.lang3.tuple Pair combined with get. Pair itself does not declare get(Object); in each example below, get is invoked on a Map or List held inside a Pair, typically retrieved via getLeft(), getRight(), getKey(), or getValue().

Prototype

V get(Object key);

Document

Returns the value to which the specified key is mapped, or null if this map contains no mapping for the key.
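
A minimal, self-contained sketch of the pattern shown throughout the examples below; the class and variable names here are illustrative only:

import java.util.HashMap;
import java.util.Map;

import org.apache.commons.lang3.tuple.Pair;

public class PairGetSketch {
    public static void main(String[] args) {
        Map<String, Integer> counts = new HashMap<>();
        counts.put("apples", 3);

        // Pair.of bundles a label together with the map in an immutable pair.
        Pair<String, Map<String, Integer>> labeled = Pair.of("inventory", counts);

        // get is invoked on the Map held in the pair, not on the Pair itself.
        Integer apples = labeled.getRight().get("apples");
        System.out.println(labeled.getLeft() + ": " + apples); // prints "inventory: 3"
    }
}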

Usage

From source file:act.installer.pubchem.PubchemTTLMerger.java

protected void merge(Pair<RocksDB, Map<COLUMN_FAMILIES, ColumnFamilyHandle>> dbAndHandles)
        throws RocksDBException, IOException, ClassNotFoundException {
    LOGGER.info("Beginning merge on Pubchem CID");
    RocksDB db = dbAndHandles.getLeft();
    ColumnFamilyHandle pubchemIdCFH = dbAndHandles.getRight().get(COLUMN_FAMILIES.CID_TO_HASHES);
    ColumnFamilyHandle meshCFH = dbAndHandles.getRight().get(COLUMN_FAMILIES.HASH_TO_MESH);
    ColumnFamilyHandle synonymCFH = dbAndHandles.getRight().get(COLUMN_FAMILIES.HASH_TO_SYNONYMS);
    ColumnFamilyHandle synonymTypeCFH = dbAndHandles.getRight().get(COLUMN_FAMILIES.HASH_TO_SYNONYM_TYPE);
    ColumnFamilyHandle mergeResultsCFH = dbAndHandles.getRight().get(COLUMN_FAMILIES.CID_TO_SYNONYMS);

    RocksIterator cidIterator = db.newIterator(pubchemIdCFH);
    // With help from https://github.com/facebook/rocksdb/wiki/Basic-Operations
    int processed = 0;
    for (cidIterator.seekToFirst(); cidIterator.isValid(); cidIterator.next()) {
        byte[] key = cidIterator.key();
        byte[] val = cidIterator.value();
        String pubchemId = new String(key, UTF8);
        List<String> hashes;
        try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(val))) {
            // We know all our values so far have been lists of strings, so this should be completely safe.
            hashes = (List<String>) ois.readObject();
        }

        PubchemSynonyms pubchemSynonyms = new PubchemSynonyms(pubchemId);

        /* The hash keys are based on synonym value, which we can manually compute with:
         *   $ echo -n  'dimethyltin(iv)' | md5
         * This means that MeSH ids are linked to synonyms rather than pubchem ids.  We need to look up each cid-linked
         * hash in both the MeSH and synonym collections, as the key may legitimately exist in both (and serve to link
         * cid to synonym and cid to MeSH). */
        for (String hash : hashes) {
            /* Note: these ids are not proper MeSH topic ids, but are internal MeSH ids found in the RDF and TTL
             * representations of the MeSH corpus.  You can find them in the MeSH .nt or .xml files, but they won't turn up
             * anything on the MeSH website. */
            List<String> meshIds = getValueAsObject(db, meshCFH, hash);
            if (meshIds != null) {
                pubchemSynonyms.addMeSHIds(meshIds);
            }

            List<String> synonyms = getValueAsObject(db, synonymCFH, hash);
            // There are, surprisingly, some dangling hashes in the DB!  Handle them gracefully.
            if (synonyms == null) {
                LOGGER.warn(
                        "Dangling synonym hash reference, adding empty list in place of value: cid = %s, hash = %s",
                        pubchemId, hash);
                synonyms = Collections.emptyList();
            }

            List<String> synonymTypeStrings = getValueAsObject(db, synonymTypeCFH, hash);
            Set<PC_SYNONYM_TYPES> synonymTypes = DEFAULT_SYNONYM_DATA_TYPES;
            if (synonymTypeStrings != null) {
                synonymTypes = synonymTypeStrings.stream().map(PC_SYNONYM_TYPES::valueOf)
                        .collect(Collectors.toSet());
            }

            if (synonymTypes.size() == 0) {
                LOGGER.warn("Found zero synonym types for synonym, defaulting to %s: %s %s, synonyms = %s",
                        PC_SYNONYM_TYPES.UNKNOWN.name(), pubchemId, hash, StringUtils.join(synonyms, ", "));
            }
            /* It turns out that *lots* of synonyms are duplicated as depositor-supplied names, so don't complain
             * about it here.  For performance's sake we might want to consider changing the data model of
             * PubchemSynonyms to reduce synonym string duplication, as the current model is pretty inefficient. */

            for (PC_SYNONYM_TYPES synonymType : synonymTypes) {
                for (String synonym : synonyms) {
                    // Let the PubchemSynonyms object do the de-duplication for us rather than reducing `synonyms` to a Set.
                    pubchemSynonyms.addSynonym(synonymType, synonym);
                }
            }
        }

        try (ByteArrayOutputStream bos = new ByteArrayOutputStream();
                ObjectOutputStream oo = new ObjectOutputStream(bos)) {
            oo.writeObject(pubchemSynonyms);
            oo.flush();

            db.put(mergeResultsCFH, key, bos.toByteArray());
        }

        processed++;
        if (processed % 100000 == 0) {
            LOGGER.info("Merged %d entries on Pubchem compound id", processed);
        }
    }
    LOGGER.info("Merge complete, %d entries processed", processed);
}
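
The comment in the loop above notes that each synonym hash can be reproduced on the command line with echo -n 'dimethyltin(iv)' | md5. A JDK-only sketch of the same computation (the class name is illustrative; the hashing code that originally built the index is not shown in this excerpt):

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;

public class SynonymHashSketch {
    public static void main(String[] args) throws Exception {
        // Digest the raw synonym bytes, exactly as `echo -n` pipes them (no trailing newline).
        byte[] digest = MessageDigest.getInstance("MD5")
                .digest("dimethyltin(iv)".getBytes(StandardCharsets.UTF_8));
        StringBuilder hex = new StringBuilder();
        for (byte b : digest) {
            hex.append(String.format("%02x", b));
        }
        // The hex string is the kind of key looked up in the MeSH and synonym column families.
        System.out.println(hex);
    }
}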

From source file:com.yahoo.pulsar.common.naming.NamespaceBundlesTest.java

@Test
public void testSplitBundleInTwo() throws Exception {
    final int NO_BUNDLES = 2;
    NamespaceName nsname = new NamespaceName("pulsar/global/ns1");
    DestinationName dn = DestinationName.get("persistent://pulsar/global/ns1/topic-1");
    NamespaceBundles bundles = factory.getBundles(nsname);
    NamespaceBundle bundle = bundles.findBundle(dn);
    // (1) split : [0x00000000,0xffffffff] => [0x00000000_0x7fffffff,0x7fffffff_0xffffffff]
    Pair<NamespaceBundles, List<NamespaceBundle>> splitBundles = factory.splitBundles(bundle, NO_BUNDLES);
    assertNotNull(splitBundles);
    assertBundleDivideInTwo(bundle, splitBundles.getRight(), NO_BUNDLES);

    // (2) split: [0x00000000,0x7fffffff] => [0x00000000_0x3fffffff,0x3fffffff_0x7fffffff],
    // [0x7fffffff,0xffffffff] => [0x7fffffff_0xbfffffff,0xbfffffff_0xffffffff]
    NamespaceBundleFactory utilityFactory = NamespaceBundleFactory.createFactory(Hashing.crc32());
    assertBundles(utilityFactory, nsname, bundle, splitBundles, NO_BUNDLES);

    // (3) split: [0x00000000,0x3fffffff] => [0x00000000_0x1fffffff,0x1fffffff_0x3fffffff],
    // [0x3fffffff,0x7fffffff] => [0x3fffffff_0x5fffffff,0x5fffffff_0x7fffffff]
    Pair<NamespaceBundles, List<NamespaceBundle>> splitChildBundles = splitBundlesUtilFactory(utilityFactory,
            nsname, splitBundles.getLeft(), splitBundles.getRight().get(0), NO_BUNDLES);
    assertBundles(utilityFactory, nsname, splitBundles.getRight().get(0), splitChildBundles, NO_BUNDLES);

    // (4) split: [0x7fffffff,0xbfffffff] => [0x7fffffff_0x9fffffff,0x9fffffff_0xbfffffff],
    // [0xbfffffff,0xffffffff] => [0xbfffffff_0xdfffffff,0xdfffffff_0xffffffff]
    splitChildBundles = splitBundlesUtilFactory(utilityFactory, nsname, splitBundles.getLeft(),
            splitBundles.getRight().get(1), NO_BUNDLES);
    assertBundles(utilityFactory, nsname, splitBundles.getRight().get(1), splitChildBundles, NO_BUNDLES);

}
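
The range comments in steps (1) through (4) each bisect a bundle's hash range at its midpoint. A small sketch of that arithmetic (the helper is illustrative, not part of the Pulsar API):

public class BundleSplitSketch {
    // Bisect [lower, upper] at the midpoint, as in the ranges annotated above.
    static long[] bisect(long lower, long upper) {
        long mid = lower + (upper - lower) / 2;
        return new long[] { lower, mid, upper };
    }

    public static void main(String[] args) {
        // Split (1): [0x00000000, 0xffffffff] -> boundaries 0x0, 0x7fffffff, 0xffffffff.
        for (long bound : bisect(0x00000000L, 0xffffffffL)) {
            System.out.println("0x" + Long.toHexString(bound));
        }
    }
}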

From source file:alfio.manager.AdminReservationManagerIntegrationTest.java

private void validateSuccess(boolean bounded, List<Integer> attendeesNr, Event event, String username,
        List<TicketCategory> existingCategories, Result<Pair<TicketReservation, List<Ticket>>> result,
        List<Attendee> allAttendees, int expectedEventSeats, int reservedTickets) {

    assertTrue(result.isSuccess());
    Pair<TicketReservation, List<Ticket>> data = result.getData();
    assertTrue(data.getRight().size() == attendeesNr.stream().mapToInt(i -> i).sum());
    assertNotNull(data.getLeft());
    Event modified = eventManager.getSingleEvent(event.getShortName(), username);
    assertEquals(expectedEventSeats, eventRepository.countExistingTickets(event.getId()).intValue());
    List<Ticket> tickets = ticketRepository.findPendingTicketsInCategories(
            existingCategories.stream().map(TicketCategory::getId).collect(toList()));
    assertEquals(attendeesNr.stream().mapToInt(i -> i).sum(), tickets.size() - reservedTickets);
    if (bounded) {
        final Iterator<Integer> iterator = attendeesNr.iterator();
        existingCategories.forEach(existingCategory -> {
            TicketCategory categoryModified = ticketCategoryRepository
                    .getByIdAndActive(existingCategory.getId(), event.getId());
            assertEquals(categoryModified.getMaxTickets(), iterator.next().intValue());
        });
    }
    for (int i = 0; i < tickets.size() - reservedTickets; i++) {
        Attendee attendee = allAttendees.get(i);
        if (!attendee.isEmpty()) {
            Ticket ticket = data.getRight().get(i);
            assertTrue(ticket.getAssigned());
            assertNotNull(ticket.getFullName());
            assertEquals(attendee.getFullName(), ticket.getFullName());
            assertEquals(attendee.getEmailAddress(), ticket.getEmail());
            assertEquals(Ticket.TicketStatus.PENDING, ticket.getStatus());
            assertEquals(data.getLeft().getId(), ticket.getTicketsReservationId());
        }
    }
    ticketCategoryRepository.findByEventId(modified.getId())
            .forEach(tc -> assertTrue(specialPriceRepository.findAllByCategoryId(tc.getId()).stream()
                    .allMatch(sp -> sp.getStatus() == SpecialPrice.Status.PENDING)));
    assertFalse(ticketRepository.findAllReservationsConfirmedButNotAssigned(event.getId())
            .contains(data.getLeft().getId()));
}

From source file:co.rsk.peg.BridgeSerializationUtilsTest.java

@Test
public void serializeDeserializeOneOffLockWhitelistAndDisableBlockHeight() {
    NetworkParameters btcParams = NetworkParameters.fromID(NetworkParameters.ID_REGTEST);
    Map<Address, LockWhitelistEntry> whitelist = new HashMap<>();
    Address address = BtcECKey.fromPrivate(BigInteger.valueOf(100L)).toAddress(btcParams);
    whitelist.put(address, new OneOffWhiteListEntry(address, Coin.COIN));

    LockWhitelist originalLockWhitelist = new LockWhitelist(whitelist, 0);
    byte[] serializedLockWhitelist = BridgeSerializationUtils
            .serializeOneOffLockWhitelist(Pair.of(originalLockWhitelist.getAll(OneOffWhiteListEntry.class),
                    originalLockWhitelist.getDisableBlockHeight()));
    Pair<HashMap<Address, OneOffWhiteListEntry>, Integer> deserializedLockWhitelist = BridgeSerializationUtils
            .deserializeOneOffLockWhitelistAndDisableBlockHeight(serializedLockWhitelist, btcParams);

    List<Address> originalAddresses = originalLockWhitelist.getAddresses();
    List<Address> deserializedAddresses = new ArrayList<>(deserializedLockWhitelist.getLeft().keySet());
    Assert.assertThat(originalAddresses, hasSize(1));
    Assert.assertThat(deserializedAddresses, hasSize(1));
    Assert.assertThat(originalAddresses, is(deserializedAddresses));
    Assert.assertThat(
            ((OneOffWhiteListEntry) originalLockWhitelist.get(originalAddresses.get(0))).maxTransferValue(),
            is((deserializedLockWhitelist.getLeft().get(deserializedAddresses.get(0))).maxTransferValue()));
}

From source file:act.installer.pubchem.PubchemTTLMergerTest.java

@Test
public void testValuesAreReadableAfterIndexIsClosedAndReopened() throws Exception {
    PubchemTTLMerger merger = new PubchemTTLMerger();
    Pair<RocksDB, Map<PubchemTTLMerger.COLUMN_FAMILIES, ColumnFamilyHandle>> dbAndHandles = PubchemTTLMerger
            .createNewRocksDB(tempDirPath.toFile());

    // Alas, we can't swap this with a JAR-safe stream as we must list the files.
    File testSynonymFileDir = new File(this.getClass().getResource(TEST_RDF_PATH).getFile());
    List<File> testFiles = Arrays.asList(testSynonymFileDir.listFiles());
    Collections.sort(testFiles);

    merger.buildIndex(dbAndHandles, testFiles);
    merger.merge(dbAndHandles);
    dbAndHandles.getLeft().close();

    dbAndHandles = merger.openExistingRocksDB(tempDirPath.toFile());

    Map<String, PubchemSynonyms> expected = new HashMap<String, PubchemSynonyms>() {
        {
            put("CID01",
                    new PubchemSynonyms("CID01", new HashMap<PubchemTTLMerger.PC_SYNONYM_TYPES, Set<String>>() {
                        {
                            put(PubchemTTLMerger.PC_SYNONYM_TYPES.TRIVIAL_NAME,
                                    new HashSet<>(Arrays.asList("test1")));
                        }
                    }, Arrays.asList("M01")));
            put("CID02",
                    new PubchemSynonyms("CID02", new HashMap<PubchemTTLMerger.PC_SYNONYM_TYPES, Set<String>>() {
                        {
                            put(PubchemTTLMerger.PC_SYNONYM_TYPES.UNKNOWN,
                                    new HashSet<>(Arrays.asList("test2")));
                            put(PubchemTTLMerger.PC_SYNONYM_TYPES.INTL_NONPROPRIETARY_NAME,
                                    new HashSet<>(Arrays.asList("TEST3", "test3")));
                        }
                    }, Arrays.asList("M02")));
            put("CID03",
                    new PubchemSynonyms("CID03", new HashMap<PubchemTTLMerger.PC_SYNONYM_TYPES, Set<String>>() {
                        {
                            put(PubchemTTLMerger.PC_SYNONYM_TYPES.INTL_NONPROPRIETARY_NAME,
                                    new HashSet<>(Arrays.asList("TEST3", "test3")));
                        }
                    }, Collections.emptyList()));
        }
    };

    RocksIterator iterator = dbAndHandles.getLeft()
            .newIterator(dbAndHandles.getRight().get(PubchemTTLMerger.COLUMN_FAMILIES.CID_TO_SYNONYMS));
    for (iterator.seekToFirst(); iterator.isValid(); iterator.next()) {
        assertNotNull("Iterator key should never be null", iterator.key());
        assertNotNull("Iterator value should never be null", iterator.value());

        String key = new String(iterator.key());
        PubchemSynonyms synonyms;
        try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(iterator.value()))) {
            // Values in this column family are serialized PubchemSynonyms objects, so this cast should be safe.
            synonyms = (PubchemSynonyms) ois.readObject();
        }
        assertEquals(String.format("Pubchem synonyms for %s match expected", key), expected.get(key), synonyms);
    }
}

From source file:org.apache.bookkeeper.statelib.impl.kv.RocksdbKVStore.java

protected void openRocksdb(StateStoreSpec spec) throws StateStoreException {

    // initialize the db options

    final BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
    tableConfig.setBlockCacheSize(BLOCK_CACHE_SIZE);
    tableConfig.setBlockSize(BLOCK_SIZE);
    tableConfig.setChecksumType(DEFAULT_CHECKSUM_TYPE);

    dbOpts = new DBOptions();
    dbOpts.setCreateIfMissing(true);
    dbOpts.setErrorIfExists(false);
    dbOpts.setInfoLogLevel(DEFAULT_LOG_LEVEL);
    dbOpts.setIncreaseParallelism(DEFAULT_PARALLELISM);
    dbOpts.setCreateMissingColumnFamilies(true);

    cfOpts = new ColumnFamilyOptions();
    cfOpts.setTableFormatConfig(tableConfig);
    cfOpts.setWriteBufferSize(WRITE_BUFFER_SIZE);
    cfOpts.setCompressionType(DEFAULT_COMPRESSION_TYPE);
    cfOpts.setCompactionStyle(DEFAULT_COMPACTION_STYLE);
    cfOpts.setMaxWriteBufferNumber(MAX_WRITE_BUFFERS);

    // initialize the write options

    writeOpts = new WriteOptions();
    writeOpts.setDisableWAL(true); // disable wal, since the source of truth will be on distributedlog

    // initialize the flush options

    flushOpts = new FlushOptions();
    flushOpts.setWaitForFlush(true);

    // open the rocksdb

    this.dbDir = spec.getLocalStateStoreDir();
    Pair<RocksDB, List<ColumnFamilyHandle>> dbPair = openLocalDB(dbDir, dbOpts, cfOpts);
    this.db = dbPair.getLeft();
    this.metaCfHandle = dbPair.getRight().get(0);
    this.dataCfHandle = dbPair.getRight().get(1);
}
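
openLocalDB is not shown in this excerpt. A plausible sketch, assuming the standard RocksJava column-family API: RocksDB.open fills the handle list in descriptor order, which is why index 0 above is the metadata family and index 1 the data family. The class name and the "data" family name are assumptions, not the actual BookKeeper implementation.

import java.io.File;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.commons.lang3.tuple.Pair;
import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.ColumnFamilyOptions;
import org.rocksdb.DBOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class OpenLocalDBSketch {
    static Pair<RocksDB, List<ColumnFamilyHandle>> openLocalDB(File dir, DBOptions dbOpts,
            ColumnFamilyOptions cfOpts) throws RocksDBException {
        List<ColumnFamilyDescriptor> descriptors = Arrays.asList(
                new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts), // index 0: metadata
                new ColumnFamilyDescriptor("data".getBytes(StandardCharsets.UTF_8), cfOpts)); // index 1: data
        List<ColumnFamilyHandle> handles = new ArrayList<>(descriptors.size());
        // RocksDB.open populates `handles` in the same order as `descriptors`.
        RocksDB db = RocksDB.open(dbOpts, dir.getAbsolutePath(), descriptors, handles);
        return Pair.of(db, handles);
    }
}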

From source file:org.apache.samza.sql.serializers.TestSamzaSqlRelMessageSerde.java

@Test
public void testNestedRecordConversion() {
    Map<String, String> props = new HashMap<>();
    SystemStream ss1 = new SystemStream("test", "nestedRecord");
    props.put(String.format(ConfigBasedAvroRelSchemaProviderFactory.CFG_SOURCE_SCHEMA, ss1.getSystem(),
            ss1.getStream()), Profile.SCHEMA$.toString());
    ConfigBasedAvroRelSchemaProviderFactory factory = new ConfigBasedAvroRelSchemaProviderFactory();
    AvroRelSchemaProvider nestedRecordSchemaProvider = (AvroRelSchemaProvider) factory.create(ss1,
            new MapConfig(props));
    AvroRelConverter nestedRecordAvroRelConverter = new AvroRelConverter(ss1, nestedRecordSchemaProvider,
            new MapConfig());

    Pair<SamzaSqlRelMessage, GenericData.Record> messageRecordPair = createNestedSamzaSqlRelMessage(
            nestedRecordAvroRelConverter);
    SamzaSqlRelMessageSerde serde = (SamzaSqlRelMessageSerde) new SamzaSqlRelMessageSerdeFactory()
            .getSerde(null, null);
    SamzaSqlRelMessage resultMsg = serde.fromBytes(serde.toBytes(messageRecordPair.getKey()));
    KV<Object, Object> samzaMessage = nestedRecordAvroRelConverter.convertToSamzaMessage(resultMsg);
    GenericRecord recordPostConversion = (GenericRecord) samzaMessage.getValue();

    for (Schema.Field field : Profile.SCHEMA$.getFields()) {
        // equals() on GenericRecord does the nested record equality check as well.
        Assert.assertEquals(messageRecordPair.getValue().get(field.name()),
                recordPostConversion.get(field.name()));
    }
}

From source file:org.apache.samza.sql.serializers.TestSamzaSqlRelRecordSerde.java

@Test
public void testNestedRecordConversion() {
    Map<String, String> props = new HashMap<>();
    SystemStream ss1 = new SystemStream("test", "nestedRecord");
    props.put(String.format(ConfigBasedAvroRelSchemaProviderFactory.CFG_SOURCE_SCHEMA, ss1.getSystem(),
            ss1.getStream()), Profile.SCHEMA$.toString());
    ConfigBasedAvroRelSchemaProviderFactory factory = new ConfigBasedAvroRelSchemaProviderFactory();
    AvroRelSchemaProvider nestedRecordSchemaProvider = (AvroRelSchemaProvider) factory.create(ss1,
            new MapConfig(props));
    AvroRelConverter nestedRecordAvroRelConverter = new AvroRelConverter(ss1, nestedRecordSchemaProvider,
            new MapConfig());

    Pair<SamzaSqlRelMessage, GenericData.Record> messageRecordPair = TestSamzaSqlRelMessageSerde
            .createNestedSamzaSqlRelMessage(nestedRecordAvroRelConverter);
    SamzaSqlRelRecordSerdeFactory.SamzaSqlRelRecordSerde serde = (SamzaSqlRelRecordSerdeFactory.SamzaSqlRelRecordSerde) new SamzaSqlRelRecordSerdeFactory()
            .getSerde(null, null);
    SamzaSqlRelRecord resultRecord = serde
            .fromBytes(serde.toBytes(messageRecordPair.getKey().getSamzaSqlRelRecord()));
    GenericData.Record recordPostConversion = (GenericData.Record) nestedRecordAvroRelConverter
            .convertToAvroObject(resultRecord, Profile.SCHEMA$);

    for (Schema.Field field : Profile.SCHEMA$.getFields()) {
        // equals() on GenericRecord does the nested record equality check as well.
        Assert.assertEquals(messageRecordPair.getValue().get(field.name()),
                recordPostConversion.get(field.name()));
    }
}

From source file:org.grouplens.lenskit.basic.TopNItemRecommender.java

/**
 * Pick the top {@var n} items from a score vector.
 *
 * @param n      The number of items to recommend.
 * @param scores The scored item vector.
 * @return The top {@var n} items from {@var scores}, in descending
 *         order of score.
 */
protected List<ScoredId> recommend(int n, SparseVector scores) {
    if (scores.isEmpty()) {
        return Collections.emptyList();
    }

    if (n < 0) {
        n = scores.size();
    }

    ScoredItemAccumulator accum = new TopNScoredItemAccumulator(n);
    for (VectorEntry pred : scores.fast()) {
        final double v = pred.getValue();
        accum.put(pred.getKey(), v);
    }

    List<ScoredId> results = accum.finish();
    if (!scores.getChannelSymbols().isEmpty()) {
        ScoredIdListBuilder builder = ScoredIds.newListBuilder(results.size());
        List<Pair<Symbol, SparseVector>> cvs = Lists.newArrayList();
        List<Pair<TypedSymbol<?>, Long2ObjectMap<?>>> channels = Lists.newArrayList();
        for (Symbol sym : scores.getChannelVectorSymbols()) {
            builder.addChannel(sym, Double.NaN);
            cvs.add(Pair.of(sym, scores.getChannelVector(sym)));
        }
        for (TypedSymbol<?> sym : scores.getChannelSymbols()) {
            if (!sym.getType().equals(Double.class)) {
                builder.addChannel(sym);
                channels.add((Pair) Pair.of(sym, scores.getChannel(sym)));
            }
        }
        for (ScoredId id : CollectionUtils.fast(results)) {
            ScoredIdBuilder copy = ScoredIds.copyBuilder(id);
            for (Pair<Symbol, SparseVector> pair : cvs) {
                if (pair.getRight().containsKey(id.getId())) {
                    copy.addChannel(pair.getLeft(), pair.getRight().get(id.getId()));
                }
            }
            for (Pair<TypedSymbol<?>, Long2ObjectMap<?>> pair : channels) {
                if (pair.getRight().containsKey(id.getId())) {
                    copy.addChannel((TypedSymbol) pair.getLeft(), pair.getRight().get(id.getId()));
                }
            }
            builder.add(copy.build());
        }
        return builder.finish();
    } else {
        return results;
    }
}

From source file:org.jamocha.rating.fraj.RatingProvider.java

private double costPosInsVarII(final StatisticsProvider statisticsProvider,
        final Set<PathFilterList> inputComponent,
        final List<Pair<List<Set<PathFilterList>>, List<PathFilter>>> joinOrder,
        final Set<Set<PathFilterList>> regularComponents,
        final Map<Path, Set<PathFilterList>> pathToPreNetworkComponents) {
    final double[] jsfs = statisticsProvider.getAllJSFs(inputComponent, joinOrder, pathToPreNetworkComponents);
    assert jsfs.length == joinOrder.size();
    final Builder costs = DoubleStream.builder();
    double size = 1.0;
    int i = 0;
    for (final Pair<List<Set<PathFilterList>>, List<PathFilter>> pair : joinOrder) {
        final List<Set<PathFilterList>> components = pair.getLeft();
        final Set<PathFilterList> component = components.get(0);
        if (regularComponents.contains(component)) {
            if (component.size() > 1) {
                final int lastIndex = components.size() - 1;
                for (int ci = 0; ci < lastIndex; ++ci) {
                    costs.accept(
                            jc(statisticsProvider, 1.0, statisticsProvider.getData(components.get(ci)), size));
                }
                costs.accept(jc(statisticsProvider, jsfs[i],
                        statisticsProvider.getData(components.get(lastIndex)), size));
            } else {
                costs.accept(jc(statisticsProvider, jsfs[i], statisticsProvider.getData(component), size));
            }
            size *= statisticsProvider.getData(component).getRowCount() * jsfs[i];
        } else {
            costs.accept(jc(
                    statisticsProvider, statisticsProvider.getJSF(regularComponents, component,
                            pair.getRight().get(0), pathToPreNetworkComponents),
                    statisticsProvider.getData(component), size));
        }
        ++i;
    }
    return costs.add(size).build().sum();
}