Example usage for com.google.common.collect Multimap values

List of usage examples for com.google.common.collect Multimap values

Introduction

On this page you can find example usage for com.google.common.collect Multimap values.

Prototype

Collection<V> values();

Source Link

Document

Returns a view collection containing the value from each key-value pair contained in this multimap, without collapsing duplicates (so values().size() == size()).
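
A minimal sketch of the behavior described above (class and variable names are illustrative, not taken from any of the projects below): values() reports every key-value pair without collapsing duplicates, and the returned collection is a live view of the multimap.

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;

public class MultimapValuesDemo {
    public static void main(String[] args) {
        Multimap<String, Integer> scores = ArrayListMultimap.create();
        scores.put("alice", 1);
        scores.put("alice", 1); // duplicate value under the same key is kept
        scores.put("bob", 2);

        // values() does not collapse duplicates, so its size equals size()
        System.out.println(scores.values().size()); // 3
        System.out.println(scores.size());          // 3
        System.out.println(scores.values());        // e.g. [1, 1, 2] (iteration order depends on the implementation)

        // values() is a view: removing an element also removes the pair from the multimap
        scores.values().remove(1);
        System.out.println(scores.size());          // 2
    }
}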

Usage

From source file:net.shibboleth.idp.saml.attribute.mapping.AbstractSAMLAttributeDesignatorsMapper.java

/**
 * Constructor to create the mapping from an existing resolver.
 * <p>This code inverts the {@link AttributeEncoder} (internal attribute -> SAML Attributes) into
 * {@link AttributeMapper} (SAML [AttributeDesignators] -> internal [Requested] Attributes) to generate the
 * {@link AbstractSAMLAttributeDesignatorMapper} (with no
 * {@link AbstractSAMLAttributeDesignatorMapper#getAttributeIds()}). These are accumulated into a {@link Multimap},
 * where the key is the {@link AbstractSAMLAttributeDesignatorMapper} and the values are the (IdP) attribute names.
 * The collection of {@link AttributeMapper}s can then be extracted from the map, and the appropriate internal names
 * added (these being the values of the {@link Multimap}).</p>
 * 
 * @param resolver the resolver
 * @param id the ID
 * @param mapperFactory factory to generate new mappers of the correct type.
 */
public AbstractSAMLAttributeDesignatorsMapper(@Nonnull final AttributeResolver resolver,
        @Nonnull @NotEmpty final String id,
        @Nonnull final Supplier<AbstractSAMLAttributeDesignatorMapper<OutType>> mapperFactory) {

    setId(id);
    mappers = Collections.emptyList();

    final Multimap<AbstractSAMLAttributeDesignatorMapper<OutType>, String> theMappers = HashMultimap.create();

    for (final AttributeDefinition attributeDef : resolver.getAttributeDefinitions().values()) {
        for (final AttributeEncoder encoder : attributeDef.getAttributeEncoders()) {
            if (encoder instanceof AttributeDesignatorMapperProcessor) {
                // There is an appropriate reverse mapper.
                final AttributeDesignatorMapperProcessor factory = (AttributeDesignatorMapperProcessor) encoder;
                final AbstractSAMLAttributeDesignatorMapper<OutType> mapper = mapperFactory.get();
                factory.populateAttributeMapper(mapper);

                theMappers.put(mapper, attributeDef.getId());
            }
        }
    }

    mappers = new ArrayList<>(theMappers.values().size());

    for (final Entry<AbstractSAMLAttributeDesignatorMapper<OutType>, Collection<String>> entry : theMappers
            .asMap().entrySet()) {

        final AbstractSAMLAttributeDesignatorMapper<OutType> mapper = entry.getKey();
        mapper.setAttributeIds(new ArrayList<>(entry.getValue()));
        mappers.add(mapper);
    }
}

From source file:org.codeqinvest.codechanges.scm.svn.DefaultSvnRevisionsRetriever.java

/**
 * {@inheritDoc}
 */
@Override
@Cacheable("svnRevisions")
public DailyRevisions retrieveRevisions(ScmConnectionSettings connectionSettings, LocalDate day)
        throws SVNException {
    log.info("Retrieve revisions on day {} for {}", day, connectionSettings);
    final SVNRepository repository = SvnRepositoryFactory.create(connectionSettings);
    final LocalDateTime startTime = day.toDateTimeAtStartOfDay().toLocalDateTime();
    final long startRevision = repository.getDatedRevision(startTime.toDate());
    final long endRevision = repository.getDatedRevision(startTime.withTime(23, 59, 59, 999).toDate());

    final Multimap<String, SvnFileRevision> revisions = ArrayListMultimap.create();
    repository.log(null, startRevision, endRevision, true, true, new ISVNLogEntryHandler() {

        @Override
        public void handleLogEntry(SVNLogEntry logEntry) throws SVNException {
            for (SVNLogEntryPath logEntryPath : logEntry.getChangedPaths().values()) {
                if (logEntryPath.getCopyPath() != null) {
                    revisions.put(logEntryPath.getPath(), new SvnFileRevision(logEntry.getRevision(),
                            logEntryPath.getCopyPath(), logEntryPath.getPath()));
                } else {
                    revisions.put(logEntryPath.getPath(), new SvnFileRevision(logEntry.getRevision(),
                            logEntryPath.getPath(), logEntryPath.getPath()));
                }
            }
        }
    });

    log.info("Found {} changes for day {} with connection {}", revisions.values().size(), day,
            connectionSettings);
    return new DailyRevisions(day, revisions);
}

From source file:uk.ac.ebi.mnb.dialog.tools.RemoveWorstStructures.java

/**
 * {@inheritDoc}
 */
@Override
public void actionPerformed(ActionEvent e) {

    Collection<Metabolite> metabolites = getSelection().get(Metabolite.class);

    CompoundEdit edit = new CompoundEdit();

    for (Metabolite m : metabolites) {

        if (m.hasAnnotation(MolecularFormula.class) && m.hasAnnotation(Charge.class)) {

            Multimap<Category, ChemicalStructure> map = HashMultimap.create();

            Charge charge = m.getAnnotations(Charge.class).iterator().next();
            Collection<MolecularFormula> formulas = m.getAnnotations(MolecularFormula.class);
            Set<ChemicalStructure> structures = m.getAnnotationsExtending(ChemicalStructure.class);
            Category best = Category.UNKNOWN;

            for (ChemicalStructure structure : structures) {

                Category validity = StructuralValidity.getValidity(formulas, structure, charge).getCategory();

                map.put(validity, structure);

                if (validity.ordinal() > best.ordinal()) {
                    best = validity;
                }
            }

            if (best == Category.CORRECT) {
                map.removeAll(Category.CORRECT);
                Collection<Annotation> worse = new ArrayList<Annotation>(map.values());
                edit.addEdit(new RemoveAnnotationEdit(m, worse));
                for (Annotation annotation : worse)
                    m.removeAnnotation(annotation);
            } else if (best == Category.WARNING) {
                map.removeAll(Category.WARNING);
                Collection<Annotation> worse = new ArrayList<Annotation>(map.values());
                edit.addEdit(new RemoveAnnotationEdit(m, worse));
                for (Annotation annotation : worse)
                    m.removeAnnotation(annotation);
            }
        }
    }

    edit.end();

    getController().getUndoManager().addEdit(edit);
    update(getSelection());

}

From source file:com.candy.middle.FinReportDownload.java

private void getFundaFromNet(String company) {
    // ("http://www.sec.gov/ +  Archives/edgar/data/51143/000104746914001302/0001047469-14-001302-index.htm");
    List<String> filingLst = getCompany10KFilingList(company);
    double beforePg = 0.2;
    double afterPg = 0.8;
    updateProgress(beforePg);
    if (filingLst != null) {
        // create folder if not exist
        String folder = System.getProperty("user.dir") + "/secfiles";
        Path path = FileSystems.getDefault().getPath(folder);
        if (Files.notExists(path)) {
            File fileDir = new File(folder);
            fileDir.mkdirs();
        }
        // we have filing url, need to check each filing
        int numFiling = filingLst.size();
        int currFiling = 0;
        for (String filingUrl : filingLst) {
            currFiling++;
            updateProgress(beforePg + (afterPg - beforePg) * currFiling / numFiling);
            // key = filename, value = http relative path
            Map<String, String> xbrlLst = getXbrlFileSet(filingUrl);
            // is it in DB?
            if (xbrlLst != null && !xbrlLst.isEmpty()) {
                // test any file
                String fileName = xbrlLst.keySet().iterator().next();
                String fnStr = verifyXbrlFile(fileName);
                if (fnStr != null) {
                    if (isRecInDBbyFileName(company, fileName)) {
                        System.out.println("WARN - record exist " + fileName);
                        continue;
                    }
                } else {
                    continue; // invalid filename
                }

            } else {
                System.out.println("ERROR - SEC filling url contains empty fileset " + filingUrl);
                continue;
            }

            // not in db, download from net                
            for (Map.Entry pair : xbrlLst.entrySet()) {
                String fullPath = folder + "/" + pair.getKey();
                if (new File(fullPath).canRead()) {
                    System.out.println("INFO - the file " + pair.getValue() + " exist");
                } else {
                    // download it
                    if (!httpDownload("http://www.sec.gov" + pair.getValue(), fullPath)) {
                        System.out.println("ERROR - unable download " + pair.getValue());
                    } else {
                        System.out.println("DONE - downloaded to " + fullPath);
                    }
                }
            }
            // verify xbrl files
            xset.reset();
            Multimap<REPORT_TYPE, Report> reports = null;
            boolean validXbrlSet = false;
            for (Map.Entry pair : xbrlLst.entrySet()) {
                String fullPath = folder + "/" + pair.getKey();
                if (xset.verifyXbrlFile(fullPath)) {
                    reports = xbrlParser.parse(xset);
                    // save all xbrl mapping to hashmap
                    for (Report rp : reports.values()) {
                        for (XbrlParser.Report.IdNameValue item : rp.getIdNameValues()) {
                            xbrlMapping.put(item.getIdHref(), item.getDisplay());
                        }
                    }
                    writeToDB(company, reports, xset.getDateStr());
                    validXbrlSet = true;
                    break;
                }
            }
            if (!validXbrlSet) {
                for (Map.Entry pair : xbrlLst.entrySet()) {
                    System.out.println("ERROR - the xbrl file " + pair.getKey() + " is invalid");
                }
            }
        } // end for
          // write xbrl2display to DB
        xbrl2DisplayProc.writeMultiRecords(xbrlMapping);
    }
}

From source file:net.shibboleth.idp.saml.impl.attribute.mapping.RequestedAttributesMapper.java

/**
 * Constructor to create the mapping from an existing resolver. <br/>
 * This code inverts the {@link AttributeEncoder} (internal attribute -> SAML Attributes) into
 * {@link AttributeMapper} (SAML [RequestedAttributes] -> internal [Requested] Attributes) to generate the
 * {@link AbstractSAMLAttributeMapper} (with no
 * {@link AbstractSAMLAttributeMapper#getAttributeIds()}). These are accumulated into a {@link Multimap}, where the
 * key is the {@link AbstractSAMLAttributeMapper} and the values are the (IdP) attribute names. The collection of
 * {@link AttributeMapper}s can then be extracted from the map, and the appropriate internal names added (these
 * being the values of the {@link Multimap}).
 * 
 * @param resolver The resolver
 */
public RequestedAttributesMapper(AttributeResolver resolver) {

    super();
    setId(resolver.getId());

    final Multimap<AbstractSAMLAttributeMapper<RequestedAttribute, IdPRequestedAttribute>, String> theMappers;

    theMappers = HashMultimap.create();

    for (AttributeDefinition attributeDef : resolver.getAttributeDefinitions().values()) {
        for (AttributeEncoder encode : attributeDef.getAttributeEncoders()) {
            if (encode instanceof AttributeMapperFactory) {
                // There is an appropriate reverse mapper.
                AttributeMapperFactory factory = (AttributeMapperFactory) encode;
                AbstractSAMLAttributeMapper<RequestedAttribute, IdPRequestedAttribute> mapper = factory
                        .getRequestedMapper();

                theMappers.put(mapper, attributeDef.getId());
            }
        }
    }

    final List<AttributeMapper<RequestedAttribute, IdPRequestedAttribute>> mappers = new ArrayList<AttributeMapper<RequestedAttribute, IdPRequestedAttribute>>(
            theMappers.values().size());

    for (Entry<AbstractSAMLAttributeMapper<RequestedAttribute, IdPRequestedAttribute>, Collection<String>> entry : theMappers
            .asMap().entrySet()) {

        AbstractSAMLAttributeMapper<RequestedAttribute, IdPRequestedAttribute> mapper = entry.getKey();
        mapper.setAttributeIds(new ArrayList<String>(entry.getValue()));
        mappers.add(mapper);
    }

    setMappers(mappers);
}

From source file:ca.sqlpower.dao.SPSessionPersister.java

public static void undoForSession(SPObject root, List<PersistedSPObject> creations,
        Multimap<String, PersistedSPOProperty> properties, List<RemovedObjectEntry> removals,
        SessionPersisterSuperConverter converter) throws SPPersistenceException {

    List<PersistedObjectEntry> c = new LinkedList<PersistedObjectEntry>();
    List<PersistedPropertiesEntry> p = new LinkedList<PersistedPropertiesEntry>();
    LinkedHashMap<String, RemovedObjectEntry> r = new LinkedHashMap<String, RemovedObjectEntry>();

    for (PersistedSPObject pso : creations) {
        c.add(new PersistedObjectEntry(pso.getParentUUID(), pso.getUUID()));
    }
    for (PersistedSPOProperty property : properties.values()) {
        p.add(new PersistedPropertiesEntry(property.getUUID(), property.getPropertyName(),
                property.getDataType(), property.getOldValue()));
    }
    for (RemovedObjectEntry removal : removals) {
        r.put(removal.getRemovedChild().getUUID(), removal);
    }

    undoForSession(root, c, p, r, converter);
}

From source file:com.google.eclipse.protobuf.scoping.ProtobufImportScope.java

@Override
protected Iterable<IEObjectDescription> getAliasedElements(Iterable<IEObjectDescription> candidates) {
    Multimap<QualifiedName, IEObjectDescription> keyToDescription = LinkedHashMultimap.create();
    Multimap<QualifiedName, ImportNormalizer> keyToNormalizer = HashMultimap.create();

    for (IEObjectDescription imported : candidates) {
        QualifiedName fullyQualifiedName = imported.getName();
        for (ImportNormalizer normalizer : normalizers) {
            QualifiedName alias = normalizer.deresolve(fullyQualifiedName);
            if (alias != null) {
                QualifiedName key = alias;
                if (isIgnoreCase()) {
                    key = key.toLowerCase();
                }
                keyToDescription.put(key, new AliasedEObjectDescription(alias, imported));
                keyToNormalizer.put(key, normalizer);
            }
        }
    }
    for (QualifiedName name : keyToNormalizer.keySet()) {
        if (keyToNormalizer.get(name).size() > 1)
            keyToDescription.removeAll(name);
    }
    return keyToDescription.values();
}

From source file:brooklyn.entity.nosql.cassandra.CassandraDatacenterImpl.java

@Override
public void init() {
    super.init();

    /*
     * subscribe to hostname, and keep an accurate set of current seeds in a sensor;
     * then at nodes we set the initial seeds to be the current seeds when ready (non-empty)
     */
    subscribeToMembers(this, Attributes.HOSTNAME, new SensorEventListener<String>() {
        @Override
        public void onEvent(SensorEvent<String> event) {
            seedTracker.onHostnameChanged(event.getSource(), event.getValue());
        }
    });
    subscribe(this, DynamicGroup.MEMBER_REMOVED, new SensorEventListener<Entity>() {
        @Override
        public void onEvent(SensorEvent<Entity> event) {
            seedTracker.onMemberRemoved(event.getValue());
        }
    });
    subscribeToMembers(this, Attributes.SERVICE_UP, new SensorEventListener<Boolean>() {
        @Override
        public void onEvent(SensorEvent<Boolean> event) {
            seedTracker.onServiceUpChanged(event.getSource(), event.getValue());
        }
    });
    subscribeToMembers(this, Attributes.SERVICE_STATE_ACTUAL, new SensorEventListener<Lifecycle>() {
        @Override
        public void onEvent(SensorEvent<Lifecycle> event) {
            // trigger a recomputation also when lifecycle state changes, 
            // because it might not have ruled a seed as inviable when service up went true 
            // because service state was not yet running
            seedTracker.onServiceUpChanged(event.getSource(), Lifecycle.RUNNING == event.getValue());
        }
    });

    // Track the datacenters for this cluster
    subscribeToMembers(this, CassandraNode.DATACENTER_NAME, new SensorEventListener<String>() {
        @Override
        public void onEvent(SensorEvent<String> event) {
            Entity member = event.getSource();
            String dcName = event.getValue();
            if (dcName != null) {
                Multimap<String, Entity> datacenterUsage = getAttribute(DATACENTER_USAGE);
                Multimap<String, Entity> mutableDatacenterUsage = (datacenterUsage == null)
                        ? LinkedHashMultimap.<String, Entity>create()
                        : LinkedHashMultimap.create(datacenterUsage);
                Optional<String> oldDcName = getKeyOfVal(mutableDatacenterUsage, member);
                if (!(oldDcName.isPresent() && dcName.equals(oldDcName.get()))) {
                    mutableDatacenterUsage.values().remove(member);
                    mutableDatacenterUsage.put(dcName, member);
                    setAttribute(DATACENTER_USAGE, mutableDatacenterUsage);
                    setAttribute(DATACENTERS, Sets.newLinkedHashSet(mutableDatacenterUsage.keySet()));
                }
            }
        }

        private <K, V> Optional<K> getKeyOfVal(Multimap<K, V> map, V val) {
            for (Map.Entry<K, V> entry : map.entries()) {
                if (Objects.equal(val, entry.getValue())) {
                    return Optional.of(entry.getKey());
                }
            }
            return Optional.absent();
        }
    });
    subscribe(this, DynamicGroup.MEMBER_REMOVED, new SensorEventListener<Entity>() {
        @Override
        public void onEvent(SensorEvent<Entity> event) {
            Entity entity = event.getSource();
            Multimap<String, Entity> datacenterUsage = getAttribute(DATACENTER_USAGE);
            if (datacenterUsage != null && datacenterUsage.containsValue(entity)) {
                Multimap<String, Entity> mutableDatacenterUsage = LinkedHashMultimap.create(datacenterUsage);
                mutableDatacenterUsage.values().remove(entity);
                setAttribute(DATACENTER_USAGE, mutableDatacenterUsage);
                setAttribute(DATACENTERS, Sets.newLinkedHashSet(mutableDatacenterUsage.keySet()));
            }
        }
    });

    getMutableEntityType().addEffector(EXECUTE_SCRIPT, new EffectorBody<String>() {
        @Override
        public String call(ConfigBag parameters) {
            return executeScript((String) parameters.getStringKey("commands"));
        }
    });
}

From source file:org.apache.brooklyn.entity.nosql.cassandra.CassandraDatacenterImpl.java

@Override
public void init() {
    super.init();

    /*
     * subscribe to hostname, and keep an accurate set of current seeds in a sensor;
     * then at nodes we set the initial seeds to be the current seeds when ready (non-empty)
     */
    subscriptions().subscribeToMembers(this, Attributes.HOSTNAME, new SensorEventListener<String>() {
        @Override
        public void onEvent(SensorEvent<String> event) {
            seedTracker.onHostnameChanged(event.getSource(), event.getValue());
        }
    });
    subscriptions().subscribe(this, DynamicGroup.MEMBER_REMOVED, new SensorEventListener<Entity>() {
        @Override
        public void onEvent(SensorEvent<Entity> event) {
            seedTracker.onMemberRemoved(event.getValue());
        }
    });
    subscriptions().subscribeToMembers(this, Attributes.SERVICE_UP, new SensorEventListener<Boolean>() {
        @Override
        public void onEvent(SensorEvent<Boolean> event) {
            seedTracker.onServiceUpChanged(event.getSource(), event.getValue());
        }
    });
    subscriptions().subscribeToMembers(this, Attributes.SERVICE_STATE_ACTUAL,
            new SensorEventListener<Lifecycle>() {
                @Override
                public void onEvent(SensorEvent<Lifecycle> event) {
                    // trigger a recomputation also when lifecycle state changes, 
                    // because it might not have ruled a seed as inviable when service up went true 
                    // because service state was not yet running
                    seedTracker.onServiceUpChanged(event.getSource(), Lifecycle.RUNNING == event.getValue());
                }
            });

    // Track the datacenters for this cluster
    subscriptions().subscribeToMembers(this, CassandraNode.DATACENTER_NAME, new SensorEventListener<String>() {
        @Override
        public void onEvent(SensorEvent<String> event) {
            Entity member = event.getSource();
            String dcName = event.getValue();
            if (dcName != null) {
                Multimap<String, Entity> datacenterUsage = getAttribute(DATACENTER_USAGE);
                Multimap<String, Entity> mutableDatacenterUsage = (datacenterUsage == null)
                        ? LinkedHashMultimap.<String, Entity>create()
                        : LinkedHashMultimap.create(datacenterUsage);
                Optional<String> oldDcName = getKeyOfVal(mutableDatacenterUsage, member);
                if (!(oldDcName.isPresent() && dcName.equals(oldDcName.get()))) {
                    mutableDatacenterUsage.values().remove(member);
                    mutableDatacenterUsage.put(dcName, member);
                    sensors().set(DATACENTER_USAGE, mutableDatacenterUsage);
                    sensors().set(DATACENTERS, Sets.newLinkedHashSet(mutableDatacenterUsage.keySet()));
                }
            }
        }

        private <K, V> Optional<K> getKeyOfVal(Multimap<K, V> map, V val) {
            for (Map.Entry<K, V> entry : map.entries()) {
                if (Objects.equal(val, entry.getValue())) {
                    return Optional.of(entry.getKey());
                }
            }
            return Optional.absent();
        }
    });
    subscriptions().subscribe(this, DynamicGroup.MEMBER_REMOVED, new SensorEventListener<Entity>() {
        @Override
        public void onEvent(SensorEvent<Entity> event) {
            Entity entity = event.getSource();
            Multimap<String, Entity> datacenterUsage = getAttribute(DATACENTER_USAGE);
            if (datacenterUsage != null && datacenterUsage.containsValue(entity)) {
                Multimap<String, Entity> mutableDatacenterUsage = LinkedHashMultimap.create(datacenterUsage);
                mutableDatacenterUsage.values().remove(entity);
                sensors().set(DATACENTER_USAGE, mutableDatacenterUsage);
                sensors().set(DATACENTERS, Sets.newLinkedHashSet(mutableDatacenterUsage.keySet()));
            }
        }
    });

    getMutableEntityType().addEffector(EXECUTE_SCRIPT, new EffectorBody<String>() {
        @Override
        public String call(ConfigBag parameters) {
            return executeScript((String) parameters.getStringKey("commands"));
        }
    });
}

From source file:org.apache.hadoop.hive.ql.exec.tez.CustomPartitionVertex.java

@Override
public void onRootVertexInitialized(String inputName, InputDescriptor inputDescriptor, List<Event> events) {
    numInputsSeenSoFar++;
    LOG.info("On root vertex initialized " + inputName);
    try {
        // This is using the payload from the RootVertexInitializer corresponding
        // to InputName. Ideally it should be using its own configuration class,
        // but that means serializing another instance.
        MRInputUserPayloadProto protoPayload = MRInputHelpers
                .parseMRInputPayload(inputDescriptor.getUserPayload());
        this.conf = TezUtils.createConfFromByteString(protoPayload.getConfigurationBytes());

        /*
         * Currently in tez, the flow of events is thus:
         * "Generate Splits -> Initialize Vertex" (with parallelism info obtained
         * from the generate splits phase). The generate splits phase groups
         * splits using the TezGroupedSplitsInputFormat. However, for bucket map
         * joins the grouping done by this input format results in incorrect
         * results as the grouper has no knowledge of buckets. So, we initially
         * set the input format to be HiveInputFormat (in DagUtils) for the case
         * of bucket map joins so as to obtain un-grouped splits. We then group
         * the splits corresponding to buckets using the tez grouper which returns
         * TezGroupedSplits.
         */

        // This assumes that Grouping will always be used.
        // Enabling grouping on the payload.
        MRInputUserPayloadProto updatedPayload = MRInputUserPayloadProto.newBuilder(protoPayload)
                .setGroupingEnabled(true).build();
        inputDescriptor
                .setUserPayload(UserPayload.create(updatedPayload.toByteString().asReadOnlyByteBuffer()));
    } catch (IOException e) {
        e.printStackTrace();
        throw new RuntimeException(e);
    }

    boolean dataInformationEventSeen = false;
    Map<String, Set<FileSplit>> pathFileSplitsMap = new TreeMap<String, Set<FileSplit>>();

    for (Event event : events) {
        if (event instanceof InputConfigureVertexTasksEvent) {
            // No tasks should have been started yet. Checked by initial state
            // check.
            LOG.info("Got a input configure vertex event for input: " + inputName);
            Preconditions.checkState(dataInformationEventSeen == false);
            InputConfigureVertexTasksEvent cEvent = (InputConfigureVertexTasksEvent) event;

            // The vertex cannot be configured until all DataEvents are seen - to
            // build the routing table.
            configureVertexTaskEvent = cEvent;
            LOG.info("Configure task for input name: " + inputName + " num tasks: "
                    + configureVertexTaskEvent.getNumTasks());
        }
        if (event instanceof InputUpdatePayloadEvent) {
            // this event can never occur. If it does, fail.
            Preconditions.checkState(false);
        } else if (event instanceof InputDataInformationEvent) {
            dataInformationEventSeen = true;
            InputDataInformationEvent diEvent = (InputDataInformationEvent) event;
            FileSplit fileSplit;
            try {
                fileSplit = getFileSplitFromEvent(diEvent);
            } catch (IOException e) {
                throw new RuntimeException("Failed to get file split for event: " + diEvent, e);
            }
            Set<FileSplit> fsList = pathFileSplitsMap
                    .get(Utilities.getBucketFileNameFromPathSubString(fileSplit.getPath().getName()));
            if (fsList == null) {
                fsList = new TreeSet<FileSplit>(new PathComparatorForSplit());
                pathFileSplitsMap.put(
                        Utilities.getBucketFileNameFromPathSubString(fileSplit.getPath().getName()), fsList);
            }
            fsList.add(fileSplit);
        }
    }

    LOG.info("Path file splits map for input name: " + inputName + " is " + pathFileSplitsMap);

    Multimap<Integer, InputSplit> bucketToInitialSplitMap = getBucketSplitMapForPath(pathFileSplitsMap);

    try {
        int totalResource = context.getTotalAvailableResource().getMemory();
        int taskResource = context.getVertexTaskResource().getMemory();
        float waves = conf.getFloat(TezMapReduceSplitsGrouper.TEZ_GROUPING_SPLIT_WAVES,
                TezMapReduceSplitsGrouper.TEZ_GROUPING_SPLIT_WAVES_DEFAULT);

        int availableSlots = totalResource / taskResource;

        LOG.info("Grouping splits. " + availableSlots + " available slots, " + waves
                + " waves. Bucket initial splits map: " + bucketToInitialSplitMap);
        JobConf jobConf = new JobConf(conf);
        ShimLoader.getHadoopShims().getMergedCredentials(jobConf);

        Multimap<Integer, InputSplit> bucketToGroupedSplitMap = HashMultimap.<Integer, InputSplit>create();
        boolean secondLevelGroupingDone = false;
        if ((mainWorkName.isEmpty()) || (inputName.compareTo(mainWorkName) == 0)) {
            SplitLocationProvider splitLocationProvider = Utils.getSplitLocationProvider(conf, LOG);
            for (Integer key : bucketToInitialSplitMap.keySet()) {
                InputSplit[] inputSplitArray = (bucketToInitialSplitMap.get(key).toArray(new InputSplit[0]));
                Multimap<Integer, InputSplit> groupedSplit = grouper.generateGroupedSplits(jobConf, conf,
                        inputSplitArray, waves, availableSlots, inputName, mainWorkName.isEmpty(),
                        splitLocationProvider);
                if (mainWorkName.isEmpty() == false) {
                    Multimap<Integer, InputSplit> singleBucketToGroupedSplit = HashMultimap
                            .<Integer, InputSplit>create();
                    singleBucketToGroupedSplit.putAll(key, groupedSplit.values());
                    groupedSplit = grouper.group(jobConf, singleBucketToGroupedSplit, availableSlots,
                            HiveConf.getFloatVar(conf, HiveConf.ConfVars.TEZ_SMB_NUMBER_WAVES), null);
                    secondLevelGroupingDone = true;
                }
                bucketToGroupedSplitMap.putAll(key, groupedSplit.values());
            }
            processAllEvents(inputName, bucketToGroupedSplitMap, secondLevelGroupingDone);
        } else {
            SplitLocationProvider splitLocationProvider = Utils.getSplitLocationProvider(conf, LOG);
            // do not group across files in case of side work because there is only 1 KV reader per
            // grouped split. This would affect SMB joins where we want to find the smallest key in
            // all the bucket files.
            for (Integer key : bucketToInitialSplitMap.keySet()) {
                InputSplit[] inputSplitArray = (bucketToInitialSplitMap.get(key).toArray(new InputSplit[0]));
                Multimap<Integer, InputSplit> groupedSplit = grouper.generateGroupedSplits(jobConf, conf,
                        inputSplitArray, waves, availableSlots, inputName, false, splitLocationProvider);
                bucketToGroupedSplitMap.putAll(key, groupedSplit.values());
            }
            /*
             * this is the small table side. In case of SMB join, we need to send each split to the
             * corresponding bucket-based task on the other side. In case a split needs to go to
             * multiple downstream tasks, we need to clone the event and send it to the right
             * destination.
             */
            LOG.info("This is the side work - multi-mr work.");
            processAllSideEventsSetParallelism(inputName, bucketToGroupedSplitMap);
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}