Example usage for org.apache.commons.lang3.tuple ImmutablePair ImmutablePair

Introduction

On this page you can find example usage of the ImmutablePair(L, R) constructor from org.apache.commons.lang3.tuple.ImmutablePair.

Prototype

public ImmutablePair(final L left, final R right) 

Document

Create a new pair instance.
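
A minimal sketch of calling this constructor directly (the class name ImmutablePairExample and the sample values are illustrative, not taken from the projects listed under Usage):

import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;

public class ImmutablePairExample {
    public static void main(String[] args) {
        // Create a new pair instance; left and right are fixed at construction time.
        Pair<String, Integer> pair = new ImmutablePair<>("answer", 42);

        // Read the components back via the Pair accessors.
        System.out.println(pair.getLeft() + " = " + pair.getRight()); // prints: answer = 42
    }
}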

Usage

From source file:com.streamsets.pipeline.stage.it.DriftIT.java

public void testPartitionMismatch(List<PartitionConfig> partitionConfigList) throws Exception {
    HiveMetadataProcessorBuilder processorBuilder = new HiveMetadataProcessorBuilder().table("tbl_partition");
    if (!partitionConfigList.isEmpty()) {
        processorBuilder.partitions(partitionConfigList);
    }
    HiveMetadataProcessor processor = processorBuilder.build();

    HiveMetastoreTarget hiveTarget = new HiveMetastoreTargetBuilder().build();
    List<Record> records = new LinkedList<>();

    Map<String, Field> map = new LinkedHashMap<>();
    map.put("city", Field.create("San Jose"));
    Record record = RecordCreator.create();
    record.set(Field.create(map));
    records.add(record);

    try {
        processRecords(processor, hiveTarget, records);
        Assert.fail("Specifying no partitions to partitioned table should fail");
    } catch (StageException e) {
        Assert.assertEquals("Error codes mismatch", Errors.HIVE_27, e.getErrorCode());
    }

    assertQueryResult("select * from tbl_partition", new QueryValidator() {
        @Override
        public void validateResultSet(ResultSet rs) throws Exception {
            // Table structure should not be altered
            assertResultSetStructure(rs, new ImmutablePair("tbl_partition.city", Types.VARCHAR),
                    new ImmutablePair("tbl_partition.dt1", Types.VARCHAR),
                    new ImmutablePair("tbl_partition.dt2", Types.VARCHAR));
            // Alter Table query failed, so no data should be added to the table
            Assert.assertFalse("Table tbl_partition should not contain rows", rs.next());
        }
    });
}

From source file:blusunrize.immersiveengineering.api.energy.wires.ImmersiveNetHandler.java

public Set<AbstractConnection> getIndirectEnergyConnections(BlockPos node, World world,
        boolean ignoreIsEnergyOutput) {
    int dimension = world.provider.getDimension();
    if (!ignoreIsEnergyOutput && indirectConnections.containsKey(dimension)
            && indirectConnections.get(dimension).containsKey(node))
        return indirectConnections.get(dimension).get(node);
    else if (ignoreIsEnergyOutput && indirectConnectionsIgnoreOut.containsKey(dimension)
            && indirectConnectionsIgnoreOut.get(dimension).containsKey(node))
        return indirectConnectionsIgnoreOut.get(dimension).get(node);

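    // Dijkstra-style frontier: (connectable, accumulated loss) pairs, ordered so the lowest loss is polled first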
    PriorityQueue<Pair<IImmersiveConnectable, Float>> queue = new PriorityQueue<>(
            Comparator.comparingDouble(Pair::getRight));
    Set<AbstractConnection> closedList = newSetFromMap(new ConcurrentHashMap<AbstractConnection, Boolean>());
    List<BlockPos> checked = new ArrayList<>();
    HashMap<BlockPos, BlockPos> backtracker = new HashMap<>();

    checked.add(node);
    Set<Connection> conL = getConnections(world, node);
    if (conL != null)
        for (Connection con : conL) {
            IImmersiveConnectable end = toIIC(con.end, world);
            if (end != null) {
                queue.add(new ImmutablePair<>(end, con.getBaseLoss()));
                backtracker.put(con.end, node);
            }
        }

    IImmersiveConnectable next;
    final int closedListMax = 1200;

    while (closedList.size() < closedListMax && !queue.isEmpty()) {
        Pair<IImmersiveConnectable, Float> pair = queue.poll();
        next = pair.getLeft();
        float loss = pair.getRight();
        BlockPos nextPos = toBlockPos(next);
        if (!checked.contains(nextPos) && queue.stream().noneMatch((p) -> p.getLeft().equals(nextPos))) {
            boolean isOutput = next.isEnergyOutput();
            if (ignoreIsEnergyOutput || isOutput) {
                BlockPos last = toBlockPos(next);
                WireType minimumType = null;
                int distance = 0;
                List<Connection> connectionParts = new ArrayList<>();
                while (last != null) {
                    BlockPos prev = last;
                    last = backtracker.get(last);
                    if (last != null) {

                        Set<Connection> conLB = getConnections(world, last);
                        if (conLB != null)
                            for (Connection conB : conLB)
                                if (conB.end.equals(prev)) {
                                    connectionParts.add(0, conB);
                                    distance += conB.length;
                                    if (minimumType == null
                                            || conB.cableType.getTransferRate() < minimumType.getTransferRate())
                                        minimumType = conB.cableType;
                                    break;
                                }
                    }
                }
                closedList.add(new AbstractConnection(toBlockPos(node), toBlockPos(next), minimumType, distance,
                        isOutput, connectionParts.toArray(new Connection[connectionParts.size()])));
            }

            Set<Connection> conLN = getConnections(world, toBlockPos(next));
            if (conLN != null)
                for (Connection con : conLN)
                    if (next.allowEnergyToPass(con)) {
                        IImmersiveConnectable end = toIIC(con.end, world);

                        Optional<Pair<IImmersiveConnectable, Float>> existing = queue.stream()
                                .filter((p) -> p.getLeft() == end).findAny();
                        float newLoss = con.getBaseLoss() + loss;
                        if (end != null && !checked.contains(con.end)
                                && existing.map(Pair::getRight).orElse(Float.MAX_VALUE) > newLoss) {
                            existing.ifPresent(p1 -> queue.removeIf((p2) -> p1.getLeft() == p2.getLeft()));
                            queue.add(new ImmutablePair<>(end, newLoss));
                            backtracker.put(con.end, toBlockPos(next));
                        }
                    }
            checked.add(toBlockPos(next));
        }
    }
    if (FMLCommonHandler.instance().getEffectiveSide() == Side.SERVER) {
        if (ignoreIsEnergyOutput) {
            if (!indirectConnectionsIgnoreOut.containsKey(dimension))
                indirectConnectionsIgnoreOut.put(dimension, new ConcurrentHashMap<>());
            Map<BlockPos, Set<AbstractConnection>> conns = indirectConnectionsIgnoreOut.get(dimension);
            if (!conns.containsKey(node))
                conns.put(node, newSetFromMap(new ConcurrentHashMap<>()));
            conns.get(node).addAll(closedList);
        } else {
            if (!indirectConnections.containsKey(dimension))
                indirectConnections.put(dimension, new ConcurrentHashMap<>());
            Map<BlockPos, Set<AbstractConnection>> conns = indirectConnections.get(dimension);
            if (!conns.containsKey(node))
                conns.put(node, newSetFromMap(new ConcurrentHashMap<>()));
            conns.get(node).addAll(closedList);
        }
    }
    return closedList;
}

From source file:com.evolveum.midpoint.model.impl.integrity.ShadowIntegrityCheckResultHandler.java

private void checkShadow(ShadowCheckResult checkResult, PrismObject<ShadowType> shadow, Task workerTask,
        OperationResult result) throws SchemaException {
    ShadowType shadowType = shadow.asObjectable();
    ObjectReferenceType resourceRef = shadowType.getResourceRef();
    if (LOGGER.isTraceEnabled()) {
        LOGGER.trace("Checking shadow {} (resource {})", ObjectTypeUtil.toShortString(shadowType),
                resourceRef != null ? resourceRef.getOid() : "(null)");
    }
    statistics.incrementShadows();

    if (resourceRef == null) {
        checkResult.recordError(Statistics.NO_RESOURCE_OID, new SchemaException("No resourceRef"));
        return;
    }
    String resourceOid = resourceRef.getOid();
    if (resourceOid == null) {
        checkResult.recordError(Statistics.NO_RESOURCE_OID, new SchemaException("Null resource OID"));
        return;
    }
    PrismObject<ResourceType> resource = resources.get(resourceOid);
    if (resource == null) {
        statistics.incrementResources();
        try {
            resource = provisioningService.getObject(ResourceType.class, resourceOid, null, workerTask, result);
        } catch (ObjectNotFoundException e) {
            checkResult.recordError(Statistics.CANNOT_GET_RESOURCE,
                    new ObjectNotFoundException("Resource object does not exist: " + e.getMessage(), e));
            return;
        } catch (SchemaException e) {
            checkResult.recordError(Statistics.CANNOT_GET_RESOURCE,
                    new ObjectNotFoundException("Resource object has schema problems: " + e.getMessage(), e));
            return;
        } catch (CommonException | RuntimeException e) {
            checkResult.recordError(Statistics.CANNOT_GET_RESOURCE, new ObjectNotFoundException(
                    "Resource object cannot be fetched for some reason: " + e.getMessage(), e));
            return;
        }
        resources.put(resourceOid, resource);
    }
    checkResult.setResource(resource);

    ShadowKindType kind = shadowType.getKind();
    if (kind == null) {
        // TODO or simply assume account?
        checkResult.recordError(Statistics.NO_KIND_SPECIFIED, new SchemaException("No kind specified"));
        return;
    }

    if (checkExtraData) {
        checkOrFixShadowActivationConsistency(checkResult, shadow, fixExtraData);
    }

    PrismObject<ShadowType> fetchedShadow = null;
    if (checkFetch) {
        fetchedShadow = fetchShadow(checkResult, shadow, resource, workerTask, result);
        if (fetchedShadow != null) {
            shadow.setUserData(KEY_EXISTS_ON_RESOURCE, "true");
        }
    }

    if (checkOwners) {
        List<PrismObject> owners = searchOwners(shadow, result);
        if (owners != null) {
            shadow.setUserData(KEY_OWNERS, owners);
            if (owners.size() > 1) {
                checkResult.recordError(Statistics.MULTIPLE_OWNERS,
                        new SchemaException("Multiple owners: " + owners));
            }
        }

        if (shadowType.getSynchronizationSituation() == SynchronizationSituationType.LINKED
                && (owners == null || owners.isEmpty())) {
            checkResult.recordError(Statistics.LINKED_WITH_NO_OWNER,
                    new SchemaException("Linked shadow with no owner"));
        }
        if (shadowType.getSynchronizationSituation() != SynchronizationSituationType.LINKED && owners != null
                && !owners.isEmpty()) {
            checkResult.recordError(Statistics.NOT_LINKED_WITH_OWNER,
                    new SchemaException("Shadow with an owner but not marked as linked (marked as "
                            + shadowType.getSynchronizationSituation() + ")"));
        }
    }

    String intent = shadowType.getIntent();
    if (checkIntents && (intent == null || intent.isEmpty())) {
        checkResult.recordWarning(Statistics.NO_INTENT_SPECIFIED, "None or empty intent");
    }
    if (fixIntents && (intent == null || intent.isEmpty())) {
        doFixIntent(checkResult, fetchedShadow, shadow, resource, workerTask, result);
    }

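    // Object type contexts are cached per (resource OID, shadow kind), keyed by an ImmutablePair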
    Pair<String, ShadowKindType> key = new ImmutablePair<>(resourceOid, kind);
    ObjectTypeContext context = contextMap.get(key);
    if (context == null) {
        context = new ObjectTypeContext();
        context.setResource(resource);
        RefinedResourceSchema resourceSchema;
        try {
            resourceSchema = RefinedResourceSchema.getRefinedSchema(context.getResource(), LayerType.MODEL,
                    prismContext);
        } catch (SchemaException e) {
            checkResult.recordError(Statistics.CANNOT_GET_REFINED_SCHEMA,
                    new SchemaException("Couldn't derive resource schema: " + e.getMessage(), e));
            return;
        }
        if (resourceSchema == null) {
            checkResult.recordError(Statistics.NO_RESOURCE_REFINED_SCHEMA,
                    new SchemaException("No resource schema"));
            return;
        }
        context.setObjectClassDefinition(resourceSchema.getRefinedDefinition(kind, shadowType));
        if (context.getObjectClassDefinition() == null) {
            // TODO or warning only?
            checkResult.recordError(Statistics.NO_OBJECT_CLASS_REFINED_SCHEMA, new SchemaException(
                    "No refined object class definition for kind=" + kind + ", intent=" + intent));
            return;
        }
        contextMap.put(key, context);
    }

    try {
        provisioningService.applyDefinition(shadow, result);
    } catch (SchemaException | ObjectNotFoundException | CommunicationException | ConfigurationException e) {
        checkResult.recordError(Statistics.OTHER_FAILURE,
                new SystemException("Couldn't apply definition to shadow from repo", e));
        return;
    }

    Set<RefinedAttributeDefinition<?>> identifiers = new HashSet<>();
    Collection<? extends RefinedAttributeDefinition<?>> primaryIdentifiers = context.getObjectClassDefinition()
            .getIdentifiers();
    identifiers.addAll(primaryIdentifiers);
    identifiers.addAll(context.getObjectClassDefinition().getSecondaryIdentifiers());

    PrismContainer<ShadowAttributesType> attributesContainer = shadow.findContainer(ShadowType.F_ATTRIBUTES);
    if (attributesContainer == null) {
        // might happen on unfinished shadows?
        checkResult.recordError(Statistics.OTHER_FAILURE, new SchemaException("No attributes container"));
        return;
    }

    for (RefinedAttributeDefinition<?> identifier : identifiers) {
        PrismProperty property = attributesContainer.getValue().findProperty(identifier.getName());
        if (property == null || property.size() == 0) {
            checkResult.recordWarning(Statistics.OTHER_FAILURE,
                    "No value for identifier " + identifier.getName());
            continue;
        }
        if (property.size() > 1) {
            // we don't expect multi-valued identifiers
            checkResult.recordError(Statistics.OTHER_FAILURE, new SchemaException("Multi-valued identifier "
                    + identifier.getName() + " with values " + property.getValues()));
            continue;
        }
        // size == 1
        String value = (String) property.getValue().getValue();
        if (value == null) {
            checkResult.recordWarning(Statistics.OTHER_FAILURE,
                    "Null value for identifier " + identifier.getName());
            continue;
        }
        if (checkUniqueness) {
            if (!checkDuplicatesOnPrimaryIdentifiersOnly || primaryIdentifiers.contains(identifier)) {
                addIdentifierValue(checkResult, context, identifier.getName(), value, shadow);
            }
        }
        if (checkNormalization) {
            doCheckNormalization(checkResult, identifier, value, context);
        }
    }

    if (checkResult.getFixDeltas().size() > 0) {
        try {
            applyFix(checkResult, shadow, workerTask, result);
            checkResult.setFixApplied(true);
        } catch (CommonException e) {
            checkResult.recordError(Statistics.CANNOT_APPLY_FIX,
                    new SystemException("Couldn't apply the shadow fix", e));
            return;
        }
    }
}

From source file:io.pravega.controller.store.stream.PersistentStreamBase.java

/**
 * If scale is ongoing, try to delete the epoch node.
 *
 * @param epoch the epoch to delete
 * @return true if we are able to delete the epoch, false otherwise.
 */
@Override
public CompletableFuture<Boolean> scaleTryDeleteEpoch(final int epoch) {
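    // Fetch the history and segment tables together, pairing them so both are available to the next stage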
    return getHistoryTableFromStore()
            .thenCompose(historyTable -> getSegmentTableFromStore()
                    .thenApply(segmentTable -> new ImmutablePair<>(historyTable, segmentTable)))
            .thenCompose(pair -> {
                Data<T> segmentTable = pair.getRight();
                Data<T> historyTable = pair.getLeft();
                CompletableFuture<Boolean> result = new CompletableFuture<>();

                if (TableHelper.isScaleOngoing(historyTable.getData(), segmentTable.getData())) {
                    deleteEpochNode(epoch).whenComplete((r, e) -> {
                        if (e != null) {
                            Throwable ex = ExceptionHelpers.getRealException(e);
                            if (ex instanceof StoreException.DataNotEmptyException) {
                                // can't delete as there are transactions still running under the epoch node
                                result.complete(false);
                            } else {
                                result.completeExceptionally(ex);
                            }
                        } else {
                            result.complete(true);
                        }
                    });
                } else {
                    result.complete(false);
                }
                return result;
            });
}

From source file:io.pravega.controller.store.stream.tables.TableHelper.java

/**
 * Return the active epoch.
 * @param historyTableData history table
 * @return active epoch
 */
public static Pair<Integer, List<Integer>> getLatestEpoch(byte[] historyTableData) {
    HistoryRecord historyRecord = HistoryRecord.readLatestRecord(historyTableData, false).get();
    return new ImmutablePair<>(historyRecord.getEpoch(), historyRecord.getSegments());
}

From source file:io.pravega.controller.store.stream.tables.TableHelper.java

/**
 * Method to compute segments created and deleted in latest scale event.
 *
 * @param historyTable history table
 * @return pair of segments sealed and segments created in last scale event.
 */
public static Pair<List<Integer>, List<Integer>> getLatestScaleData(final byte[] historyTable) {
    final Optional<HistoryRecord> current = HistoryRecord.readLatestRecord(historyTable, false);
    ImmutablePair<List<Integer>, List<Integer>> result;
    if (current.isPresent()) {
        final Optional<HistoryRecord> previous = HistoryRecord.fetchPrevious(current.get(), historyTable);
        result = previous
                .map(historyRecord -> new ImmutablePair<>(
                        diff(historyRecord.getSegments(), current.get().getSegments()),
                        diff(current.get().getSegments(), historyRecord.getSegments())))
                .orElseGet(() -> new ImmutablePair<>(Collections.emptyList(), current.get().getSegments()));
    } else {
        result = new ImmutablePair<>(Collections.emptyList(), Collections.emptyList());
    }
    return result;
}

From source file:com.intuit.wasabi.repository.cassandra.impl.CassandraAssignmentsRepository.java

/**
 * Populate existing user assignments for the given user, application & context.
 * This method makes use of the provided experimentMap to avoid a database call to fetch the experiment object.
 *
 * @param userID        User Id
 * @param appLabel      Application Label
 * @param context       Environment context
 * @param experimentMap experiment map to fetch experiment label
 * @return List of assignments in terms of pairs of Experiment & Bucket label.
 */
@Override
@Timed
public List<Pair<Experiment, String>> getAssignments(User.ID userID, Application.Name appLabel, Context context,
        Map<Experiment.ID, Experiment> experimentMap) {
    final Stream<ExperimentUserByUserIdContextAppNameExperimentId> experimentUserStream = getUserIndexStream(
            userID.toString(), appLabel.toString(), context.getContext());
    List<Pair<Experiment, String>> result = new ArrayList<>();
    experimentUserStream.forEach((ExperimentUserByUserIdContextAppNameExperimentId t) -> {
        Experiment exp = experimentMap.get(Experiment.ID.valueOf(t.getExperimentId()));
        if (nonNull(exp)) {
            result.add(new ImmutablePair<>(exp, Optional.ofNullable(t.getBucket()).orElseGet(() -> "null")));
        } else {
            LOGGER.debug("{} experiment id is not present in the experimentMap...", t.getExperimentId());
        }
    });
    return result;
}

From source file:com.yahoo.bard.webservice.web.endpoints.JobsServlet.java

/**
 * Add pagination details to ResponseContext.
 *
 * @param responseContext  ResponseContext object that contains all the meta info of the resultSet
 * @param uriInfo  uriInfo object to get uriBuilder
 * @param pages  Paginated resultSet
 *
 * @return Updated ResponseContext containing pagination info
 */
protected ResponseContext addPaginationInfoToResponseContext(ResponseContext responseContext, UriInfo uriInfo,
        Pagination<Result> pages) {
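    // Pair each pagination link name with its page number (when present), then with the page URI, and collect the pairs into a linked map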
    LinkedHashMap<String, URI> bodyLinks = Arrays.stream(PaginationLink.values())
            .map(link -> new ImmutablePair<>(link.getBodyName(), link.getPage(pages)))
            .filter(pair -> pair.getRight().isPresent())
            .map(pair -> Utils.withRight(pair, pair.getRight().getAsInt()))
            .map(pair -> Utils.withRight(pair,
                    uriInfo.getRequestUriBuilder().replaceQueryParam("page", pair.getRight())))
            .map(pair -> Utils.withRight(pair, pair.getRight().build()))
            .collect(StreamUtils.toLinkedMap(Pair::getLeft, Pair::getRight));
    responseContext.put(ResponseContextKeys.PAGINATION_LINKS_CONTEXT_KEY.getName(), bodyLinks);
    responseContext.put(ResponseContextKeys.PAGINATION_CONTEXT_KEY.getName(), pages);
    return responseContext;
}

From source file:com.pinterest.terrapin.controller.HdfsManagerTest.java

@Test
public void testFileSetDelete() throws Exception {
    List<String> resources = Lists.newArrayList();
    // This version is currently serving.
    resources.add(getResource(FILESET, 100));
    // This version is offlined, but the Helix resource and HDFS data are still there.
    resources.add(getResource(FILESET, 7400));
    when(mockHelixAdmin.getResourcesInCluster(CLUSTER_NAME)).thenReturn(resources);

    FileSetInfo fsInfo = new FileSetInfo(FILESET, TerrapinUtil.helixResourceToHdfsDir(resources.get(0)), 2,
            (List) Lists.newArrayList(), new Options());
    fsInfo.deleted = true;
    when(mockZkManager.getCandidateHdfsDirMap())
            .thenReturn((Map) ImmutableMap.of(FILESET, new ImmutablePair(fsInfo, null)));
    setupBaseDirListing(Lists.newArrayList(FILESET));
    setupListingForFileSet(FILESET, resources, 2);

    String servingResource = resources.get(0);
    String oldResource = resources.get(1);
    testRoutingTableProvider.setOnlinePartitionMap((Map) ImmutableMap.of(servingResource,
            ImmutableMap.of(TerrapinUtil.getViewPartitionName(servingResource, 0),
                    ImmutableList.of(hostConfig1, hostConfig2)),
            TerrapinUtil.getViewPartitionName(servingResource, 1), ImmutableList.of(hostConfig1, hostConfig3)));

    HdfsManager.Rebalancer rebalancer = hdfsManager.createAndGetRebalancer();
    rebalancer.reconcileAndRebalance();

    // In the first rebalance, the serving resource will get offlined while the older
    // resource will be deleted (since it's already offlined).
    checkIdealStateModified(servingResource, (Map) Maps.newHashMap(), 2);
    checkIdealStateModified(servingResource, (Map) Maps.newHashMap(), 2);

    checkHdfsBlocksNotRetrieved(servingResource, 2);
    checkHdfsDataNotDeleted(servingResource);
    checkViewInfoNotDeleted(servingResource);
    checkResourceNotDeleted(servingResource);

    checkIdealStateNotModified(oldResource);
    checkHdfsBlocksNotRetrieved(oldResource, 2);
    checkHdfsDataDeleted(oldResource);
    checkViewInfoDeleted(oldResource);
    checkResourceDeleted(oldResource);

    // Make sure that the fileset is not deleted yet.
    verify(mockDfsClient, times(0)).delete(eq(Constants.HDFS_DATA_DIR + "/" + FILESET), eq(true));
    verify(mockZkManager, times(0)).deleteFileSetInfo(eq(FILESET));
    verify(mockZkManager, times(0)).unlockFileSet(eq(FILESET));

    // In the 2nd rebalance, the fileset will be deleted.
    // Change HDFS listing & helix listing since one of the resources was deleted.
    when(mockHelixAdmin.getResourcesInCluster(eq(CLUSTER_NAME))).thenReturn(resources.subList(0, 1));
    setupListingForFileSet(FILESET, resources.subList(0, 1), 2);
    // The serving resource has been offlined.
    testRoutingTableProvider.setOnlinePartitionMap((Map) Maps.newHashMap());

    rebalancer.reconcileAndRebalance();

    // Note that this check means that the ideal state was NOT modified since these
    // mock calls from the previous reconcileAndRebalance call.
    checkIdealStateModified(servingResource, (Map) Maps.newHashMap(), 2);
    checkHdfsBlocksNotRetrieved(servingResource, 2);
    checkViewInfoDeleted(servingResource);
    checkResourceDeleted(servingResource);
    checkHdfsDataDeleted(servingResource);

    verify(mockDfsClient, times(1)).delete(eq(Constants.HDFS_DATA_DIR + "/" + FILESET), eq(true));
    verify(mockZkManager, times(1)).deleteFileSetInfo(eq(FILESET));
    verify(mockZkManager, times(1)).unlockFileSet(eq(FILESET));
}

From source file:ilcc.ccgparser.incderivation.RevInc.java

private Pair<CCGJTreeNode, ArcJAction> checkLeftRevealDep(CCGJTreeNode left, CCGJTreeNode right,
        String rescatstr, HashMap<Integer, CCGNodeDepInfo> nccgNodeDeps) {
    CCGJTreeNode lleft, result = null;
    RuleType rule = RuleType.other;
    int lid = left.getConllNode().getNodeId(), rid = right.getConllNode().getNodeId();
    CCGcat llcat, rlcat, lcat, rcat;
    String key1 = lid + "--" + rid, key2 = rid + "--" + lid;
    boolean isconj = false;
    rcat = right.getCCGcat();
    if ((drvDeps.containsKey(key1) || drvDeps.containsKey(key2))) {
        if ((right.getCCGcat().matches("(S\\NP)\\(S\\NP)")
                || right.getCCGcat().matches("(S\\NP)\\((S\\NP)/NP)"))) {
            lcat = left.getCCGcat().matches("S") ? left.getCCGcat() : CCGcat.ccgCatFromString("S");
            rcat = CCGcat.typeChangingRule(right.getCCGcat(), "(S\\NP)\\(S\\NP)");
            rlcat = CCGcat.lexCat(left.getWrdStr(), lcat.catString() + "\\NP", lid);
        } else if ((rcat.matches("S\\NP") && rcat.toString().endsWith("[conj]"))) {
            lcat = left.getCCGcat().matches("S") ? left.getCCGcat() : CCGcat.ccgCatFromString("S");
            rlcat = CCGcat.typeChangingRule(lcat, "S\\NP");
            isconj = true;
        } else
            return null;

        Integer vert = lid;
        Integer lvert = depGraph.getLeftMost(vert, "N|NP");

        if (lvert == null)
            llcat = CCGcat.ccgCatFromString("NP");
        else
            llcat = CCGcat.typeChangingRule(sent.getNode(lvert - 1).getCCGcat(), "NP");

        HashMap<String, CCGDepInfo> depsMap = new HashMap<>();

        CCGJRuleInfo tinfo = ccgCombinators.checkCCGRules(rlcat, rcat);
        if (tinfo != null) {
            depsMap = Commons.getDepsMap(rlcat, rcat, tinfo.getResultCat(), depsMap);
            CCGJRuleInfo info = ccgCombinators.checkCCGRules(llcat, tinfo.getResultCat());
            if (info != null) {
                //depsMap = getDepsMap(llcat, tinfo.getResultCat(), info.getResultCat(), depsMap);
                if (ftrue || checkWithGoldDeps(depsMap)) {
                    Commons.updateDepTree(tinfo, left.getConllNode().getNodeId(),
                            right.getConllNode().getNodeId(), depGraph);
                    ArcJAction act = ArcJAction.make(SRAction.LREVEAL, 0,
                            (result == null) ? null : result.getCCGcat().toString(), rule);
                    if (isconj)
                        result = Commons.applyBinaryUpdate(left, right, info.getResultCat(), act,
                                RuleType.lreveal, true);
                    else
                        result = Commons.applyBinaryUpdate(left, right, left.getCCGcat(), act, RuleType.lreveal,
                                true);
                    updateccgNodeDeps(left, right, SRAction.LREVEAL, nccgNodeDeps, depsMap, false);
                    Commons.updateSysDeps(depsMap, sysccgDeps);
                    actionMap.put(SRAction.LREVEAL, actionMap.get(SRAction.LREVEAL) + 1);
                    rule = RuleType.lreveal;
                }
            }
        }
    }
    return new ImmutablePair(result, ArcJAction.make(SRAction.LREVEAL, 0,
            (result == null) ? null : result.getCCGcat().toString(), rule));
}