Example usage for org.apache.commons.collections4 MapUtils isEmpty

List of usage examples for org.apache.commons.collections4 MapUtils isEmpty

Introduction

On this page you can find example usages of org.apache.commons.collections4 MapUtils isEmpty.

Prototype

public static boolean isEmpty(final Map<?, ?> map) 

Source Link

Document

Null-safe check if the specified map is empty.

Usage

From source file:io.cloudslang.lang.tools.build.validation.StaticValidatorImpl.java

/**
 * Validates that every in/out parameter has a matching description entry in the metadata map.
 * Rather than failing fast, every problem found is appended to {@code exceptions} so callers
 * can report all validation errors at once.
 *
 * @param metadataInOutParams description entries keyed by parameter name (may be null or empty)
 * @param inOutParams         the declared in/out parameters to validate (may be null)
 * @param errorMessagePrefix  human-readable parameter kind used to build error messages
 * @param exceptions          sink collecting every validation failure
 */
private void validateInOutParams(Map<String, String> metadataInOutParams,
        List<? extends InOutParam> inOutParams, String errorMessagePrefix, Queue<RuntimeException> exceptions) {
    List<? extends InOutParam> params = ListUtils.emptyIfNull(inOutParams);
    if (MapUtils.isEmpty(metadataInOutParams)) {
        // Fix: the emptiness check is loop-invariant, so the original queued the identical
        // "missing description entirely" exception once per parameter. Report it once,
        // and only when there is at least one parameter (preserving the no-params case).
        if (!params.isEmpty()) {
            exceptions.add(
                    new MetadataMissingException(errorMessagePrefix + "s are missing description entirely."));
        }
        return;
    }
    for (InOutParam inOutParam : params) {
        // Private inputs are exempt from the description requirement.
        if (metadataInOutParams.get(inOutParam.getName()) == null
                && (!(inOutParam instanceof Input) || !((Input) inOutParam).isPrivateInput())) {
            exceptions.add(new MetadataMissingException(
                    errorMessagePrefix + " '" + inOutParam.getName() + "' is missing description."));
        }
    }
}

From source file:com.movies.bean.ActorListBean.java

@Override
public void searchPeople(ActionEvent event) {
    try {/*from   w  w  w  .j a  va  2s.c om*/
        createParameterMap();
        if (MapUtils.isEmpty(parameterMap)) {
            actors = actorService.getAllActorsWithMovies();
        } else {
            actors = actorService.getActorsByCriteria(parameterMap);
        }
    } catch (Exception e) {
        e.printStackTrace();
        JsfUtil.addErrorMessage("Error : " + e.getMessage());
    }
}

From source file:com.jkoolcloud.tnt4j.streams.utils.LoggerUtils.java

private static void setLog4jConfig(byte[] data, EventSink logger) {
    Properties loggerProps = new Properties();
    InputStream is = new ByteArrayInputStream(data);
    try {/*from   w w  w . j a  v a  2 s  .c o  m*/
        loggerProps.load(is);
    } catch (Exception exc) {
        logger.log(OpLevel.ERROR, StreamsResources.getString(StreamsResources.RESOURCE_BUNDLE_NAME,
                "LoggerUtils.log4j.load.error"), exc);
    } finally {
        Utils.close(is);
    }

    if (MapUtils.isEmpty(loggerProps)) {
        logger.log(OpLevel.INFO, StreamsResources.getString(StreamsResources.RESOURCE_BUNDLE_NAME,
                "LoggerUtils.log4j.empty.configuration"));
    } else {
        logger.log(OpLevel.DEBUG, StreamsResources.getString(StreamsResources.RESOURCE_BUNDLE_NAME,
                "LoggerUtils.log4j.reconfiguring"), loggerProps.size());
        try {
            // org.apache.log4j.PropertyConfigurator.configure(loggerProps);

            invoke("org.apache.log4j.PropertyConfigurator", "configure", new Class[] { Properties.class }, // NON-NLS
                    loggerProps);
        } catch (Exception exc) {
            logger.log(OpLevel.ERROR, StreamsResources.getString(StreamsResources.RESOURCE_BUNDLE_NAME,
                    "LoggerUtils.log4j.reconfiguring.fail"), exc);
        }

        logger.log(OpLevel.DEBUG, StreamsResources.getString(StreamsResources.RESOURCE_BUNDLE_NAME,
                "LoggerUtils.log4j.reconfiguring.end"));
    }
}

From source file:it.reply.orchestrator.service.commands.PrefilterCloudProviders.java

@Override
protected RankCloudProvidersMessage customExecute(RankCloudProvidersMessage rankCloudProvidersMessage)
        throws Exception {
    // TODO Filter cloud providers (i.e. based on OneData)

    Deployment deployment = deploymentRepository.findOne(rankCloudProvidersMessage.getDeploymentId());

    // Filter out providers that do not support the requested images
    ArchiveRoot ar = toscaService.parseTemplate(deployment.getTemplate());
    Set<CloudProvider> providersToDiscard = Sets.newHashSet();
    Set<CloudService> servicesToDiscard = Sets.newHashSet();

    if (!CollectionUtils.isEmpty(rankCloudProvidersMessage.getPlacementPolicies())) {
        this.discardOnPlacementPolicies(rankCloudProvidersMessage.getPlacementPolicies(),
                rankCloudProvidersMessage.getCloudProviders().values(),
                rankCloudProvidersMessage.getSlamPreferences().getSla(), servicesToDiscard);
    }/*from   www  . j av  a 2s .  c  o m*/

    discardProvidersAndServices(providersToDiscard, servicesToDiscard, rankCloudProvidersMessage);

    if (!MapUtils.isEmpty(rankCloudProvidersMessage.getOneDataRequirements())) {
        OneData inputRequirement = rankCloudProvidersMessage.getOneDataRequirements().get("input");
        discardOnOneDataRequirements(inputRequirement, rankCloudProvidersMessage.getCloudProviders().values(),
                providersToDiscard, servicesToDiscard);
        OneData outputRequirement = rankCloudProvidersMessage.getOneDataRequirements().get("output");
        discardOnOneDataRequirements(outputRequirement, rankCloudProvidersMessage.getCloudProviders().values(),
                providersToDiscard, servicesToDiscard);
    }

    discardProvidersAndServices(providersToDiscard, servicesToDiscard, rankCloudProvidersMessage);

    // Filter provider for Chronos
    // FIXME: It's just a demo hack to for Chronos jobs default provider override!!
    if (deployment.getDeploymentProvider().equals(DeploymentProvider.CHRONOS)) {
        for (CloudProvider cloudProvider : rankCloudProvidersMessage.getCloudProviders().values()) {
            if (!cloudProvider.getName().equalsIgnoreCase(chronosCloudProviderName)) {
                LOG.debug(
                        "Discarded provider {} because it doesn't match Chronos default provider {}"
                                + " for deployment {}",
                        cloudProvider.getId(), chronosCloudProviderName, deployment.getId());
                addProviderToDiscard(providersToDiscard, servicesToDiscard, cloudProvider);
            }
        }
    }

    discardProvidersAndServices(providersToDiscard, servicesToDiscard, rankCloudProvidersMessage);

    // Filter provider by image contextualization check
    for (CloudProvider cloudProvider : rankCloudProvidersMessage.getCloudProviders().values()) {
        for (CloudService cloudService : cloudProvider.getCmbdProviderServicesByType(Type.COMPUTE)) {
            try {
                toscaService.contextualizeImages(deployment.getDeploymentProvider(), ar, cloudProvider,
                        cloudService.getId(), false);
            } catch (Exception ex) {
                // Failed to match all required images -> discard provider
                LOG.debug(
                        "Discarded service {} of provider {} because it doesn't match images requirements"
                                + " for deployment {}: {}",
                        cloudService.getId(), cloudProvider.getId(), deployment.getId(), ex.getMessage());
                addServiceToDiscard(servicesToDiscard, cloudService);
                cloudProvider.getCmdbProviderImages().remove(cloudService.getId());
            }
        }
    }

    discardProvidersAndServices(providersToDiscard, servicesToDiscard, rankCloudProvidersMessage);

    return rankCloudProvidersMessage;
}

From source file:com.jkoolcloud.tnt4j.streams.parsers.AbstractActivityMapParser.java

@Override
protected ActivityContext prepareItem(TNTInputStream<?, ?> stream, Object data) throws ParseException {
    Map<String, ?> dataMap = getDataMap(data);
    if (MapUtils.isEmpty(dataMap)) {
        logger().log(OpLevel.DEBUG,/*w  ww .  j a  va 2 s . c om*/
                StreamsResources.getString(StreamsResources.RESOURCE_BUNDLE_NAME, "ActivityParser.no.fields"));
        return null;
    }

    ActivityContext cData = new ActivityContext(stream, data, dataMap);
    cData.setMessage(getRawDataAsMessage(dataMap));

    return cData;
}

From source file:com.jkoolcloud.tnt4j.streams.custom.kafka.interceptors.reporters.trace.MsgTraceReporter.java

/**
 * Constructs a new MsgTraceReporter.//from www .  j av  a2s  .  c o  m
 */
public MsgTraceReporter(final Properties kafkaProperties) {
    stream = new KafkaMsgTraceStream();
    StreamsAgent.runFromAPI(stream);
    LOGGER.log(OpLevel.DEBUG, StreamsResources.getBundle(KafkaStreamConstants.RESOURCE_BUNDLE_NAME),
            "MsgTraceReporter.stream.started", stream.getName());
    TimerTask mrt = new TimerTask() {
        @Override
        public void run() {
            Map<String, Map<String, ?>> consumersCfg = InterceptionsManager.getInstance()
                    .getInterceptorsConfig(TNTKafkaCInterceptor.class);
            Map<String, ?> cConfig = MapUtils.isEmpty(consumersCfg) ? null
                    : consumersCfg.entrySet().iterator().next().getValue();
            pollConfigQueue(cConfig, kafkaProperties, traceConfig);
        }
    };
    traceConfig.put(TraceCommandDeserializer.MASTER_CONFIG, new TraceCommandDeserializer.TopicTraceCommand());
    long period = TimeUnit.SECONDS.toMillis(POOL_TIME_SECONDS);
    LOGGER.log(OpLevel.DEBUG, StreamsResources.getBundle(KafkaStreamConstants.RESOURCE_BUNDLE_NAME),
            "MsgTraceReporter.schedule.commands.polling", TNT_TRACE_CONFIG_TOPIC, period, period);
    pollTimer.scheduleAtFixedRate(mrt, period, period);
}

From source file:it.reply.orchestrator.service.commands.UpdateDeployment.java

protected void generateOneDataParameters(RankCloudProvidersMessage rankCloudProvidersMessage,
        DeploymentMessage deploymentMessage) {
    // Just copy requirements to parameters (in the future the Orchestrator will need to edit I/O
    // providers, but not for now)
    // deploymentMessage.getOneDataParameters().putAll(deploymentMessage.getOneDataRequirements());

    // No Requirements -> Service space
    if (MapUtils.isEmpty(deploymentMessage.getOneDataRequirements())) {
        deploymentMessage//w w w  . ja va  2 s  .  c  o m
                .setOneDataParameters(ImmutableMap.of("service", generateStubOneData(deploymentMessage)));
        LOG.warn("GENERATING STUB ONE DATA FOR SERVICE"
                + " (remove once OneData parameters generation is completed!)");
    } else {
        LOG.debug("User specified I/O OneData requirements; service space will not be generated.");
        Map<String, OneData> oneDataRequirements = rankCloudProvidersMessage.getOneDataRequirements();
        {
            OneData oneDataInput = oneDataRequirements.get("input");
            if (oneDataInput != null) {
                if (oneDataInput.isSmartScheduling()) {
                    oneDataInput.setProviders(oneDataInput.getProviders().stream()
                            .filter(info -> Objects.equals(info.cloudProviderId,
                                    deploymentMessage.getChosenCloudProvider().getId()))
                            .collect(Collectors.toList()));
                }
                deploymentMessage.getOneDataParameters().put("input", oneDataInput);

            }
        }
        {
            OneData oneDataOutput = oneDataRequirements.get("output");
            if (oneDataOutput != null) {
                if (oneDataOutput.isSmartScheduling()) {
                    oneDataOutput.setProviders(oneDataOutput.getProviders().stream()
                            .filter(info -> Objects.equals(info.cloudProviderId,
                                    deploymentMessage.getChosenCloudProvider().getId()))
                            .collect(Collectors.toList()));
                }
                deploymentMessage.getOneDataParameters().put("output", oneDataOutput);
            }
        }
    }
}

From source file:fr.landel.utils.commons.CastUtilsTest.java

/**
 * Check cast hash map/*from  w ww. j  a  va 2s . c  o m*/
 */
@Test
public void testGetMap() {
    Map<Object, Object> map = new HashMap<>();
    map.put("key2", "value");
    map.put("key1", null);
    map.put(null, "value2");

    assertTrue(MapUtils.isEmpty(CastUtils.getHashMap(null, String.class, String.class)));
    assertTrue(MapUtils.isEmpty(CastUtils.getLinkedHashMap(null, String.class, String.class)));
    assertTrue(MapUtils.isEmpty(CastUtils.getHashtable(null, String.class, String.class)));
    assertTrue(MapUtils.isEmpty(CastUtils.getTreeMap(null, String.class, String.class)));
    assertTrue(MapUtils.isEmpty(CastUtils.getTreeMap(null, String.class, String.class, COMPARATOR)));

    Map<String, String> result = CastUtils.getHashMap(map, String.class, String.class);
    assertEquals("value", result.get("key2"));
    assertEquals("value2", result.get(null));
    assertNull(result.get("key1"));

    result = CastUtils.getHashtable(map, String.class, String.class);
    assertEquals("value", result.get("key2"));
    assertNull(result.get("key1"));

    result = CastUtils.getTreeMap(map, String.class, String.class, COMPARATOR);
    Iterator<Entry<String, String>> entries = result.entrySet().iterator();
    assertEquals("value", entries.next().getValue());
    assertNull(entries.next().getValue());
    assertEquals("value2", entries.next().getValue());

    // No comparator: TreeMap is not null safe
    result = CastUtils.getTreeMap(map, String.class, String.class);
    assertEquals("value", result.get("key2"));
    assertNull(result.get("key1"));
    // assertEquals("value2", result.get(null));
}

From source file:fr.landel.utils.assertor.utils.AssertorMap.java

/**
 * Prepare the next step to validate if the {@link Map} is {@code null} or
 * empty/*from  w w w .  jav a 2  s .c  o  m*/
 * 
 * <p>
 * precondition: none
 * </p>
 * 
 * @param step
 *            the current step
 * @param message
 *            the message if invalid
 * @param <M>
 *            the {@link Map} type
 * @param <K>
 *            the {@link Map} key elements type
 * @param <V>
 *            the {@link Map} value elements type
 * @return the next step
 */
public static <M extends Map<K, V>, K, V> StepAssertor<M> isEmpty(final StepAssertor<M> step,
        final MessageAssertor message) {

    final BiPredicate<M, Boolean> checker = (map, not) -> MapUtils.isEmpty(map);

    return new StepAssertor<>(step, checker, false, message, MSG.MAP.EMPTY, false);
}

From source file:io.cloudslang.lang.tools.build.tester.SlangTestRunner.java

/**
 *
 * @param projectPath the project path/*from   ww w  .  ja  v  a  2  s.c o  m*/
 * @param testCases the test cases
 * @param compiledFlows the compiled flows
 * @param runTestsResults is updated to reflect skipped, failed passes test cases.
 */
public void runTestsSequential(String projectPath, Map<String, SlangTestCase> testCases,
        Map<String, CompilationArtifact> compiledFlows, IRunTestResults runTestsResults) {

    if (MapUtils.isEmpty(testCases)) {
        return;
    }
    printTestForActualRunSummary(TestCaseRunMode.SEQUENTIAL, testCases);

    for (Map.Entry<String, SlangTestCase> testCaseEntry : testCases.entrySet()) {
        SlangTestCase testCase = testCaseEntry.getValue();

        loggingService.logEvent(Level.INFO, "Running test: " + SlangTestCase.generateTestCaseReference(testCase)
                + " - " + testCase.getDescription());
        try {
            CompilationArtifact compiledTestFlow = getCompiledTestFlow(compiledFlows, testCase);
            runTest(testCase, compiledTestFlow, projectPath);
            runTestsResults.addPassedTest(testCase.getName(), new TestRun(testCase, null));
        } catch (RuntimeException e) {
            runTestsResults.addFailedTest(testCase.getName(), new TestRun(testCase, e.getMessage()));
        }
    }
}