Example usage for org.apache.commons.lang3.tuple.Pair.of

Introduction

On this page you can find example usage for org.apache.commons.lang3.tuple.Pair.of.

Prototype

public static <L, R> Pair<L, R> of(final L left, final R right) 

Document

Obtains an immutable pair of two objects, inferring the generic types.

This factory allows the pair to be created using inference to obtain the generic types.
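
For example, a minimal sketch of the factory method in isolation (the class and variable names here are illustrative only, not taken from the excerpts below):

import org.apache.commons.lang3.tuple.Pair;

public class PairOfExample {
    public static void main(String[] args) {
        // The generic types String and Integer are inferred from the arguments.
        Pair<String, Integer> entry = Pair.of("answer", 42);

        // Pair also implements Map.Entry, so getKey()/getValue() mirror getLeft()/getRight().
        System.out.println(entry.getLeft() + " = " + entry.getRight());
    }
}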

Usage

From source file:eionet.gdem.web.listeners.JobScheduler.java

/**
 * @see javax.servlet.ServletContextListener#contextInitialized(javax.servlet.ServletContextEvent) {@inheritDoc}
 */
@Override
public void contextInitialized(ServletContextEvent sce) {
    intervalJobs = new Pair[] {
            Pair.of(new Integer(Properties.wqCheckInterval), newJob(WQCheckerJob.class)
                    .withIdentity(WQCheckerJob.class.getSimpleName(), WQCheckerJob.class.getName()).build()),
            Pair.of(new Integer(Properties.wqCleanInterval), newJob(WQCleanerJob.class)
                    .withIdentity(WQCleanerJob.class.getSimpleName(), WQCleanerJob.class.getName()).build()),
            Pair.of(new Integer(Properties.ddTablesUpdateInterval),
                    newJob(DDTablesCacheUpdater.class).withIdentity(DDTablesCacheUpdater.class.getSimpleName(),
                            DDTablesCacheUpdater.class.getName()).build()) };
    // schedule interval jobs
    for (Pair<Integer, JobDetail> job : intervalJobs) {

        try {
            scheduleIntervalJob(job.getLeft(), job.getRight());
            LOGGER.debug(job.getRight().getKey().getName() + " scheduled, interval=" + job.getLeft());
        } catch (Exception e) {
            LOGGER.error(Markers.fatal, "Error when scheduling " + job.getRight().getKey().getName(), e);
        }
    }
    WorkqueueManager.resetActiveJobs();
}

From source file:io.knotx.adapter.common.http.HttpClientFacade.java

private Pair<ClientRequest, ServiceMetadata> prepareRequestData(AdapterRequest adapterRequest) {
    final Pair<ClientRequest, ServiceMetadata> serviceData;

    final ClientRequest serviceRequest = buildServiceRequest(adapterRequest.getRequest(),
            adapterRequest.getParams());
    final Optional<ServiceMetadata> serviceMetadata = findServiceMetadata(serviceRequest.getPath());

    if (serviceMetadata.isPresent()) {
        serviceData = Pair.of(serviceRequest, serviceMetadata.get());
    } else {
        final String error = String.format("No matching service definition for the requested path '%s'",
                serviceRequest.getPath());
        throw new UnsupportedServiceException(error);
    }
    return serviceData;
}

From source file:caveworld.core.CaverManager.java

@Override
public Map<Integer, Pair<String, Integer>> getMinerRanks() {
    Map<Integer, Pair<String, Integer>> result = Maps.newHashMap();

    for (MinerRank rank : MinerRank.values()) {
        result.put(rank.getRank(), Pair.of(rank.getName(), rank.getPhase()));
    }

    return result;
}

From source file:com.cisco.oss.foundation.message.HornetQMessagingFactory.java

public static List<Pair<ClientSession, SessionFailureListener>> getSession(
        Class<? extends SessionFailureListener> sessionFailureListener) {

    if (sessionThreadLocal.get() == null) {
        try {
            LOGGER.debug("creating a new session");
            List<Pair<ClientSession, SessionFailureListener>> hornetQSessions = new ArrayList<>();
            for (ServerLocator serverLocator : serverLocators) {
                ClientSession hornetQSession = serverLocator.createSessionFactory().createSession(true, true);
                SessionFailureListener listener = null;
                if (sessionFailureListener != null) {
                    listener = sessionFailureListener.newInstance();
                    hornetQSession.addFailureListener(listener);
                }
                hornetQSession.start();
                sessions.add(hornetQSession);
                hornetQSessions.add(Pair.of(hornetQSession, listener));
            }
            sessionThreadLocal.set(hornetQSessions);

        } catch (Exception e) {
            LOGGER.error("can't create hornetq session: {}", e, e);
            throw new QueueException(e);
        }
    }
    return sessionThreadLocal.get();

}

From source file:at.beris.virtualfile.FileContext.java

public Set<Protocol> enabledProtocols() {
    Map<Protocol, Pair<String, String>> protocolClassMap = new HashMap<>();
    protocolClassMap.put(Protocol.SFTP, Pair.of("JSch", "com.jcraft.jsch.JSch"));
    protocolClassMap.put(Protocol.FTP, Pair.of("Apache Commons Net", "org.apache.commons.net.ftp.FTP"));

    Set<Protocol> enabledProtocols = new HashSet<>();
    enabledProtocols.add(Protocol.FILE);

    for (Map.Entry<Protocol, Pair<String, String>> entry : protocolClassMap.entrySet()) {
        Protocol protocol = entry.getKey();
        Pair<String, String> protocolLibrary = entry.getValue();
        try {
            if (Class.forName(protocolLibrary.getRight()) != null)
                enabledProtocols.add(protocol);
        } catch (ClassNotFoundException e) {
        }
        if (!enabledProtocols.contains(protocol))
            LOGGER.info(protocolLibrary.getLeft() + " not installed. No support for protocol " + protocol);
    }

    return Collections.unmodifiableSet(enabledProtocols);
}

From source file:com.vmware.admiral.request.compute.NetworkProfileQueryUtils.java

private static void getContextComputeNetworks(ServiceHost host, URI referer, String contextId,
        BiConsumer<Set<String>, Throwable> consumer,
        Consumer<HashMap<String, ComputeNetwork>> callbackFunction) {
    HashMap<String, ComputeNetwork> contextNetworks = new HashMap<>();
    if (StringUtil.isNullOrEmpty(contextId)) {
        callbackFunction.accept(contextNetworks);
        return;
    }

    // Get all ComputeNetworks that have the same context id
    List<ComputeNetwork> computeNetworks = new ArrayList<>();
    QueryTask.Query.Builder builder = QueryTask.Query.Builder.create().addKindFieldClause(ComputeNetwork.class);
    builder.addCompositeFieldClause(ComputeState.FIELD_NAME_CUSTOM_PROPERTIES, FIELD_NAME_CONTEXT_ID_KEY,
            contextId);
    QueryUtils.QueryByPages<ComputeNetwork> query = new QueryUtils.QueryByPages<>(host, builder.build(),
            ComputeNetwork.class, null);
    query.queryDocuments(ns -> computeNetworks.add(ns)).whenComplete((v, e) -> {
        if (e != null) {
            consumer.accept(null, e);
            return;
        }
        // Get ComputeNetworkDescription of every network
        List<DeferredResult<Pair<String, ComputeNetwork>>> list = computeNetworks.stream()
                .map(cn -> host.sendWithDeferredResult(
                        Operation.createGet(host, cn.descriptionLink).setReferer(referer),
                        ComputeNetworkDescription.class).thenCompose(cnd -> {
                            DeferredResult<Pair<String, ComputeNetwork>> r = new DeferredResult<>();
                            r.complete(Pair.of(cnd.name, cn));
                            return r;
                        }))
                .collect(Collectors.toList());
        // Create a map of ComputeNetworkDescription.name to ComputeNetworkState
        DeferredResult.allOf(list).whenComplete((all, t) -> {
            all.forEach(p -> contextNetworks.put(p.getKey(), p.getValue()));
            callbackFunction.accept(contextNetworks);
        });
    });
}

From source file:it.polimi.diceH2020.SPACE4CloudWS.solvers.solversImpl.DagSimSolver.DagSimSolver.java

@Override
protected Pair<List<File>, List<File>> createWorkingFiles(SolutionPerJob solPerJob) throws IOException {
    final String experiment = String.format("%s, class %s, provider %s, VM %s, # %d", solPerJob.getParentID(),
            solPerJob.getId(), dataProcessor.getProviderName(), solPerJob.getTypeVMselected().getId(),
            solPerJob.getNumberVM());
    logger.debug(String.format("Generating DagSim model for %s", experiment));

    DagSimFileBuilder builder = new DagSimFileBuilder().setContainers(solPerJob.getNumberContainers())
            .setUsers(solPerJob.getNumberUsers()).setExponentialThinkTime(solPerJob.getJob().getThink())
            .setMaxJobs(((DagSimSettings) connSettings).getEvents())
            .setQuantile(((DagSimSettings) connSettings).getConfidence().getQuantile());

    List<File> replayerFiles = retrieveInputFiles(solPerJob, ".txt");
    Map<String, Set<String>> successors = dataService.getData().getMapDags().get(solPerJob.getId())
            .getSuccessors();
    Map<String, Set<String>> predecessors = flipDirectedEdges(successors);
    Set<String> vertices = obtainVertices(successors);
    JobProfile profile = dataService.getProfile(solPerJob.getId(), solPerJob.getTypeVMselected().getId());

    for (String vertex : vertices) {
        Stage currentStage = new Stage().setName(vertex);
        String tasksLabel = String.format("nTask_%s", vertex);
        long numTasks = Math.round(profile.get(tasksLabel));
        currentStage.setTasks((int) numTasks);

        Set<String> possiblySuccessors = successors.get(vertex);
        if (possiblySuccessors != null) {
            for (String successor : possiblySuccessors) {
                currentStage.addSuccessor(successor);
            }
        }

        Set<String> possiblyPredecessors = predecessors.get(vertex);
        if (possiblyPredecessors != null) {
            for (String predecessor : possiblyPredecessors) {
                currentStage.addPredecessor(predecessor);
            }
        }

        replayerFiles.stream().map(File::getName).filter(name -> name.contains(vertex)).forEach(name -> {
            File remote = new File(retrieveRemoteSubDirectory(solPerJob), name);
            String remoteName = remote.getPath();
            currentStage.setDistribution(new Empirical().setFileName(remoteName));
        });

        builder.addStage(currentStage);
    }

    String content = builder.build();
    File modelFile = fileUtility
            .provideTemporaryFile(String.format("%s-%s-", solPerJob.getParentID(), solPerJob.getId()), ".lua");
    fileUtility.writeContentToFile(content, modelFile);

    List<File> list = new LinkedList<>();
    list.add(modelFile);

    return Pair.of(list, replayerFiles);
}

From source file:com.qualys.jserf.ChannelManger.java

private void connect() throws InterruptedException, IOException {
    connected.set(false);
    if (connectionStateChangeCallback != null) {
        connectionStateChangeCallback.handleConnectionStateChange(false);
    }
    connecting.set(true);
    this.currentChannel = null;

    try {
        final Channel channel = bootstrap.connect(serfHost, serfPort).sync().channel();
        connecting.set(false);
        SerfRequest handshake = SerfRequests.handshake(new SerfResponseCallBack<EmptyResponseBody>() {
            @Override
            public void call(SerfResponse response) {
                log.debug("Received handshake response with sequence={} and error={}", response.getSequence(),
                        response.getHeader().getError());
                if (!response.isErrored()) {
                    currentChannel = channel;
                    connected.set(true);
                    if (connectionStateChangeCallback != null) {
                        connectionStateChangeCallback.handleConnectionStateChange(true);
                    }
                }
            }
        });
        callBacksBySequence.put(handshake.getHeader().seq,
                Pair.of(handshake.getHeader().toCommand(), handshake.getCallBack()));
        try {
            log.debug("Sending handshake to {}:{} with sequence={}", serfHost, serfPort,
                    handshake.getHeader().seq);
            channel.write(messagePack.write(handshake.getHeader()));
            channel.writeAndFlush(messagePack.write(handshake.getBody()));
            log.debug("Sent handshake to {}:{} with sequence={}", serfHost, serfPort,
                    handshake.getHeader().seq);
        } catch (IOException e) {
            log.warn("Caught IOException while trying to write request. Removing handshake callback", e);
            callBacksBySequence.invalidate(handshake.getHeader().seq);
            throw e;
        }
    } catch (IOException e) {
        log.warn("Caught IOException while trying to connect to {}:{}", serfHost, serfPort);
        throw e;
    }
}

From source file:com.hurence.logisland.connect.opc.CommonOpcSourceTask.java

@Override
public void start(Map<String, String> props) {
    setConfigurationProperties(props);

    transferQueue = new LinkedTransferQueue<>();
    opcOperations = new SmartOpcOperations<>(createOpcOperations());
    ConnectionProfile connectionProfile = createConnectionProfile();
    host = connectionProfile.getConnectionUri().getHost();
    tagInfoMap = CommonUtils.parseTagsFromProperties(props).stream()
            .collect(Collectors.toMap(TagInfo::getTagId, Function.identity()));
    minWaitTime = Math.min(10, tagInfoMap.values().stream().map(TagInfo::getSamplingInterval)
            .mapToLong(Duration::toMillis).min().getAsLong());
    opcOperations.connect(connectionProfile);
    if (!opcOperations.awaitConnected()) {
        throw new ConnectException("Unable to connect");
    }

    //set up polling source emission
    pollingScheduler = Executors.newSingleThreadScheduledExecutor();
    streamingThread = Executors.newSingleThreadExecutor();
    Map<Duration, List<TagInfo>> pollingMap = tagInfoMap.values().stream()
            .filter(tagInfo -> StreamingMode.POLL.equals(tagInfo.getStreamingMode()))
            .collect(Collectors.groupingBy(TagInfo::getSamplingInterval));
    final Map<String, OpcData> lastValues = Collections.synchronizedMap(new HashMap<>());
    pollingMap.forEach((k, v) -> pollingScheduler.scheduleAtFixedRate(() -> {
        final Instant now = Instant.now();
        v.stream().map(TagInfo::getTagId).map(lastValues::get).filter(Functions.not(Objects::isNull))
                .map(data -> Pair.of(now, data)).forEach(transferQueue::add);

    }, 0, k.toNanos(), TimeUnit.NANOSECONDS));
    //then subscribe for all
    final SubscriptionConfiguration subscriptionConfiguration = new SubscriptionConfiguration()
            .withDefaultSamplingInterval(Duration.ofMillis(10_000));
    tagInfoMap.values().forEach(tagInfo -> subscriptionConfiguration
            .withTagSamplingIntervalForTag(tagInfo.getTagId(), tagInfo.getSamplingInterval()));
    running.set(true);
    streamingThread.submit(() -> {
        while (running.get()) {
            try {
                createSessionIfNeeded();
                if (session == null) {
                    return;
                }

                session.stream(subscriptionConfiguration,
                        tagInfoMap.keySet().toArray(new String[tagInfoMap.size()])).forEach(opcData -> {
                            if (tagInfoMap.get(opcData.getTag()).getStreamingMode()
                                    .equals(StreamingMode.SUBSCRIBE)) {
                                transferQueue.add(Pair.of(
                                        hasServerSideSampling() ? opcData.getTimestamp() : Instant.now(),
                                        opcData));
                            } else {
                                lastValues.put(opcData.getTag(), opcData);
                            }
                        });
            } catch (Exception e) {
                if (running.get()) {
                    logger.warn("Stream interrupted while reading from " + host, e);
                    safeCloseSession();
                    lastValues.clear();

                }
            }
        }
    });

}

From source file:com.spotify.heroic.cluster.ClusterManagerModule.java

@Provides
@ClusterScope
public List<Pair<String, RpcProtocolComponent>> protocolComponents(final NodeMetadataProvider metadataProvider,
        @Named("local") final ClusterNode localClusterNode, final PrimaryComponent primary,
        final MetricComponent metric, final MetadataComponent metadata, final SuggestComponent suggest) {
    final ImmutableList.Builder<Pair<String, RpcProtocolComponent>> protocolComponents = ImmutableList
            .builder();

    /* build up a local component which defines all dependencies for a child component */
    final RpcProtocolModule.Dependencies dependencies = DaggerRpcProtocolModule_Dependencies.builder()
            .primaryComponent(primary).metricComponent(metric).metadataComponent(metadata)
            .suggestComponent(suggest).provided(new RpcProtocolModule.Provided() {
                @Override
                public NodeMetadataProvider metadataProvider() {
                    return metadataProvider;
                }

                @Override
                public ClusterNode localClusterNode() {
                    return localClusterNode;
                }
            }).build();

    for (final RpcProtocolModule m : protocols) {
        protocolComponents.add(Pair.of(m.scheme(), m.module(dependencies)));
    }

    return protocolComponents.build();
}