Example usage for java.util.concurrent.atomic.AtomicReference: the AtomicReference() constructor

Introduction

This page shows example usages of the no-argument AtomicReference() constructor from java.util.concurrent.atomic.

Prototype

public AtomicReference() 

Document

Creates a new AtomicReference with a null initial value.
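
A minimal sketch of the constructor on its own (not taken from any of the examples below): the reference starts out null, and a value can later be set or compared-and-set from any thread.

import java.util.concurrent.atomic.AtomicReference;

public class AtomicReferenceDemo {
    public static void main(String[] args) {
        // the no-argument constructor leaves the initial value null
        AtomicReference<String> ref = new AtomicReference<>();
        System.out.println(ref.get()); // prints: null

        // compareAndSet succeeds only if the current value equals the expected one
        boolean updated = ref.compareAndSet(null, "first");
        System.out.println(updated + " -> " + ref.get()); // prints: true -> first
    }
}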

Usage

From source file:de.codesourcery.eve.skills.market.impl.EveCentralMarketDataProvider.java

private static Map<InventoryType, PriceInfoQueryResult> runOnEventThread(final PriceCallable r)
        throws PriceInfoUnavailableException {
    if (SwingUtilities.isEventDispatchThread()) {
        return r.call();
    }

    final AtomicReference<Map<InventoryType, PriceInfoQueryResult>> result = new AtomicReference<Map<InventoryType, PriceInfoQueryResult>>();
    try {
        SwingUtilities.invokeAndWait(new Runnable() {

            @Override
            public void run() {
                try {
                    result.set(r.call());
                } catch (PriceInfoUnavailableException e) {
                    throw new RuntimeException(e);
                }
            }
        });
    } catch (InterruptedException e) {
        // restore the interrupt flag; the caller will then see a null result map
        Thread.currentThread().interrupt();
    } catch (InvocationTargetException e) {
        Throwable wrapped = e.getTargetException();
        if (wrapped instanceof RuntimeException) {
            if (wrapped.getCause() instanceof PriceInfoUnavailableException) {
                throw (PriceInfoUnavailableException) wrapped.getCause();
            }
            throw (RuntimeException) wrapped;
        } else if (wrapped instanceof Error) {
            throw (Error) wrapped;
        }
        throw new RuntimeException(wrapped);
    }
    return result.get();
}
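
The pattern in miniature, with the application-specific types stripped away: Runnable.run() cannot return a value, so an AtomicReference created with the no-argument constructor carries the result out of the block executed on the event dispatch thread. A minimal sketch:

import java.lang.reflect.InvocationTargetException;
import java.util.concurrent.atomic.AtomicReference;
import javax.swing.SwingUtilities;

public class EdtResultSketch {
    static String computeOnEdt() throws InterruptedException, InvocationTargetException {
        final AtomicReference<String> result = new AtomicReference<>();
        // the lambda runs on the EDT; the reference carries the value back
        SwingUtilities.invokeAndWait(() -> result.set("computed on " + Thread.currentThread().getName()));
        return result.get();
    }

    public static void main(String[] args) throws Exception {
        System.out.println(computeOnEdt()); // e.g. "computed on AWT-EventQueue-0"
    }
}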

From source file:com.networknt.client.oauth.OauthHelper.java

/**
 * Get the certificate from the key distribution service of the OAuth 2.0 provider with the kid.
 *
 * @param keyRequest one of the KeyRequest subclasses used to get the key for the access token or sign token.
 * @param envTag the environment tag from the server.yml for the cluster lookup.
 * @return String of the certificate
 * @throws ClientException if communication with the service fails.
 */
public static String getKey(KeyRequest keyRequest, String envTag) throws ClientException {
    final Http2Client client = Http2Client.getInstance();
    final CountDownLatch latch = new CountDownLatch(1);
    final ClientConnection connection;
    try {
        if (keyRequest.getServerUrl() != null) {
            connection = client.connect(new URI(keyRequest.getServerUrl()), Http2Client.WORKER, Http2Client.SSL,
                    Http2Client.BUFFER_POOL,
                    keyRequest.enableHttp2 ? OptionMap.create(UndertowOptions.ENABLE_HTTP2, true)
                            : OptionMap.EMPTY)
                    .get();
        } else if (keyRequest.getServiceId() != null) {
            Cluster cluster = SingletonServiceFactory.getBean(Cluster.class);
            String url = cluster.serviceToUrl("https", keyRequest.getServiceId(), envTag, null);
            connection = client
                    .connect(new URI(url), Http2Client.WORKER, Http2Client.SSL, Http2Client.BUFFER_POOL,
                            keyRequest.enableHttp2 ? OptionMap.create(UndertowOptions.ENABLE_HTTP2, true)
                                    : OptionMap.EMPTY)
                    .get();
        } else {
            // both server_url and serviceId are empty in the config.
            logger.error("Error: both server_url and serviceId are not configured in client.yml for "
                    + keyRequest.getClass());
            throw new ClientException("both server_url and serviceId are not configured in client.yml for "
                    + keyRequest.getClass());
        }
    } catch (Exception e) {
        throw new ClientException(e);
    }
    final AtomicReference<ClientResponse> reference = new AtomicReference<>();
    try {
        ClientRequest request = new ClientRequest().setPath(keyRequest.getUri()).setMethod(Methods.GET);

        if (keyRequest.getClientId() != null) {
            request.getRequestHeaders().put(Headers.AUTHORIZATION,
                    getBasicAuthHeader(keyRequest.getClientId(), keyRequest.getClientSecret()));
        }
        request.getRequestHeaders().put(Headers.HOST, "localhost");
        adjustNoChunkedEncoding(request, "");
        connection.sendRequest(request, client.createClientCallback(reference, latch));
        latch.await();
    } catch (Exception e) {
        logger.error("Exception: ", e);
        throw new ClientException(e);
    } finally {
        IoUtils.safeClose(connection);
    }
    return reference.get().getAttachment(Http2Client.RESPONSE_BODY);
}
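
The underlying idiom, reduced to plain java.util.concurrent types with the Undertow client left out: the callback thread stores the response in the AtomicReference, and the CountDownLatch tells the calling thread when it is safe to read. A minimal sketch:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;

public class LatchAndReferenceSketch {
    public static void main(String[] args) throws InterruptedException {
        final AtomicReference<String> reference = new AtomicReference<>();
        final CountDownLatch latch = new CountDownLatch(1);

        // stands in for the client callback that receives the response
        new Thread(() -> {
            reference.set("response body");
            latch.countDown();
        }).start();

        latch.await();                       // block until the callback has run
        System.out.println(reference.get()); // safe to read: set() happens-before await() returns
    }
}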

From source file:com.datamelt.nifi.processors.ExecuteRuleEngine.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    // map used to store the attribute name and its value from the content of the flow file
    final Map<String, String> propertyMap = new HashMap<>();

    // get a logger instance
    final ComponentLog logger = getLogger();

    // a header from the content if present
    final AtomicReference<HeaderRow> header = new AtomicReference<>();

    AtomicBoolean error = new AtomicBoolean();

    // get the flow file
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    // list of rows from splitting the original flow file content
    ArrayList<RuleEngineRow> flowFileRows = new ArrayList<RuleEngineRow>();

    // list of rows containing the detailed results of the ruleengine
    ArrayList<RuleEngineRow> flowFileDetails = new ArrayList<RuleEngineRow>();

    boolean headerPresent = context.getProperty(ATTRIBUTE_HEADER_PRESENT).getValue().equals("true");

    // put the name of the ruleengine zip file in the list of properties
    propertyMap.put(PROPERTY_RULEENGINE_ZIPFILE_NAME,
            context.getProperty(ATTRIBUTE_RULEENGINE_ZIPFILE).getValue());

    final int batchSize = Integer.parseInt(context.getProperty(BATCH_SIZE_NAME).getValue());

    // read flow file into input stream
    session.read(flowFile, new InputStreamCallback() {
        public void process(InputStream in) throws IOException {
            try {
                // iterator over the lines from the input stream
                LineIterator iterator = IOUtils.lineIterator(in, "utf-8");

                // check if configuration indicates that a header row is present in the flow file content
                if (headerPresent) {
                    logger.debug("configuration indicates a header row is present in flow file content");

                    // if there is at least one row of data and the header is not defined yet
                    if (iterator.hasNext() && header.get() == null) {
                        // set the header from the content
                        header.set(new HeaderRow(iterator.nextLine(), separator));
                    }
                }
                // if no header row is present in the flow file content
                else {
                    logger.debug("configuration indicates no header row is present in flow file content");

                    // use the header from the field names
                    header.set(headerFromFieldNames);
                }

                // loop over all rows of data
                while (iterator.hasNext()) {
                    // we handle the error per row of data
                    error.set(false);

                    // get a row to process
                    String row = iterator.nextLine();

                    // check that we have data
                    if (row != null && !row.trim().equals("")) {
                        RowFieldCollection rowFieldCollection = null;
                        try {
                            rowFieldCollection = getRowFieldCollection(row, header.get());

                            logger.debug("RowFieldCollection header contains: "
                                    + rowFieldCollection.getHeader().getNumberOfFields() + " fields");
                            logger.debug("RowFieldCollection contains: "
                                    + rowFieldCollection.getNumberOfFields() + " fields");

                            // run the ruleengine with the given data from the flow file
                            logger.debug("running business ruleengine...");

                            // run the business logic/rules against the data
                            ruleEngine.run("flowfile", rowFieldCollection);

                            // add some debugging output that might be useful
                            logger.debug("number of rulegroups: " + ruleEngine.getNumberOfGroups());
                            logger.debug(
                                    "number of rulegroups passed: " + ruleEngine.getNumberOfGroupsPassed());
                            logger.debug(
                                    "number of rulegroups failed: " + ruleEngine.getNumberOfGroupsFailed());
                            logger.debug(
                                    "number of rulegroups skipped: " + ruleEngine.getNumberOfGroupsSkipped());
                            logger.debug("number of rules: " + ruleEngine.getNumberOfRules());
                            logger.debug("number of rules passed: " + ruleEngine.getNumberOfRulesPassed());
                            logger.debug("number of rules failed: " + ruleEngine.getNumberOfRulesFailed());
                            logger.debug("number of actions: " + ruleEngine.getNumberOfActions());

                            // add some properties of the ruleengine execution to the map
                            addRuleEngineProperties(propertyMap);
                        } catch (Exception ex) {
                            error.set(true);
                            logger.error(ex.getMessage(), ex);
                        }

                        // if no error occurred we save the data for the later creation of the flow files
                        if (!error.get()) {
                            // process only if the collection of fields was changed by
                            // a ruleengine action. this means the data was updated so
                            // we will have to re-write/re-create the flow file content.
                            if (rowFieldCollection.isCollectionUpdated()) {
                                // put an indicator that the data was modified by the ruleengine
                                propertyMap.put(PROPERTY_RULEENGINE_CONTENT_MODIFIED, "true");

                                logger.debug(
                                        "data was modified - updating flow file content with ruleengine results");

                                // the RuleEngineRow instance will contain the row of data and the map of properties
                                // and will later be used when the flow files are created
                                flowFileRows
                                        .add(new RuleEngineRow(getResultRow(rowFieldCollection), propertyMap));
                            } else {
                                // put an indicator that the data was NOT modified by the ruleengine
                                propertyMap.put(PROPERTY_RULEENGINE_CONTENT_MODIFIED, "false");

                                logger.debug("data was not modified - using original content");

                                // the RuleEngineRow instance will contain the row of data and the map of properties
                                // and will later be used when the flow files are created
                                flowFileRows.add(new RuleEngineRow(row, propertyMap));
                            }

                            if (flowFileRows.size() >= batchSize) {
                                // generate flow files from the individual rows
                                List<FlowFile> splitFlowFiles = generateFlowFileSplits(context, session,
                                        flowFileRows, header.get(), headerPresent);
                                // transfer all individual rows to success relationship
                                if (splitFlowFiles.size() > 0) {
                                    session.transfer(splitFlowFiles, SUCCESS);
                                }
                            }

                            // if the user configured detailed results 
                            if (context.getProperty(ATTRIBUTE_OUTPUT_DETAILED_RESULTS).getValue()
                                    .equals("true")) {
                                // get the configured output type
                                String outputType = context.getProperty(ATTRIBUTE_OUTPUT_DETAILED_RESULTS_TYPE)
                                        .getValue();
                                logger.debug("configuration set to output detailed results with type ["
                                        + outputType + "]");

                                // we need to create a flow file only if the ruleengine results match the output type settings
                                if (outputType.equals(OUTPUT_TYPE_ALL_GROUPS_ALL_RULES)
                                        || (outputType.equals(OUTPUT_TYPE_FAILED_GROUPS_ALL_RULES)
                                                && ruleEngine.getNumberOfGroupsFailed() > 0)
                                        || (outputType.equals(OUTPUT_TYPE_FAILED_GROUPS_FAILED_RULES)
                                                && ruleEngine.getNumberOfGroupsFailed() > 0)
                                        || (outputType.equals(OUTPUT_TYPE_FAILED_GROUPS_PASSED_RULES)
                                                && ruleEngine.getNumberOfGroupsFailed() > 0)
                                        || (outputType.equals(OUTPUT_TYPE_PASSED_GROUPS_ALL_RULES)
                                                && ruleEngine.getNumberOfGroupsPassed() > 0)
                                        || (outputType.equals(OUTPUT_TYPE_PASSED_GROUPS_FAILED_RULES)
                                                && ruleEngine.getNumberOfGroupsPassed() > 0
                                                || (outputType.equals(OUTPUT_TYPE_PASSED_GROUPS_PASSED_RULES)
                                                        && ruleEngine.getNumberOfGroupsPassed() > 0))) {
                                    // create the content for the flow file
                                    String content = getFlowFileRuleEngineDetailsContent(header.get(),
                                            headerPresent, outputType, row);

                                    // add results to the list
                                    flowFileDetails.add(new RuleEngineRow(content, propertyMap));

                                    if (flowFileDetails.size() >= batchSize) {
                                        List<FlowFile> detailsFlowFiles = generateFlowFilesRuleEngineDetails(
                                                context, session, flowFileDetails, header.get(), headerPresent);
                                        // transfer all individual rows to detailed relationship
                                        if (detailsFlowFiles.size() > 0) {
                                            session.transfer(detailsFlowFiles, DETAILED_RESULTS);
                                        }
                                    }
                                }
                            }
                            // clear the collections of ruleengine results
                            ruleEngine.getRuleExecutionCollection().clear();
                        }
                        // if we have an error we create a flow file from the current row of data and send it to the failure relationship
                        else {
                            FlowFile failureFlowFile = generateFailureFlowFile(context, session, row,
                                    header.get(), headerPresent);
                            session.transfer(failureFlowFile, FAILURE);
                        }
                    }
                }

                LineIterator.closeQuietly(iterator);
            } catch (Exception ex) {
                logger.error("error running the business ruleengine", ex);
            }
        }
    });

    // generate flow files from the individual rows
    List<FlowFile> splitFlowFiles = generateFlowFileSplits(context, session, flowFileRows, header.get(),
            headerPresent);

    // generate flow files carrying the detailed ruleengine results
    List<FlowFile> detailsFlowFiles = generateFlowFilesRuleEngineDetails(context, session, flowFileDetails,
            header.get(), headerPresent);

    // transfer the original flow file
    session.transfer(flowFile, ORIGINAL);

    // transfer all individual rows to success relationship
    if (splitFlowFiles.size() > 0) {
        session.transfer(splitFlowFiles, SUCCESS);
    }

    // transfer the detailed results to the detailed results relationship
    if (detailsFlowFiles.size() > 0) {
        session.transfer(detailsFlowFiles, DETAILED_RESULTS);
    }
}
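
Why an AtomicReference rather than a plain local: the InputStreamCallback is an anonymous inner class, so it can only capture (effectively) final variables from onTrigger, and a mutable holder is the standard way to pass the parsed header back out. A stripped-down sketch of that capture pattern, with hypothetical names:

import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;

public class CallbackCaptureSketch {
    // hypothetical stand-in for session.read(flowFile, callback)
    static void read(Consumer<String> callback) {
        callback.accept("name;age;city"); // pretend this row came from the stream
    }

    public static void main(String[] args) {
        // a plain String local could not be reassigned inside the callback;
        // the reference itself stays final while its contents change
        final AtomicReference<String> header = new AtomicReference<>();
        read(header::set);
        System.out.println("header row: " + header.get());
    }
}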

From source file:com.wk.lodge.composite.web.tomcat.IntegrationCompositeTests.java

@Test
public void testStop() throws Exception {
    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicReference<Throwable> failure = new AtomicReference<>();

    URI uri = new URI("ws://localhost:" + port + "/composite");
    WebSocketStompClient stompClient = new WebSocketStompClient(uri, this.headers, sockJsClient);
    stompClient.setMessageConverter(new MappingJackson2MessageConverter());

    stompClient.connect(new StompMessageHandler() {

        private StompSession stompSession;

        @Override
        public void afterConnected(StompSession stompSession, StompHeaderAccessor headers) {
            this.stompSession = stompSession;
            String topicUuid = simulateJoinEvent();

            this.stompSession.subscribe("/user/queue/device", null);
            this.stompSession.subscribe(String.format("/topic/%s", topicUuid), null);

            try {
                HashMap<String, Object> stop = new HashMap<String, Object>();
                stop.put("type", "stop");
                this.stompSession.send(String.format("/app/%s", topicUuid), stop);

            } catch (Throwable t) {
                failure.set(t);
                latch.countDown();
            }
        }

        @Override
        public void handleMessage(Message<byte[]> message) {
            try {
                String json = parseMessageJson(message);
                new JsonPathExpectationsHelper("type").exists(json);
                new JsonPathExpectationsHelper("type").assertValue(json, "stop");
                new JsonPathExpectationsHelper("serverTime").exists(json);
            } catch (Throwable t) {
                failure.set(t);
            } finally {
                this.stompSession.disconnect();
                latch.countDown();
            }
        }

        @Override
        public void handleError(Message<byte[]> message) {
            StompHeaderAccessor accessor = StompHeaderAccessor.wrap(message);
            String error = "[Producer] " + accessor.getShortLogMessage(message.getPayload());
            logger.error(error);
            failure.set(new Exception(error));
        }

        @Override
        public void handleReceipt(String receiptId) {
        }

        @Override
        public void afterDisconnected() {
        }

    });

    if (!latch.await(10, TimeUnit.SECONDS)) {
        fail("Stop response not received");
    } else if (failure.get() != null) {
        throw new AssertionError("", failure.get());
    }

}
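
The testing idiom on its own: a Throwable thrown inside the handler would die on the messaging thread, so it is parked in an AtomicReference<Throwable> and rethrown by the test thread once the latch releases. A minimal sketch:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

public class AsyncAssertionSketch {
    public static void main(String[] args) throws Exception {
        final CountDownLatch latch = new CountDownLatch(1);
        final AtomicReference<Throwable> failure = new AtomicReference<>();

        // stands in for the message handler running on its own thread
        new Thread(() -> {
            try {
                String type = "stop"; // pretend this was parsed from the message
                if (!"stop".equals(type)) {
                    throw new AssertionError("unexpected message type: " + type);
                }
            } catch (Throwable t) {
                failure.set(t); // park the failure for the main thread
            } finally {
                latch.countDown();
            }
        }).start();

        if (!latch.await(10, TimeUnit.SECONDS)) {
            throw new AssertionError("response not received");
        } else if (failure.get() != null) {
            throw new AssertionError("handler failed", failure.get());
        }
    }
}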

From source file:com.github.jackygurui.vertxredissonrepository.repository.Impl.RedisRepositoryImpl.java

private void createWithoutValidate(JsonObject data, RBatch redissonBatch,
        AsyncResultHandler<String> resultHandler) {
    AtomicReference<String> id = new AtomicReference<>();
    Async.waterfall().<String>task(this::newId).<String>task((i, t) -> {
        id.set(i);
        ensureUniqueAndIndexing(id.get(), data, true, rs -> {
            if (rs.succeeded() && rs.result() == null) {
                t.handle(Future.succeededFuture());
            } else if (rs.result() != null) {
                t.handle(Future.failedFuture(new RepositoryException(rs.result())));
            } else {
                t.handle(Future.failedFuture(rs.cause()));
            }
        });
    }).<Boolean>task((rs, t) -> {
        persist(id.get(), data.put("id", id.get()), redissonBatch, t);
    }).run(run -> {
        resultHandler
                .handle(run.succeeded() ? Future.succeededFuture(id.get()) : Future.failedFuture(run.cause()));
    });
}
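
The same trick expressed with a standard CompletableFuture pipeline instead of the Async.waterfall helper: each stage's lambda can only capture finals, so the AtomicReference threads the generated id through to later stages. A minimal sketch:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicReference;

public class WaterfallSketch {
    public static void main(String[] args) {
        final AtomicReference<String> id = new AtomicReference<>();

        CompletableFuture.supplyAsync(() -> "id-42")             // stands in for newId
                .thenAccept(id::set)                             // stash the id for later stages
                .thenRun(() -> System.out.println("persisting record " + id.get()))
                .join();
    }
}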

From source file:com.microsoft.tfs.core.clients.versioncontrol.internal.WebServiceLayerLocalWorkspaces.java

/**
 * Given a workspace, returns all pending changes for that workspace by
 * calling QueryPendingSets on the server.
 */
@Override
public PendingChange[] queryServerPendingChanges(final Workspace workspace, final ItemSpec[] itemSpecs,
        final boolean generateDownloadUrls, final String[] itemPropertyFilters) {
    Failure[] failures;
    PendingSet[] pendingSets;

    if (workspace.getLocation().equals(WorkspaceLocation.LOCAL)) {
        final _Repository4Soap_QueryPendingSetsWithLocalWorkspacesResponse response;

        try {
            response = getRepository4().queryPendingSetsWithLocalWorkspaces(null, null, workspace.getName(),
                    workspace.getOwnerName(), (_ItemSpec[]) WrapperUtils.unwrap(_ItemSpec.class, itemSpecs),
                    generateDownloadUrls, null /*
                                                * TODO pass itemPropertyFilters, but servers <=
                                                * 2011-10-19 throw null ref exception if you do
                                                */);
        } catch (final ProxyException e) {
            throw VersionControlExceptionMapper.map(e);
        }

        pendingSets = (PendingSet[]) WrapperUtils.wrap(PendingSet.class,
                response.getQueryPendingSetsWithLocalWorkspacesResult());

        failures = (Failure[]) WrapperUtils.wrap(Failure.class, response.getFailures());
    } else {
        final AtomicReference<Failure[]> failuresHolder = new AtomicReference<Failure[]>();

        pendingSets = super.queryPendingSets(null, null, workspace.getName(), workspace.getOwnerName(),
                itemSpecs, generateDownloadUrls, failuresHolder, false, null);

        failures = failuresHolder.get();
    }

    getVersionControlClient().reportFailures(workspace, failures);

    if (pendingSets.length == 0) {
        return new PendingChange[0];
    } else {
        return pendingSets[0].getPendingChanges();
    }
}
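
The failuresHolder shows AtomicReference serving as an out parameter: the callee returns its primary result and reports failures through the holder it was handed. A minimal sketch under hypothetical names:

import java.util.concurrent.atomic.AtomicReference;

public class OutParameterSketch {
    // hypothetical stand-in for queryPendingSets: one result is returned,
    // a second one is reported through the holder
    static String[] query(AtomicReference<String[]> failuresHolder) {
        failuresHolder.set(new String[] { "item locked by another user" });
        return new String[] { "pending change A" };
    }

    public static void main(String[] args) {
        final AtomicReference<String[]> failuresHolder = new AtomicReference<>();
        String[] pending = query(failuresHolder);
        String[] failures = failuresHolder.get();
        System.out.println(pending.length + " pending, " + failures.length + " failures");
    }
}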

From source file:de.schildbach.pte.AbstractEfaProvider.java

protected NearbyLocationsResult xmlCoordRequest(final EnumSet<LocationType> types, final int lat, final int lon,
        final int maxDistance, final int maxStations) throws IOException {
    final HttpUrl.Builder url = coordEndpoint.newBuilder();
    appendXmlCoordRequestParameters(url, types, lat, lon, maxDistance, maxStations);
    final AtomicReference<NearbyLocationsResult> result = new AtomicReference<>();

    final HttpClient.Callback callback = new HttpClient.Callback() {
        @Override
        public void onSuccessful(final CharSequence bodyPeek, final ResponseBody body) throws IOException {
            try {
                final XmlPullParser pp = parserFactory.newPullParser();
                pp.setInput(body.byteStream(), null); // Read encoding from XML declaration
                final ResultHeader header = enterItdRequest(pp);

                XmlPullUtil.enter(pp, "itdCoordInfoRequest");

                XmlPullUtil.enter(pp, "itdCoordInfo");

                XmlPullUtil.enter(pp, "coordInfoRequest");
                XmlPullUtil.skipExit(pp, "coordInfoRequest");

                final List<Location> locations = new ArrayList<>();

                if (XmlPullUtil.optEnter(pp, "coordInfoItemList")) {
                    while (XmlPullUtil.test(pp, "coordInfoItem")) {
                        final String type = XmlPullUtil.attr(pp, "type");
                        final LocationType locationType;
                        if ("STOP".equals(type))
                            locationType = LocationType.STATION;
                        else if ("POI_POINT".equals(type))
                            locationType = LocationType.POI;
                        else
                            throw new IllegalStateException("unknown type: " + type);

                        String id = XmlPullUtil.optAttr(pp, "stateless", null);
                        if (id == null)
                            id = XmlPullUtil.attr(pp, "id");

                        final String name = normalizeLocationName(XmlPullUtil.optAttr(pp, "name", null));
                        final String place = normalizeLocationName(XmlPullUtil.optAttr(pp, "locality", null));

                        XmlPullUtil.enter(pp, "coordInfoItem");

                        // FIXME this is always only one coordinate
                        final List<Point> path = processItdPathCoordinates(pp);
                        final Point coord = path != null ? path.get(0) : null;

                        XmlPullUtil.skipExit(pp, "coordInfoItem");

                        if (name != null)
                            locations.add(new Location(locationType, id, coord, place, name));
                    }

                    XmlPullUtil.skipExit(pp, "coordInfoItemList");
                }

                result.set(new NearbyLocationsResult(header, locations));
            } catch (final XmlPullParserException x) {
                throw new ParserException("cannot parse xml: " + bodyPeek, x);
            }
        }
    };

    if (httpPost)
        httpClient.getInputStream(callback, url.build(), url.build().encodedQuery(),
                "application/x-www-form-urlencoded", httpReferer);
    else
        httpClient.getInputStream(callback, url.build(), httpReferer);

    return result.get();
}

From source file:com.networknt.client.oauth.OauthHelper.java

/**
 * De-reference a simple web token to a JWT token from the OAuth 2.0 provider. This is normally called from the light-router.
 *
 * @param derefRequest a DerefRequest object that is constructed from the client.yml file.
 * @param envTag an environment tag from the server.yml for cluster service lookup.
 * @return String of JWT token
 * @throws ClientException when an error occurs.
 */
public static String derefToken(DerefRequest derefRequest, String envTag) throws ClientException {
    final Http2Client client = Http2Client.getInstance();
    final CountDownLatch latch = new CountDownLatch(1);
    final ClientConnection connection;
    try {
        if (derefRequest.getServerUrl() != null) {
            connection = client.connect(new URI(derefRequest.getServerUrl()), Http2Client.WORKER,
                    Http2Client.SSL, Http2Client.BUFFER_POOL,
                    derefRequest.enableHttp2 ? OptionMap.create(UndertowOptions.ENABLE_HTTP2, true)
                            : OptionMap.EMPTY)
                    .get();
        } else if (derefRequest.getServiceId() != null) {
            Cluster cluster = SingletonServiceFactory.getBean(Cluster.class);
            String url = cluster.serviceToUrl("https", derefRequest.getServiceId(), envTag, null);
            connection = client
                    .connect(new URI(url), Http2Client.WORKER, Http2Client.SSL, Http2Client.BUFFER_POOL,
                            derefRequest.enableHttp2 ? OptionMap.create(UndertowOptions.ENABLE_HTTP2, true)
                                    : OptionMap.EMPTY)
                    .get();
        } else {
            // both server_url and serviceId are empty in the config.
            logger.error("Error: both server_url and serviceId are not configured in client.yml for "
                    + derefRequest.getClass());
            throw new ClientException("both server_url and serviceId are not configured in client.yml for "
                    + derefRequest.getClass());
        }
    } catch (Exception e) {
        logger.error("Exception: ", e);
        throw new ClientException(e);
    }
    final AtomicReference<ClientResponse> reference = new AtomicReference<>();
    try {
        ClientRequest request = new ClientRequest().setPath(derefRequest.getUri()).setMethod(Methods.GET);
        request.getRequestHeaders().put(Headers.AUTHORIZATION,
                getBasicAuthHeader(derefRequest.getClientId(), derefRequest.getClientSecret()));
        request.getRequestHeaders().put(Headers.HOST, "localhost");
        connection.sendRequest(request, client.createClientCallback(reference, latch));
        latch.await();
    } catch (Exception e) {
        logger.error("Exception: ", e);
        throw new ClientException(e);
    } finally {
        IoUtils.safeClose(connection);
    }
    return reference.get().getAttachment(Http2Client.RESPONSE_BODY);
}

From source file:org.elasticsearch.xpack.watcher.common.http.HttpClientTests.java

public void testThatHttpClientFailsOnNonHttpResponse() throws Exception {
    ExecutorService executor = Executors.newSingleThreadExecutor();
    AtomicReference<Exception> hasExceptionHappened = new AtomicReference<>();
    try (ServerSocket serverSocket = new MockServerSocket(0, 50, InetAddress.getByName("localhost"))) {
        executor.execute(() -> {/*from   w w w .  j av  a 2s  .co  m*/
            try (Socket socket = serverSocket.accept()) {
                BufferedReader in = new BufferedReader(
                        new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
                in.readLine();
                socket.getOutputStream().write("This is not an HTTP response".getBytes(StandardCharsets.UTF_8));
                socket.getOutputStream().flush();
            } catch (Exception e) {
                hasExceptionHappened.set(e);
                logger.error((Supplier<?>) () -> new ParameterizedMessage("Error writing non-HTTP response"),
                        e);
            }
        });
        HttpRequest request = HttpRequest.builder("localhost", serverSocket.getLocalPort()).path("/").build();
        expectThrows(ClientProtocolException.class, () -> httpClient.execute(request));
        assertThat("A server side exception occured, but shouldn't", hasExceptionHappened.get(),
                is(nullValue()));
    } finally {
        terminate(executor);
    }
}

From source file:com.wk.lodge.composite.web.tomcat.IntegrationCompositeTests.java

@Test
public void testUpdate() throws Exception {
    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicReference<Throwable> failure = new AtomicReference<>();

    URI uri = new URI("ws://localhost:" + port + "/composite");
    WebSocketStompClient stompClient = new WebSocketStompClient(uri, this.headers, sockJsClient);
    stompClient.setMessageConverter(new MappingJackson2MessageConverter());

    stompClient.connect(new StompMessageHandler() {

        private StompSession stompSession;

        @Override
        public void afterConnected(StompSession stompSession, StompHeaderAccessor headers) {
            this.stompSession = stompSession;
            String topicUuid = simulateJoinEvent();

            this.stompSession.subscribe("/user/queue/device", null);
            this.stompSession.subscribe(String.format("/topic/%s", topicUuid), null);

            try {
                HashMap<String, Object> update = new HashMap<String, Object>();
                update.put("data", "TEST-UPDATE Data");
                update.put("type", "update");
                this.stompSession.send(String.format("/app/%s", topicUuid), update);
            } catch (Throwable t) {
                failure.set(t);
                latch.countDown();
            }

        }

        @Override
        public void handleMessage(Message<byte[]> message) throws MessagingException {
            try {
                String json = parseMessageJson(message);
                new JsonPathExpectationsHelper("type").exists(json);
                new JsonPathExpectationsHelper("type").assertValue(json, "update");
                new JsonPathExpectationsHelper("serverTime").exists(json);
                new JsonPathExpectationsHelper("data").assertValue(json, "TEST-UPDATE Data");
            } catch (Throwable t) {
                failure.set(t);
            } finally {
                this.stompSession.disconnect();
                latch.countDown();
            }
        }

        @Override
        public void handleError(Message<byte[]> message) {
            StompHeaderAccessor accessor = StompHeaderAccessor.wrap(message);
            String error = "[Producer] " + accessor.getShortLogMessage(message.getPayload());
            logger.error(error);
            failure.set(new Exception(error));
        }

        @Override
        public void handleReceipt(String receiptId) {
        }

        @Override
        public void afterDisconnected() {
        }

    });

    if (!latch.await(10, TimeUnit.SECONDS)) {
        fail("Update response not received");
    } else if (failure.get() != null) {
        throw new AssertionError("", failure.get());
    }
}