Example usage for com.fasterxml.jackson.databind ObjectMapper writeValueAsBytes

List of usage examples for com.fasterxml.jackson.databind ObjectMapper writeValueAsBytes

Introduction

In this page you can find the example usage for com.fasterxml.jackson.databind ObjectMapper writeValueAsBytes.

Prototype

@SuppressWarnings("resource")
public byte[] writeValueAsBytes(Object value) throws JsonProcessingException 

Source Link

Document

Method that can be used to serialize any Java value as a byte array.

Usage

From source file:com.netflix.suro.TestSuroServer.java

@Test
public void test() throws Exception {
    ObjectMapper jsonMapper = new DefaultObjectMapper();

    // Configure a synchronous client with a static load balancer pointed at the
    // single locally running suro server.
    final Properties clientProperties = new Properties();
    clientProperties.setProperty(ClientConfig.LB_TYPE, "static");
    clientProperties.setProperty(ClientConfig.LB_SERVER, "localhost:" + suroServer.getServerPort());
    clientProperties.setProperty(ClientConfig.CLIENT_TYPE, "sync");

    SuroClient client = new SuroClient(clientProperties);
    try {
        // Send a known number of raw messages to each topic.
        for (int i = 0; i < 10; ++i) {
            client.send(new Message("topic1", Integer.toString(i).getBytes()));
        }
        for (int i = 0; i < 5; ++i) {
            client.send(new Message("topic2", Integer.toString(i).getBytes()));
        }
        for (int i = 0; i < 20; ++i) {
            client.send(new Message("topic3", Integer.toString(i).getBytes()));
        }

        // topic4 gets JSON-serialized map payloads.
        for (int i = 0; i < 30; ++i) {
            Map<String, Object> message = makeMessage("foo/bar", "value" + i);
            client.send(new Message("topic4", jsonMapper.writeValueAsBytes(message)));
        }

        // Poll for up to 10 seconds for the server side to observe everything.
        int count = 10;
        while (!answer() && count > 0) {
            Thread.sleep(1000);
            --count;
        }

        assertTrue(answer());
    } finally {
        // Fix: the original caught Exception and called System.exit(-1), which kills
        // the JVM mid test-run and hides the real failure from the test framework.
        // Let exceptions propagate; always release the client, even on failure.
        client.shutdown();
    }
}

From source file:org.wso2.carbon.apimgt.impl.definitions.APIDefinitionUsingOASParser.java

/**
 * Creates a json string using the swagger object. 
 *
 * @param swaggerObj swagger object/*  w ww  .ja  va2  s  .  com*/
 * @return json string using the swagger object
 * @throws APIManagementException error while creating swagger json
 */
private String getSwaggerJsonString(Swagger swaggerObj) throws APIManagementException {
    ObjectMapper mapper = new ObjectMapper();
    mapper.setSerializationInclusion(JsonInclude.Include.NON_NULL);
    mapper.enable(SerializationFeature.INDENT_OUTPUT);

    //this is to ignore "originalRef" in schema objects
    mapper.addMixIn(RefModel.class, IgnoreOriginalRefMixin.class);
    mapper.addMixIn(RefProperty.class, IgnoreOriginalRefMixin.class);
    mapper.addMixIn(RefPath.class, IgnoreOriginalRefMixin.class);
    mapper.addMixIn(RefParameter.class, IgnoreOriginalRefMixin.class);
    mapper.addMixIn(RefResponse.class, IgnoreOriginalRefMixin.class);

    //this is to ignore "responseSchema" in response schema objects
    mapper.addMixIn(Response.class, ResponseSchemaMixin.class);
    try {
        return new String(mapper.writeValueAsBytes(swaggerObj));
    } catch (JsonProcessingException e) {
        throw new APIManagementException("Error while generating Swagger json from model", e);
    }
}

From source file:gov.bnl.channelfinder.ChannelsResource.java

/**
 * PUT method for creating multiple channel instances.
 *
 * @param data XmlChannels data (from payload)
 * @return HTTP Response/*from   ww w  . j a  v a  2 s.c  om*/
 * @throws IOException when audit or log fail
 */
@PUT
@Consumes({ "application/json" })
public Response create(List<XmlChannel> data) throws IOException {
    Client client = getNewClient();
    UserManager um = UserManager.getInstance();
    um.setUser(securityContext.getUserPrincipal(), securityContext.isUserInRole("Administrator"));
    ObjectMapper mapper = new ObjectMapper();
    try {
        long start = System.currentTimeMillis();
        data = validateChannels(data, client);
        audit.info(um.getUserName() + "|" + uriInfo.getPath() + "|PUT|validation : "
                + (System.currentTimeMillis() - start));
        start = System.currentTimeMillis();
        BulkRequestBuilder bulkRequest = client.prepareBulk();
        for (XmlChannel channel : data) {
            bulkRequest.add(client.prepareUpdate("channelfinder", "channel", channel.getName())
                    .setDoc(mapper.writeValueAsBytes(channel))
                    .setUpsert(new IndexRequest("channelfinder", "channel", channel.getName())
                            .source(mapper.writeValueAsBytes(channel))));
        }
        String prepare = "|Prepare: " + (System.currentTimeMillis() - start) + "|";
        start = System.currentTimeMillis();
        bulkRequest.setRefresh(true);
        BulkResponse bulkResponse = bulkRequest.execute().actionGet();
        String execute = "|Execute: " + (System.currentTimeMillis() - start) + "|";
        start = System.currentTimeMillis();
        if (bulkResponse.hasFailures()) {
            audit.severe(bulkResponse.buildFailureMessage());
            throw new Exception();
        } else {
            Response r = Response.noContent().build();
            audit.info(um.getUserName() + "|" + uriInfo.getPath() + "|POST|OK|" + r.getStatus() + prepare
                    + execute + "|data=" + (data));
            return r;
        }
    } catch (IllegalArgumentException e) {
        return handleException(um.getUserName(), "PUT", Response.Status.BAD_REQUEST, e);
    } catch (Exception e) {
        return handleException(um.getUserName(), "PUT", Response.Status.INTERNAL_SERVER_ERROR, e);
    } finally {
        client.close();
    }
}

From source file:com.metamx.rdiclient.RdiClientImplTest.java

@Test
public void testFlushCountBasedFlushing() throws Exception {
    final ObjectMapper objectMapper = new ObjectMapper().registerModule(new JodaModule());
    final RdiClientImpl<byte[]> rdiClient = makeRdiClient(new PassthroughSerializer(), 1);

    // Each send should hit the endpoint immediately (flush count 1): expect exactly
    // two requests, each answered with a 200-style response.
    mockClient.setGoHandler(new GoHandler() {
        @Override
        protected <Intermediate, Final> ListenableFuture<Final> go(Request<Intermediate, Final> request)
                throws Exception {
            Assert.assertEquals(new URL(TARGET_URL), request.getUrl());
            Preconditions.checkArgument(request.getHandler() instanceof StatusResponseHandler,
                    "WTF?! Expected StatusResponseHandler.");
            return Futures.immediateFuture((Final) okResponse());
        }
    }.times(2));

    rdiClient.start();

    // Serialize and send the same sample event twice, collecting the pending futures.
    final List<MmxAuctionSummary> events = Arrays.asList(sampleEventBasic, sampleEventBasic);
    final List<ListenableFuture<RdiResponse>> pending = Lists.newArrayList();
    for (int i = 0; i < events.size(); i++) {
        pending.add(rdiClient.send(objectMapper.writeValueAsBytes(events.get(i))));
    }

    // Both sends must complete successfully before the client is closed.
    final List<RdiResponse> received = Futures.allAsList(pending).get();
    Assert.assertEquals(Lists.newArrayList(RdiResponse.create(), RdiResponse.create()), received);
    rdiClient.close();
    Assert.assertTrue("mockClient succeeded", mockClient.succeeded());
}

From source file:com.basistech.rosette.dm.json.array.CompareJsons.java

/**
 * Benchmarks one Jackson factory ("format") over the given corpus: serializes each
 * AnnotatedText with both the 'normal' (classic textual JSON) mapper and the 'array'
 * mapper, times serialization and deserialization, and records raw sizes plus gzip
 * and snappy compression ratios, then prints the statistics and the Metrics report
 * to stdout.
 *
 * @param texts          corpus to measure
 * @param factoryFactory supplies a fresh JsonFactory for each of the two mappers
 * @param format         label baked into the metric names and the printed header
 * @throws IOException on serialization or (de)compression failure
 */
private static void runWithFormat(AnnotatedText[] texts, FactoryFactory factoryFactory, String format)
        throws IOException {

    MetricRegistry metrics = new MetricRegistry();
    ConsoleReporter reporter = ConsoleReporter.forRegistry(metrics).convertRatesTo(TimeUnit.SECONDS)
            .convertDurationsTo(TimeUnit.MILLISECONDS).build();

    // 'normal' means 'classic json textual format', as opposed to array.
    ObjectMapper normalMapper = AnnotatedDataModelModule
            .setupObjectMapper(new ObjectMapper(factoryFactory.newFactory()));
    ObjectMapper arrayMapper = AnnotatedDataModelArrayModule
            .setupObjectMapper(new ObjectMapper(factoryFactory.newFactory()));

    // times with Metrics — one serialize/deserialize timer per mapper flavor.
    Timer normalSerialTime = metrics.timer(String.format("%s-normal-serial", format));
    Timer arraySerialTime = metrics.timer(String.format("%s-array-serial", format));
    Timer normalDeserialTime = metrics.timer(String.format("%s-normal-deserial", format));
    Timer arrayDeserialTime = metrics.timer(String.format("%s-array-deserial", format));

    // Size/ratio accumulators: raw byte size, plus gzip and snappy compression ratios.
    CompressionStats normalSizeStats = new CompressionStats();
    CompressionStats normalGzipStats = new CompressionStats();
    CompressionStats normalSnappyStats = new CompressionStats();

    CompressionStats arraySizeStats = new CompressionStats();
    CompressionStats arrayGzipStats = new CompressionStats();
    CompressionStats arraySnappyStats = new CompressionStats();

    // Compression/decompression timers for each codec x mapper flavor combination.
    Timer normalGzipCompressTime = metrics.timer(String.format("%s-gzip-compress-normal", format));
    Timer normalGzipDecompressTime = metrics.timer(String.format("%s-gzip-decompress-normal", format));
    Timer normalSnappyCompressTime = metrics.timer(String.format("%s-snappy-compress-normal", format));
    Timer normalSnappyDecompressTime = metrics.timer(String.format("%s-snappy-decompress-normal", format));
    Timer arrayGzipCompressTime = metrics.timer(String.format("%s-gzip-compress-array", format));
    Timer arrayGzipDecompressTime = metrics.timer(String.format("%s-gzip-decompress-array", format));
    Timer arraySnappyCompressTime = metrics.timer(String.format("%s-snappy-compress-array", format));
    Timer arraySnappyDecompressTime = metrics.timer(String.format("%s-snappy-decompress-array", format));

    for (AnnotatedText text : texts) {
        // text and array time and size
        Timer.Context ctxt = normalSerialTime.time();
        byte[] textJson = normalMapper.writeValueAsBytes(text);
        ctxt.stop();
        normalSizeStats.increment(textJson.length);

        // Time reading it back ...
        ctxt = normalDeserialTime.time();
        normalMapper.readValue(textJson, AnnotatedText.class);
        ctxt.stop();

        ctxt = arraySerialTime.time();
        byte[] arrayJson = arrayMapper.writeValueAsBytes(text);
        ctxt.stop();
        arraySizeStats.increment(arrayJson.length);

        // time reading it back
        ctxt = arrayDeserialTime.time();
        arrayMapper.readValue(arrayJson, AnnotatedText.class);
        ctxt.stop();

        // gzip time and space (normal flavor)
        ctxt = normalGzipCompressTime.time();
        byte[] compressed = gzipCompress(textJson);
        int compLen = compressed.length;
        ctxt.stop();
        normalGzipStats.increment(compressionRatio(textJson.length, compLen));

        // decompression?
        ctxt = normalGzipDecompressTime.time();
        gzipDecompress(compressed);
        ctxt.stop();

        // gzip time and space (array flavor)
        ctxt = arrayGzipCompressTime.time();
        compressed = gzipCompress(arrayJson);
        compLen = compressed.length;
        ctxt.stop();
        arrayGzipStats.increment(compressionRatio(arrayJson.length, compLen));

        // decompression?
        ctxt = arrayGzipDecompressTime.time();
        gzipDecompress(compressed);
        ctxt.stop();

        // snappy time and space (normal flavor)
        ctxt = normalSnappyCompressTime.time();
        compressed = snappyCompress(textJson);
        compLen = compressed.length;
        ctxt.stop();
        normalSnappyStats.increment(compressionRatio(textJson.length, compLen));

        // decompression?
        ctxt = normalSnappyDecompressTime.time();
        snappyDecompress(compressed);
        ctxt.stop();

        // snappy time and space (array flavor)
        ctxt = arraySnappyCompressTime.time();
        compressed = snappyCompress(arrayJson);
        compLen = compressed.length;
        ctxt.stop();
        arraySnappyStats.increment(compressionRatio(arrayJson.length, compLen));

        // decompression?
        ctxt = arraySnappyDecompressTime.time();
        snappyDecompress(compressed);
        ctxt.stop();
    }

    // Dump the aggregate size/ratio statistics, then the Metrics timing report.
    System.out.println("\nStatistics for " + format);
    System.out.println();
    System.out.format("Normal Size: mean %.2f stddev %.2f\n", normalSizeStats.getMean(),
            normalSizeStats.getStandardDeviation());
    System.out.format("Normal GZIP Compression ratio: mean %.2f stddev %.2f\n", normalGzipStats.getMean(),
            normalGzipStats.getStandardDeviation());
    System.out.format("Normal Snappy Compression ratio: mean %.2f stddev %.2f\n", normalSnappyStats.getMean(),
            normalSnappyStats.getStandardDeviation());

    System.out.format("Array Size: mean %.2f stddev %.2f\n", arraySizeStats.getMean(),
            arraySizeStats.getStandardDeviation());
    System.out.format("Array GZIP Compression ratio: mean %.2f stddev %.2f\n", arrayGzipStats.getMean(),
            arrayGzipStats.getStandardDeviation());
    System.out.format("Array Snappy Compression ratio: mean %.2f stddev %.2f\n", arraySnappyStats.getMean(),
            arraySnappyStats.getStandardDeviation());
    System.out.println();
    reporter.report();
    reporter.stop();

}

From source file:com.jivesoftware.os.amza.service.AmzaTestCluster.java

/**
 * Creates (or returns the already-registered) in-memory Amza node for the given ring
 * member and host, wires it into the test cluster's row-taking plumbing, registers it
 * in the "test" ring alongside the previously created node, and caches it in
 * {@code cluster}.
 *
 * @param localRingMember identity of the node within the ring
 * @param localRingHost   host/port the node is addressed by
 * @return the existing or newly started node
 * @throws Exception if service initialization or startup fails
 */
public AmzaNode newNode(final RingMember localRingMember, final RingHost localRingHost) throws Exception {

    // Idempotent: reuse a node that was already created for this member.
    AmzaNode service = cluster.get(localRingMember);
    if (service != null) {
        return service;
    }

    // Small thresholds/intervals relative to the defaults — presumably chosen so that
    // compaction, flushing, and liveliness feeds all trigger quickly under test.
    AmzaServiceConfig config = new AmzaServiceConfig();
    config.workingDirectories = new String[] {
            workingDirctory.getAbsolutePath() + "/" + localRingHost.getHost() + "-" + localRingHost.getPort() };
    config.aquariumLivelinessFeedEveryMillis = 500;
    config.maxUpdatesBeforeDeltaStripeCompaction = 10;
    config.deltaStripeCompactionIntervalInMillis = 1000;
    config.flushHighwatersAfterNUpdates = 10;

    config.initialBufferSegmentSize = 1_024;
    config.maxBufferSegmentSize = 10 * 1_024;

    config.updatesBetweenLeaps = 10;
    config.useMemMap = true;

    // Order ids are derived from the node's port, so each node writes distinct ids.
    SnowflakeIdPacker idPacker = new SnowflakeIdPacker();
    OrderIdProviderImpl orderIdProvider = new OrderIdProviderImpl(
            new ConstantWriterIdProvider(localRingHost.getPort()), idPacker, new JiveEpochTimestampProvider());

    // In-process transport: instead of going over the network, look the remote member
    // up in the cluster map and call it directly.
    AvailableRowsTaker availableRowsTaker = (localRingMember1, localTimestampedRingHost, remoteRingMember,
            remoteRingHost, system, takeSessionId, takeSharedKey, timeoutMillis, updatedPartitionsStream,
            pingStream) -> {

        AmzaNode amzaNode = cluster.get(remoteRingMember);
        if (amzaNode == null) {
            throw new IllegalStateException("Service doesn't exist for " + remoteRingMember);
        } else {
            amzaNode.takePartitionUpdates(localRingMember1, localTimestampedRingHost, system, takeSessionId,
                    takeSharedKey, timeoutMillis, (versionedPartitionName, txId) -> {
                        if (versionedPartitionName != null) {
                            updatedPartitionsStream.available(versionedPartitionName, txId);
                        }
                    }, () -> {
                        LOG.debug("Special delivery! Special delivery!");
                        return null;
                    }, () -> {
                        LOG.debug("Ping pong!");
                        return null;
                    });
        }
    };

    // Same in-process shortcut for streaming rows, acking taken rows, and pongs.
    RowsTaker updateTaker = new RowsTaker() {

        @Override
        public RowsTaker.StreamingRowsResult rowsStream(RingMember localRingMember, RingMember remoteRingMember,
                RingHost remoteRingHost, VersionedPartitionName remoteVersionedPartitionName,
                long takeSessionId, long takeSharedKey, long remoteTxId, long localLeadershipToken, long limit,
                RowStream rowStream) {

            AmzaNode amzaNode = cluster.get(remoteRingMember);
            if (amzaNode == null) {
                throw new IllegalStateException("Service doesn't exist for " + localRingMember);
            } else {
                StreamingTakesConsumer.StreamingTakeConsumed consumed = amzaNode.rowsStream(localRingMember,
                        remoteVersionedPartitionName, takeSessionId, takeSharedKey, remoteTxId,
                        localLeadershipToken, limit, rowStream);
                // Highwater marks are only reported once the remote partition is online.
                HashMap<RingMember, Long> otherHighwaterMarks = consumed.isOnline ? new HashMap<>() : null;
                return new StreamingRowsResult(null, null, consumed.leadershipToken, consumed.partitionVersion,
                        otherHighwaterMarks);
            }
        }

        @Override
        public boolean rowsTaken(RingMember localRingMember, RingMember remoteRingMember,
                RingHost remoteRingHost, long takeSessionId, long takeSharedKey,
                VersionedPartitionName remoteVersionedPartitionName, long remoteTxId,
                long localLeadershipToken) {
            AmzaNode amzaNode = cluster.get(remoteRingMember);
            if (amzaNode == null) {
                throw new IllegalStateException("Service doesn't exists for " + localRingMember);
            } else {
                try {
                    amzaNode.remoteMemberTookToTxId(localRingMember, takeSessionId, takeSharedKey,
                            remoteVersionedPartitionName, remoteTxId, localLeadershipToken);
                    return true;
                } catch (Exception x) {
                    throw new RuntimeException("Issue while applying acks.", x);
                }
            }
        }

        @Override
        public boolean pong(RingMember localRingMember, RingMember remoteRingMember, RingHost remoteRingHost,
                long takeSessionId, long takeSharedKey) {
            AmzaNode amzaNode = cluster.get(remoteRingMember);
            if (amzaNode == null) {
                throw new IllegalStateException("Service doesn't exists for " + localRingMember);
            } else {
                try {
                    amzaNode.remoteMemberPong(localRingMember, takeSessionId, takeSharedKey);
                    return true;
                } catch (Exception x) {
                    throw new RuntimeException("Issue while replying to pings.", x);
                }
            }
        }

        @Override
        public boolean invalidate(RingMember localRingMember, RingMember remoteRingMember,
                RingHost remoteRingHost, long takeSessionId, long takeSharedKey,
                VersionedPartitionName remoteVersionedPartitionName) {
            return true;
        }
    };

    // Partition properties are persisted as JSON via Jackson.
    final ObjectMapper mapper = new ObjectMapper();
    PartitionPropertyMarshaller partitionPropertyMarshaller = new PartitionPropertyMarshaller() {

        @Override
        public PartitionProperties fromBytes(byte[] bytes) {
            try {
                return mapper.readValue(bytes, PartitionProperties.class);
            } catch (IOException ex) {
                throw new RuntimeException(ex);
            }
        }

        @Override
        public byte[] toBytes(PartitionProperties partitionProperties) {
            try {
                return mapper.writeValueAsBytes(partitionProperties);
            } catch (JsonProcessingException ex) {
                throw new RuntimeException(ex);
            }
        }
    };

    AmzaInterner amzaInterner = new AmzaInterner();
    AmzaStats amzaSystemStats = new AmzaStats();
    AmzaStats amzaStats = new AmzaStats();
    SickThreads sickThreads = new SickThreads();
    SickPartitions sickPartitions = new SickPartitions();
    Optional<TakeFailureListener> absent = Optional.<TakeFailureListener>absent();

    BinaryPrimaryRowMarshaller primaryRowMarshaller = new BinaryPrimaryRowMarshaller(); // original note: you can't change this
    BinaryHighwaterRowMarshaller highwaterRowMarshaller = new BinaryHighwaterRowMarshaller(amzaInterner);
    AquariumStats aquariumStats = new AquariumStats();

    // Assemble the service, registering both the BerkeleyDB and LAB index providers.
    AmzaService amzaService = new AmzaServiceInitializer().initialize(config, amzaInterner, aquariumStats,
            amzaSystemStats, amzaStats,
            new HealthTimer(CountersAndTimers.getOrCreate("test"), "test", new NoOpHealthChecker<>("test")),
            cluster::size, sickThreads, sickPartitions, primaryRowMarshaller, highwaterRowMarshaller,
            localRingMember, localRingHost, Collections.emptySet(), orderIdProvider, idPacker,
            partitionPropertyMarshaller, (workingIndexDirectories, indexProviderRegistry,
                    ephemeralRowIOProvider, persistentRowIOProvider, partitionStripeFunction) -> {

                indexProviderRegistry
                        .register(
                                new BerkeleyDBWALIndexProvider(BerkeleyDBWALIndexProvider.INDEX_CLASS_NAME,
                                        partitionStripeFunction, workingIndexDirectories),
                                persistentRowIOProvider);

                LABPointerIndexConfig labConfig = BindInterfaceToConfiguration
                        .bindDefault(LABPointerIndexConfig.class);

                indexProviderRegistry.register(new LABPointerIndexWALIndexProvider(amzaInterner, labConfig,
                        Executors.newCachedThreadPool(), Executors.newCachedThreadPool(),
                        Executors.newCachedThreadPool(), Executors.newCachedThreadPool(),
                        LABPointerIndexWALIndexProvider.INDEX_CLASS_NAME, partitionStripeFunction,
                        workingIndexDirectories), persistentRowIOProvider);

            }, availableRowsTaker, () -> updateTaker, () -> updateTaker, absent, (changes) -> {
            }, (threadCount, name) -> {
                return Executors.newCachedThreadPool();
            });

    amzaService.start(localRingMember, localRingHost);

    try {
        // Join the "test" ring and cross-register with the previously created node so
        // the two nodes know about each other.
        //amzaService.getRingWriter().addRingMember(AmzaRingReader.SYSTEM_RING, localRingMember); // ?? Hacky
        TimestampedRingHost timestampedRingHost = amzaService.getRingReader().getRingHost();
        amzaService.getRingWriter().addRingMember("test".getBytes(), localRingMember); // ?? Hacky
        if (lastAmzaService != null) {
            TimestampedRingHost lastTimestampedRingHost = lastAmzaService.getRingReader().getRingHost();
            amzaService.getRingWriter().register(lastAmzaService.getRingReader().getRingMember(),
                    lastTimestampedRingHost.ringHost, lastTimestampedRingHost.timestampId, false);
            amzaService.getRingWriter().addRingMember("test".getBytes(),
                    lastAmzaService.getRingReader().getRingMember()); // ?? Hacky
            lastAmzaService.getRingWriter().register(localRingMember, localRingHost,
                    timestampedRingHost.timestampId, false);
            lastAmzaService.getRingWriter().addRingMember("test".getBytes(), localRingMember); // ?? Hacky
        }
        lastAmzaService = amzaService;
    } catch (Exception x) {
        // NOTE(review): ring-join failure aborts the whole test JVM rather than
        // propagating — acceptable for a test cluster, surprising elsewhere.
        x.printStackTrace();
        System.out.println("FAILED CONNECTING RING");
        System.exit(1);
    }

    service = new AmzaNode(amzaInterner, localRingMember, localRingHost, amzaService, orderIdProvider,
            sickThreads, sickPartitions);

    cluster.put(localRingMember, service);

    System.out.println("Added serviceHost:" + localRingMember + " to the cluster.");
    return service;
}

From source file:io.druid.server.AsyncQueryForwardingServlet.java

/**
 * Routes an incoming request to a downstream server: Avatica SQL requests are routed
 * by connection id, native query POSTs by the parsed Query, query DELETEs are
 * broadcast as cancellations, and everything else goes to the default server. The
 * chosen mapper, target host/scheme, and any parsed payload are stashed as request
 * attributes before delegating to doService.
 */
@Override
protected void service(HttpServletRequest request, HttpServletResponse response)
        throws ServletException, IOException {
    // Pick the smile or json mapper based on the request content type.
    final boolean isSmile = SmileMediaTypes.APPLICATION_JACKSON_SMILE.equals(request.getContentType())
            || APPLICATION_SMILE.equals(request.getContentType());
    final ObjectMapper objectMapper = isSmile ? smileMapper : jsonMapper;
    request.setAttribute(OBJECTMAPPER_ATTRIBUTE, objectMapper);

    final String requestURI = request.getRequestURI();
    final String method = request.getMethod();
    final Server targetServer;

    // The Router does not have the ability to look inside SQL queries and route them intelligently, so just treat
    // them as a generic request.
    final boolean isQueryEndpoint = requestURI.startsWith("/druid/v2")
            && !requestURI.startsWith("/druid/v2/sql");

    final boolean isAvatica = requestURI.startsWith("/druid/v2/sql/avatica");

    if (isAvatica) {
        // Route by Avatica connection id so a connection sticks to one server; the
        // parsed body is re-serialized and stashed for the forwarding step.
        Map<String, Object> requestMap = objectMapper.readValue(request.getInputStream(),
                JacksonUtils.TYPE_REFERENCE_MAP_STRING_OBJECT);
        String connectionId = getAvaticaConnectionId(requestMap);
        targetServer = hostFinder.findServerAvatica(connectionId);
        byte[] requestBytes = objectMapper.writeValueAsBytes(requestMap);
        request.setAttribute(AVATICA_QUERY_ATTRIBUTE, requestBytes);
    } else if (isQueryEndpoint && HttpMethod.DELETE.is(method)) {
        // query cancellation request
        targetServer = hostFinder.pickDefaultServer();

        for (final Server server : hostFinder.getAllServers()) {
            // send query cancellation to all brokers this query may have gone to
            // to keep the code simple, the proxy servlet will also send a request to the default targetServer.
            if (!server.getHost().equals(targetServer.getHost())) {
                // issue async requests; failures are logged but do not fail this request
                Response.CompleteListener completeListener = result -> {
                    if (result.isFailed()) {
                        log.warn(result.getFailure(), "Failed to forward cancellation request to [%s]",
                                server.getHost());
                    }
                };

                Request broadcastReq = broadcastClient
                        .newRequest(rewriteURI(request, server.getScheme(), server.getHost()))
                        .method(HttpMethod.DELETE).timeout(CANCELLATION_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);

                copyRequestHeaders(request, broadcastReq);
                broadcastReq.send(completeListener);
            }
            interruptedQueryCount.incrementAndGet();
        }
    } else if (isQueryEndpoint && HttpMethod.POST.is(method)) {
        // query request
        try {
            Query inputQuery = objectMapper.readValue(request.getInputStream(), Query.class);
            if (inputQuery != null) {
                targetServer = hostFinder.pickServer(inputQuery);
                // Assign an id so the query can be tracked/cancelled downstream.
                if (inputQuery.getId() == null) {
                    inputQuery = inputQuery.withId(UUID.randomUUID().toString());
                }
            } else {
                targetServer = hostFinder.pickDefaultServer();
            }
            request.setAttribute(QUERY_ATTRIBUTE, inputQuery);
        } catch (IOException e) {
            // Unparseable query: log it, record the failure, and answer 400 directly.
            log.warn(e, "Exception parsing query");
            final String errorMessage = e.getMessage() == null ? "no error message" : e.getMessage();
            requestLogger
                    .log(new RequestLogLine(DateTimes.nowUtc(), request.getRemoteAddr(), null, new QueryStats(
                            ImmutableMap.<String, Object>of("success", false, "exception", errorMessage))));
            response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
            response.setContentType(MediaType.APPLICATION_JSON);
            objectMapper.writeValue(response.getOutputStream(), ImmutableMap.of("error", errorMessage));

            return;
        } catch (Exception e) {
            handleException(response, objectMapper, e);
            return;
        }
    } else {
        targetServer = hostFinder.pickDefaultServer();
    }

    request.setAttribute(HOST_ATTRIBUTE, targetServer.getHost());
    request.setAttribute(SCHEME_ATTRIBUTE, targetServer.getScheme());

    doService(request, response);
}

From source file:org.commonjava.maven.galley.transport.htcli.UploadMetadataGenTransferDecorator.java

/**
 * Persists the HTTP exchange metadata derived from the given request headers to the
 * sibling metadata Transfer of {@code target} (or to a metadata file inside it when
 * the target is a directory). Failures are logged, never thrown.
 *
 * @param target         the transfer whose exchange metadata is being recorded
 * @param mapper         Jackson mapper used to render the metadata as JSON
 * @param requestHeaders headers the metadata is built from
 */
private void writeMetadata(final Transfer target, final ObjectMapper mapper,
        final Map<String, List<String>> requestHeaders) {
    Transfer metaTxfr = target.getSiblingMeta(HttpExchangeMetadata.FILE_EXTENSION);
    if (metaTxfr == null) {
        if (target.isDirectory()) {
            logger.trace("DIRECTORY. Using HTTP exchange metadata file INSIDE directory called: {}",
                    HttpExchangeMetadata.FILE_EXTENSION);
            metaTxfr = target.getChild(HttpExchangeMetadata.FILE_EXTENSION);
        } else {
            logger.trace("SKIP: Cannot retrieve HTTP exchange metadata Transfer instance for: {}", target);
            return;
        }
    }

    final HttpExchangeMetadata metadata = new HttpExchangeMetadataFromRequestHeader(requestHeaders);
    final Transfer finalMeta = metaTxfr;
    // try-with-resources replaces the manual finally/closeQuietly: close failures now
    // surface through the existing IOException handler instead of being swallowed.
    try (OutputStream out = metaTxfr.openOutputStream(TransferOperation.GENERATE, false)) {
        // The anonymous toString defers JSON rendering until trace logging actually
        // formats the message.
        logger.trace("Writing HTTP exchange metadata:\n\n{}\n\n", new Object() {
            @Override
            public String toString() {
                try {
                    return mapper.writeValueAsString(metadata);
                } catch (final JsonProcessingException e) {
                    logger.warn(String.format("Failed to write HTTP exchange metadata: %s. Reason: %s",
                            finalMeta, e.getMessage()), e);
                }

                return "ERROR RENDERING METADATA";
            }
        });

        out.write(mapper.writeValueAsBytes(metadata));
    } catch (final IOException e) {
        if (logger.isTraceEnabled()) {
            logger.trace(String.format("Failed to write metadata for HTTP exchange to: %s. Reason: %s",
                    metaTxfr, e.getMessage()), e);
        } else {
            logger.warn("Failed to write metadata for HTTP exchange to: {}. Reason: {}", metaTxfr,
                    e.getMessage());
        }
    }
}

From source file:gov.bnl.channelfinder.ChannelsResource.java

/**
 * Renames an existing channel: merges the incoming properties/tags with the stored
 * document, then deletes the old document and indexes the merged channel under the
 * new name in a single bulk request.
 *
 * @param um     authenticated user manager (for audit/error reporting)
 * @param client elasticsearch client
 * @param chan   current channel name
 * @param data   payload carrying the new name plus property/tag overrides
 * @return HTTP response: 200 with the merged channel, 404 if missing, 500 on failure
 */
private Response renameChannel(UserManager um, Client client, String chan, XmlChannel data) {
    GetResponse response = client.prepareGet("channelfinder", "channel", chan).execute().actionGet();
    if (!response.isExists()) {
        // Fix: the original dropped this return value and fell through, then tried to
        // parse the source of a document that does not exist.
        return handleException(um.getUserName(), "POST", Response.Status.NOT_FOUND,
                "Specified channel '" + chan + "' does not exist");
    }
    ObjectMapper mapper = new ObjectMapper();
    try {
        XmlChannel originalChannel = mapper.readValue(response.getSourceAsBytes(), XmlChannel.class);
        originalChannel.setName(data.getName());
        // Keep any stored properties the payload does not override, then adopt the
        // merged set.
        Collection<String> propNames = ChannelUtil.getPropertyNames(data);
        data.getProperties().addAll(originalChannel.getProperties().stream().filter(p -> {
            return !propNames.contains(p.getName());
        }).collect(Collectors.toList()));
        originalChannel.setProperties(data.getProperties());
        // Same merge for tags.
        Collection<String> tagNames = ChannelUtil.getTagNames(data);
        data.getTags().addAll(originalChannel.getTags().stream().filter(t -> {
            return !tagNames.contains(t.getName());
        }).collect(Collectors.toList()));
        originalChannel.setTags(data.getTags());
        // Delete old name and index under the new name atomically via one bulk call.
        BulkRequestBuilder bulkRequest = client.prepareBulk();
        bulkRequest.add(new DeleteRequest("channelfinder", "channel", chan));
        IndexRequest indexRequest = new IndexRequest("channelfinder", "channel", originalChannel.getName())
                .source(mapper.writeValueAsBytes(originalChannel));
        bulkRequest.add(indexRequest);
        bulkRequest.setRefresh(true);
        BulkResponse bulkResponse = bulkRequest.execute().actionGet();
        if (bulkResponse.hasFailures()) {
            audit.severe(bulkResponse.buildFailureMessage());
            if (bulkResponse.buildFailureMessage().contains("DocumentMissingException")) {
                return handleException(um.getUserName(), "POST", Response.Status.NOT_FOUND,
                        bulkResponse.buildFailureMessage());
            } else {
                return handleException(um.getUserName(), "POST", Response.Status.INTERNAL_SERVER_ERROR,
                        bulkResponse.buildFailureMessage());
            }
        } else {
            Response r = Response.ok(originalChannel).build();
            audit.info(um.getUserName() + "|" + uriInfo.getPath() + "|POST|OK|" + r.getStatus() + "|data=");
            return r;
        }
    } catch (IOException e) {
        return handleException(um.getUserName(), "POST", Response.Status.INTERNAL_SERVER_ERROR, e);
    }
}

From source file:com.nesscomputing.httpclient.testing.JaxRsResponseHttpResponseGenerator.java

/**
 * Create a new response generator.
 *
 * <p>The entity is encoded up front: null becomes an empty body, Strings are UTF-8
 * encoded, byte arrays pass through, and anything else is serialized to JSON with the
 * supplied mapper. The Content-Type header on the response wins; otherwise a default
 * is chosen per entity kind.
 *
 * @param mapper Jackson mapper for converting entities to JSON, may be null.  Attempts to
 * serialize to JSON with a null mapper will fail spectacularly.
 * @param response The response to send back on fetches.
 */
public JaxRsResponseHttpResponseGenerator(@Nullable ObjectMapper mapper, Response response) {
    this.response = response;
    final Object entity = response.getEntity();
    charset = Charsets.UTF_8.name();

    // Some JAX-RS metadata maps hand back null instead of an empty list.
    List<Object> typeHeaders = response.getMetadata().get("Content-Type");
    if (typeHeaders == null) { // Damn you to hell, people who reinvent Multimap poorly!
        typeHeaders = Collections.emptyList();
    }
    Preconditions.checkArgument(typeHeaders.size() <= 1, "multiple Content-Type headers?!");

    // Content type explicitly declared on the response, if any.
    MediaType declaredType = typeHeaders.isEmpty() ? null
            : MediaType.valueOf(typeHeaders.get(0).toString());

    final byte[] encoded;
    if (entity == null) {
        encoded = new byte[0];
        if (declaredType == null) {
            declaredType = MediaType.TEXT_PLAIN_TYPE;
        }
    } else if (entity instanceof String) {
        encoded = ((String) entity).getBytes(Charsets.UTF_8);
        if (declaredType == null) {
            declaredType = MediaType.TEXT_PLAIN_TYPE;
        }
    } else if (entity instanceof byte[]) {
        encoded = (byte[]) entity;
        if (declaredType == null) {
            declaredType = MediaType.APPLICATION_OCTET_STREAM_TYPE;
        }
    } else {
        // Arbitrary object: requires a mapper; defaults to application/json.
        Preconditions.checkNotNull(mapper, "Mapper null and unknown type " + entity.getClass() + " provided");
        if (declaredType == null) {
            declaredType = MediaType.APPLICATION_JSON_TYPE;
        }
        try {
            encoded = mapper.writeValueAsBytes(entity);
        } catch (IOException e) {
            throw new IllegalStateException(e);
        }
    }

    responseBody = new ByteArrayInputStream(encoded);
    contentLength = encoded.length;
    this.contentType = declaredType;
}