Example usage for com.fasterxml.jackson.core JsonProcessingException getMessage

List of usage examples for com.fasterxml.jackson.core JsonProcessingException getMessage

Introduction

On this page you can find example usages of com.fasterxml.jackson.core JsonProcessingException getMessage.

Prototype

@Override
public String getMessage() 

Document

Default method overridden so that we can add location information
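
The getMessage() override appends the failure location (source reference, line, and column) to the base exception message; getOriginalMessage() returns the same text without that location suffix. The following is a minimal sketch of the difference, assuming a Jackson 2.x ObjectMapper on the classpath; the class name and the malformed input are illustrative only and do not come from the examples below.

import java.io.IOException;

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;

public class GetMessageDemo {
    public static void main(String[] args) {
        ObjectMapper mapper = new ObjectMapper();
        try {
            // Deliberately malformed JSON: the closing brace is missing.
            mapper.readTree("{\"name\": \"value\"");
        } catch (JsonProcessingException e) {
            // Message plus location, e.g. "... at [Source: ...; line: 1, column: ...]"
            System.out.println(e.getMessage());
            // The same message without the appended location information
            System.out.println(e.getOriginalMessage());
        } catch (IOException e) {
            // Some Jackson versions declare readTree(String) as throwing IOException.
            System.out.println(e.getMessage());
        }
    }
}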

Usage

From source file:com.dlmu.bat.client.receiver.LocalFileSpanReceiver.java

@Override
public void receiveSpan(BaseSpan span) {
    TimerContext context = Metrics.newTimer("receiveSpanTimer", Collections.<String, String>emptyMap()).time();
    try {
        // Serialize the span data into a byte[].  Note that we're not holding the lock here, to improve concurrency.
        byte jsonBuf[] = null;
        try {
            jsonBuf = JSON_WRITER.writeValueAsBytes(span);
        } catch (JsonProcessingException e) {
            logger.error("receiveSpan(path=" + path + ", span=" + span + "): Json processing error: "
                    + e.getMessage());
            return;
        }

        // Grab the bufferLock and put our jsonBuf into the list of buffers to flush.
        byte toFlush[][] = null;
        bufferLock.lock();
        try {
            if (bufferedSpans == null) {
                logger.debug("receiveSpan(path=" + path + ", span=" + span + "): LocalFileSpanReceiver for "
                        + path + " is closed.");
                return;
            }
            bufferedSpans[bufferedSpansIndex] = jsonBuf;
            bufferedSpansIndex++;
            if (bufferedSpansIndex == bufferedSpans.length) {
                // If we've hit the limit for the number of buffers to flush,
                // swap out the existing bufferedSpans array for a new array, and
                // prepare to flush those spans to disk.
                toFlush = bufferedSpans;
                bufferedSpansIndex = 0;
                bufferedSpans = new byte[bufferedSpans.length][];
            }
        } finally {
            bufferLock.unlock();
        }
        if (toFlush != null) {
            // We released the bufferLock above, to avoid blocking concurrent
            // receiveSpan calls.  But now, we must take the channelLock, to make
            // sure that we have sole access to the output channel.  If we did not do
            // this, we might get interleaved output.
            //
            // There is a small chance that another thread doing a flush of more
            // recent spans could get ahead of us here, and take the lock before we
            // do.  This is ok, since spans don't have to be written out in order.
            channelLock.lock();
            try {
                doFlush(toFlush, toFlush.length);
            } catch (IOException ioe) {
                logger.error("Error flushing buffers to " + path + ": " + ioe.getMessage());
            } finally {
                channelLock.unlock();
            }
        }
    } finally {
        context.stop();
    }
}

From source file:org.wikidata.wdtk.dumpfiles.JsonDumpFileProcessor.java

/**
 * Process dump file data from the given input stream. This method uses the
 * efficient Jackson {@link MappingIterator}. However, this class cannot
 * recover from processing errors. If an error occurs in one entity, the
 * (presumably) less efficient processing method
 * {@link #processDumpFileContentsRecovery(InputStream)} is used instead.
 *
 * @see MwDumpFileProcessor#processDumpFileContents(InputStream, MwDumpFile)
 */
@Override
public void processDumpFileContents(InputStream inputStream, MwDumpFile dumpFile) {

    logger.info("Processing JSON dump file " + dumpFile.toString());

    try {
        try {
            MappingIterator<JacksonTermedStatementDocument> documentIterator = documentReader
                    .readValues(inputStream);
            documentIterator.getParser().disable(Feature.AUTO_CLOSE_SOURCE);

            while (documentIterator.hasNextValue()) {
                JacksonTermedStatementDocument document = documentIterator.nextValue();
                handleDocument(document);
            }
            documentIterator.close();
        } catch (JsonProcessingException e) {
            logJsonProcessingException(e);
            processDumpFileContentsRecovery(inputStream);
        }
    } catch (IOException e) {
        throw new RuntimeException("Cannot read JSON input: " + e.getMessage(), e);
    }

}

From source file:org.apache.htrace.core.LocalFileSpanReceiver.java

@Override
public void receiveSpan(Span span) {
    // Serialize the span data into a byte[].  Note that we're not holding the
    // lock here, to improve concurrency.
    byte jsonBuf[] = null;
    try {
        jsonBuf = JSON_WRITER.writeValueAsBytes(span);
    } catch (JsonProcessingException e) {
        LOG.error("receiveSpan(path=" + path + ", span=" + span + "): " + "Json processing error: "
                + e.getMessage());
        return;
    }

    // Grab the bufferLock and put our jsonBuf into the list of buffers to
    // flush. 
    byte toFlush[][] = null;
    bufferLock.lock();
    try {
        if (bufferedSpans == null) {
            LOG.debug("receiveSpan(path=" + path + ", span=" + span + "): " + "LocalFileSpanReceiver for "
                    + path + " is closed.");
            return;
        }
        bufferedSpans[bufferedSpansIndex] = jsonBuf;
        bufferedSpansIndex++;
        if (bufferedSpansIndex == bufferedSpans.length) {
            // If we've hit the limit for the number of buffers to flush, 
            // swap out the existing bufferedSpans array for a new array, and
            // prepare to flush those spans to disk.
            toFlush = bufferedSpans;
            bufferedSpansIndex = 0;
            bufferedSpans = new byte[bufferedSpans.length][];
        }
    } finally {
        bufferLock.unlock();
    }
    if (toFlush != null) {
        // We released the bufferLock above, to avoid blocking concurrent
        // receiveSpan calls.  But now, we must take the channelLock, to make
        // sure that we have sole access to the output channel.  If we did not do
        // this, we might get interleaved output.
        //
        // There is a small chance that another thread doing a flush of more
        // recent spans could get ahead of us here, and take the lock before we
        // do.  This is ok, since spans don't have to be written out in order.
        channelLock.lock();
        try {
            doFlush(toFlush, toFlush.length);
        } catch (IOException ioe) {
            LOG.error("Error flushing buffers to " + path + ": " + ioe.getMessage());
        } finally {
            channelLock.unlock();
        }
    }
}

From source file:org.venice.piazza.servicecontroller.taskmanaged.ServiceTaskManager.java

/**
 * Adds a Job to the Service's queue.
 * 
 * @param job
 *            The Job to be executed. This information contains the serviceId, which is used to lookup the
 *            appropriate Service Queue.
 */
public void addJobToQueue(ExecuteServiceJob job) {
    // Add the Job to the Jobs queue
    ServiceJob serviceJob = new ServiceJob(job.getJobId(), job.getData().getServiceId());
    mongoAccessor.addJobToServiceQueue(job.getData().getServiceId(), serviceJob);
    // Update the Job Status as Pending to Kafka
    StatusUpdate statusUpdate = new StatusUpdate();
    statusUpdate.setStatus(StatusUpdate.STATUS_PENDING);
    ProducerRecord<String, String> statusUpdateRecord;
    try {
        statusUpdateRecord = new ProducerRecord<String, String>(
                String.format("%s-%s", JobMessageFactory.UPDATE_JOB_TOPIC_NAME, SPACE), job.getJobId(),
                objectMapper.writeValueAsString(statusUpdate));
        producer.send(statusUpdateRecord);
    } catch (JsonProcessingException exception) {
        String error = "Error Sending Pending Job Status to Job Manager: " + exception.getMessage();
        LOGGER.error(error, exception);
        piazzaLogger.log(error, Severity.ERROR);
    }
}

From source file:org.apache.pulsar.functions.worker.WorkerService.java

public void start(URI dlogUri) throws InterruptedException {
    log.info("Starting worker {}...", workerConfig.getWorkerId());

    this.brokerAdmin = Utils.getPulsarAdminClient(workerConfig.getPulsarWebServiceUrl(),
            workerConfig.getClientAuthenticationPlugin(), workerConfig.getClientAuthenticationParameters(),
            workerConfig.getTlsTrustCertsFilePath(), workerConfig.isTlsAllowInsecureConnection());

    final String functionWebServiceUrl = StringUtils.isNotBlank(workerConfig.getFunctionWebServiceUrl())
            ? workerConfig.getFunctionWebServiceUrl()
            : workerConfig.getWorkerWebAddress();
    this.functionAdmin = Utils.getPulsarAdminClient(functionWebServiceUrl,
            workerConfig.getClientAuthenticationPlugin(), workerConfig.getClientAuthenticationParameters(),
            workerConfig.getTlsTrustCertsFilePath(), workerConfig.isTlsAllowInsecureConnection());

    try {
        log.info("Worker Configs: {}",
                new ObjectMapper().writerWithDefaultPrettyPrinter().writeValueAsString(workerConfig));
    } catch (JsonProcessingException e) {
        log.warn("Failed to print worker configs with error {}", e.getMessage(), e);
    }

    // create the dlog namespace for storing function packages
    this.dlogUri = dlogUri;
    DistributedLogConfiguration dlogConf = Utils.getDlogConf(workerConfig);
    try {
        this.dlogNamespace = NamespaceBuilder.newBuilder().conf(dlogConf)
                .clientId("function-worker-" + workerConfig.getWorkerId()).uri(this.dlogUri).build();
    } catch (Exception e) {
        log.error("Failed to initialize dlog namespace {} for storing function packages", dlogUri, e);
        throw new RuntimeException(e);
    }

    // create the state storage client for accessing function state
    if (workerConfig.getStateStorageServiceUrl() != null) {
        StorageClientSettings clientSettings = StorageClientSettings.newBuilder()
                .serviceUri(workerConfig.getStateStorageServiceUrl()).build();
        this.stateStoreAdminClient = StorageClientBuilder.newBuilder().withSettings(clientSettings)
                .buildAdmin();
    }

    // initialize the function metadata manager
    try {

        ClientBuilder clientBuilder = PulsarClient.builder()
                .serviceUrl(this.workerConfig.getPulsarServiceUrl());
        if (isNotBlank(workerConfig.getClientAuthenticationPlugin())
                && isNotBlank(workerConfig.getClientAuthenticationParameters())) {
            clientBuilder.authentication(workerConfig.getClientAuthenticationPlugin(),
                    workerConfig.getClientAuthenticationParameters());
        }
        clientBuilder.enableTls(workerConfig.isUseTls());
        clientBuilder.allowTlsInsecureConnection(workerConfig.isTlsAllowInsecureConnection());
        clientBuilder.tlsTrustCertsFilePath(workerConfig.getTlsTrustCertsFilePath());
        clientBuilder.enableTlsHostnameVerification(workerConfig.isTlsHostnameVerificationEnable());
        this.client = clientBuilder.build();
        log.info("Created Pulsar client");

        //create scheduler manager
        this.schedulerManager = new SchedulerManager(this.workerConfig, this.client, this.brokerAdmin,
                this.executor);

        //create function meta data manager
        this.functionMetaDataManager = new FunctionMetaDataManager(this.workerConfig, this.schedulerManager,
                this.client);

        this.connectorsManager = new ConnectorsManager(workerConfig);

        //create membership manager
        this.membershipManager = new MembershipManager(this, this.client);

        // create function runtime manager
        this.functionRuntimeManager = new FunctionRuntimeManager(this.workerConfig, this, this.dlogNamespace,
                this.membershipManager, connectorsManager, functionMetaDataManager);

        // Setting references to managers in scheduler
        this.schedulerManager.setFunctionMetaDataManager(this.functionMetaDataManager);
        this.schedulerManager.setFunctionRuntimeManager(this.functionRuntimeManager);
        this.schedulerManager.setMembershipManager(this.membershipManager);

        // initialize function metadata manager
        this.functionMetaDataManager.initialize();

        // initialize function runtime manager
        this.functionRuntimeManager.initialize();

        authenticationService = new AuthenticationService(PulsarConfigurationLoader.convertFrom(workerConfig));

        // Starting cluster services
        log.info("Start cluster services...");
        this.clusterServiceCoordinator = new ClusterServiceCoordinator(this.workerConfig.getWorkerId(),
                membershipManager);

        this.clusterServiceCoordinator.addTask("membership-monitor", this.workerConfig.getFailureCheckFreqMs(),
                () -> membershipManager.checkFailures(functionMetaDataManager, functionRuntimeManager,
                        schedulerManager));

        this.clusterServiceCoordinator.start();

        // Start function runtime manager
        this.functionRuntimeManager.start();

        // indicate function worker service is done initializing
        this.isInitialized = true;

        this.connectorsManager = new ConnectorsManager(workerConfig);

    } catch (Throwable t) {
        log.error("Error Starting up in worker", t);
        throw new RuntimeException(t);
    }
}

From source file:com.vmware.photon.controller.apife.backends.StepSqlBackend.java

private StepEntity getStepEntity(TaskEntity task, StepEntity.State state, Operation operation,
        Map<String, String> stepOptions) {
    StepEntity step = new StepEntity();
    step.setSequence(task.getNextStepSequence());
    step.setState(state);
    step.setOperation(operation);
    step.setQueuedTime(DateTime.now().toDate());
    // auto-link step to their task
    step.setTask(task);
    task.addStep(step);

    if (stepOptions != null && !stepOptions.isEmpty()) {
        try {
            step.setOptions(objectMapper.writeValueAsString(stepOptions));
        } catch (JsonProcessingException e) {
            throw new IllegalArgumentException(
                    String.format("Error serializing step %s options: %s", step.getId(), e.getMessage()));
        }
    }

    if (StepEntity.State.COMPLETED.equals(state)) {
        step.setStartedTime(DateTime.now().toDate());
        step.setEndTime(DateTime.now().toDate());
    }
    return step;
}

From source file:com.github.fge.jsonschema.load.URIManager.java

/**
 * Get the content at a given URI as a {@link JsonNode}
 *
 * @param uri the URI
 * @return the content
 * @throws ProcessingException scheme is not registered, failed to get
 * content, or content is not JSON
 */
public JsonNode getContent(final URI uri) throws ProcessingException {
    Preconditions.checkNotNull(uri, "null URI");

    final URI target = schemaRedirects.containsKey(uri) ? schemaRedirects.get(uri) : uri;

    if (!target.isAbsolute())
        throw new ProcessingException(BUNDLE.message("uriNotAbsolute").put("uri", uri));

    final String scheme = target.getScheme();

    final URIDownloader downloader = downloaders.get(scheme);

    if (downloader == null)
        throw new ProcessingException(BUNDLE.message("unhandledScheme").put("uri", uri).put("scheme", scheme));

    final InputStream in;

    try {
        in = downloader.fetch(target);
        return READER.readTree(in);
    } catch (JsonProcessingException e) {
        throw new ProcessingException(
                BUNDLE.message("uriNotJson").put("uri", uri).put("parsingMessage", e.getOriginalMessage()));
    } catch (IOException e) {
        throw new ProcessingException(
                BUNDLE.message("uriIOError").put("uri", uri).put("exceptionMessage", e.getMessage()));
    }
}

From source file:com.fiadot.springjsoncrypt.json.CryptMappingJacson2HttpMessageConverter.java

@Override
protected void writeInternal(Object object, HttpOutputMessage outputMessage)
        throws IOException, HttpMessageNotWritableException {

    JsonEncoding encoding = getJsonEncoding(outputMessage.getHeaders().getContentType());
    // The following has been deprecated as late as Jackson 2.2 (April 2013);
    // preserved for the time being, for Jackson 2.0/2.1 compatibility.
    @SuppressWarnings("deprecation")
    JsonGenerator jsonGenerator = this.objectMapper.getJsonFactory()
            .createJsonGenerator(outputMessage.getBody(), encoding);

    // A workaround for JsonGenerators not applying serialization features
    // https://github.com/FasterXML/jackson-databind/issues/12
    if (this.objectMapper.isEnabled(SerializationFeature.INDENT_OUTPUT)) {
        jsonGenerator.useDefaultPrettyPrinter();
    }

    try {
        if (this.jsonPrefix != null) {
            jsonGenerator.writeRaw(this.jsonPrefix);
        }

        // original source
        // jsonGenerator.
        //this.objectMapper.writeValue(jsonGenerator, object);

        CipherEncryptUtils cryptoUtil = new CipherEncryptUtils("AES", "AES/CBC/PKCS7Padding",
                "ls4h+XaXU+A5m72HRpwkeQ==", "W46YspHuEiQlKDcLTqoySw==");
        String encStr = null;
        try {
            encStr = cryptoUtil.encrypt(this.objectMapper.writeValueAsString(object));
            logger.info("MessageMapper::WriteInternal() encStr=" + encStr);
        } catch (Exception e) {
            // Note: the original source swallows the exception here; if encryption fails,
            // encStr stays null and the write below throws a NullPointerException.
        }

        outputMessage.getBody().write(encStr.getBytes());
    } catch (JsonProcessingException ex) {
        throw new HttpMessageNotWritableException("Could not write JSON: " + ex.getMessage(), ex);
    }
}

From source file:org.jmxtrans.embedded.config.ConfigurationParser.java

protected void mergeEmbeddedJmxTransConfiguration(@Nonnull String configurationUrl,
        @Nonnull EmbeddedJmxTrans embeddedJmxTrans) throws EmbeddedJmxTransException {
    try {
        if (configurationUrl.startsWith("classpath:")) {
            logger.debug("mergeEmbeddedJmxTransConfiguration({})", configurationUrl);
            String path = configurationUrl.substring("classpath:".length());
            InputStream in = Thread.currentThread().getContextClassLoader().getResourceAsStream(path);
            Preconditions.checkNotNull(in, "No file found for '" + configurationUrl + "'");
            mergeEmbeddedJmxTransConfiguration(in, embeddedJmxTrans);
        } else {
            mergeEmbeddedJmxTransConfiguration(new URL(configurationUrl), embeddedJmxTrans);
        }
    } catch (JsonProcessingException e) {
        throw new EmbeddedJmxTransException(
                "Exception loading configuration '" + configurationUrl + "': " + e.getMessage(), e);
    } catch (Exception e) {
        throw new EmbeddedJmxTransException("Exception loading configuration '" + configurationUrl + "'", e);
    }
}