Example usage for com.google.common.base Stopwatch createStarted

Introduction

This page collects usage examples for com.google.common.base.Stopwatch#createStarted, taken from the open-source files listed below.

Prototype

@CheckReturnValue
public static Stopwatch createStarted() 

Document

Creates (and starts) a new stopwatch using System.nanoTime() as its time source.
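
Before the project examples, here is a minimal, self-contained sketch of the typical pattern (no project-specific code assumed): create the stopwatch already running, do the timed work, then read the elapsed time, or log the stopwatch directly, since its toString() renders a human-readable duration.

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class StopwatchExample {
    public static void main(String[] args) throws InterruptedException {
        // createStarted() returns a Stopwatch that is already running
        Stopwatch stopwatch = Stopwatch.createStarted();

        Thread.sleep(250); // stand-in for the work being timed

        // elapsed(...) may be read while the stopwatch is still running
        long millis = stopwatch.elapsed(TimeUnit.MILLISECONDS);

        stopwatch.stop(); // optional: freezes the elapsed time

        // toString() formats the duration, e.g. "250.4 ms"
        System.out.println("took " + millis + " ms (" + stopwatch + ")");
    }
}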

Usage

From source file:org.obiba.magma.generator.FsGenerator.java

private static void doGenerate(Datasource source, Datasource target) throws Exception {
    Initialisables.initialise(target, source);

    for (ValueTable table : source.getValueTables()) {
        Stopwatch stopwatch = Stopwatch.createStarted();
        ValueTable toCopy = table;
        if (table.getValueSetCount() == 0) {
            toCopy = new GeneratedValueTable(target, Lists.newArrayList(table.getVariables()),
                    randInt(NB_ENTITIES_MIN, NB_ENTITIES_MAX));
        }
        DatasourceCopier.Builder.newCopier().build().copy(toCopy, table.getName(), target);
        log.info("Data generated and copied for {} ({}) in {}", table.getName(), source.getName(), stopwatch);
    }

    Disposables.dispose(target, source);
}

From source file:io.ecarf.core.cloud.task.common.CreateTermDictionaryTask.java

@SuppressWarnings("unchecked")
@Override
public void run() throws IOException {

    log.info("Creating terms dictionary.");
    Stopwatch stopwatch = Stopwatch.createStarted();

    if (StringUtils.isBlank(sourceBucket)) {
        log.warn("sourceBucket is empty, using bucket: " + bucket);
        this.sourceBucket = bucket;
    }

    // 1- Get and combine the terms from all the nodes
    // 2- Get all the terms from the schema file
    // 3- Create a dictionary bootstrapped with RDF & OWL URIs
    // 4- Add all to the dictionary, gzip and upload to cloud storage
    // 5- encode the schema & upload to cloud storage
    // 6- encode the relevant schema terms and upload to cloud storage

    // 1- Get and combine the terms from all the nodes
    for (String instanceId : this.processors) {

        String termsFile = Constants.NODE_TERMS + instanceId + Constants.DOT_SER + Constants.GZIP_EXT;

        String localTermsFile = Utils.TEMP_FOLDER + termsFile;

        log.info("Downloading processor terms file: " + termsFile + ", timer: " + stopwatch);

        try {
            this.getCloudService().downloadObjectFromCloudStorage(termsFile, localTermsFile, bucket);

            log.info("De-serializing compressed processor terms file: " + termsFile + ", timer: " + stopwatch);
            Set<String> nodeTerms = Utils.objectFromFile(localTermsFile, HashSet.class, true);

            if (nodeTerms != null) {
                log.info("Got: " + nodeTerms.size() + " terms for processor: " + instanceId);
                this.allTerms.addAll(nodeTerms);
            }

        } catch (IOException e) {
            // a file not found means the evm didn't find any schema terms, so it didn't generate any stats
            log.error("failed to download file: " + localTermsFile, e);
            if (!(e.getMessage().indexOf(GoogleMetaData.NOT_FOUND) >= 0)) {
                throw e;
            }
        } catch (ClassNotFoundException e) {
            log.error("failed to de-serialize file: " + localTermsFile, e);
            throw new IOException(e);
        }
    }

    // 2- Get all the terms from the schema file
    String localSchemaFile = Utils.TEMP_FOLDER + schemaFile;

    Path path = Paths.get(localSchemaFile);

    log.info("Getting terms from schema file: " + localSchemaFile + ", timer: " + stopwatch);

    if (!Files.exists(path)) {
        // download the file from the cloud storage
        this.getCloudService().downloadObjectFromCloudStorage(schemaFile, localSchemaFile, sourceBucket);

    } else {
        log.info("Schema file exists locally.");
    }

    NxGzipProcessor processor = new NxGzipProcessor(localSchemaFile);
    ExtractTermsCallback callback = new ExtractTermsCallback();

    processor.read(callback);

    this.allTerms.addAll(callback.getResources());
    this.allTerms.addAll(callback.getBlankNodes());

    log.info("TIMER# Finished processing schema file: " + localSchemaFile + ", timer: " + stopwatch);
    log.info("Number of unique URIs: " + callback.getResources().size());
    log.info("Number of blank nodes: " + callback.getBlankNodes().size());
    log.info("Number of literals: " + callback.getLiteralCount());

    // 3- Create a dictionary bootstrapped with RDF & OWL URIs
    log.info("Creating terms dictionary, timer: " + stopwatch);
    TermDictionaryGuava dictionary = (TermDictionaryGuava) TermDictionaryGuava
            .populateRDFOWLData(new TermDictionaryGuava());

    log.info("Removing RDF & OWL terms from all terms, timer: " + stopwatch);
    // should be faster to remove these terms than looping through all terms and checking
    for (String rdfOwlTerm : SchemaURIType.RDF_OWL_TERMS) {
        this.allTerms.remove(rdfOwlTerm);
    }

    // 4- Add all to the dictionary, gzip and upload to cloud storage
    log.info("Adding terms to dictionary, timer: " + stopwatch);
    for (String term : this.allTerms) {
        dictionary.add(term);
    }

    log.info("Serializing dictionary: " + dictionary + ", timer: " + stopwatch);
    String dictionaryFile = Utils.TEMP_FOLDER + FilenameUtils.DICTIONARY_SER;
    String savedDictionaryFile = dictionary.toFile(dictionaryFile, true);

    log.info("Uploading dictionary to cloud storage, timer: " + stopwatch);
    // upload the file to cloud storage
    this.cloudService.uploadFileToCloudStorage(savedDictionaryFile, bucket);

    this.addOutput("dictionary", FilenameUtils.DICTIONARY_SER + Constants.GZIP_EXT);
    log.info("TIMER# successfully created terms dictionary in: " + stopwatch);

}

From source file:qa.qcri.nadeef.core.pipeline.GuidedRepair.java

/**
 * Execute the operator.
 *
 * @param emptyInput .
 * @return Number of questions asked to user until there are no more violations.
 */
@Override
@SuppressWarnings("unchecked")
public Collection<TrainingInstance> execute(Optional emptyInput) throws Exception {
    Stopwatch stopwatch = Stopwatch.createStarted();

    Rule rule = getCurrentContext().getRule();
    DBConfig sourceDBConfig = getCurrentContext().getConnectionPool().getSourceDBConfig();
    DBConnectionPool sourceConnectionPool = getCurrentContext().getConnectionPool();
    SQLDialect dialect = sourceDBConfig.getDialect();
    SQLDialectBase dialectManager = SQLDialectFactory.getDialectManagerInstance(dialect);

    List<TrainingInstance> trainingInstances = new ArrayList<>();

    int globalUserInteractionCount = 0;
    int hitCount = 0;

    // TODO: WARN: XXX: find a better way to pass clean table name
    String dirtyTableName = (String) getCurrentContext().getRule().getTableNames().get(0);
    Schema dirtyTableSchema = DBMetaDataTool.getSchema(sourceDBConfig, dirtyTableName);
    String cleanTableName = dirtyTableName.replace("NOISE", "CLEAN").replace("noise", "clean");

    RankingManager rankingManager = new RankingManager(getCurrentContext(), dirtyTableName, cleanTableName);
    AuditManager auditManager = new AuditManager(getCurrentContext());

    GroundTruth userSimulation = new GroundTruth(this.getCurrentContext(), cleanTableName, dirtyTableSchema);

    int offset = 0;

    try {
        while (true) {
            // initialize all the counters that we use
            int hitPerAttribute = 0;
            int userInteractionPerAttribute = 0;

            // when moving to the next group, offset may still hold the last offset of the previous group, so reset it to 0
            offset = 0;
            RepairGroup topGroup = rankingManager.getTopGroup();

            if (topGroup == null) {
                // no more repair groups. break
                break;
            }

            topGroup.populateFixByVOI();

            while (topGroup.hasNext(offset)) {
                Fix solution = topGroup.getTopFix(offset);

                if (auditManager.isAlreadyUpdated(solution.getLeft())) {
                    // if the cell is already updated, we do not update it again; we skip it directly
                    offset++;
                    continue;
                }

                int tupleID = solution.getLeft().getTid();
                String attribute = solution.getLeft().getColumn().getColumnName();

                // user interaction, simulate user interaction by checking from clean dataset, ground truth
                Tuple dirtyTuple = DBConnectionHelper.getDatabaseTuple(sourceConnectionPool, dirtyTableName,
                        dirtyTableSchema, tupleID);
                Object dirtyValue = dirtyTuple.getCell(attribute).getValue();

                Object solutionValue;
                // GurobiSolver returns numerical answers in form of Double. We need to distinguish true integers
                if (dirtyValue instanceof Integer) {
                    solutionValue = Math.round(Double.parseDouble(solution.getRightValue()));
                } else {
                    solutionValue = solution.getRightValue();
                }

                // will be used to update model
                double similarityScore = Metrics.getEqual(dirtyValue.toString(), solutionValue.toString());
                TrainingInstance newTrainingInstance = new TrainingInstance(null, dirtyTuple, attribute,
                        solutionValue.toString(), similarityScore);

                boolean isHit = userSimulation.acceptFix(solution);

                if (isHit) {
                    // HIT :)) dirty cell correctly identified, now update database, reset the offset
                    offset = 0;

                    // increase hit count
                    hitCount++;

                    auditManager.applyFix(solution, null);

                    // add positive training instance
                    newTrainingInstance = new TrainingInstance(TrainingInstance.Label.YES, dirtyTuple,
                            attribute, solutionValue.toString(), similarityScore);

                    // call ConsistencyManager to recompute violations
                    Cell updatedCell = new Cell.Builder().tid(tupleID)
                            .column(new Column(dirtyTableName, attribute)).value(solutionValue).build();
                    // remove existing violations and find new ones
                    Set<Integer> affectedTuples = ConsistencyManager.getInstance()
                            .checkConsistency(getCurrentContext(), updatedCell);

                    topGroup.populateFixByVOI();
                } else {
                    // just increase the offset to retrieve the next repair cell
                    offset++;
                    if (offset > 20) {
                        //System.out.println("Count:" + userInteractionCount + " Offset:" + offset + " tupleid:" + tupleID + " attribute:" + attribute + " currentValue:" + dirtyTuple.getCell(attribute).getValue());
                    }
                    // add negative training instance
                    newTrainingInstance = new TrainingInstance(TrainingInstance.Label.NO, dirtyTuple, attribute,
                            solutionValue.toString(), similarityScore);
                }

                trainingInstances.add(newTrainingInstance);
                userInteractionPerAttribute++;
                globalUserInteractionCount++;

                Integer violationCount = ConsistencyManager.getInstance().countViolation(getCurrentContext());
                // output interaction count and # of violations
                System.out.println("# of violations: " + violationCount + " Interaction count: "
                        + globalUserInteractionCount);

            }
        }
    } catch (Exception e) {
        tracer.error("Guided repair could NOT be completed due to SQL Exception: ", e);
        throw e;
    }

    long elapsedTime = stopwatch.elapsed(TimeUnit.MILLISECONDS);

    PerfReport.appendMetric(PerfReport.Metric.RepairCallTime, elapsedTime);
    PerfReport.appendMetric(PerfReport.Metric.UserInteractionHITCount, hitCount);
    PerfReport.appendMetric(PerfReport.Metric.UserInteractionCount, globalUserInteractionCount);
    stopwatch.stop();
    return trainingInstances;
}

From source file:es.usc.citius.composit.core.composition.search.NaiveForwardServiceDiscoverer.java

public ServiceMatchNetwork<E, T> search(Signature<E> signature) {
    Set<E> availableInputs = new HashSet<E>(signature.getInputs());
    Set<E> newOutputs = new HashSet<E>(signature.getInputs());
    Set<Operation<E>> usedServices = new HashSet<Operation<E>>();
    List<Set<Operation<E>>> leveledOps = new LinkedList<Set<Operation<E>>>();

    boolean checkExpectedOutputs = !signature.getOutputs().isEmpty();
    boolean stop;

    Stopwatch timer = Stopwatch.createStarted();
    Stopwatch levelTimer = Stopwatch.createUnstarted();
    int level = 0;
    do {
        HashSet<Operation<E>> candidates = new HashSet<Operation<E>>();
        levelTimer.start();
        candidates.addAll(discovery.findOperationsConsumingSome(newOutputs));
        log.info("(Level {}) {} potential candidates selected in {}", level++, candidates.size(),
                levelTimer.toString());
        // Remove services that cannot be invoked with the available inputs
        for (Iterator<Operation<E>> it = candidates.iterator(); it.hasNext();) {
            Operation<E> candidate = it.next();
            Set<E> matched = matcher.partialMatch(availableInputs, candidate.getSignature().getInputs())
                    .getTargetElements();
            // Invokable?
            if (matched.equals(candidate.getSignature().getInputs())) {
                // Invokable operation, check if it was used previously
                boolean isNew = usedServices.add(candidate);
                if (!isNew)
                    it.remove();
            } else {
                it.remove();
            }
        }
        log.info("\t + [{}] operations selected for this level in {}: {}", candidates.size(),
                levelTimer.toString(), candidates);

        // Collect the new outputs of the new candidates
        newOutputs = Operations.outputs(candidates);
        availableInputs.addAll(newOutputs);
        Set<E> matchedOutputs = matcher.partialMatch(availableInputs, signature.getOutputs())
                .getTargetElements();

        // Add the discovered ops
        if (!candidates.isEmpty())
            leveledOps.add(candidates);

        log.debug("\t + Available inputs: {}, new outputs: {}", availableInputs.size(), newOutputs.size());
        // Stop condition. Stop if there are no more candidates and/or expected outputs are satisfied.
        stop = (checkExpectedOutputs) ? candidates.isEmpty() || matchedOutputs.equals(signature.getOutputs())
                : candidates.isEmpty();
        levelTimer.reset();
    } while (!stop);

    // Add the source and sink operations
    Source<E> sourceOp = new Source<E>(signature.getInputs());
    Sink<E> sinkOp = new Sink<E>(signature.getOutputs());
    leveledOps.add(0, Collections.<Operation<E>>singleton(sourceOp));
    leveledOps.add(leveledOps.size(), Collections.<Operation<E>>singleton(sinkOp));
    Stopwatch networkWatch = Stopwatch.createStarted();
    // Create a service match network with the discovered services
    DirectedAcyclicSMN<E, T> matchNetwork = new DirectedAcyclicSMN<E, T>(new HashLeveledServices<E>(leveledOps),
            this.matcher);
    log.info(" > Service match network computed in {}", networkWatch.stop().toString());
    log.info("Service Match Network created with {} levels (including source and sink) and {} operations.",
            leveledOps.size(), matchNetwork.listOperations().size());
    log.info("Forward Discovery done in {}", timer.toString());
    return matchNetwork;
}
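
The example above pairs createStarted with its sibling Stopwatch.createUnstarted(), reusing a single stopwatch across loop iterations via start() and reset(). A minimal sketch of that per-level timing pattern (processLevel is a hypothetical stand-in for the real work):

import com.google.common.base.Stopwatch;

public class LevelTimingExample {
    public static void main(String[] args) {
        Stopwatch total = Stopwatch.createStarted(); // times the whole run
        Stopwatch levelTimer = Stopwatch.createUnstarted(); // reused per level

        for (int level = 0; level < 3; level++) {
            levelTimer.start();
            processLevel(level); // hypothetical stand-in for the per-level work
            System.out.println("level " + level + " took " + levelTimer);
            levelTimer.reset(); // stops the stopwatch and zeroes the elapsed time
        }
        System.out.println("total: " + total);
    }

    private static void processLevel(int level) {
        long sum = 0;
        for (int i = 0; i < 1_000_000 * (level + 1); i++) {
            sum += i; // busy work so each level takes measurably longer
        }
        if (sum == -1) { // keep the loop from being optimized away
            System.out.println(sum);
        }
    }
}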

From source file:brooklyn.test.PerformanceTestUtils.java

/**
 * Creates a background thread that repeatedly logs (at info level) the CPU usage fraction,
 * sampling at the given period. Callers <em>must</em> cancel the returned future, e.g.
 * {@code future.cancel(true)}, otherwise it will keep logging until the JVM exits.
 */
public static Future<?> sampleProcessCpuTime(final Duration period, final String loggingContext) {
    final ExecutorService executor = Executors.newSingleThreadExecutor(new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            Thread thread = new Thread(r, "brooklyn-sampleProcessCpuTime-" + loggingContext);
            thread.setDaemon(true); // let the JVM exit
            return thread;
        }
    });
    Future<?> future = executor.submit(new Runnable() {
        @Override
        public void run() {
            try {
                long prevCpuTime = getProcessCpuTime();
                if (prevCpuTime == -1) {
                    LOG.warn("ProcessCPuTime not available; cannot sample; aborting");
                    return;
                }
                while (true) {
                    Stopwatch stopwatch = Stopwatch.createStarted();
                    Thread.sleep(period.toMilliseconds());
                    long currentCpuTime = getProcessCpuTime();

                    long elapsedTime = stopwatch.elapsed(TimeUnit.MILLISECONDS);
                    double fractionCpu = (elapsedTime > 0)
                            ? ((double) currentCpuTime - prevCpuTime)
                                    / TimeUnit.MILLISECONDS.toNanos(elapsedTime)
                            : -1;
                    prevCpuTime = currentCpuTime;

                    LOG.info("CPU fraction over last {} was {} ({})", new Object[] {
                            Time.makeTimeStringRounded(elapsedTime), fractionCpu, loggingContext });
                }
            } catch (InterruptedException e) {
                return; // graceful termination
            } finally {
                executor.shutdownNow();
            }
        }
    });
    return future;
}

From source file:org.apache.jackrabbit.oak.plugins.document.JournalGarbageCollector.java

/**
 * Deletes entries in the journal that are older than the given
 * maxRevisionAge.
 *
 * @param maxRevisionAge entries older than this age will be removed
 * @param batchSize      the maximum number of entries to remove per query
 * @param unit           the time unit for maxRevisionAge
 * @return the number of entries that have been removed
 */
public int gc(long maxRevisionAge, int batchSize, TimeUnit unit) {
    long maxRevisionAgeInMillis = unit.toMillis(maxRevisionAge);
    if (log.isDebugEnabled()) {
        log.debug("gc: Journal garbage collection starts with maxAge: {} min., batch size: {}.",
                TimeUnit.MILLISECONDS.toMinutes(maxRevisionAgeInMillis), batchSize);
    }
    Stopwatch sw = Stopwatch.createStarted();

    // the journal has ids of the following format:
    // 1-0000014db9aaf710-00000001
    // where the first number is the cluster node id.
    // this format prevents us from doing a generic
    // query to get all 'old' entries, as the document store
    // can only query for a sequential list of entries
    // (and the cluster node id here partitions the set
    // of entries that we have to delete).
    // To account for that, we simply iterate over all
    // cluster node ids and clean them up individually.
    // Note that there are possible alternatives, such as
    // letting each node clean up its own old entries,
    // but the chosen path is also quite simple: it can
    // be started on any instance - though best on only one.
    // if it's run on multiple instances concurrently, they
    // will compete at deletion, which is suboptimal
    // for performance but does no harm.

    // 1. get the list of cluster node ids
    final List<ClusterNodeInfoDocument> clusterNodeInfos = ClusterNodeInfoDocument.all(ds);
    int numDeleted = 0;
    for (ClusterNodeInfoDocument clusterNodeInfoDocument : clusterNodeInfos) {
        // the current algorithm simply looks at all cluster nodes,
        // irrespective of whether they are active or inactive.
        // this could be optimized for inactive ones: at some point, all
        // journal entries of an inactive node will have been cleaned up,
        // and from then on we could stop including those long-inactive nodes.
        // that 'long time' aspect would have to be tracked though, to be sure
        // we don't leave garbage.
        // so it is simpler to do a quick query even for long-inactive ones.
        final int clusterNodeId = clusterNodeInfoDocument.getClusterId();

        // 2. iterate over that list and do a query with
        //    a limit of 'batch size'
        boolean branch = false;
        long startPointer = 0;
        while (true) {
            String fromKey = JournalEntry.asId(new Revision(startPointer, 0, clusterNodeId, branch));
            String toKey = JournalEntry.asId(new Revision(System.currentTimeMillis() - maxRevisionAgeInMillis,
                    Integer.MAX_VALUE, clusterNodeId, branch));
            List<JournalEntry> deletionBatch = ds.query(Collection.JOURNAL, fromKey, toKey, batchSize);
            if (deletionBatch.size() > 0) {
                ds.remove(Collection.JOURNAL, asKeys(deletionBatch));
                numDeleted += deletionBatch.size();
            }
            if (deletionBatch.size() < batchSize) {
                if (!branch) {
                    // do the same for branches:
                    // this will start at the beginning again with branch set to true
                    // and eventually finish too
                    startPointer = 0;
                    branch = true;
                    continue;
                }
                break;
            }
            startPointer = deletionBatch.get(deletionBatch.size() - 1).getRevisionTimestamp();
        }
    }

    sw.stop();

    if (numDeleted > 0) {
        log.info("gc: Journal garbage collection took {}, deleted {} entries that were older than {} min.", sw,
                numDeleted, TimeUnit.MILLISECONDS.toMinutes(maxRevisionAgeInMillis));
    }
    return numDeleted;
}

From source file:com.fireball1725.firelib.FireMod.java

@Mod.EventHandler
public final void preInit(FMLPreInitializationEvent event) {
    final Stopwatch stopwatch = Stopwatch.createStarted();
    this.getLogger().info("Pre Initialization (Started)");

    // Check java version to make sure we are on Java 1.8
    if (!SystemUtils.isJavaVersionAtLeast(JavaVersion.JAVA_1_8)) {
        //throw new OutdatedJavaException(String.format("%s requires Java 8 or newer, Please update your java", ModInfo.MOD_NAME));
    }

    this.proxy().registerEventHandler(this);
    proxy().initConfiguration(event);
    proxy().preInitStart(event);
    proxy().registerEventHandler(new RegistrationHelper(this));
    proxy().preInitEnd(event);

    this.getLogger()
            .info("Pre Initialization (Ended after " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + "ms)");
}

From source file:eu.amidst.huginlink.examples.demos.ParallelTANDemo.java

public static void demoPigs() throws IOException, ClassNotFoundException {

    //It needs GBs, so avoid putting this file in a Dropbox folder!!
    //String dataFile = new String("/Users/afa/Pigs.arff");

    BayesianNetwork bn = BayesianNetworkLoader.loadFromFile("networks/dataWeka/Pigs.bn");

    int sampleSize = 10000;
    BayesianNetworkSampler sampler = new BayesianNetworkSampler(bn);

    ArrayList<Integer> vSamplesOnMemory = new ArrayList(Arrays.asList(5000));
    ArrayList<Integer> vNumCores = new ArrayList(Arrays.asList(1, 2, 3, 4));

    for (Integer samplesOnMemory : vSamplesOnMemory) {
        for (Integer numCores : vNumCores) {
            System.out.println(
                    "Learning TAN: " + samplesOnMemory + " samples on memory, " + numCores + " core/s ...");
            DataStream<DataInstance> data = sampler.sampleToDataStream(sampleSize);

            ParallelTAN tan = new ParallelTAN();
            tan.setNumCores(numCores);
            tan.setNumSamplesOnMemory(samplesOnMemory);
            tan.setNameRoot(bn.getVariables().getListOfVariables().get(0).getName());
            tan.setNameTarget(bn.getVariables().getListOfVariables().get(1).getName());
            Stopwatch watch = Stopwatch.createStarted();
            BayesianNetwork model = tan.learn(data);
            System.out.println(watch.stop());
        }
    }
}

From source file:com.isotrol.impe3.pms.core.support.ObjectsLoader.java

/**
 * Constructor.
 * @param name Object name for debugging.
 * @param computer Computer function.
 */
private ObjectsLoader(String name, final Function<UUID, T> computer) {
    final CacheLoader<Key, T> loader = new CacheLoader<Key, T>() {
        @Override
        public T load(Key key) throws Exception {
            final Stopwatch w = Stopwatch.createStarted();
            try {
                return computer.apply(key.id);
            } finally {
                long t = w.elapsed(TimeUnit.MILLISECONDS);
                if (t > 500) {
                    System.out
                            .println(String.format("State Loader [%s] took %d ms", ObjectsLoader.this.name, t));
                }
            }
        }
    };
    if (StringUtils.hasText(name)) {
        this.name = name;
    } else {
        this.name = computer.toString();
    }
    this.cache = CacheBuilder.newBuilder().maximumSize(64L).softValues()
            .expireAfterAccess(2 * 3600L, TimeUnit.SECONDS).build(loader);
}

From source file:me.lazerka.gae.jersey.oauth2.facebook.FacebookFetcher.java

String fetch(URL url) throws IOException, InvalidKeyException {
    logger.trace("Requesting endpoint to validate token");

    HTTPRequest httpRequest = new HTTPRequest(url, GET, validateCertificate());

    Stopwatch stopwatch = Stopwatch.createStarted();
    HTTPResponse response = urlFetchService.fetch(httpRequest);
    logger.debug("Remote call took {}ms", stopwatch.elapsed(TimeUnit.MILLISECONDS));

    int responseCode = response.getResponseCode();
    String content = new String(response.getContent(), UTF_8);

    if (responseCode != 200) {
        logger.warn("{}: {}", responseCode, content);

        String msg = "Endpoint response code " + responseCode;

        // Something is wrong with our request.
        // If signature is invalid, then response code is 403.
        if (responseCode >= 400 && responseCode < 500) {
            try {
                JsonNode tree = jackson.readTree(content);
                JsonNode error = tree.findPath("error");
                if (!error.isMissingNode()) {
                    msg += ": " + error.findPath("message").textValue();
                }
            } catch (IOException e) {
                logger.warn("Cannot parse response as error");
            }
        }

        throw new InvalidKeyException(msg);
    }

    return content;
}