Example usage for org.apache.commons.lang3.exception ExceptionUtils getRootCause

Introduction

This page collects example usages of org.apache.commons.lang3.exception.ExceptionUtils#getRootCause from open-source projects.

Prototype

public static Throwable getRootCause(final Throwable throwable) 

Document

Introspects the Throwable to obtain the root cause.

This method walks through the exception chain to the last element, the "root" of the tree, using #getCause(Throwable), and returns that exception.

From version 2.2, this method handles recursive cause structures that might otherwise cause infinite loops.
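
As a minimal, self-contained sketch (the class name and exception messages are illustrative), the method returns the deepest cause of a chained exception:

import java.io.IOException;

import org.apache.commons.lang3.exception.ExceptionUtils;

public class GetRootCauseSketch {
    public static void main(String[] args) {
        // Build a three-level cause chain: RuntimeException -> IllegalStateException -> IOException
        Throwable chained = new RuntimeException("request failed",
                new IllegalStateException("service error",
                        new IOException("disk unavailable")));

        // getRootCause walks the chain to its last element and returns it
        Throwable root = ExceptionUtils.getRootCause(chained);
        System.out.println(root); // prints: java.io.IOException: disk unavailable

        // A null input yields null, so callers typically null-check the result
        System.out.println(ExceptionUtils.getRootCause(null)); // prints: null
    }
}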

Usage

From source file: org.alfresco.bm.tools.BMTestRunner.java

/**
 * Execute the default test against the given MongoDB or an in-memory instance
 *
 * @param mongoConfigHost           the MongoDB host to connect to for configuration data, or <tt>null</tt> to use an in-memory instance
 * @param mongoTestHost             the MongoDB host to connect to for test data, or <tt>null</tt> to use the same database as the config
 * @param testProperties            any properties to specifically set for the test or <tt>null</tt> if there are none
 */
public void run(String mongoConfigHost, String mongoTestHost, Properties testProperties) throws Exception {
    // Secure the listeners against modification
    List<BMTestRunnerListener> listeners = new ArrayList<BMTestRunnerListener>(this.listeners);

    // If no MongoDB URL is provided, then we have to start one
    MongoDBForTestsFactory mongoDBForTestsFactory = null;
    ClassPathXmlApplicationContext ctx = null;
    try {
        // Ensure that required system properties are present
        System.setProperty(PROP_APP_CONTEXT_PATH, System.getProperty("user.dir"));
        System.setProperty(PROP_APP_DIR, System.getProperty("user.dir"));

        // Create a MongoDB for use if one has not been specified
        if (mongoConfigHost == null) {
            mongoDBForTestsFactory = new MongoDBForTestsFactory();
            String uriWithoutDB = mongoDBForTestsFactory.getMongoURIWithoutDB();
            mongoConfigHost = new MongoClientURI(uriWithoutDB).getHosts().get(0);
        }
        // Fill in the URI for the test MongoDB
        if (mongoTestHost == null) {
            mongoTestHost = mongoConfigHost;
        }

        // Fill in the properties required for the test
        Properties mongoProps = new Properties();
        mongoProps.put(PROP_MONGO_CONFIG_HOST, mongoConfigHost);

        // Construct the application context
        ctx = new ClassPathXmlApplicationContext(new String[] { PATH_APP_CONTEXT }, false);
        // Push cluster properties into the context (must be done AFTER setting parent context)
        ConfigurableEnvironment ctxEnv = ctx.getEnvironment();
        // Mongo properties come first
        ctxEnv.getPropertySources().addFirst(new PropertiesPropertySource("mongo-props", mongoProps));
        // Finally, system properties overrule them all
        ctxEnv.getPropertySources()
                .addFirst(new PropertiesPropertySource("system-props", System.getProperties()));

        // Kick it all off
        try {
            ctx.refresh();
        } catch (Exception e) {
            Throwable root = ExceptionUtils.getRootCause(e);
            if (root != null
                    && (root instanceof MongoSocketException || root instanceof UnknownHostException)) {
                // We deal with this specifically as it's a simple case of not finding the MongoDB
                logger.error("Set the configuration property '" + PROP_MONGO_CONFIG_HOST
                        + "' (<server>:<port>) as required.");
            } else {
                // Log application start failure because test frameworks might not do so nicely
                logger.error("Failed to start application.", e);
            }
            throw new RuntimeException("Failed to start application.", e);
        }

        // Get the test
        Test test = ctx.getBean(Test.class);
        String release = test.getRelease();
        Integer schema = test.getSchema();

        TestRestAPI api = ctx.getBean(TestRestAPI.class);

        // Create a new test
        TestDetails testDetails = new TestDetails();
        String testName = "BMTestRunner_" + System.currentTimeMillis();
        testDetails.setName(testName);
        testDetails.setDescription("Test created by BMTestRunner on " + new Date());
        testDetails.setRelease(release);
        testDetails.setSchema(schema);
        api.createTest(testDetails);

        // We need to tell the test which MongoDB to write data to
        PropSetBean propSet = new PropSetBean();
        propSet.setValue(mongoTestHost);
        propSet.setVersion(0);
        api.setTestProperty(testName, PROP_MONGO_TEST_HOST, propSet);

        // Now set any properties that have been explicitly passed in for the test
        if (testProperties != null) {
            for (Map.Entry<Object, Object> entry : testProperties.entrySet()) {
                String propKey = (String) entry.getKey();
                String propVal = (String) entry.getValue();

                propSet.setValue(propVal);
                propSet.setVersion(0);
                api.setTestProperty(testName, propKey, propSet);
            }
        }

        // Call listeners: the test has been created
        for (BMTestRunnerListener listener : listeners) {
            listener.testReady(ctx, testName);
        }

        // Create a new test run
        TestRunDetails testRunDetails = new TestRunDetails();
        String testRunName = "BMTestRunner_" + System.currentTimeMillis();
        testRunDetails.setName(testRunName);
        testRunDetails.setDescription("Test run created by BMTestRunner on " + new Date());
        api.createTestRun(testDetails.getName(), testRunDetails);

        // Call listeners: the test run has been created
        for (BMTestRunnerListener listener : listeners) {
            listener.testRunReady(ctx, testName, testRunName);
        }

        // Get all the test run properties for logging
        String jsonTestRun = api.getTestRun(testName, testRunName);

        // Start the test run
        logger.info("Starting test run: " + testRunName + "\n" + jsonTestRun);
        TestRunSchedule testRunSchedule = new TestRunSchedule();
        testRunSchedule.setScheduled(System.currentTimeMillis());
        api.scheduleTestRun(testName, testRunName, testRunSchedule);

        // Call listeners: the test run has started
        for (BMTestRunnerListener listener : listeners) {
            listener.testRunStarted(ctx, testName, testRunName);
        }

        // Wait for the test run to complete
        long timeInit = System.currentTimeMillis();
        long timeLastChange = -1L;
        String jsonLastChange = null;
        String testRunStateStr = api.getTestRunState(testName, testRunName);

        // Keep looking until the test run completes
        while (!TestRunState.COMPLETED.toString().equals(testRunStateStr)) {
            long now = System.currentTimeMillis();

            // Check that we have not exceeded the maximum time
            if (now - timeInit > maxTestTime) {
                throw new RuntimeException("Test run failed to complete in " + (int) maxTestTime / 1000 + "s.");
            }

            testRunStateStr = api.getTestRunState(testName, testRunName);

            if (TestRunState.SCHEDULED.toString().equals(testRunStateStr) && (now - timeInit) > 10000L) {
                throw new RuntimeException("Test run failed to start in 10s.");
            }

            // Check that there are updates to the test run
            String jsonNow = api.getTestRunSummary(testName, testRunName);
            if (jsonLastChange != null && jsonLastChange.equals(jsonNow)) {
                if ((now - timeLastChange) > 60000L) {
                    throw new RuntimeException("Test run has not been updated in the last 60s");
                }
            } else {
                // Only reset the staleness markers when the summary actually changes;
                // updating them on every pass would stop the 60s check above from ever firing
                timeLastChange = now;
                jsonLastChange = jsonNow;
            }

            synchronized (testRunStateStr) {
                try {
                    testRunStateStr.wait(1000L);
                } catch (InterruptedException e) {
                    // Ignore the interruption and poll the run state again
                }
            }
        }
        // Call listeners: the test run has finished
        for (BMTestRunnerListener listener : listeners) {
            listener.testRunFinished(ctx, testName, testRunName);
        }
    } finally {
        // Close the context
        if (ctx != null) {
            try {
                ctx.close();
            } catch (Exception e) {
                logger.error("Failed to shut down application context.", e);
            }
        }
        // Close the local Mongo instance
        if (mongoDBForTestsFactory != null) {
            try {
                mongoDBForTestsFactory.destroy();
            } catch (Exception e) {
                logger.error("Failed to stop in-memory MongoDB instance.", e);
            }
        }
    }
}

From source file: org.apache.cxf.systest.jaxrs.JAXRS20ClientServerBookTest.java

@Test
public void testUnknownHostException() throws InterruptedException {
    String address = "http://unknown-host/bookstore/bookheaders/simple/async";
    try {
        doTestPostBookAsyncHandler(address);
        fail("Should fail with UnknownHostException");
    } catch (ExecutionException e) {
        assertTrue("Should fail with UnknownHostException",
                ExceptionUtils.getRootCause(e) instanceof UnknownHostException);
    }
}

From source file: org.apache.drill.common.util.DataInputInputStream.java

@Override
public int read(byte[] b, int off, int len) throws IOException {
    for (int i = off; i < off + len; i++) {
        try {
            b[i] = in.readByte();
        } catch (Exception e) {
            if (ExceptionUtils.getRootCause(e) instanceof EOFException) {
                return i - off;
            } else {
                throw e;
            }
        }
    }
    return len;
}

From source file: org.apache.drill.exec.planner.sql.SqlConverter.java

public SqlNode validate(final SqlNode parsedNode) {
    try {
        SqlNode validatedNode = validator.validate(parsedNode);
        return validatedNode;
    } catch (RuntimeException e) {
        final Throwable rootCause = ExceptionUtils.getRootCause(e);
        if (rootCause instanceof SqlValidatorException
                && StringUtils.contains(rootCause.getMessage(), "No match found for function signature")) {
            throw new FunctionNotFoundException(rootCause.getMessage(), e);
        }
        UserException.Builder builder = UserException.validationError(e).addContext("SQL Query", sql);
        if (isInnerQuery) {
            builder.message("Failure validating a view your query is dependent upon.");
        }
        throw builder.build(logger);
    }
}

From source file: org.apache.falcon.rerun.handler.AbstractRerunConsumer.java

@Override
public void run() {
    int attempt = 1;
    AbstractRerunPolicy policy = new ExpBackoffPolicy();
    Frequency frequency = new Frequency("minutes(1)");
    while (!Thread.currentThread().isInterrupted()) {
        try {
            T message;
            try {
                message = handler.takeFromQueue();
                attempt = 1;
            } catch (FalconException e) {
                if (ExceptionUtils.getRootCause(e) instanceof InterruptedException) {
                    LOG.info("Rerun handler daemon has been interrupted");
                    return;
                } else {
                    LOG.error("Error while reading message from the queue", e);
                    GenericAlert.alertRerunConsumerFailed("Error while reading message from the queue: ", e);
                    Thread.sleep(policy.getDelay(frequency, attempt));
                    handler.reconnect();
                    attempt++;
                    continue;
                }
            }

            // Log the user in so the WfEngine is accessed as this user
            CurrentUser.authenticate(message.getWorkflowUser());
            String jobStatus = handler.getWfEngine().getWorkflowStatus(message.getClusterName(),
                    message.getWfId());
            handleRerun(message.getClusterName(), jobStatus, message);

        } catch (Throwable e) {
            LOG.error("Error in rerun consumer", e);
        }
    }
}

From source file: org.apache.flink.runtime.blob.BlobCacheGetTest.java

/**
 * [FLINK-6020] Tests that concurrent get operations don't concurrently access the BlobStore to
 * download a blob.
 *
 * @param jobId
 *       job ID to use (or <tt>null</tt> if job-unrelated)
 * @param blobType
 *       whether the BLOB should become permanent or transient
 * @param cacheAccessesHAStore
 *       whether the cache has access to the {@link BlobServer}'s HA store or not
 */
private void testConcurrentGetOperations(final JobID jobId, final BlobKey.BlobType blobType,
        final boolean cacheAccessesHAStore) throws IOException, InterruptedException, ExecutionException {
    final Configuration config = new Configuration();
    config.setString(BlobServerOptions.STORAGE_DIRECTORY, temporaryFolder.newFolder().getAbsolutePath());

    final BlobStore blobStoreServer = mock(BlobStore.class);
    final BlobStore blobStoreCache = mock(BlobStore.class);

    final int numberConcurrentGetOperations = 3;
    final List<CompletableFuture<File>> getOperations = new ArrayList<>(numberConcurrentGetOperations);

    final byte[] data = { 1, 2, 3, 4, 99, 42 };

    final ExecutorService executor = Executors.newFixedThreadPool(numberConcurrentGetOperations);

    try (final BlobServer server = new BlobServer(config, blobStoreServer);
            final BlobCacheService cache = new BlobCacheService(config,
                    cacheAccessesHAStore ? blobStoreServer : blobStoreCache,
                    new InetSocketAddress("localhost", server.getPort()))) {

        server.start();

        // upload data first
        final BlobKey blobKey = put(server, jobId, data, blobType);

        // now try accessing it concurrently (only HA mode will be able to retrieve it from HA store!)
        for (int i = 0; i < numberConcurrentGetOperations; i++) {
            CompletableFuture<File> getOperation = CompletableFuture.supplyAsync(() -> {
                try {
                    File file = get(cache, jobId, blobKey);
                    // check that we have read the right data
                    validateGetAndClose(new FileInputStream(file), data);
                    return file;
                } catch (IOException e) {
                    throw new CompletionException(
                            new FlinkException("Could not read blob for key " + blobKey + '.', e));
                }
            }, executor);

            getOperations.add(getOperation);
        }

        FutureUtils.ConjunctFuture<Collection<File>> filesFuture = FutureUtils.combineAll(getOperations);

        if (blobType == PERMANENT_BLOB) {
            // wait until all operations have completed and check that no exception was thrown
            filesFuture.get();
        } else {
            // wait for all futures to complete (do not abort on expected exceptions) and check
            // that at least one succeeded
            int completedSuccessfully = 0;
            for (CompletableFuture<File> op : getOperations) {
                try {
                    op.get();
                    ++completedSuccessfully;
                } catch (Throwable t) {
                    // Transient BLOBs are deleted upon first access; only one request succeeds,
                    // while all others fail with an IOException caused by a FileNotFoundException
                    if (!(ExceptionUtils.getRootCause(t) instanceof FileNotFoundException)) {
                        // Rethrow anything other than the expected FileNotFoundException
                        org.apache.flink.util.ExceptionUtils.rethrowIOException(t);
                    }
                    }
                }
            }
            // multiple clients may have accessed the BLOB successfully before it was
            // deleted, but always at least one:
            assertThat(completedSuccessfully, greaterThanOrEqualTo(1));
        }
    } finally {
        executor.shutdownNow();
    }
}

From source file: org.apache.jena.query.text.es.TextIndexES.java

/**
 * Delete the value of the entity from the existing document, if any.
 * The document itself will never get deleted. Only the value will get deleted.
 * @param entity entity whose value needs to be deleted
 */
@Override
public void deleteEntity(Entity entity) {

    String fieldToRemove = null;
    String valueToRemove = null;
    for (String field : docDef.fields()) {
        if (entity.get(field) != null) {
            fieldToRemove = field;
            if (entity.getLanguage() != null && !entity.getLanguage().isEmpty()) {
                fieldToRemove = normalizeFieldName(fieldToRemove, entity.getLanguage());
            }
            valueToRemove = (String) entity.get(field);
            break;
        }
    }

    if (fieldToRemove != null && valueToRemove != null) {

        LOGGER.debug("deleting content related to entity: " + entity.getId());
        String deleteScript = DELETE_SCRIPT.replaceAll("<fieldToRemove>", fieldToRemove);
        Map<String, Object> params = new HashMap<>();
        params.put("valueToRemove", valueToRemove);

        UpdateRequest updateRequest = new UpdateRequest(indexName, docDef.getEntityField(), entity.getId())
                .script(new Script(Script.DEFAULT_SCRIPT_TYPE, Script.DEFAULT_SCRIPT_LANG, deleteScript,
                        params));

        try {
            client.update(updateRequest).get();
        } catch (Exception e) {
            if (ExceptionUtils.getRootCause(e) instanceof DocumentMissingException) {
                LOGGER.debug("Trying to delete values from a missing document. Ignoring deletion of entity: ",
                        entity);
            } else {
                throw new TextIndexException("Unable to delete entity.", e);
            }
        }
    }
}

From source file: org.apache.kylin.query.util.PushDownUtil.java

private static boolean isExpectedCause(SQLException sqlException) {
    Preconditions.checkArgument(sqlException != null);
    Throwable rootCause = ExceptionUtils.getRootCause(sqlException);

    // SqlValidatorException is not an expected exception in the original design. But in the multi-pass
    // scenario, query pushdown may create tables that are not in the model, which then throws SqlValidatorException.
    boolean isPushDownUpdateEnabled = KylinConfig.getInstanceFromEnv().isPushDownUpdateEnabled();

    if (!isPushDownUpdateEnabled) {
        return rootCause != null //
                && (rootCause instanceof NoRealizationFoundException //
                        || rootCause instanceof RoutingIndicatorException); //
    } else {
        return (rootCause != null //
                && (rootCause instanceof NoRealizationFoundException //
                        || rootCause instanceof SqlValidatorException //
                        || rootCause instanceof RoutingIndicatorException)); //
    }
}

From source file: org.apache.kylin.rest.service.QueryService.java

public SQLResponse doQueryWithCache(SQLRequest sqlRequest) {
    Message msg = MsgPicker.getMsg();

    KylinConfig kylinConfig = KylinConfig.getInstanceFromEnv();
    String serverMode = kylinConfig.getServerMode();
    if (!(Constant.SERVER_MODE_QUERY.equals(serverMode.toLowerCase())
            || Constant.SERVER_MODE_ALL.equals(serverMode.toLowerCase()))) {
        throw new BadRequestException(String.format(msg.getQUERY_NOT_ALLOWED(), serverMode));
    }
    if (StringUtils.isBlank(sqlRequest.getProject())) {
        throw new BadRequestException(msg.getEMPTY_PROJECT_NAME());
    }

    if (sqlRequest.getBackdoorToggles() != null)
        BackdoorToggles.addToggles(sqlRequest.getBackdoorToggles());

    final QueryContext queryContext = QueryContext.current();

    try (SetThreadName ignored = new SetThreadName("Query %s", queryContext.getQueryId())) {
        String sql = sqlRequest.getSql();
        String project = sqlRequest.getProject();
        logger.info("Using project: " + project);
        logger.info("The original query:  " + sql);

        if (!sql.toLowerCase().contains("select")) {
            logger.debug("Directly return exception as not supported");
            throw new BadRequestException(msg.getNOT_SUPPORTED_SQL());
        }

        long startTime = System.currentTimeMillis();

        SQLResponse sqlResponse = null;
        boolean queryCacheEnabled = checkCondition(kylinConfig.isQueryCacheEnabled(),
                "query cache disabled in KylinConfig") && //
                checkCondition(!BackdoorToggles.getDisableCache(), "query cache disabled in BackdoorToggles");

        if (queryCacheEnabled) {
            sqlResponse = searchQueryInCache(sqlRequest);
        }

        try {
            if (null == sqlResponse) {
                sqlResponse = query(sqlRequest);

                long durationThreshold = kylinConfig.getQueryDurationCacheThreshold();
                long scanCountThreshold = kylinConfig.getQueryScanCountCacheThreshold();
                long scanBytesThreshold = kylinConfig.getQueryScanBytesCacheThreshold();
                sqlResponse.setDuration(System.currentTimeMillis() - startTime);
                logger.info("Stats of SQL response: isException: {}, duration: {}, total scan count {}", //
                        String.valueOf(sqlResponse.getIsException()), String.valueOf(sqlResponse.getDuration()),
                        String.valueOf(sqlResponse.getTotalScanCount()));
                if (checkCondition(queryCacheEnabled, "query cache is disabled") //
                        && checkCondition(!sqlResponse.getIsException(), "query has exception") //
                        && checkCondition(
                                sqlResponse.getDuration() > durationThreshold
                                        || sqlResponse.getTotalScanCount() > scanCountThreshold
                                        || sqlResponse.getTotalScanBytes() > scanBytesThreshold, //
                                "query is too lightweight with duration: {} (threshold {}), scan count: {} (threshold {}), scan bytes: {} (threshold {})",
                                sqlResponse.getDuration(), durationThreshold, sqlResponse.getTotalScanCount(),
                                scanCountThreshold, sqlResponse.getTotalScanBytes(), scanBytesThreshold)
                        && checkCondition(
                                sqlResponse.getResults().size() < kylinConfig.getLargeQueryThreshold(),
                                "query response is too large: {} ({})", sqlResponse.getResults().size(),
                                kylinConfig.getLargeQueryThreshold())) {
                    cacheManager.getCache(SUCCESS_QUERY_CACHE)
                            .put(new Element(sqlRequest.getCacheKey(), sqlResponse));
                }

            } else {
                sqlResponse.setDuration(System.currentTimeMillis() - startTime);
                sqlResponse.setTotalScanCount(0);
                sqlResponse.setTotalScanBytes(0);
            }

            checkQueryAuth(sqlResponse, project);

        } catch (Throwable e) { // Calcite may throw AssertionError
            logger.error("Exception when execute sql", e);
            String errMsg = QueryUtil.makeErrorMsgUserFriendly(e);

            sqlResponse = new SQLResponse(null, null, 0, true, errMsg);
            sqlResponse.setTotalScanCount(queryContext.getScannedRows());
            sqlResponse.setTotalScanBytes(queryContext.getScannedBytes());

            if (queryCacheEnabled && e.getCause() != null
                    && ExceptionUtils.getRootCause(e) instanceof ResourceLimitExceededException) {
                Cache exceptionCache = cacheManager.getCache(EXCEPTION_QUERY_CACHE);
                exceptionCache.put(new Element(sqlRequest.getCacheKey(), sqlResponse));
            }
        }

        logQuery(sqlRequest, sqlResponse);

        QueryMetricsFacade.updateMetrics(sqlRequest, sqlResponse);

        if (sqlResponse.getIsException())
            throw new InternalErrorException(sqlResponse.getExceptionMessage());

        return sqlResponse;

    } finally {
        BackdoorToggles.cleanToggles();
        QueryContext.reset();
    }
}

From source file: org.apache.kylin.rest.service.QueryService.java

private SQLResponse getPrepareOnlySqlResponse(String correctedSql, Connection conn, Boolean isPushDown,
        List<List<String>> results, List<SelectedColumnMeta> columnMetas) throws SQLException {

    CalcitePrepareImpl.KYLIN_ONLY_PREPARE.set(true);
    PreparedStatement preparedStatement = null;
    try {
        preparedStatement = conn.prepareStatement(correctedSql);
        throw new IllegalStateException("Should have thrown OnlyPrepareEarlyAbortException");
    } catch (Exception e) {
        Throwable rootCause = ExceptionUtils.getRootCause(e);
        if (rootCause instanceof OnlyPrepareEarlyAbortException) {
            OnlyPrepareEarlyAbortException abortException = (OnlyPrepareEarlyAbortException) rootCause;
            CalcitePrepare.Context context = abortException.getContext();
            CalcitePrepare.ParseResult preparedResult = abortException.getPreparedResult();
            List<RelDataTypeField> fieldList = preparedResult.rowType.getFieldList();

            CalciteConnectionConfig config = context.config();

            // Fill in selected column meta
            for (int i = 0; i < fieldList.size(); ++i) {

                RelDataTypeField field = fieldList.get(i);
                String columnName = field.getKey();
                BasicSqlType basicSqlType = (BasicSqlType) field.getValue();

                columnMetas.add(new SelectedColumnMeta(false, config.caseSensitive(), false, false,
                        basicSqlType.isNullable() ? 1 : 0, true, basicSqlType.getPrecision(), columnName,
                        columnName, null, null, null, basicSqlType.getPrecision(),
                        basicSqlType.getScale() < 0 ? 0 : basicSqlType.getScale(),
                        basicSqlType.getSqlTypeName().getJdbcOrdinal(), basicSqlType.getSqlTypeName().getName(),
                        true, false, false));
            }

        } else {
            throw e;
        }
    } finally {
        CalcitePrepareImpl.KYLIN_ONLY_PREPARE.set(false);
        DBUtils.closeQuietly(preparedStatement);
    }

    return getSqlResponse(isPushDown, results, columnMetas);
}