Example usage for java.util.concurrent CompletionException getCause

List of usage examples for java.util.concurrent CompletionException getCause

Introduction

This page lists usage examples for the `java.util.concurrent.CompletionException.getCause()` method.

Prototype

public synchronized Throwable getCause() 

Source Link

Document

Returns the cause of this throwable or null if the cause is nonexistent or unknown.

Usage

From source file:io.sqp.client.InformationRequestTest.java

@Test
public void InformationRequestWithWrongResultType() {
    try {
        // Asking for an Integer when the subject yields a Boolean must fail on join().
        connection.getInformation(Integer.class, InformationSubject.SupportsBinaryProtocol).join();
        fail();
    } catch (CompletionException e) {
        // join() wraps the real failure in a CompletionException; unwrap and inspect it.
        Throwable cause = e.getCause();
        assertThat(cause, is(not(nullValue())));
        assertThat(cause, is(instanceOf(UnexpectedResultTypeException.class)));
        UnexpectedResultTypeException mismatch = (UnexpectedResultTypeException) cause;
        // The exception must carry both the requested class and the actual Boolean result.
        assertThat(mismatch.getExpectedClass().getName(), is(Integer.class.getName()));
        assertThat(mismatch.getResult(), instanceOf(Boolean.class));
        assertThat(mismatch.getResult(), is(true));
    }
}

From source file:de.ii.xtraplatform.feature.provider.pgis.FeatureProviderPgis.java

@Override
public List<String> addFeaturesFromStream(String featureType, CrsTransformer crsTransformer,
        Function<FeatureTransformer, RunnableGraph<CompletionStage<Done>>> stream) {
    // Look up the insert sink registered for this feature type.
    Optional<SqlFeatureInserts> featureSink = Optional.ofNullable(featureAddSinks.get(featureType));

    if (!featureSink.isPresent()) {
        throw new NotFoundException("Feature type " + featureType + " not found");
    }

    //TODO: merge classes
    SqlFeatureCreator sqlFeatureCreator = new SqlFeatureCreator(session, materializer, featureSink.get());
    FeatureTransformerSql featureTransformerSql = new FeatureTransformerSql(sqlFeatureCreator, crsTransformer);

    try {
        // Run the stream to completion; join() rethrows any failure as a CompletionException.
        stream.apply(featureTransformerSql).run(materializer).toCompletableFuture().join();

        return featureTransformerSql.getIds();
    } catch (CompletionException e) {
        // Unwrap once instead of calling getCause() on every branch.
        Throwable cause = e.getCause();
        if (cause instanceof WebApplicationException) {
            throw (WebApplicationException) cause;
        } else if (cause instanceof JsonProcessingException) {
            throw new BadRequestException("Input could not be parsed", cause);
        }
        LOGGER.error("Could not add feature", cause);
        // Preserve the original failure as the cause instead of dropping the chain.
        throw new BadRequestException("Feature not valid, could not be written", cause);
    }
}

From source file:de.ii.xtraplatform.feature.provider.pgis.FeatureProviderPgis.java

@Override
public void updateFeatureFromStream(String featureType, String id, CrsTransformer crsTransformer,
        Function<FeatureTransformer, RunnableGraph<CompletionStage<Done>>> stream) {
    // Look up the update sink registered for this feature type.
    Optional<SqlFeatureInserts> featureSink = Optional.ofNullable(featureUpdateSinks.get(featureType));

    if (!featureSink.isPresent()) {
        throw new NotFoundException("Feature type " + featureType + " not found");
    }

    //TODO: merge classes
    SqlFeatureCreator sqlFeatureCreator = new SqlFeatureCreator(session, materializer, featureSink.get());
    FeatureTransformerSql featureTransformerSql = new FeatureTransformerSql(sqlFeatureCreator, crsTransformer,
            id);

    try {
        // Run the stream to completion; join() rethrows any failure as a CompletionException.
        stream.apply(featureTransformerSql).run(materializer).toCompletableFuture().join();

        List<String> ids = featureTransformerSql.getIds();
        LOGGER.debug("PUT {}", ids);
    } catch (CompletionException e) {
        // Unwrap once instead of calling getCause() on every branch.
        Throwable cause = e.getCause();
        if (cause instanceof WebApplicationException) {
            throw (WebApplicationException) cause;
        } else if (cause instanceof JsonProcessingException) {
            throw new BadRequestException("Input could not be parsed", cause);
        }
        // Message previously said "Could not add feature" — copy-paste from addFeaturesFromStream.
        LOGGER.error("Could not update feature", cause);
        // Preserve the original failure as the cause instead of dropping the chain.
        throw new BadRequestException("Feature not valid, could not be written", cause);
    }
}

From source file:org.apache.hadoop.hbase.client.TestAsyncAdmin.java

@Test(timeout = 300000)
public void testCreateTableNumberOfRegions() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    // No split keys: the table comes up as a single region.
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    admin.createTable(desc).join();
    List<HRegionLocation> regions;
    try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
        regions = l.getAllRegionLocations();
        assertEquals("Table should have only 1 region", 1, regions.size());
    }

    // One explicit split key yields exactly two regions.
    final TableName tableName2 = TableName.valueOf(tableName.getNameAsString() + "_2");
    desc = new HTableDescriptor(tableName2);
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    admin.createTable(desc, new byte[][] { new byte[] { 42 } }).join();
    try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(tableName2)) {
        regions = l.getAllRegionLocations();
        assertEquals("Table should have only 2 region", 2, regions.size());
    }

    // Start/end key plus an explicit region count.
    // Bytes.toBytes instead of String.getBytes(): charset-explicit (UTF-8) rather than
    // the platform default, and consistent with the rest of this test class.
    final TableName tableName3 = TableName.valueOf(tableName.getNameAsString() + "_3");
    desc = new HTableDescriptor(tableName3);
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    admin.createTable(desc, Bytes.toBytes("a"), Bytes.toBytes("z"), 3).join();
    try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(tableName3)) {
        regions = l.getAllRegionLocations();
        assertEquals("Table should have only 3 region", 3, regions.size());
    }

    // Fewer than 3 regions with a start/end key range must be rejected.
    final TableName tableName4 = TableName.valueOf(tableName.getNameAsString() + "_4");
    desc = new HTableDescriptor(tableName4);
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    try {
        admin.createTable(desc, Bytes.toBytes("a"), Bytes.toBytes("z"), 2).join();
        fail("Should not be able to create a table with only 2 regions using this API.");
    } catch (CompletionException e) {
        // The async admin wraps the rejection; the root cause is the argument error.
        assertTrue(e.getCause() instanceof IllegalArgumentException);
    }

    final TableName tableName5 = TableName.valueOf(tableName.getNameAsString() + "_5");
    desc = new HTableDescriptor(tableName5);
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    admin.createTable(desc, new byte[] { 1 }, new byte[] { 127 }, 16).join();
    try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(tableName5)) {
        regions = l.getAllRegionLocations();
        assertEquals("Table should have 16 region", 16, regions.size());
    }
}

From source file:org.apache.hadoop.hbase.client.TestAsyncAdmin.java

@Test(timeout = 300000)
public void testCreateTableWithRegions() throws IOException, InterruptedException {
    final TableName tableName = TableName.valueOf(name.getMethodName());

    // 9 explicit split keys -> 10 regions.
    byte[][] splitKeys = { new byte[] { 1, 1, 1 }, new byte[] { 2, 2, 2 }, new byte[] { 3, 3, 3 },
            new byte[] { 4, 4, 4 }, new byte[] { 5, 5, 5 }, new byte[] { 6, 6, 6 }, new byte[] { 7, 7, 7 },
            new byte[] { 8, 8, 8 }, new byte[] { 9, 9, 9 }, };
    int expectedRegions = splitKeys.length + 1;

    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    admin.createTable(desc, splitKeys).join();

    List<HRegionLocation> regions;
    ClusterConnection conn = (ClusterConnection) TEST_UTIL.getConnection();
    try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
        regions = l.getAllRegionLocations();
        assertEquals("Tried to create " + expectedRegions + " regions " + "but only found " + regions.size(),
                expectedRegions, regions.size());
        System.err.println("Found " + regions.size() + " regions");

        assertRegionBoundaries(regions.iterator(), splitKeys);

        verifyRoundRobinDistribution(conn, l, expectedRegions);
    }

    // Now test using start/end with a number of regions

    // Use 80 bit numbers to make sure we aren't limited
    byte[] startKey = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
    byte[] endKey = { 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 };

    // Splitting into 10 regions, we expect (null,1) ... (9, null)
    // with (1,2) (2,3) (3,4) (4,5) (5,6) (6,7) (7,8) (8,9) in the middle
    expectedRegions = 10;

    final TableName tableName2 = TableName.valueOf(tableName.getNameAsString() + "_2");

    desc = new HTableDescriptor(tableName2);
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    admin.createTable(desc, startKey, endKey, expectedRegions).join();

    // Expected internal boundaries: ten-byte keys filled with 1 through 9.
    byte[][] uniformBoundaries = new byte[9][];
    for (int i = 0; i < 9; i++) {
        uniformBoundaries[i] = new byte[10];
        for (int j = 0; j < 10; j++) {
            uniformBoundaries[i][j] = (byte) (i + 1);
        }
    }

    try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(tableName2)) {
        regions = l.getAllRegionLocations();
        assertEquals("Tried to create " + expectedRegions + " regions " + "but only found " + regions.size(),
                expectedRegions, regions.size());
        System.err.println("Found " + regions.size() + " regions");

        assertRegionBoundaries(regions.iterator(), uniformBoundaries);

        verifyRoundRobinDistribution(conn, l, expectedRegions);
    }

    // Try once more with something that divides into something infinite

    startKey = new byte[] { 0, 0, 0, 0, 0, 0 };
    endKey = new byte[] { 1, 0, 0, 0, 0, 0 };

    expectedRegions = 5;

    final TableName tableName3 = TableName.valueOf(tableName.getNameAsString() + "_3");

    desc = new HTableDescriptor(tableName3);
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    admin.createTable(desc, startKey, endKey, expectedRegions).join();

    try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(tableName3)) {
        regions = l.getAllRegionLocations();
        assertEquals("Tried to create " + expectedRegions + " regions " + "but only found " + regions.size(),
                expectedRegions, regions.size());
        System.err.println("Found " + regions.size() + " regions");

        verifyRoundRobinDistribution(conn, l, expectedRegions);
    }

    // Try an invalid case where there are duplicate split keys
    splitKeys = new byte[][] { new byte[] { 1, 1, 1 }, new byte[] { 2, 2, 2 }, new byte[] { 3, 3, 3 },
            new byte[] { 2, 2, 2 } };

    final TableName tableName4 = TableName.valueOf(tableName.getNameAsString() + "_4");
    desc = new HTableDescriptor(tableName4);
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    try {
        admin.createTable(desc, splitKeys).join();
        fail("Should not be able to create this table because of " + "duplicate split keys");
    } catch (CompletionException e) {
        // The async admin wraps the rejection; the root cause is the bad split-key set.
        assertTrue(e.getCause() instanceof IllegalArgumentException);
    }
}

/**
 * Asserts that the regions yielded by {@code hris} partition the key space at exactly
 * {@code boundaries}: the first region starts at the empty key, consecutive regions meet
 * at each boundary in order, and the last region ends at the empty key.
 */
private void assertRegionBoundaries(Iterator<HRegionLocation> hris, byte[][] boundaries) {
    HRegionInfo hri = hris.next().getRegionInfo();
    assertTrue(hri.getStartKey() == null || hri.getStartKey().length == 0);
    assertTrue(Bytes.equals(hri.getEndKey(), boundaries[0]));
    for (int i = 1; i < boundaries.length; i++) {
        hri = hris.next().getRegionInfo();
        assertTrue(Bytes.equals(hri.getStartKey(), boundaries[i - 1]));
        assertTrue(Bytes.equals(hri.getEndKey(), boundaries[i]));
    }
    hri = hris.next().getRegionInfo();
    assertTrue(Bytes.equals(hri.getStartKey(), boundaries[boundaries.length - 1]));
    assertTrue(hri.getEndKey() == null || hri.getEndKey().length == 0);
}

From source file:org.apache.hadoop.hbase.client.TestAsyncAdmin.java

@Test(timeout = 300000)
public void testCreateTableWithOnlyEmptyStartRow() throws IOException {
    byte[] tableName = Bytes.toBytes(name.getMethodName());
    // A single split key that is the empty byte array is invalid input for createTable.
    byte[][] splitKeys = { HConstants.EMPTY_BYTE_ARRAY };
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    desc.addFamily(new HColumnDescriptor("col"));
    try {
        admin.createTable(desc, splitKeys).join();
        fail("Test case should fail as empty split key is passed.");
    } catch (CompletionException e) {
        // join() wraps the rejection; the root cause must be the argument error.
        assertTrue(e.getCause() instanceof IllegalArgumentException);
    }
}

From source file:org.apache.hadoop.hbase.client.TestAsyncAdmin.java

@Test(timeout = 300000)
public void testCreateTableWithEmptyRowInTheSplitKeys() throws IOException {
    byte[] tableName = Bytes.toBytes(name.getMethodName());
    // Middle split key is empty — createTable must reject the whole set.
    // Bytes.toBytes instead of String.getBytes(): explicit UTF-8 rather than the platform
    // charset, and consistent with the Bytes.toBytes call above.
    byte[][] splitKeys = { Bytes.toBytes("region1"), HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("region2") };
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    desc.addFamily(new HColumnDescriptor("col"));
    try {
        admin.createTable(desc, splitKeys).join();
        fail("Test case should fail as empty split key is passed.");
    } catch (CompletionException e) {
        // join() wraps the rejection; the root cause must be the argument error.
        assertTrue(e.getCause() instanceof IllegalArgumentException);
    }
}

From source file:org.apache.hadoop.hbase.client.TestAsyncRegionLocator.java

@Test
public void testNoCompletionException() {
    // Verify that callbacks see the raw failure, never a CompletionException wrapper.
    SLEEP_MS = 0;
    AtomicReference<Throwable> callbackError = new AtomicReference<>();
    try {
        LOCATOR.getRegionLocation(TableName.valueOf("NotExist"), EMPTY_START_ROW, RegionLocateType.CURRENT,
                TimeUnit.SECONDS.toNanos(1)).whenComplete((r, e) -> callbackError.set(e)).join();
        fail();
    } catch (CompletionException e) {
        // join() itself is allowed (and expected) to wrap the failure.
        assertThat(e.getCause(), instanceOf(TableNotFoundException.class));
    }
    // The whenComplete callback, however, must have observed the unwrapped exception.
    assertThat(callbackError.get(), instanceOf(TableNotFoundException.class));
}

From source file:org.apache.james.mailbox.cassandra.mail.CassandraMailboxMapper.java

@Override
public Mailbox findMailboxByPath(MailboxPath path) throws MailboxException {
    try {
        // Resolve the path to a CassandraId, then load the mailbox; an absent id
        // short-circuits to an empty Optional so orElseThrow reports the missing mailbox.
        return mailboxPathDAO.retrieveId(path).thenCompose(cassandraIdOptional -> cassandraIdOptional
                .map(CassandraMailboxPathDAO.CassandraIdAndPath::getCassandraId)
                .map(mailboxDAO::retrieveMailbox).orElse(CompletableFuture.completedFuture(Optional.empty())))
                .join().orElseThrow(() -> new MailboxNotFoundException(path));
    } catch (CompletionException e) {
        // Unwrap once instead of calling getCause() on every branch.
        Throwable cause = e.getCause();
        if (cause instanceof InvalidQueryException) {
            if (StringUtils.containsIgnoreCase(cause.getMessage(), VALUES_MAY_NOT_BE_LARGER_THAN_64_K)) {
                throw new TooLongMailboxNameException("too long mailbox name");
            }
            // NOTE(review): manageException also matches CLUSTERING_COLUMNS_IS_TOO_LONG —
            // consider delegating to it so both handlers stay in sync.
            throw new MailboxException("It has error with cassandra storage", cause);
        }
        throw e;
    }
}

From source file:org.apache.james.mailbox.cassandra.mail.CassandraMailboxMapper.java

/**
 * Translates a {@link CompletionException} thrown by the async Cassandra layer into the
 * mailbox API's exceptions.
 *
 * @param e the wrapper thrown by {@code CompletableFuture.join()}
 * @throws TooLongMailboxNameException when Cassandra rejected an oversized name or value
 * @throws MailboxException for any other {@code InvalidQueryException}
 * @throws CompletionException rethrown unchanged when the cause is not an
 *         {@code InvalidQueryException}
 */
private void manageException(CompletionException e) throws MailboxException {
    // Unwrap once instead of calling getCause() on every branch.
    Throwable cause = e.getCause();
    if (cause instanceof InvalidQueryException) {
        String errorMessage = cause.getMessage();
        if (StringUtils.containsIgnoreCase(errorMessage, VALUES_MAY_NOT_BE_LARGER_THAN_64_K)
                || StringUtils.containsIgnoreCase(errorMessage, CLUSTERING_COLUMNS_IS_TOO_LONG)) {
            throw new TooLongMailboxNameException("too long mailbox name");
        }
        throw new MailboxException("It has error with cassandra storage", cause);
    }
    throw e;
}