Example usage for java.util.concurrent.atomic.AtomicInteger: the AtomicInteger(int) constructor

Introduction

On this page you can find examples of how the java.util.concurrent.atomic.AtomicInteger(int) constructor is used in open-source projects.

Prototype

public AtomicInteger(int initialValue) 

Document

Creates a new AtomicInteger with the given initial value.
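
Before the project examples below, here is a minimal, self-contained sketch of the constructor together with the operations it is most often combined with. The class name and the values are illustrative and not taken from any of the projects on this page.

import java.util.concurrent.atomic.AtomicInteger;

public class AtomicIntegerBasics {
    public static void main(String[] args) {
        // Create a counter whose initial value is 5.
        AtomicInteger counter = new AtomicInteger(5);

        counter.incrementAndGet();            // atomically adds 1 and returns 6
        counter.addAndGet(10);                // atomically adds 10 and returns 16
        counter.compareAndSet(16, 0);         // resets to 0 only if the current value is 16
        System.out.println(counter.get());    // prints 0
    }
}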

Usage

From source file:org.apache.hadoop.gateway.hdfs.dispatch.WebHdfsHaHttpClientDispatch.java

private void retryRequest(HttpUriRequest outboundRequest, HttpServletRequest inboundRequest,
        HttpServletResponse outboundResponse, HttpResponse inboundResponse, Exception exception)
        throws IOException {
    LOG.retryingRequest(outboundRequest.getURI().toString());
    AtomicInteger counter = (AtomicInteger) inboundRequest.getAttribute(RETRY_COUNTER_ATTRIBUTE);
    if (counter == null) {
        counter = new AtomicInteger(0);
    }
    inboundRequest.setAttribute(RETRY_COUNTER_ATTRIBUTE, counter);
    if (counter.incrementAndGet() <= maxRetryAttempts) {
        if (retrySleep > 0) {
            try {
                Thread.sleep(retrySleep);
            } catch (InterruptedException e) {
                LOG.retrySleepFailed(resourceRole, e);
            }
        }
        executeRequest(outboundRequest, inboundRequest, outboundResponse);
    } else {
        LOG.maxRetryAttemptsReached(maxRetryAttempts, resourceRole, outboundRequest.getURI().toString());
        if (inboundResponse != null) {
            writeOutboundResponse(outboundRequest, inboundRequest, outboundResponse, inboundResponse);
        } else {
            throw new IOException(exception);
        }
    }
}
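
The dispatch above keeps a per-request retry counter in a request attribute so the count survives repeated calls to retryRequest. Below is a simplified, self-contained sketch of the same idea; the plain Map stands in for the servlet request attributes, and the attribute key and retry limit are illustrative assumptions rather than values from the Knox source.

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;

public class RetryCounterSketch {
    private static final String RETRY_COUNTER_ATTRIBUTE = "retry.counter"; // hypothetical key
    private static final int MAX_RETRY_ATTEMPTS = 3;                       // hypothetical limit

    // Stand-in for HttpServletRequest attributes.
    private final Map<String, Object> requestAttributes = new HashMap<>();

    boolean shouldRetry() {
        AtomicInteger counter = (AtomicInteger) requestAttributes.get(RETRY_COUNTER_ATTRIBUTE);
        if (counter == null) {
            counter = new AtomicInteger(0);
            requestAttributes.put(RETRY_COUNTER_ATTRIBUTE, counter);
        }
        // incrementAndGet() bumps the count atomically and returns the new value,
        // so the limit check stays correct even under concurrent access.
        return counter.incrementAndGet() <= MAX_RETRY_ATTEMPTS;
    }

    public static void main(String[] args) {
        RetryCounterSketch sketch = new RetryCounterSketch();
        for (int attempt = 1; attempt <= 5; attempt++) {
            System.out.println("attempt " + attempt + ": retry allowed = " + sketch.shouldRetry());
        }
    }
}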

From source file:io.fabric8.msg.jnatsd.TestProtocol.java

@Test
public void testUnsubscribe() throws Exception {
    final Channel<Boolean> ch = new Channel<Boolean>();
    final AtomicInteger count = new AtomicInteger(0);
    final int max = 20;
    connectionFactory.setReconnectAllowed(false);
    try (Connection c = connectionFactory.createConnection()) {
        try (final AsyncSubscription s = c.subscribeAsync("foo", new MessageHandler() {
            @Override
            public void onMessage(Message m) {
                count.incrementAndGet();
                if (count.get() == max) {
                    try {
                        m.getSubscription().unsubscribe();
                        assertFalse(m.getSubscription().isValid());
                    } catch (Exception e) {
                        fail("Unsubscribe failed with err: " + e.getMessage());
                    }
                    ch.add(true);
                }
            }
        })) {
            for (int i = 0; i < max; i++) {
                c.publish("foo", null, (byte[]) null);
            }
            sleep(100);
            c.flush();

            if (s.isValid()) {
                assertTrue("Test complete signal not received", ch.get(5, TimeUnit.SECONDS));
                assertFalse(s.isValid());
            }
            assertEquals(max, count.get());
        }
    }
}
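
In this test the AtomicInteger counts messages delivered on the client's callback thread, which runs concurrently with the test thread, and a channel is used to signal completion. A stripped-down sketch of that counting-and-signalling idea follows, with a single-threaded executor standing in for the messaging client and a CountDownLatch in place of the channel; all names are hypothetical.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class CallbackCountingSketch {
    public static void main(String[] args) throws InterruptedException {
        final int max = 20;
        final AtomicInteger count = new AtomicInteger(0);
        final CountDownLatch done = new CountDownLatch(1);
        ExecutorService deliveryThread = Executors.newSingleThreadExecutor();

        for (int i = 0; i < max; i++) {
            // Each "message" is handled on another thread, so the counter must be atomic.
            deliveryThread.submit(() -> {
                if (count.incrementAndGet() == max) {
                    done.countDown(); // signal the main thread that all messages arrived
                }
            });
        }

        if (!done.await(5, TimeUnit.SECONDS)) {
            throw new AssertionError("Test complete signal not received");
        }
        System.out.println("received " + count.get() + " messages");
        deliveryThread.shutdown();
    }
}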

From source file:org.fcrepo.client.ConnectionManagementTest.java

/**
 * Demonstrates that HTTP connections are released when the FcrepoClient throws an exception.  Each method of the
 * FcrepoClient (get, put, post, etc.) is tested.
 */
@Test
public void connectionReleasedOnException() {
    // Removing MOVE and COPY operations as the mock server does not handle them
    final int expectedCount = HttpMethods.values().length - 2;
    final AtomicInteger actualCount = new AtomicInteger(0);
    final MockHttpExpectations.Uris uri = uris.uri500;

    Stream.of(HttpMethods.values())
            // MOVE and COPY do not appear to be supported in the mock server
            .filter(method -> HttpMethods.MOVE != method && HttpMethods.COPY != method).forEach(method -> {
                connect(client, uri, method, null);
                actualCount.getAndIncrement();
            });

    assertEquals("Expected to make " + expectedCount + " connections; made " + actualCount.get(), expectedCount,
            actualCount.get());

    verifyConnectionRequestedAndClosed(actualCount.get(), connectionManager);
}
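
Here the AtomicInteger is mostly a mutable counter that the lambda is allowed to capture: locals referenced from a lambda must be effectively final, so a plain int could not be incremented inside the forEach. A minimal sketch of that idiom with an illustrative list of method names:

import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Stream;

public class LambdaCounterSketch {
    public static void main(String[] args) {
        AtomicInteger actualCount = new AtomicInteger(0);

        Stream.of("GET", "PUT", "POST", "DELETE", "MOVE", "COPY")
                .filter(method -> !"MOVE".equals(method) && !"COPY".equals(method))
                .forEach(method -> actualCount.getAndIncrement());

        // Four of the six illustrative methods pass the filter.
        System.out.println("made " + actualCount.get() + " connections");
    }
}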

From source file:uk.ac.ebi.ep.parser.parsers.ChEBICompounds.java

public void computeAndLoadChEBICompounds() {

    List<EnzymePortalSummary> enzymeSummary = enzymeSummaryRepository.findSummariesByCommentType(COMMENT_TYPE);
    LOGGER.warn("Number of Regulation Text from EnzymeSummary Table " + enzymeSummary.size());

    //String text = "Activated by cell stresses such as DNA damage, heat shock, osmotic shock, anisomycin and sodium arsenite, as well as pro-inflammatory stimuli such as bacterial lipopolysaccharide (LPS) and interleukin-1. Activation occurs through dual phosphorylation of Thr-180 and Tyr-182 by either of two dual specificity kinases, MAP2K3/MKK3 or MAP2K6/MKK6, and potentially also MAP2K4/MKK4, as well as by TAB1-mediated autophosphorylation. MAPK14 phosphorylated on both Thr-180 and Tyr-182 is 10-20-fold more active than MAPK14 phosphorylated only on Thr-180, whereas MAPK14 phosphorylated on Tyr-182 alone is inactive. whereas Thr-180 is necessary for catalysis, Tyr-182 may be required for auto-activation and substrate recognition. Phosphorylated at Tyr-323 by ZAP70 in an alternative activation pathway in response to TCR signaling in T-cells. This alternative pathway is inhibited by GADD45A. Inhibited by dual specificity phosphatases, such as DUSP1, DUSP10, and DUSP16. Specifically inhibited by the binding of pyridinyl-imidazole compounds, which are cytokine-suppressive anti-inflammatory drugs (CSAID). Isoform Mxi2 is 100-fold less sensitive to these agents than the other isoforms and is not inhibited by DUSP1. Isoform Exip is not activated by MAP2K6. SB203580 is an inhibitor of MAPK14.";   
    //Java 7 and before only. uncomment if Java 8 is not available in your env
    //        for (EnzymePortalSummary summary : enzymeSummary) {
    //            String enzyme_regulation_text = summary.getCommentText();
    //            
    //            inhibitors.put(summary.getUniprotAccession(), EPUtil.parseTextForInhibitors(enzyme_regulation_text));
    //            activators.put(summary.getUniprotAccession(), EPUtil.parseTextForActivators(enzyme_regulation_text));
    //        }
    //        
    //
    //        for (Map.Entry<UniprotEntry, Set<String>> map : inhibitors.entrySet()) {
    //            UniprotEntry key = map.getKey();
    //            for (String inhibitor : map.getValue()) {
    //                EnzymePortalCompound inhibitor_from_chebi = searchMoleculeInChEBI(inhibitor);
    //               
    //                if (inhibitor_from_chebi != null) {
    //                    
    //                    inhibitor_from_chebi.setRelationship(Relationship.is_inhibitor_of.name());
    //                    inhibitor_from_chebi.setUniprotAccession(key);
    //                    compounds.add(inhibitor_from_chebi);
    //                }
    //            }
    //
    //        }
    //
    //        for (Map.Entry<UniprotEntry, Set<String>> map : activators.entrySet()) {
    //            UniprotEntry key = map.getKey();
    //            for (String activator : map.getValue()) {
    //                EnzymePortalCompound activator_from_chebi = searchMoleculeInChEBI(activator);
    //                if (activator_from_chebi != null) {
    //                 
    //                    activator_from_chebi.setRelationship(Relationship.is_activator_of.name());
    //                    activator_from_chebi.setUniprotAccession(key);
    //                    compounds.add(activator_from_chebi);
    //                }
    //            }
    //
    //        }
    //Java 8 specifics - comment out  and uncomment above if java 8 is not found in env
    //        enzymeSummary.stream().forEach((summary) -> {
    //            String enzyme_regulation_text = summary.getCommentText();
    //            inhibitors.put(summary.getUniprotAccession(), EPUtil.parseTextForInhibitors(enzyme_regulation_text));
    //            activators.put(summary.getUniprotAccession(), EPUtil.parseTextForActivators(enzyme_regulation_text));
    //        });

    Stream<EnzymePortalSummary> existingStream = enzymeSummary.stream();
    Stream<List<EnzymePortalSummary>> partitioned = partition(existingStream, 500, 1);
    AtomicInteger count = new AtomicInteger(1);
    partitioned.parallel().forEach((chunk) -> {
        //System.out.println(count.getAndIncrement() + " BATCH SIZE" + chunk.size());
        chunk.stream().forEach((summary) -> {
            String enzyme_regulation_text = summary.getCommentText();

            inhibitors.put(summary.getUniprotAccession(),
                    EPUtil.parseTextForInhibitors(enzyme_regulation_text));
            activators.put(summary.getUniprotAccession(),
                    EPUtil.parseTextForActivators(enzyme_regulation_text));

        });
    });

    LOGGER.debug("number of inhibitors and activators to process are : " + inhibitors.size() + ": "
            + activators.size());
    inhibitors.entrySet().stream().forEach((map) -> {
        map.getValue().stream().map((inhibitor) -> searchMoleculeInChEBI(inhibitor))
                .filter((inhibitor_from_chebi) -> (inhibitor_from_chebi != null))
                .map((inhibitor_from_chebi) -> {
                    inhibitor_from_chebi.setRelationship(Relationship.is_inhibitor_of.name());
                    inhibitor_from_chebi = CompoundUtil.computeRole(inhibitor_from_chebi,
                            inhibitor_from_chebi.getRelationship());
                    return inhibitor_from_chebi;
                }).map((inhibitor_from_chebi) -> {
                    inhibitor_from_chebi.setUniprotAccession(map.getKey());
                    return inhibitor_from_chebi;
                }).forEach((inhibitor_from_chebi) -> {
                    compounds.add(inhibitor_from_chebi);
                });
    });

    activators.entrySet().stream().forEach((map) -> {
        map.getValue().stream().map((activator) -> searchMoleculeInChEBI(activator))
                .filter((activator_from_chebi) -> (activator_from_chebi != null))
                .map((activator_from_chebi) -> {
                    activator_from_chebi.setRelationship(Relationship.is_activator_of.name());
                    activator_from_chebi = CompoundUtil.computeRole(activator_from_chebi,
                            activator_from_chebi.getRelationship());
                    return activator_from_chebi;
                }).map((activator_from_chebi) -> {
                    activator_from_chebi.setUniprotAccession(map.getKey());
                    return activator_from_chebi;
                }).forEach((activator_from_chebi) -> {
                    compounds.add(activator_from_chebi);
                });
    });

    LOGGER.warn("Number of compounds before filtering : " + compounds.size());

    compounds.removeIf(c -> (c.getCompoundId().equalsIgnoreCase("CHEBI:338412")
            || c.getCompoundId().equalsIgnoreCase("CHEBI:16412")
            || c.getCompoundId().equalsIgnoreCase("CHEBI:29678"))
            && c.getUniprotAccession().getAccession().equalsIgnoreCase("Q16539"));

    LOGGER.warn("Writing to Enzyme Portal database... Number of compounds to write : " + compounds.size());

    compoundRepository.save(compounds);

    inhibitors.clear();
    activators.clear();
    compounds.clear();
}
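
The count above, initialised to 1, is only used to number batches while the partitioned stream is processed in parallel; getAndIncrement() hands each chunk a distinct, thread-safe sequence number. A small sketch of that numbering pattern, with dummy chunks standing in for the partitioned summaries (names and data are illustrative):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

public class ParallelBatchNumberingSketch {
    public static void main(String[] args) {
        List<List<String>> chunks = Arrays.asList(
                Arrays.asList("a", "b"), Arrays.asList("c"), Arrays.asList("d", "e", "f"));

        AtomicInteger count = new AtomicInteger(1);
        chunks.parallelStream().forEach(chunk ->
                // getAndIncrement() is atomic, so parallel workers never share a number.
                System.out.println("batch " + count.getAndIncrement() + " size " + chunk.size()));
    }
}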

From source file:org.apache.hadoop.gateway.hdfs.dispatch.WebHdfsHaDispatch.java

private void retryRequest(HttpUriRequest outboundRequest, HttpServletRequest inboundRequest,
        HttpServletResponse outboundResponse, HttpResponse inboundResponse, Exception exception)
        throws IOException {
    LOG.retryingRequest(outboundRequest.getURI().toString());
    AtomicInteger counter = (AtomicInteger) inboundRequest.getAttribute(RETRY_COUNTER_ATTRIBUTE);
    if (counter == null) {
        counter = new AtomicInteger(0);
    }
    inboundRequest.setAttribute(RETRY_COUNTER_ATTRIBUTE, counter);
    if (counter.incrementAndGet() <= maxRetryAttempts) {
        if (retrySleep > 0) {
            try {
                Thread.sleep(retrySleep);
            } catch (InterruptedException e) {
                LOG.retrySleepFailed(RESOURCE_ROLE, e);
            }
        }
        executeRequest(outboundRequest, inboundRequest, outboundResponse);
    } else {
        LOG.maxRetryAttemptsReached(maxRetryAttempts, RESOURCE_ROLE, outboundRequest.getURI().toString());
        if (inboundResponse != null) {
            writeOutboundResponse(outboundRequest, inboundRequest, outboundResponse, inboundResponse);
        } else {
            throw new IOException(exception);
        }
    }
}

From source file:com.spectralogic.ds3client.metadata.MetadataAccessImpl_Test.java

@Test
public void testMetadataAccessFailureHandlerWindows() {
    Assume.assumeTrue(Platform.isWindows());

    try {
        final ImmutableMap.Builder<String, Path> fileMapper = ImmutableMap.builder();

        final String fileName = "file";

        fileMapper.put(fileName, Paths.get(fileName));

        final AtomicInteger numTimesFailureHandlerCalled = new AtomicInteger(0);

        new MetadataAccessImpl(fileMapper.build(), new FailureEventListener() {
            @Override
            public void onFailure(final FailureEvent failureEvent) {
                numTimesFailureHandlerCalled.incrementAndGet();
                assertEquals(FailureEvent.FailureActivity.RecordingMetadata, failureEvent.doingWhat());
            }
        }, "localhost").getMetadataValue(fileName);

        assertEquals(1, numTimesFailureHandlerCalled.get());
    } catch (final Throwable t) {
        fail("Throwing exceptions from metadata est verbotten.");
    }
}
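
The counter records how many times the failure listener fired so the test can assert it fired exactly once. A reduced sketch of that assertion pattern, using a hypothetical listener interface instead of the ds3client types:

import java.util.concurrent.atomic.AtomicInteger;

public class FailureCountSketch {
    interface FailureListener {
        void onFailure(Exception e);
    }

    static void doWorkThatFails(FailureListener listener) {
        // Simulate a single recoverable failure being reported to the listener.
        listener.onFailure(new IllegalStateException("simulated failure"));
    }

    public static void main(String[] args) {
        AtomicInteger numTimesFailureHandlerCalled = new AtomicInteger(0);
        doWorkThatFails(e -> numTimesFailureHandlerCalled.incrementAndGet());

        if (numTimesFailureHandlerCalled.get() != 1) {
            throw new AssertionError("expected exactly one failure callback");
        }
        System.out.println("failure handler called once, as expected");
    }
}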

From source file:com.streamsets.pipeline.stage.origin.spooldir.TestSpoolDirSource.java

@Test
public void testAllowLateDirectory() throws Exception {
    File f = new File("target", UUID.randomUUID().toString());

    SpoolDirConfigBean conf = new SpoolDirConfigBean();
    conf.dataFormat = DataFormat.TEXT;
    conf.spoolDir = f.getAbsolutePath();
    conf.batchSize = 10;
    conf.overrunLimit = 100;
    conf.poolingTimeoutSecs = 1;
    conf.filePattern = "file-[0-9].log";
    conf.pathMatcherMode = PathMatcherMode.GLOB;
    conf.maxSpoolFiles = 10;
    conf.initialFileToProcess = null;
    conf.dataFormatConfig.compression = Compression.NONE;
    conf.dataFormatConfig.filePatternInArchive = "*";
    conf.errorArchiveDir = null;
    conf.postProcessing = PostProcessingOptions.ARCHIVE;
    conf.archiveDir = createTestDir();
    conf.retentionTimeMins = 10;
    conf.dataFormatConfig.textMaxLineLen = 10;
    conf.dataFormatConfig.onParseError = OnParseError.ERROR;
    conf.dataFormatConfig.maxStackTraceLines = 0;

    TSpoolDirSource source = new TSpoolDirSource(conf);
    PushSourceRunner runner = new PushSourceRunner.Builder(TSpoolDirSource.class, source).addOutputLane("lane")
            .build();
    //Late Directories not allowed, init should fail.
    conf.allowLateDirectory = false;
    try {
        runner.runInit();
        Assert.fail("Should throw an exception if the directory does not exist");
    } catch (StageException e) {
        //Expected
    }

    //Late Directories allowed, wait and should be able to detect the file and read.
    conf.allowLateDirectory = true;
    TSpoolDirSource sourceWithLateDirectory = new TSpoolDirSource(conf);
    PushSourceRunner runner2 = new PushSourceRunner.Builder(TSpoolDirSource.class, sourceWithLateDirectory)
            .addOutputLane("lane").build();
    AtomicInteger batchCount = new AtomicInteger(0);
    runner2.runInit();

    try {
        runner2.runProduce(new HashMap<>(), 10, output -> {
            batchCount.incrementAndGet();

            if (batchCount.get() == 1) {
                runner2.setStop();
            }
        });

        runner2.waitOnProduce();

        TestOffsetUtil.compare(NULL_FILE_OFFSET, runner2.getOffsets());

        Assert.assertEquals(1, runner2.getEventRecords().size());
        Assert.assertEquals("no-more-data", runner2.getEventRecords().get(0).getEventType());

        Assert.assertTrue(f.mkdirs());

        File file = new File(source.spoolDir, "file-0.log").getAbsoluteFile();
        Files.createFile(file.toPath());

        source.file = file;
        source.offset = 1;
        source.maxBatchSize = 10;

        Thread.sleep(5000L);

        PushSourceRunner runner3 = new PushSourceRunner.Builder(TSpoolDirSource.class, source)
                .addOutputLane("lane").build();

        runner3.runInit();

        runner3.runProduce(ImmutableMap.of(Source.POLL_SOURCE_OFFSET_KEY, "file-0.log::1"), 10, output -> {
            batchCount.incrementAndGet();

            if (batchCount.get() > 1) {
                runner3.setStop();
            }
        });
        runner3.waitOnProduce();

        TestOffsetUtil.compare("file-0.log::1", runner3.getOffsets());

        Assert.assertEquals(1, runner3.getEventRecords().size());
        Assert.assertEquals("new-file", runner3.getEventRecords().get(0).getEventType());

        runner3.runDestroy();

    } finally {
        runner2.runDestroy();
    }
}

From source file:org.elasticsearch.client.sniff.SnifferTests.java

/**
 * Test multiple sniffing rounds by mocking the {@link Scheduler} as well as the {@link HostsSniffer}.
 * Simulates the ordinary behaviour of {@link Sniffer} when sniffing on failure is not enabled.
 * The {@link CountingHostsSniffer} doesn't make any network connection but may throw an exception or return no hosts, which makes
 * it possible to verify that errors are properly handled and don't affect subsequent runs and their scheduling.
 * The {@link Scheduler} implementation submits tasks rather than scheduling them, meaning that it doesn't respect the requested sniff
 * delays while still allowing the test to assert that the delays requested for each run and the following one are the expected values.
 */
public void testOrdinarySniffRounds() throws Exception {
    final long sniffInterval = randomLongBetween(1, Long.MAX_VALUE);
    long sniffAfterFailureDelay = randomLongBetween(1, Long.MAX_VALUE);
    RestClient restClient = mock(RestClient.class);
    CountingHostsSniffer hostsSniffer = new CountingHostsSniffer();
    final int iters = randomIntBetween(30, 100);
    final Set<Future<?>> futures = new CopyOnWriteArraySet<>();
    final CountDownLatch completionLatch = new CountDownLatch(1);
    final AtomicInteger runs = new AtomicInteger(iters);
    final ExecutorService executor = Executors.newSingleThreadExecutor();
    final AtomicReference<Future<?>> lastFuture = new AtomicReference<>();
    final AtomicReference<Sniffer.Task> lastTask = new AtomicReference<>();
    Scheduler scheduler = new Scheduler() {
        @Override
        public Future<?> schedule(Sniffer.Task task, long delayMillis) {
            assertEquals(sniffInterval, task.nextTaskDelay);
            int numberOfRuns = runs.getAndDecrement();
            if (numberOfRuns == iters) {
                //the first call is to schedule the first sniff round from the Sniffer constructor, with delay 0
                assertEquals(0L, delayMillis);
                assertEquals(sniffInterval, task.nextTaskDelay);
            } else {
                //all of the subsequent times "schedule" is called with delay set to the configured sniff interval
                assertEquals(sniffInterval, delayMillis);
                assertEquals(sniffInterval, task.nextTaskDelay);
                if (numberOfRuns == 0) {
                    completionLatch.countDown();
                    return null;
                }
            }
            //we submit rather than scheduling to make the test quick and not depend on time
            Future<?> future = executor.submit(task);
            futures.add(future);
            if (numberOfRuns == 1) {
                lastFuture.set(future);
                lastTask.set(task);
            }
            return future;
        }

        @Override
        public void shutdown() {
            //the executor is closed externally, shutdown is tested separately
        }
    };
    try {
        new Sniffer(restClient, hostsSniffer, scheduler, sniffInterval, sniffAfterFailureDelay);
        assertTrue("timeout waiting for sniffing rounds to be completed",
                completionLatch.await(1000, TimeUnit.MILLISECONDS));
        assertEquals(iters, futures.size());
        //the last future is the only one that may not be completed yet, as the count down happens
        //while scheduling the next round which is still part of the execution of the runnable itself.
        assertTrue(lastTask.get().hasStarted());
        lastFuture.get().get();
        for (Future<?> future : futures) {
            assertTrue(future.isDone());
            future.get();
        }
    } finally {
        executor.shutdown();
        assertTrue(executor.awaitTermination(1000, TimeUnit.MILLISECONDS));
    }
    int totalRuns = hostsSniffer.runs.get();
    assertEquals(iters, totalRuns);
    int setHostsRuns = totalRuns - hostsSniffer.failures.get() - hostsSniffer.emptyList.get();
    verify(restClient, times(setHostsRuns)).setHosts(Matchers.<HttpHost>anyVararg());
    verifyNoMoreInteractions(restClient);
}
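
Unlike most examples on this page, the counter here starts at the expected number of runs and counts down: getAndDecrement() tells each schedule() call whether it is handling the first round, an intermediate round, or the last one. A compact sketch of that countdown idiom with illustrative values:

import java.util.concurrent.atomic.AtomicInteger;

public class CountdownSketch {
    public static void main(String[] args) {
        final int iters = 5;
        final AtomicInteger runs = new AtomicInteger(iters);

        for (int i = 0; i < iters; i++) {
            // getAndDecrement() returns the value before the decrement.
            int numberOfRuns = runs.getAndDecrement();
            if (numberOfRuns == iters) {
                System.out.println("first round");
            } else if (numberOfRuns == 1) {
                System.out.println("last round");
            } else {
                System.out.println("intermediate round, " + (numberOfRuns - 1) + " rounds left");
            }
        }
    }
}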

From source file:com.kixeye.chassis.transport.websocket.ActionInvokingWebSocket.java

public void onWebSocketBinary(byte[] payload, int offset, int length) {
    try {
        // don't accept empty frames
        if (payload == null || length < 1) {
            throw new WebSocketServiceException(new ServiceError("EMPTY_ENVELOPE", "Empty envelope!"),
                    "UNKNOWN", null);
        }

        // check if we need to do psk encryption
        byte[] processedPayload = pskFrameProcessor.processIncoming(payload, offset, length);

        if (processedPayload != payload) {
            payload = processedPayload;
            offset = 0;
            length = payload.length;
        }

        // get the envelope
        final WebSocketEnvelope envelope = new WebSocketEnvelope(
                serDe.deserialize(payload, offset, length, Envelope.class));

        // gets all the actions
        Collection<WebSocketAction> actions = mappingRegistry.getActionMethods(envelope.getAction());

        final AtomicInteger invokedActions = new AtomicInteger(0);

        // invokes them
        for (final WebSocketAction action : actions) {
            // get and validate type ID
            Class<?> messageClass = null;

            if (StringUtils.isNotBlank(envelope.getTypeId())) {
                messageClass = messageRegistry.getClassByTypeId(envelope.getTypeId());
            }

            // validate if action has a payload class that it needs
            if (action.getPayloadClass() != null && messageClass == null) {
                throw new WebSocketServiceException(new ServiceError("INVALID_TYPE_ID", "Unknown type ID!"),
                        envelope.getAction(), envelope.getTransactionId());
            }

            // invoke this action if allowed
            if (action.canInvoke(webSocketSession, messageClass)) {
                invokedActions.incrementAndGet();

                final Object handler = handlerCache.get(action.getHandlerClass().getName());
                final Class<?> finalMessageClass = messageClass;

                ListenableFuture<DeferredResult<?>> invocation = serviceExecutor
                        .submit(new Callable<DeferredResult<?>>() {
                            @Override
                            public DeferredResult<?> call() throws Exception {
                                // then invoke
                                return action.invoke(
                                        handler, new RawWebSocketMessage<>(envelope.getPayload(),
                                                finalMessageClass, messageValidator, serDe),
                                        envelope, webSocketSession);
                            }
                        });

                Futures.addCallback(invocation, new FutureCallback<DeferredResult<?>>() {
                    public void onSuccess(DeferredResult<?> result) {
                        if (result != null) {
                            result.setResultHandler(new DeferredResultHandler() {
                                @Override
                                public void handleResult(Object result) {
                                    if (result instanceof Exception) {
                                        onFailure((Exception) result);
                                        return;
                                    }

                                    sendResponse(result);
                                }
                            });
                        }
                    }

                    public void onFailure(Throwable t) {
                        if (t instanceof InvocationTargetException) {
                            t = ((InvocationTargetException) t).getTargetException();
                        }

                        ServiceError error = ExceptionServiceErrorMapper.mapException(t);

                        if (error != null
                                && !ExceptionServiceErrorMapper.VALIDATION_ERROR_CODE.equals(error.code)) {
                            logger.error("Unexpected exception throw while executing action [{}]",
                                    envelope.getAction(), t);
                        }

                        sendResponse(error);
                    }

                    public Future<Void> sendResponse(Object response) {
                        try {
                            return sendMessage(envelope.getAction(), envelope.getTransactionId(), response);
                        } catch (IOException | GeneralSecurityException e) {
                            logger.error("Unable to send message to channel", e);

                            return Futures.immediateFuture(null);
                        }
                    }

                }, responseExecutor);
            }
        }

        // make sure we actually invoked something
        if (invokedActions.get() < 1) {
            throw new WebSocketServiceException(
                    new ServiceError("INVALID_ACTION_MAPPING", "No actions invoked."), envelope.getAction(),
                    envelope.getTransactionId());
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
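
The invokedActions counter tracks how many matching actions actually ran, so the handler can reject envelopes that matched nothing. A bare-bones sketch of that guard follows; the predicate list and the exception type are placeholders, not part of the chassis API.

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Predicate;

public class InvokedActionGuardSketch {
    public static void main(String[] args) {
        List<Predicate<String>> actions = Arrays.asList(
                msg -> msg.startsWith("ping"),
                msg -> msg.length() > 10);

        String message = "ping-hello";
        AtomicInteger invokedActions = new AtomicInteger(0);

        for (Predicate<String> action : actions) {
            if (action.test(message)) {
                invokedActions.incrementAndGet();
                System.out.println("invoked action for: " + message);
            }
        }

        // Fail loudly if no action handled the message.
        if (invokedActions.get() < 1) {
            throw new IllegalStateException("No actions invoked.");
        }
    }
}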