Example usage for java.lang Math toIntExact

List of usage examples for java.lang Math toIntExact

Introduction

On this page you can find usage examples for java.lang.Math.toIntExact.

Prototype

public static int toIntExact(long value) 

Document

Returns the value of the long argument, throwing an exception if the value overflows an int.
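
As a quick, self-contained illustration (a minimal sketch, not taken from any of the projects below): toIntExact returns the narrowed value when it fits into an int and throws ArithmeticException("integer overflow") otherwise, unlike a plain (int) cast, which wraps silently.

public class ToIntExactDemo {
    public static void main(String[] args) {
        // Fits into an int: returned unchanged.
        System.out.println(Math.toIntExact(42L)); // 42

        long tooBig = Integer.MAX_VALUE + 1L;
        // A plain cast truncates silently.
        System.out.println((int) tooBig); // -2147483648

        // The checked narrowing throws instead.
        try {
            Math.toIntExact(tooBig);
        } catch (ArithmeticException e) {
            System.out.println(e.getMessage()); // integer overflow
        }
    }
}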

Usage

From source file:org.eclipse.hawkbit.repository.jpa.ArtifactManagementTest.java

@Test
@Description("Verifies that the quota specifying the maximum artifact storage is enforced (across software modules).")
public void createArtifactWhichExceedsMaxStorage() throws NoSuchAlgorithmException, IOException {

    // create one artifact which exceeds the storage quota at once
    final long maxBytes = quotaManagement.getMaxArtifactStorage();
    final JpaSoftwareModule sm = softwareModuleRepository
            .save(new JpaSoftwareModule(osType, "smd345", "1.0", null, null));
    assertThatExceptionOfType(QuotaExceededException.class).isThrownBy(
            () -> createArtifactForSoftwareModule("file345", sm.getId(), Math.toIntExact(maxBytes) + 128));
}
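
A subtlety worth noting in the call above (shown here as a standalone sketch with a hypothetical quota value, not as a change to the test): Math.toIntExact(maxBytes) + 128 narrows first and adds second, so the int addition could wrap silently if the quota were ever close to Integer.MAX_VALUE; narrowing the long sum instead keeps the overflow check.

public class NarrowThenAddDemo {
    public static void main(String[] args) {
        final long maxBytes = Integer.MAX_VALUE; // hypothetical, extreme quota

        // Narrow first, add second: the int addition wraps silently.
        System.out.println(Math.toIntExact(maxBytes) + 128); // -2147483521

        // Add first (in long), narrow second: the overflow is detected.
        try {
            Math.toIntExact(maxBytes + 128);
        } catch (ArithmeticException e) {
            System.out.println(e.getMessage()); // integer overflow
        }
    }
}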

From source file:org.eclipse.hawkbit.repository.jpa.ArtifactManagementTest.java

@Test
@Description("Verifies that you cannot create artifacts which exceed the configured maximum size.")
public void createArtifactFailsIfTooLarge() throws NoSuchAlgorithmException, IOException {

    // create a software module
    final JpaSoftwareModule sm1 = softwareModuleRepository
            .save(new JpaSoftwareModule(osType, "sm1", "1.0", null, null));

    // create an artifact that exceeds the configured quota
    final long maxSize = quotaManagement.getMaxArtifactSize();
    assertThatExceptionOfType(QuotaExceededException.class).isThrownBy(
            () -> createArtifactForSoftwareModule("file", sm1.getId(), Math.toIntExact(maxSize) + 8));
}

From source file:org.apache.hadoop.hbase.master.cleaner.TestLogsCleaner.java

private void createFiles(FileSystem fs, Path parentDir, int numOfFiles) throws IOException {
    for (int i = 0; i < numOfFiles; i++) {
        // size of each file is 1M, 2M, or 3M
        int xMega = ThreadLocalRandom.current().nextInt(1, 4);
        try (FSDataOutputStream fsdos = fs.create(new Path(parentDir, "file-" + i))) {
            byte[] M = RandomUtils.nextBytes(Math.toIntExact(FileUtils.ONE_MB * xMega));
            fsdos.write(M);
        }
    }
}

From source file:com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter.java

@VisibleForTesting
Integer dateToEpochSeconds(final Date date) {
    return null == date ? null : Math.toIntExact(date.toInstant().getEpochSecond());
}
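
A hedged aside on this converter: epoch seconds stop fitting into an int at 2038-01-19T03:14:07Z (Integer.MAX_VALUE seconds), after which toIntExact throws. A minimal standalone sketch, not part of the Metacat code:

import java.time.Instant;

public class EpochSecondsDemo {
    public static void main(String[] args) {
        // The last Instant whose epoch seconds still fit into an int.
        Instant limit = Instant.ofEpochSecond(Integer.MAX_VALUE);
        System.out.println(limit); // 2038-01-19T03:14:07Z
        System.out.println(Math.toIntExact(limit.getEpochSecond())); // 2147483647

        // One second later, the checked narrowing fails.
        try {
            Math.toIntExact(limit.plusSeconds(1).getEpochSecond());
        } catch (ArithmeticException e) {
            System.out.println(e.getMessage()); // integer overflow
        }
    }
}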

From source file:org.wso2.siddhi.extension.input.transport.kafka.KafkaSourceTestCase.java

@Test
public void testRecoveryOnFailureOfMultipleNodeWithKafka() throws InterruptedException {
    try {
        log.info(
                "Test to verify recovering process of multiple Siddhi nodes on a failure when Kafka is the event"
                        + " source");
        String[] topics = new String[] { "kafka_topic5", "kafka_topic6" };
        createTopic(topics, 1);
        // 1st node
        PersistenceStore persistenceStore = new InMemoryPersistenceStore();
        SiddhiManager siddhiManager1 = new SiddhiManager();
        siddhiManager1.setPersistenceStore(persistenceStore);
        siddhiManager1.setExtension("inputmapper:text", TextSourceMapper.class);

        // 2nd node
        PersistenceStore persistenceStore1 = new InMemoryPersistenceStore();
        SiddhiManager siddhiManager2 = new SiddhiManager();
        siddhiManager2.setPersistenceStore(persistenceStore1);
        siddhiManager2.setExtension("inputmapper:text", TextSourceMapper.class);

        String query1 = "@Plan:name('TestExecutionPlan') "
                + "@sink(type='kafka', topic='kafka_topic6', bootstrap.servers='localhost:9092', partition"
                + ".no='0', " + "@map(type='text'))" + "define stream BarStream (count long); "
                + "@source(type='kafka', topic='kafka_topic5', group.id='test', "
                + "threading.option='topic.wise', bootstrap.servers='localhost:9092', partition.no.list='0', "
                + "@map(type='text'))" + "Define stream FooStream (symbol string, price float, volume long);"
                + "@info(name = 'query1') "
                + "from FooStream select count(symbol) as count insert into BarStream;";

        String query2 = "@Plan:name('TestExecutionPlan') " + "define stream BarStream (count long); "
                + "@source(type='kafka', topic='kafka_topic6', "
                + "threading.option='topic.wise', bootstrap.servers='localhost:9092', partition.no.list='0', "
                + "@map(type='text'))" + "Define stream FooStream (number long);" + "@info(name = 'query1') "
                + "from FooStream select count(number) as count insert into BarStream;";

        ExecutionPlanRuntime executionPlanRuntime1 = siddhiManager1.createExecutionPlanRuntime(query1);
        ExecutionPlanRuntime executionPlanRuntime2 = siddhiManager2.createExecutionPlanRuntime(query2);

        executionPlanRuntime2.addCallback("BarStream", new StreamCallback() {
            @Override
            public void receive(Event[] events) {
                for (Event event : events) {
                    eventArrived = true;
                    System.out.println(event);
                    count = Math.toIntExact((long) event.getData(0));
                }

            }
        });

        // start the execution plan
        executionPlanRuntime1.start();
        executionPlanRuntime2.start();
        // let it initialize
        Thread.sleep(2000);

        // start publishing events to Kafka
        Future eventSender = executorService.submit(new Runnable() {
            @Override
            public void run() {
                kafkaPublisher(new String[] { "kafka_topic5" }, 1, 50, 1000);
            }
        });

        // wait for some time
        Thread.sleep(28000);
        // initiate checkpointing tasks
        Future persistor1 = executionPlanRuntime1.persist();
        Future persistor2 = executionPlanRuntime2.persist();
        // wait until both checkpointing tasks are done
        while (!persistor1.isDone() || !persistor2.isDone()) {
            Thread.sleep(100);
        }
        // let a few more events be published
        Thread.sleep(5000);
        // initiate an execution plan shutdown - to simulate a node failure
        executionPlanRuntime1.shutdown();
        executionPlanRuntime2.shutdown();
        // let a few events be published while the execution plans are down
        Thread.sleep(5000);
        // recreate the execution plan
        executionPlanRuntime1 = siddhiManager1.createExecutionPlanRuntime(query1);
        executionPlanRuntime2 = siddhiManager2.createExecutionPlanRuntime(query2);
        executionPlanRuntime2.addCallback("BarStream", new StreamCallback() {
            @Override
            public void receive(Event[] events) {
                for (Event event : events) {
                    eventArrived = true;
                    System.out.println(event);
                    count = Math.toIntExact((long) event.getData(0));
                }

            }
        });
        // start the execution plan
        executionPlanRuntime1.start();
        executionPlanRuntime2.start();
        // immediately trigger a restore from last revision
        executionPlanRuntime1.restoreLastRevision();
        executionPlanRuntime2.restoreLastRevision();
        Thread.sleep(5000);

        // wait until all the events are published
        while (!eventSender.isDone()) {
            Thread.sleep(2000);
        }

        Thread.sleep(20000);
        assertTrue(eventArrived);
        // assert the count
        assertEquals(50, count);

        executionPlanRuntime1.shutdown();
        executionPlanRuntime2.shutdown();
    } catch (ZkTimeoutException ex) {
        log.warn("No zookeeper may not be available.", ex);
    }
}

From source file:eu.esdihumboldt.hale.io.haleconnect.internal.HaleConnectServiceImpl.java

private int computeTotalWork(File file) {
    int totalWork;
    // Support upload progress only for files whose size in
    // KiB fits into an int. Round up to the next KiB.
    long sizeKiB = (file.length() >> 10) + 1;
    if (sizeKiB > Integer.MAX_VALUE) {
        totalWork = ProgressIndicator.UNKNOWN;
    } else {
        totalWork = Math.toIntExact(sizeKiB);
    }
    return totalWork;
}
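
A short standalone sketch (with made-up sizes) of the bound this guard enforces: the KiB count only exceeds Integer.MAX_VALUE once a file is larger than roughly 2 TiB, so once the guard has passed, the toIntExact call cannot throw.

public class TotalWorkDemo {
    public static void main(String[] args) {
        long twoTiB = 2L * 1024 * 1024 * 1024 * 1024; // 2 TiB in bytes
        long sizeKiB = (twoTiB >> 10) + 1;            // 2147483649 KiB
        System.out.println(sizeKiB > Integer.MAX_VALUE); // true -> UNKNOWN

        long oneGiB = 1L << 30;
        System.out.println(Math.toIntExact((oneGiB >> 10) + 1)); // 1048577
    }
}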

From source file:io.pravega.controller.server.SegmentHelper.java

private Pair<Byte, Integer> extractFromPolicy(ScalingPolicy policy) {
    final int desiredRate;
    final byte rateType;
    if (policy.getType().equals(ScalingPolicy.Type.FIXED_NUM_SEGMENTS)) {
        desiredRate = 0;
        rateType = WireCommands.CreateSegment.NO_SCALE;
    } else {
        desiredRate = Math.toIntExact(policy.getTargetRate());
        if (policy.getType().equals(ScalingPolicy.Type.BY_RATE_IN_KBYTES_PER_SEC)) {
            rateType = WireCommands.CreateSegment.IN_KBYTES_PER_SEC;
        } else {
            rateType = WireCommands.CreateSegment.IN_EVENTS_PER_SEC;
        }
    }

    return new ImmutablePair<>(rateType, desiredRate);
}

From source file:org.eclipse.hawkbit.repository.jpa.JpaRolloutManagement.java

private Long assignTargetsToGroupInNewTransaction(final JpaRollout rollout, final RolloutGroup group,
        final String targetFilter, final long limit) {

    return runInNewTransaction("assignTargetsToRolloutGroup", status -> {
        final PageRequest pageRequest = new PageRequest(0, Math.toIntExact(limit));
        final List<Long> readyGroups = RolloutHelper.getGroupsByStatusIncludingGroup(rollout.getRolloutGroups(),
                RolloutGroupStatus.READY, group);
        final Page<Target> targets = targetManagement
                .findAllTargetsByTargetFilterQueryAndNotInRolloutGroups(pageRequest, readyGroups, targetFilter);

        createAssignmentOfTargetsToGroup(targets, group);

        return Long.valueOf(targets.getNumberOfElements());
    });
}

From source file:org.eclipse.hawkbit.mgmt.rest.resource.MgmtSoftwareModuleResourceTest.java

@Test
@Description("Verifies that artifacts can only be added as long as the artifact storage quota is not exceeded.")
public void uploadArtifactsUntilStorageQuotaExceeded() throws Exception {

    final long storageLimit = quotaManagement.getMaxArtifactStorage();

    // choose an artifact size which does not violate the max file size
    final int artifactSize = Math.toIntExact(quotaManagement.getMaxArtifactSize() / 10);
    final int numArtifacts = Math.toIntExact(storageLimit / artifactSize);

    for (int i = 0; i < numArtifacts; ++i) {
        // create test file
        final byte[] random = randomBytes(artifactSize);
        final String md5sum = HashGeneratorUtils.generateMD5(random);
        final String sha1sum = HashGeneratorUtils.generateSHA1(random);
        final MockMultipartFile file = new MockMultipartFile("file", "origFilename" + i, null, random);

        // upload
        final SoftwareModule sm = testdataFactory.createSoftwareModuleOs("sm" + i);
        mvc.perform(fileUpload("/rest/v1/softwaremodules/{smId}/artifacts", sm.getId()).file(file)
                .accept(MediaType.APPLICATION_JSON)).andDo(MockMvcResultPrinter.print())
                .andExpect(status().isCreated())
                .andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8_VALUE))
                .andExpect(jsonPath("$.hashes.md5", equalTo(md5sum)))
                .andExpect(jsonPath("$.hashes.sha1", equalTo(sha1sum)))
                .andExpect(jsonPath("$.size", equalTo(random.length)))
                .andExpect(jsonPath("$.providedFilename", equalTo("origFilename" + i))).andReturn();
    }

    // upload one more file to cause the quota to be exceeded
    final byte[] random = randomBytes(artifactSize);
    HashGeneratorUtils.generateMD5(random);
    HashGeneratorUtils.generateSHA1(random);
    final MockMultipartFile file = new MockMultipartFile("file", "origFilename_final", null, random);

    // upload
    final SoftwareModule sm = testdataFactory.createSoftwareModuleOs("sm" + numArtifacts);
    mvc.perform(fileUpload("/rest/v1/softwaremodules/{smId}/artifacts", sm.getId()).file(file)
            .accept(MediaType.APPLICATION_JSON)).andDo(MockMvcResultPrinter.print())
            .andExpect(status().isForbidden())
            .andExpect(jsonPath("$.exceptionClass", equalTo(QuotaExceededException.class.getName())))
            .andExpect(jsonPath("$.errorCode", equalTo(SpServerError.SP_QUOTA_EXCEEDED.getKey())));

}

From source file:eu.esdihumboldt.hale.io.haleconnect.internal.HaleConnectServiceImpl.java

private ApiCallback<Feedback> createUploadFileCallback(final SettableFuture<Boolean> future,
        final ProgressIndicator progress, final File file, final int totalWork) {
    return new ApiCallback<Feedback>() {

        AtomicLong chunkWritten = new AtomicLong(0);
        AtomicLong bytesReported = new AtomicLong(0);

        @Override
        public void onDownloadProgress(long bytesRead, long contentLength, boolean done) {
            // not required
        }

        @Override
        public void onFailure(com.haleconnect.api.projectstore.v1.ApiException e, int statusCode,
                Map<String, List<String>> responseHeaders) {
            progress.end();
            future.setException(new HaleConnectException(e.getMessage(), e, statusCode, responseHeaders));
        }

        @Override
        public void onSuccess(Feedback result, int statusCode, Map<String, List<String>> responseHeaders) {
            if (result.getError()) {
                log.error(MessageFormat.format("Error uploading project file \"{0}\": {1}",
                        file.getAbsolutePath(), result.getMessage()));
                future.set(false);
            } else {
                future.set(true);
            }
            progress.end();
        }

        @Override
        public void onUploadProgress(long bytesWritten, long contentLength, boolean done) {
            // bytesWritten contains the accumulated amount of bytes written
            if (totalWork != ProgressIndicator.UNKNOWN) {
                // Wait until at least 1 KiB was written
                long chunk = chunkWritten.get();
                chunk += bytesWritten - bytesReported.get();
                if (chunk >= 1024) {
                    long workToReport = chunk >> 10;
                    // cannot overflow, total size in KiB
                    // is guaranteed to be < Integer.MAX_VALUE
                    progress.advance(Math.toIntExact(workToReport));
                    chunk -= workToReport << 10;
                    // chunkWritten now always < 1024
                }
                chunkWritten.set(chunk);
                bytesReported.set(bytesWritten);
            }
        }
    };
}
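
The shift arithmetic in onUploadProgress can be checked in isolation (a standalone sketch with a made-up byte count, not part of the callback): whole KiB are reported to the progress indicator, and the sub-KiB remainder is carried over to the next call.

public class ChunkAccountingDemo {
    public static void main(String[] args) {
        long chunk = 3000;               // bytes accumulated since the last report
        long workToReport = chunk >> 10; // 2 whole KiB
        chunk -= workToReport << 10;     // 3000 - 2048 = 952 bytes carried over
        System.out.println(workToReport); // 2
        System.out.println(chunk);        // 952
        // Math.toIntExact(workToReport) is safe here because the total
        // size in KiB was already checked to fit into an int.
    }
}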