Example usage for java.util.concurrent.atomic.AtomicInteger AtomicInteger(int)

Introduction

This page shows example usage of the AtomicInteger(int) constructor from java.util.concurrent.atomic, drawn from real open-source projects.

Prototype

public AtomicInteger(int initialValue) 

Document

Creates a new AtomicInteger with the given initial value.
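
Before the project examples, here is a minimal standalone sketch of the constructor (class and variable names are illustrative, not taken from the examples below):

import java.util.concurrent.atomic.AtomicInteger;

public class AtomicIntegerDemo {
    public static void main(String[] args) {
        AtomicInteger counter = new AtomicInteger(10); // start at 10 instead of the default 0
        counter.incrementAndGet();                     // atomically 10 -> 11
        counter.addAndGet(5);                          // atomically 11 -> 16
        int previous = counter.getAndSet(0);           // returns 16, counter is now 0
        System.out.println(previous + " " + counter.get()); // prints "16 0"
    }
}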

Usage

From source file:com.netflix.conductor.core.execution.TestWorkflowExecutor.java

@Test
public void test() throws Exception {

    AtomicBoolean httpTaskExecuted = new AtomicBoolean(false);
    AtomicBoolean http2TaskExecuted = new AtomicBoolean(false);

    new Wait();
    new WorkflowSystemTask("HTTP") {
        @Override
        public boolean isAsync() {
            return true;
        }

        @Override
        public void start(Workflow workflow, Task task, WorkflowExecutor executor) throws Exception {
            httpTaskExecuted.set(true);
            task.setStatus(Status.COMPLETED);
            super.start(workflow, task, executor);
        }

    };

    new WorkflowSystemTask("HTTP2") {

        @Override
        public void start(Workflow workflow, Task task, WorkflowExecutor executor) throws Exception {
            http2TaskExecuted.set(true);
            task.setStatus(Status.COMPLETED);
            super.start(workflow, task, executor);
        }

    };

    Workflow workflow = new Workflow();
    workflow.setWorkflowId("1");

    TestConfiguration config = new TestConfiguration();
    MetadataDAO metadata = mock(MetadataDAO.class);
    ExecutionDAO edao = mock(ExecutionDAO.class);
    QueueDAO queue = mock(QueueDAO.class);
    ObjectMapper om = new ObjectMapper();

    WorkflowExecutor executor = new WorkflowExecutor(metadata, edao, queue, om, config);
    List<Task> tasks = new LinkedList<>();

    WorkflowTask taskToSchedule = new WorkflowTask();
    taskToSchedule.setWorkflowTaskType(Type.USER_DEFINED);
    taskToSchedule.setType("HTTP");

    WorkflowTask taskToSchedule2 = new WorkflowTask();
    taskToSchedule2.setWorkflowTaskType(Type.USER_DEFINED);
    taskToSchedule2.setType("HTTP2");

    WorkflowTask wait = new WorkflowTask();
    wait.setWorkflowTaskType(Type.WAIT);
    wait.setType("WAIT");
    wait.setTaskReferenceName("wait");

    Task task1 = SystemTask.userDefined(workflow, IDGenerator.generate(), taskToSchedule, new HashMap<>(), null,
            0);
    Task task2 = SystemTask.waitTask(workflow, IDGenerator.generate(), taskToSchedule, new HashMap<>());
    Task task3 = SystemTask.userDefined(workflow, IDGenerator.generate(), taskToSchedule2, new HashMap<>(),
            null, 0);

    tasks.add(task1);
    tasks.add(task2);
    tasks.add(task3);

    when(edao.createTasks(tasks)).thenReturn(tasks);
    AtomicInteger startedTaskCount = new AtomicInteger(0);
    doAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            startedTaskCount.incrementAndGet();
            return null;
        }
    }).when(edao).updateTask(any());

    AtomicInteger queuedTaskCount = new AtomicInteger(0);
    doAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            String queueName = invocation.getArgumentAt(0, String.class);
            System.out.println(queueName);
            queuedTaskCount.incrementAndGet();
            return null;
        }
    }).when(queue).push(any(), any(), anyInt());

    boolean stateChanged = executor.scheduleTask(workflow, tasks);
    assertEquals(2, startedTaskCount.get());
    assertEquals(1, queuedTaskCount.get());
    assertTrue(stateChanged);
    assertFalse(httpTaskExecuted.get());
    assertTrue(http2TaskExecuted.get());
}
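
In this test, AtomicInteger(0) does double duty: anonymous classes and lambdas may only capture effectively final locals, so a mutable counter has to be wrapped in an object, and AtomicInteger also makes the increments thread-safe. A minimal sketch of the same pattern outside Mockito (hypothetical names):

import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.IntStream;

public class CaptureCounterDemo {
    public static void main(String[] args) {
        // "int hits = 0; hits++" inside the lambda would not compile:
        // captured locals must be effectively final.
        AtomicInteger hits = new AtomicInteger(0);
        IntStream.range(0, 10).parallel().forEach(i -> hits.incrementAndGet());
        System.out.println(hits.get()); // always 10, even with parallel execution
    }
}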

From source file:gobblin.kafka.writer.KafkaDataWriter.java

public KafkaDataWriter(KafkaProducer producer, Config config) {
    super(ConfigUtils.configToState(config));
    recordsProduced = getMetricContext().meter(KafkaWriterMetricNames.RECORDS_PRODUCED_METER);
    recordsWritten = getMetricContext().meter(KafkaWriterMetricNames.RECORDS_SUCCESS_METER);
    recordsFailed = getMetricContext().meter(KafkaWriterMetricNames.RECORDS_FAILED_METER);
    bytesWritten = new AtomicInteger(-1);
    this.topic = config.getString(KafkaWriterConfigurationKeys.KAFKA_TOPIC);
    this.commitTimeoutInNanos = ConfigUtils.getLong(config,
            KafkaWriterConfigurationKeys.COMMIT_TIMEOUT_MILLIS_CONFIG,
            KafkaWriterConfigurationKeys.COMMIT_TIMEOUT_MILLIS_DEFAULT) * MILLIS_TO_NANOS;
    this.commitStepWaitTimeMillis = ConfigUtils.getLong(config,
            KafkaWriterConfigurationKeys.COMMIT_STEP_WAIT_TIME_CONFIG,
            KafkaWriterConfigurationKeys.COMMIT_STEP_WAIT_TIME_DEFAULT);
    this.failureAllowance = ConfigUtils.getDouble(config,
            KafkaWriterConfigurationKeys.FAILURE_ALLOWANCE_PCT_CONFIG,
            KafkaWriterConfigurationKeys.FAILURE_ALLOWANCE_PCT_DEFAULT) / 100.0;
    this.producer = producer;
    this.producerCallback = new Callback() {
        @Override
        public void onCompletion(RecordMetadata metadata, Exception exception) {
            if (null == exception) {
                recordsWritten.mark();
            } else {
                log.debug("record failed to write", exception);
                recordsFailed.mark();
            }
        }
    };
}
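
The initial value -1 here reads as a sentinel meaning "byte count not reported yet", distinct from a legitimate count of 0. A sketch of that idea, assuming that interpretation (hypothetical names):

import java.util.concurrent.atomic.AtomicInteger;

public class SentinelDemo {
    // -1 = "not reported yet"; any value >= 0 is a real byte count
    private final AtomicInteger bytesWritten = new AtomicInteger(-1);

    public String describe() {
        int n = bytesWritten.get();
        return n < 0 ? "byte count not yet known" : n + " bytes written";
    }
}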

From source file:com.indeed.lsmtree.core.TestImmutableBTreeIndex.java

public void testRandom() throws Exception {
    final int[] ints = createTree();
    final ImmutableBTreeIndex.Reader<Integer, Long> reader = new ImmutableBTreeIndex.Reader(tmpDir,
            new IntSerializer(), new LongSerializer(), false);
    final int max = ints[ints.length - 1];
    final AtomicInteger done = new AtomicInteger(8);
    for (int i = 0; i < 8; i++) {
        final int index = i;
        new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    final Random r = new Random(index);
                    for (int i = 0; i < treeSize; i++) {
                        int rand = r.nextInt(max + 1);
                        int insertionindex = Arrays.binarySearch(ints, rand);
                        final Iterator<Generation.Entry<Integer, Long>> iterator = reader.iterator(rand, true);
                        try {
                            assertTrue(iterator.hasNext());
                        } catch (Throwable t) {
                            System.err.println("rand: " + rand);
                            throw Throwables.propagate(t);
                        }
                        Generation.Entry<Integer, Long> entry = iterator.next();
                        assertTrue("entry: " + entry + " rand: " + rand, entry.getKey() >= rand);
                        assertTrue(entry.getKey().longValue() == entry.getValue());
                        if (insertionindex >= 0) {
                            assertTrue(rand == ints[insertionindex]);
                            assertTrue(entry.getKey() == rand);
                            Generation.Entry<Integer, Long> result = reader.get(rand);
                            assertTrue(result.getValue() == rand);
                        } else {
                            if (insertionindex != -1)
                                assertTrue(ints[(~insertionindex) - 1] < rand);
                            assertTrue(
                                    "insertionindex: " + insertionindex + " entry: " + entry
                                            + " ints[!insertionindex]" + ints[~insertionindex],
                                    ints[~insertionindex] == entry.getKey());
                            Generation.Entry<Integer, Long> result = reader.get(rand);
                            assertTrue(result == null);
                        }
                    }
                } finally {
                    done.decrementAndGet();
                }
            }
        }).start();
    }
    while (done.get() > 0) {
        Thread.yield();
    }
    reader.close();
}
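
Here new AtomicInteger(8) is a hand-rolled countdown: each worker decrements it when finished and the main thread spins until it reaches zero. The same coordination is usually expressed with a java.util.concurrent.CountDownLatch, which blocks instead of busy-waiting; a minimal sketch:

import java.util.concurrent.CountDownLatch;

public class LatchDemo {
    public static void main(String[] args) throws InterruptedException {
        final int workers = 8;
        final CountDownLatch done = new CountDownLatch(workers);
        for (int i = 0; i < workers; i++) {
            new Thread(() -> {
                try {
                    // ... per-thread work ...
                } finally {
                    done.countDown(); // replaces done.decrementAndGet()
                }
            }).start();
        }
        done.await(); // replaces the while (done.get() > 0) Thread.yield() loop
    }
}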

From source file:org.dasein.cloud.azure.tests.network.AzureIpAddressSupportTest.java

@Test
public void forwardShouldPostCorrectRequest() throws CloudException, InternalException {
    final AtomicInteger putCount = new AtomicInteger(0);
    new MockUp<CloseableHttpClient>() {
        @Mock(invocations = 2)
        public CloseableHttpResponse execute(Invocation inv, HttpUriRequest request) throws IOException {
            if (request.getMethod().equals("GET")) {
                DaseinObjectToXmlEntity<PersistentVMRoleModel> daseinEntity = new DaseinObjectToXmlEntity<PersistentVMRoleModel>(
                        createPersistentVMRoleModelWithoutEndpoint());
                assertGet(request, EXPECTED_URL,
                        new Header[] { new BasicHeader("x-ms-version", "2012-03-01") });
                return getHttpResponseMock(getStatusLineMock(HttpServletResponse.SC_OK), daseinEntity,
                        new Header[] { new BasicHeader("x-ms-request-id", UUID.randomUUID().toString()) });
            } else if (request.getMethod().equals("PUT")) {
                putCount.incrementAndGet();
                PersistentVMRoleModel persistentVMRoleModel = createPersistentVMRoleModelWithEndpoint();
                assertPut(request, EXPECTED_URL, new Header[] { new BasicHeader("x-ms-version", "2012-03-01") },
                        persistentVMRoleModel);
                return getHttpResponseMock(getStatusLineMock(HttpServletResponse.SC_ACCEPTED), null,
                        new Header[] { new BasicHeader("x-ms-request-id", UUID.randomUUID().toString()) });
            } else {
                throw new IOException("Request is not mocked");
            }
        }
    };

    String result = ipAddressSupport.forward("127.0.0.1", PUBLIC_PORT, PROTOCOL, PRIVATE_PORT, VM_ID);
    assertEquals("IpAddressSupport.forward() doesn't return correct result",
            new AzureRuleIdParts(VM_ID, Protocol.TCP.toString(), String.valueOf(PRIVATE_PORT)).toProviderId(),
            result);
    assertEquals("PUT count doesn't match", 1, putCount.get());
}

From source file:com.asprise.imaging.core.Imaging.java

/** Use this executor service to ensure that all scanning-related code executes on the same thread. */
public static ExecutorService getDefaultExecutorServiceForScanning() {
    if (executorServiceForScanning == null) {
        synchronized (Imaging.class) {
            if (executorServiceForScanning == null) {
                executorServiceForScanning = Executors.newSingleThreadExecutor(new ThreadFactory() { // custom factory for user-friendly thread name
                    final AtomicInteger threadNumber = new AtomicInteger(1);
                    ThreadFactory defaultThreadFactory = Executors.defaultThreadFactory();

                    @Override
                    public Thread newThread(Runnable r) {
                        Thread thread = defaultThreadFactory.newThread(r);
                        int n = threadNumber.getAndIncrement(); // advance the counter so replacement threads get distinct names
                        thread.setName("scan" + (n == 1 ? "" : "-" + n));
                        return thread;
                    }
                });
            }
        }
    }
    return executorServiceForScanning;
}
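
Numbering threads from an AtomicInteger inside a ThreadFactory is the same pattern Executors.defaultThreadFactory() uses for its pool-N-thread-M names. A reusable standalone version (illustrative class name):

import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

public class NamedThreadFactory implements ThreadFactory {
    private final AtomicInteger threadNumber = new AtomicInteger(1);
    private final String prefix;

    public NamedThreadFactory(String prefix) {
        this.prefix = prefix;
    }

    @Override
    public Thread newThread(Runnable r) {
        // getAndIncrement() hands out 1, 2, 3, ... safely even if
        // newThread is called from several threads at once.
        return new Thread(r, prefix + "-" + threadNumber.getAndIncrement());
    }
}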

From source file:com.palantir.docker.compose.DockerComposeRuleShould.java

@Test
public void pass_wait_for_service_when_check_is_true() throws IOException, InterruptedException {
    AtomicInteger timesCheckCalled = new AtomicInteger(0);
    withComposeExecutableReturningContainerFor("db");
    HealthCheck<Container> checkCalledOnce = (container) -> SuccessOrFailure
            .fromBoolean(timesCheckCalled.incrementAndGet() == 1, "not called once yet");
    DockerComposeRule.builder().from(rule).waitingForService("db", checkCalledOnce).build().before();
    assertThat(timesCheckCalled.get(), is(1));
}

From source file:com.xylocore.cassandra.query.SharedResultSetProcessor.java

/**
 * FILLIN
 * 
 * @param       aExecutionContext
 * @param       aExecutor
 * @param       aCompletionNotifier
 */
SharedResultSetProcessor(PagedQueryExecutionContext<T> aExecutionContext, Executor aExecutor,
        ResultSetCompletionNotifier<T> aCompletionNotifier) {
    Validate.notNull(aExecutionContext);
    Validate.notNull(aCompletionNotifier);

    executionContext = aExecutionContext;
    executor = aExecutor;
    completionNotifier = aCompletionNotifier;
    availableTasks = new ConcurrentLinkedQueue<>();
    availableTaskCount = new AtomicInteger(aExecutionContext.getConcurrencyLevel());
    state = new AtomicReference<>(State.Inactive);
    workTasksLocked = new AtomicBoolean(false);
    fetchFuture = null;
    completed = false;
    resultSet = null;
    currentWorkTask = null;

    for (int i = 0, ci = aExecutionContext.getConcurrencyLevel(); i < ci; i++) {
        WorkTask myWorkTask = new WorkTask(i);

        availableTasks.offer(myWorkTask);
    }
}
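
Here the AtomicInteger starts at the concurrency level and evidently tracks how many work tasks are free. When such a count is used to gate access rather than just report it, the same bookkeeping is often expressed with a java.util.concurrent.Semaphore; a sketch under that assumption (hypothetical names):

import java.util.concurrent.Semaphore;

public class TaskPermits {
    public static void main(String[] args) throws InterruptedException {
        int concurrencyLevel = 4; // stands in for aExecutionContext.getConcurrencyLevel()
        Semaphore permits = new Semaphore(concurrencyLevel);
        permits.acquire();        // claim a work-task slot
        try {
            // ... process one batch of rows ...
        } finally {
            permits.release();    // hand the slot back
        }
    }
}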

From source file:com.github.naoghuman.testdata.abclist.service.LinkMappingService.java

@Override
protected Task<Void> createTask() {
    return new Task<Void>() {
        {
            updateProgress(0, saveMaxEntities);
        }

        @Override
        protected Void call() throws Exception {
            LoggerFacade.getDefault().deactivate(Boolean.TRUE);

            final StopWatch stopWatch = new StopWatch();
            stopWatch.start();

            /*
             1) iterate over all links
             2) if random > 0.005d, do steps 4 and 5
             3) otherwise create the link without a parent
             4) pick 1-10 terms and create a LinkMapping for each
                - i.e. the link is mapped to 1-10 terms
             5) pick 0-10 topics and create a LinkMapping for each
                - i.e. the link is mapped to 0-10 topics
            */

            final ObservableList<Link> links = SqlProvider.getDefault().findAllLinks();
            final ObservableList<Term> terms = SqlProvider.getDefault().findAllTerms();
            final int sizeTerms = terms.size();
            final ObservableList<Topic> topics = SqlProvider.getDefault().findAllTopics();
            final int sizeTopics = topics.size();
            final AtomicInteger index = new AtomicInteger(0);

            final CrudService crudService = DatabaseFacade.getDefault().getCrudService(entityName);
            final AtomicLong id = new AtomicLong(
                    -1_000_000_000L + DatabaseFacade.getDefault().getCrudService().count(entityName));
            links.stream() // 1
                    .forEach(link -> {
                        // 2) Decide whether the [Link] should have a parent
                        final double random = TestdataGenerator.RANDOM.nextDouble();
                        if (random > 0.005d) {
                            // 4) Create [Link]s with parent [Term]
                            final int maxTerms = TestdataGenerator.RANDOM.nextInt(10) + 1;
                            for (int i = 0; i < maxTerms; i++) {
                                final LinkMapping lm = ModelProvider.getDefault().getLinkMapping();
                                lm.setId(id.getAndIncrement());

                                final Term term = terms.get(TestdataGenerator.RANDOM.nextInt(sizeTerms));
                                lm.setParentId(term.getId());
                                lm.setParentType(LinkMappingType.TERM);

                                lm.setChildId(link.getId());
                                lm.setChildType(LinkMappingType.LINK);

                                crudService.create(lm);
                            }

                            // 5) Create [Link]s with parent [Topic]
                            final int maxTopics = TestdataGenerator.RANDOM.nextInt(11);
                            for (int i = 0; i < maxTopics; i++) {
                                final LinkMapping lm = ModelProvider.getDefault().getLinkMapping();
                                lm.setId(id.getAndIncrement());

                                final Topic topic = topics.get(TestdataGenerator.RANDOM.nextInt(sizeTopics));
                                lm.setParentId(topic.getId());
                                lm.setParentType(LinkMappingType.TOPIC);

                                lm.setChildId(link.getId());
                                lm.setChildType(LinkMappingType.LINK);

                                crudService.create(lm);
                            }
                        } else {
                            // 3) Some [Link]s have no parent
                            final LinkMapping lm = ModelProvider.getDefault().getLinkMapping();
                            lm.setId(id.getAndIncrement());
                            lm.setParentId(IDefaultConfiguration.DEFAULT_ID);
                            lm.setParentType(LinkMappingType.NOT_DEFINED);
                            lm.setChildId(link.getId());
                            lm.setChildType(LinkMappingType.LINK);

                            crudService.create(lm);
                        }

                        updateProgress(index.getAndIncrement(), saveMaxEntities);
                    });

            LoggerFacade.getDefault().deactivate(Boolean.FALSE);
            stopWatch.split();
            LoggerFacade.getDefault().debug(this.getClass(),
                    "  + " + stopWatch.toSplitString() + " for " + saveMaxEntities + " LinkMappings."); // NOI18N
            stopWatch.stop();

            return null;
        }
    };
}

From source file:no.difi.sdp.client.asice.signature.CreateSignatureTest.java

@Test
public void multithreaded_signing() throws Exception {
    List<Thread> threads = new ArrayList<Thread>();
    final AtomicInteger fails = new AtomicInteger(0);
    for (int i = 0; i < 50; i++) {
        Thread t = new Thread() {
            @Override
            public void run() {
                for (int j = 0; j < 20; j++) {
                    Signature signature = sut.createSignature(noekkelpar, files);
                    if (!verify_signature(signature)) {
                        fails.incrementAndGet();
                    }
                    if (fails.get() > 0) {
                        break;
                    }
                }
            }
        };
        threads.add(t);
        t.start();
    }
    for (Thread t : threads) {
        t.join();
    }
    if (fails.get() > 0) {
        fail("Signature validation failed");
    }
}

From source file:com.ict.dtube.namesrv.NamesrvStartup.java

public static NamesrvController main0(String[] args) {
    System.setProperty(RemotingCommand.RemotingVersionKey, Integer.toString(MQVersion.CurrentVersion));

    // Default the Socket send buffer size when it is not set via system property
    if (null == System.getProperty(NettySystemConfig.SystemPropertySocketSndbufSize)) {
        NettySystemConfig.SocketSndbufSize = 2048;
    }

    // Default the Socket receive buffer size when it is not set via system property
    if (null == System.getProperty(NettySystemConfig.SystemPropertySocketRcvbufSize)) {
        NettySystemConfig.SocketRcvbufSize = 1024;
    }

    try {
        // Parse the command line
        Options options = ServerUtil.buildCommandlineOptions(new Options());
        commandLine = ServerUtil.parseCmdLine("mqnamesrv", args, buildCommandlineOptions(options),
                new PosixParser());
        if (null == commandLine) {
            System.exit(-1);
            return null;
        }

        // Load the name server and Netty server configuration
        final NamesrvConfig namesrvConfig = new NamesrvConfig();
        final NettyServerConfig nettyServerConfig = new NettyServerConfig();
        nettyServerConfig.setListenPort(9876);
        if (commandLine.hasOption('c')) {
            String file = commandLine.getOptionValue('c');
            if (file != null) {
                InputStream in = new BufferedInputStream(new FileInputStream(file));
                properties = new Properties();
                properties.load(in);
                MixAll.properties2Object(properties, namesrvConfig);
                MixAll.properties2Object(properties, nettyServerConfig);
                System.out.println("load config properties file OK, " + file);
                in.close();
            }
        }

        // Print the effective configuration and exit when '-p' is given
        if (commandLine.hasOption('p')) {
            MixAll.printObjectProperties(null, namesrvConfig);
            MixAll.printObjectProperties(null, nettyServerConfig);
            System.exit(0);
        }

        MixAll.properties2Object(ServerUtil.commandLine2Properties(commandLine), namesrvConfig);

        if (null == namesrvConfig.getDtubeHome()) {
            System.out.println("Please set the " + MixAll.DTUBE_HOME_ENV
                    + " variable in your environment to match the location of the Dtube installation");
            System.exit(-2);
        }

        // Initialize Logback
        LoggerContext lc = (LoggerContext) LoggerFactory.getILoggerFactory();
        JoranConfigurator configurator = new JoranConfigurator();
        configurator.setContext(lc);
        lc.reset();
        configurator.doConfigure(namesrvConfig.getDtubeHome() + "/conf/logback_namesrv.xml");
        final Logger log = LoggerFactory.getLogger(LoggerName.NamesrvLoggerName);

        // Log the effective configuration
        MixAll.printObjectProperties(log, namesrvConfig);
        MixAll.printObjectProperties(log, nettyServerConfig);

        // Create and initialize the name server controller
        final NamesrvController controller = new NamesrvController(namesrvConfig, nettyServerConfig);
        boolean initResult = controller.initialize();
        if (!initResult) {
            controller.shutdown();
            System.exit(-3);
        }

        Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
            private volatile boolean hasShutdown = false;
            private AtomicInteger shutdownTimes = new AtomicInteger(0);

            @Override
            public void run() {
                synchronized (this) {
                    log.info("shutdown hook was invoked, " + this.shutdownTimes.incrementAndGet());
                    if (!this.hasShutdown) {
                        this.hasShutdown = true;
                        long beginTime = System.currentTimeMillis();
                        controller.shutdown();
                        long consumingTimeTotal = System.currentTimeMillis() - beginTime;
                        log.info("shutdown hook over, consuming time total(ms): " + consumingTimeTotal);
                    }
                }
            }
        }, "ShutdownHook"));

        // Start the controller
        controller.start();

        String tip = "The Name Server boot success.";
        log.info(tip);
        System.out.println(tip);

        return controller;
    } catch (Throwable e) {
        e.printStackTrace();
        System.exit(-1);
    }

    return null;
}
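
In the shutdown hook above, shutdownTimes merely counts and logs how often the hook fires; the hasShutdown flag is what makes the shutdown idempotent. A single AtomicInteger can serve as both counter and guard, as in this sketch (hypothetical names):

import java.util.concurrent.atomic.AtomicInteger;

public class ShutdownGuard {
    private final AtomicInteger shutdownTimes = new AtomicInteger(0);

    public void shutdownOnce(Runnable shutdown) {
        // getAndIncrement() returns 0 exactly once, no matter how many
        // threads race into this method; later calls only bump the count.
        if (shutdownTimes.getAndIncrement() == 0) {
            shutdown.run();
        }
    }
}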