Example usage for java.util.concurrent.atomic AtomicInteger AtomicInteger(int initialValue)

Introduction

On this page you can find example usage of the AtomicInteger(int initialValue) constructor from java.util.concurrent.atomic.

Prototype

public AtomicInteger(int initialValue) 

Document

Creates a new AtomicInteger with the given initial value.
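
As a quick, self-contained sketch (not taken from any of the sources listed below; the class name AtomicIntegerBasics is arbitrary), the constructor simply seeds the value that all later atomic operations build on:

import java.util.concurrent.atomic.AtomicInteger;

public class AtomicIntegerBasics {
    public static void main(String[] args) {
        AtomicInteger counter = new AtomicInteger(5);   // start the counter at 5

        counter.incrementAndGet();                      // atomically 5 -> 6
        counter.addAndGet(10);                          // atomically 6 -> 16
        boolean swapped = counter.compareAndSet(16, 0); // true; value is now 0

        System.out.println(counter.get() + " " + swapped); // prints "0 true"
    }
}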

Usage

From source file:Main.java

public static void main(String[] argv) throws Exception {
    AtomicInteger atomicInteger = new AtomicInteger(20);

    System.out.println(atomicInteger.get());
}

From source file:examples.CheckSessionState.java

public static void main(String[] args) {
    if (args.length == 0) {
        System.err.println("At least one path to serialized session required.");
        System.exit(1);
    }
    for (String sessionStatePath : args) {
        try {
            File sessionStateFile = new File(sessionStatePath);
            SessionState sessionState = JACKSON.readValue(sessionStateFile, SessionState.class);
            final AtomicInteger idx = new AtomicInteger(0);
            sessionState.foreachPartition(new Action1<PartitionState>() {
                @Override
                public void call(PartitionState state) {
                    int partition = idx.getAndIncrement();
                    if (lessThan(state.getEndSeqno(), state.getStartSeqno())) {
                        System.out.printf("stream request for partition %d will fail because "
                                + "start sequence number (%d) is larger than " + "end sequence number (%d)\n",
                                partition, state.getStartSeqno(), state.getEndSeqno());
                    }
                    if (lessThan(state.getStartSeqno(), state.getSnapshotStartSeqno())) {
                        System.out.printf(
                                "stream request for partition %d will fail because "
                                        + "snapshot start sequence number (%d) must not be larger than "
                                        + "start sequence number (%d)\n",
                                partition, state.getSnapshotStartSeqno(), state.getStartSeqno());
                    }
                    if (lessThan(state.getSnapshotEndSeqno(), state.getStartSeqno())) {
                        System.out.printf(
                                "stream request for partition %d will fail because "
                                        + "start sequence number (%d) must not be larger than "
                                        + "snapshot end sequence number (%d)\n",
                                partition, state.getStartSeqno(), state.getSnapshotEndSeqno());
                    }
                }
            });
        } catch (IOException e) {
            System.out.println("Failed to decode " + sessionStatePath + ": " + e);
        }
    }
}

From source file:csv.sorting.PrepareWeatherData.java

public static void main(String[] args) throws Exception {

    // Path to read the CSV data from:
    final Path csvStationDataFilePath = FileSystems.getDefault()
            .getPath("C:\\Users\\philipp\\Downloads\\csv\\201503station.txt");
    final Path csvLocalWeatherDataUnsortedFilePath = FileSystems.getDefault()
            .getPath("C:\\Users\\philipp\\Downloads\\csv\\201503hourly.txt");
    final Path csvLocalWeatherDataSortedFilePath = FileSystems.getDefault()
            .getPath("C:\\Users\\philipp\\Downloads\\csv\\201503hourly_sorted.txt");

    // A map between the WBAN and Station for faster Lookups:
    final Map<String, Station> stationMap = getStationMap(csvStationDataFilePath);

    // Holds the List of Sorted DateTimes (including ZoneOffset):
    List<Integer> indices = new ArrayList<>();

    // Comparator for sorting the File:
    Comparator<OffsetDateTime> byMeasurementTime = (e1, e2) -> e1.compareTo(e2);

    // Get the sorted indices from the stream of LocalWeatherData Elements:
    try (Stream<CsvMappingResult<csv.model.LocalWeatherData>> stream = getLocalWeatherData(
            csvLocalWeatherDataUnsortedFilePath)) {

        // Holds the current line index, when processing the input Stream:
        AtomicInteger currentIndex = new AtomicInteger(1);

        // We want to get a list of indices, which sorts the CSV file by measurement time:
        indices = stream
                // Skip the CSV Header:
                .skip(1)
                // Start by enumerating ALL mapping results:
                .map(x -> new ImmutablePair<>(currentIndex.getAndAdd(1), x))
                // Then only take those lines, that are actually valid:
                .filter(x -> x.getRight().isValid())
                // Now take the parsed entity from the CsvMappingResult:
                .map(x -> new ImmutablePair<>(x.getLeft(), x.getRight().getResult()))
                // Take only those measurements, that are also available in the list of stations:
                .filter(x -> stationMap.containsKey(x.getRight().getWban()))
                // Get the OffsetDateTime from the LocalWeatherData, which includes the ZoneOffset of the Station:
                .map(x -> {
                    // Get the matching station:
                    csv.model.Station station = stationMap.get(x.getRight().getWban());
                    // Calculate the OffsetDateTime from the given measurement:
                    OffsetDateTime measurementTime = OffsetDateTime.of(x.getRight().getDate(),
                            x.getRight().getTime(), ZoneOffset.ofHours(0));
                    // Build the Immutable pair with the Index again:
                    return new ImmutablePair<>(x.getLeft(), measurementTime);
                })
                // Now sort the Measurements by their Timestamp:
                .sorted((x, y) -> byMeasurementTime.compare(x.getRight(), y.getRight()))
                // Take only the Index:
                .map(x -> x.getLeft())
                // And turn it into a List:
                .collect(Collectors.toList());
    }

    // Now sorts the File by Line Number:
    writeSortedFileByIndices(csvLocalWeatherDataUnsortedFilePath, indices, csvLocalWeatherDataSortedFilePath);
}
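
The example above uses the AtomicInteger only as a running line index, because a lambda cannot mutate a plain local int (locals captured by a lambda must be effectively final). A stripped-down sketch of that enumeration pattern, assuming nothing more than a List<String> of input lines (StreamEnumerationSketch is a made-up class name), could look like this:

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;

public class StreamEnumerationSketch {
    public static void main(String[] args) {
        List<String> lines = Arrays.asList("alpha", "beta", "gamma");

        // AtomicInteger works around the "effectively final" restriction on captured locals.
        AtomicInteger index = new AtomicInteger(0);

        List<String> numbered = lines.stream()
                .map(line -> index.getAndIncrement() + ": " + line)
                .collect(Collectors.toList());

        numbered.forEach(System.out::println); // 0: alpha, 1: beta, 2: gamma
    }
}

Note that this trick only yields a stable numbering on a sequential stream; on a parallel stream the counter is still thread-safe, but the indices are no longer assigned in encounter order.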

From source file:com.weibo.motan.demo.client.DemoRpcClient.java

public static void main(String[] args) throws Exception {
    final DescriptiveStatistics stats = new SynchronizedDescriptiveStatistics();

    int threads = Integer.parseInt(args[0]);

    DubboBenchmark.BenchmarkMessage msg = prepareArgs();
    final byte[] msgBytes = msg.toByteArray();

    int n = 1000000;
    final CountDownLatch latch = new CountDownLatch(n);

    ExecutorService es = Executors.newFixedThreadPool(threads);

    final AtomicInteger trans = new AtomicInteger(0);
    final AtomicInteger transOK = new AtomicInteger(0);

    ApplicationContext ctx = new ClassPathXmlApplicationContext(
            new String[] { "classpath:motan_demo_client.xml" });

    MotanDemoService service = (MotanDemoService) ctx.getBean("motanDemoReferer");

    long start = System.currentTimeMillis();
    for (int i = 0; i < n; i++) {
        es.submit(() -> {
            try {

                long t = System.currentTimeMillis();
                DubboBenchmark.BenchmarkMessage m = testSay(service, msgBytes);
                t = System.currentTimeMillis() - t;
                stats.addValue(t);

                trans.incrementAndGet();

                if (m != null && m.getField1().equals("OK")) {
                    transOK.incrementAndGet();
                }

            } finally {
                latch.countDown();
            }
        });
    }

    latch.await();

    start = System.currentTimeMillis() - start;

    System.out.printf("sent     requests    : %d\n", n);
    System.out.printf("received requests    : %d\n", trans.get());
    System.out.printf("received requests_OK : %d\n", transOK.get());
    System.out.printf("throughput  (TPS)    : %d\n", n * 1000 / start);

    System.out.printf("mean: %f\n", stats.getMean());
    System.out.printf("median: %f\n", stats.getPercentile(50));
    System.out.printf("max: %f\n", stats.getMax());
    System.out.printf("min: %f\n", stats.getMin());

    System.out.printf("99P: %f\n", stats.getPercentile(90));

}
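
In the benchmark above, trans and transOK are shared by every task submitted to the pool, and AtomicInteger keeps those increments race-free without explicit locking. A stripped-down sketch of the same counter-plus-latch pattern (SharedCounterSketch is a made-up name, and the task body is a trivial stand-in for the real RPC call) might look like this:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;

public class SharedCounterSketch {
    public static void main(String[] args) throws InterruptedException {
        final int tasks = 10_000;
        final AtomicInteger completed = new AtomicInteger(0); // shared, thread-safe counter
        final CountDownLatch latch = new CountDownLatch(tasks);
        ExecutorService pool = Executors.newFixedThreadPool(4);

        for (int i = 0; i < tasks; i++) {
            pool.submit(() -> {
                try {
                    completed.incrementAndGet(); // stands in for the real work
                } finally {
                    latch.countDown();           // always release the latch, even on failure
                }
            });
        }

        latch.await();   // wait until every task has finished
        pool.shutdown();
        System.out.println("completed = " + completed.get()); // 10000
    }
}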

From source file:PinotThroughput.java

@SuppressWarnings("InfiniteLoopStatement")
public static void main(String[] args) throws Exception {
    final int numQueries = QUERIES.length;
    final Random random = new Random(RANDOM_SEED);
    final AtomicInteger counter = new AtomicInteger(0);
    final AtomicLong totalResponseTime = new AtomicLong(0L);
    final ExecutorService executorService = Executors.newFixedThreadPool(NUM_CLIENTS);

    for (int i = 0; i < NUM_CLIENTS; i++) {
        executorService.submit(new Runnable() {
            @Override
            public void run() {
                try (CloseableHttpClient client = HttpClients.createDefault()) {
                    HttpPost post = new HttpPost("http://localhost:8099/query");
                    CloseableHttpResponse res;
                    while (true) {
                        String query = QUERIES[random.nextInt(numQueries)];
                        post.setEntity(new StringEntity("{\"pql\":\"" + query + "\"}"));
                        long start = System.currentTimeMillis();
                        res = client.execute(post);
                        res.close();
                        counter.getAndIncrement();
                        totalResponseTime.getAndAdd(System.currentTimeMillis() - start);
                    }
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        });
    }

    long startTime = System.currentTimeMillis();
    while (true) {
        Thread.sleep(REPORT_INTERVAL_MILLIS);
        double timePassedSeconds = ((double) (System.currentTimeMillis() - startTime)) / MILLIS_PER_SECOND;
        int count = counter.get();
        double avgResponseTime = ((double) totalResponseTime.get()) / count;
        System.out.println("Time Passed: " + timePassedSeconds + "s, Query Executed: " + count + ", QPS: "
                + count / timePassedSeconds + ", Avg Response Time: " + avgResponseTime + "ms");
    }
}

From source file:com.linkedin.pinot.perf.FilterOperatorBenchmark.java

public static void main(String[] args) throws Exception {
    String rootDir = args[0];
    File[] segmentDirs = new File(rootDir).listFiles();
    String query = args[1];
    AtomicInteger totalDocsMatched = new AtomicInteger(0);
    Pql2Compiler pql2Compiler = new Pql2Compiler();
    BrokerRequest brokerRequest = pql2Compiler.compileToBrokerRequest(query);
    List<Callable<Void>> segmentProcessors = new ArrayList<>();
    long[] timesSpent = new long[segmentDirs.length];
    for (int i = 0; i < segmentDirs.length; i++) {
        File indexSegmentDir = segmentDirs[i];
        System.out.println("Loading " + indexSegmentDir.getName());
        Configuration tableDataManagerConfig = new PropertiesConfiguration();
        List<String> invertedColumns = new ArrayList<>();
        FilenameFilter filter = new FilenameFilter() {

            @Override
            public boolean accept(File dir, String name) {
                return name.endsWith(".bitmap.inv");
            }
        };
        String[] indexFiles = indexSegmentDir.list(filter);
        for (String indexFileName : indexFiles) {
            invertedColumns.add(indexFileName.replace(".bitmap.inv", ""));
        }
        tableDataManagerConfig.setProperty(IndexLoadingConfigMetadata.KEY_OF_LOADING_INVERTED_INDEX,
                invertedColumns);
        IndexLoadingConfigMetadata indexLoadingConfigMetadata = new IndexLoadingConfigMetadata(
                tableDataManagerConfig);
        IndexSegmentImpl indexSegmentImpl = (IndexSegmentImpl) Loaders.IndexSegment.load(indexSegmentDir,
                ReadMode.heap, indexLoadingConfigMetadata);
        segmentProcessors
                .add(new SegmentProcessor(i, indexSegmentImpl, brokerRequest, totalDocsMatched, timesSpent));
    }
    ExecutorService executorService = Executors.newCachedThreadPool();
    for (int run = 0; run < 5; run++) {
        System.out.println("START RUN:" + run);
        totalDocsMatched.set(0);
        long start = System.currentTimeMillis();
        List<Future<Void>> futures = executorService.invokeAll(segmentProcessors);
        for (int i = 0; i < futures.size(); i++) {
            futures.get(i).get();
        }
        long end = System.currentTimeMillis();
        System.out.println("Total docs matched:" + totalDocsMatched + " took:" + (end - start));
        System.out.println("Times spent:" + Arrays.toString(timesSpent));
        System.out.println("END RUN:" + run);
    }
    System.exit(0);
}

From source file:DruidThroughput.java

@SuppressWarnings("InfiniteLoopStatement")
public static void main(String[] args) throws Exception {
    final int numQueries = QUERIES.length;
    final Random random = new Random(RANDOM_SEED);
    final AtomicInteger counter = new AtomicInteger(0);
    final AtomicLong totalResponseTime = new AtomicLong(0L);
    final ExecutorService executorService = Executors.newFixedThreadPool(NUM_CLIENTS);

    for (int i = 0; i < NUM_CLIENTS; i++) {
        executorService.submit(new Runnable() {
            @Override
            public void run() {
                try (CloseableHttpClient client = HttpClients.createDefault()) {
                    HttpPost post = new HttpPost("http://localhost:8082/druid/v2/?pretty");
                    post.addHeader("content-type", "application/json");
                    CloseableHttpResponse res;
                    while (true) {
                        try (BufferedReader reader = new BufferedReader(new FileReader(
                                QUERY_FILE_DIR + File.separator + random.nextInt(numQueries) + ".json"))) {
                            int length = reader.read(BUFFER);
                            post.setEntity(new StringEntity(new String(BUFFER, 0, length)));
                        }
                        long start = System.currentTimeMillis();
                        res = client.execute(post);
                        res.close();
                        counter.getAndIncrement();
                        totalResponseTime.getAndAdd(System.currentTimeMillis() - start);
                    }
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        });
    }

    long startTime = System.currentTimeMillis();
    while (true) {
        Thread.sleep(REPORT_INTERVAL_MILLIS);
        double timePassedSeconds = ((double) (System.currentTimeMillis() - startTime)) / MILLIS_PER_SECOND;
        int count = counter.get();
        double avgResponseTime = ((double) totalResponseTime.get()) / count;
        System.out.println("Time Passed: " + timePassedSeconds + "s, Query Executed: " + count + ", QPS: "
                + count / timePassedSeconds + ", Avg Response Time: " + avgResponseTime + "ms");
    }
}

From source file:com.linkedin.pinotdruidbenchmark.PinotThroughput.java

@SuppressWarnings("InfiniteLoopStatement")
public static void main(String[] args) throws Exception {
    if (args.length != 3 && args.length != 4) {
        System.err.println(
                "3 or 4 arguments required: QUERY_DIR, RESOURCE_URL, NUM_CLIENTS, TEST_TIME (seconds).");
        return;
    }

    File queryDir = new File(args[0]);
    String resourceUrl = args[1];
    final int numClients = Integer.parseInt(args[2]);
    final long endTime;
    if (args.length == 3) {
        endTime = Long.MAX_VALUE;
    } else {
        endTime = System.currentTimeMillis() + Integer.parseInt(args[3]) * MILLIS_PER_SECOND;
    }

    File[] queryFiles = queryDir.listFiles();
    assert queryFiles != null;
    Arrays.sort(queryFiles);

    final int numQueries = queryFiles.length;
    final HttpPost[] httpPosts = new HttpPost[numQueries];
    for (int i = 0; i < numQueries; i++) {
        HttpPost httpPost = new HttpPost(resourceUrl);
        String query = new BufferedReader(new FileReader(queryFiles[i])).readLine();
        httpPost.setEntity(new StringEntity("{\"pql\":\"" + query + "\"}"));
        httpPosts[i] = httpPost;
    }

    final AtomicInteger counter = new AtomicInteger(0);
    final AtomicLong totalResponseTime = new AtomicLong(0L);
    final ExecutorService executorService = Executors.newFixedThreadPool(numClients);

    for (int i = 0; i < numClients; i++) {
        executorService.submit(new Runnable() {
            @Override
            public void run() {
                try (CloseableHttpClient httpClient = HttpClients.createDefault()) {
                    while (System.currentTimeMillis() < endTime) {
                        long startTime = System.currentTimeMillis();
                        CloseableHttpResponse httpResponse = httpClient
                                .execute(httpPosts[RANDOM.nextInt(numQueries)]);
                        httpResponse.close();
                        long responseTime = System.currentTimeMillis() - startTime;
                        counter.getAndIncrement();
                        totalResponseTime.getAndAdd(responseTime);
                    }
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        });
    }
    executorService.shutdown();

    long startTime = System.currentTimeMillis();
    while (System.currentTimeMillis() < endTime) {
        Thread.sleep(REPORT_INTERVAL_MILLIS);
        double timePassedSeconds = ((double) (System.currentTimeMillis() - startTime)) / MILLIS_PER_SECOND;
        int count = counter.get();
        double avgResponseTime = ((double) totalResponseTime.get()) / count;
        System.out.println("Time Passed: " + timePassedSeconds + "s, Query Executed: " + count + ", QPS: "
                + count / timePassedSeconds + ", Avg Response Time: " + avgResponseTime + "ms");
    }
}

From source file:com.fjn.helper.frameworkex.apache.commons.pool.connectionPool.ConnectionManager.java

public static void main(String[] args) {
    final ConnectionManager mgr = new ConnectionManager();
    mgr.connFactory = new ConnectionFactory();
    mgr.connFactory.setDriverClass("com.mysql.jdbc.Driver");
    mgr.connFactory.setPassword("mysql");
    mgr.connFactory.setUsername("mysql");
    mgr.connFactory.setUrl("url:localhost:3306"); // database connection URL (placeholder)

    mgr.initConnectionPool(1000, 50, 5, 1000 * 60);
    mgr.pool = mgr.connPoolFactory.createPool();

    final AtomicInteger count = new AtomicInteger(0);

    int threadNum = Runtime.getRuntime().availableProcessors();
    ExecutorService client = Executors.newFixedThreadPool(threadNum);
    for (int i = 0; i < threadNum; i++) {
        client.submit(new Runnable() {
            @Override
            public void run() {
                while (true && count.get() < 100) {
                    try {
                        Thread.sleep(500);
                    } catch (InterruptedException e1) {
                        e1.printStackTrace();
                    }
                    Connection connection = null;

                    try {
                        connection = (Connection) mgr.pool.borrowObject();
                        try {

                            int value = count.incrementAndGet();
                            if (value < 100) {
                                String threadName = Thread.currentThread().getName();

                                int activeNum = mgr.pool.getNumActive();
                                int idleNum = mgr.pool.getNumIdle();
                                String content = "ThreadName: " + threadName + "\t SQL: "
                                        + "insert into tableA ( ct ) values ('" + value + "'); \t activeNum="
                                        + activeNum + "\t idleNum=" + idleNum;
                                System.out.println(content);
                            }

                        } catch (Exception e) {
                            mgr.pool.invalidateObject(connection);
                            connection = null;
                        } finally {
                            // make sure the object is returned to the pool
                            if (null != connection) {
                                mgr.pool.returnObject(connection);
                            }
                        }
                    } catch (Exception e) {
                        // failed to borrow an object
                    }

                }
            }
        });
    }
}

From source file:com.linkedin.pinotdruidbenchmark.DruidThroughput.java

@SuppressWarnings("InfiniteLoopStatement")
public static void main(String[] args) throws Exception {
    if (args.length != 3 && args.length != 4) {
        System.err.println(
                "3 or 4 arguments required: QUERY_DIR, RESOURCE_URL, NUM_CLIENTS, TEST_TIME (seconds).");
        return;
    }

    File queryDir = new File(args[0]);
    String resourceUrl = args[1];
    final int numClients = Integer.parseInt(args[2]);
    final long endTime;
    if (args.length == 3) {
        endTime = Long.MAX_VALUE;
    } else {
        endTime = System.currentTimeMillis() + Integer.parseInt(args[3]) * MILLIS_PER_SECOND;
    }

    File[] queryFiles = queryDir.listFiles();
    assert queryFiles != null;
    Arrays.sort(queryFiles);

    final int numQueries = queryFiles.length;
    final HttpPost[] httpPosts = new HttpPost[numQueries];
    for (int i = 0; i < numQueries; i++) {
        HttpPost httpPost = new HttpPost(resourceUrl);
        httpPost.addHeader("content-type", "application/json");
        StringBuilder stringBuilder = new StringBuilder();
        try (BufferedReader bufferedReader = new BufferedReader(new FileReader(queryFiles[i]))) {
            int length;
            while ((length = bufferedReader.read(CHAR_BUFFER)) > 0) {
                stringBuilder.append(new String(CHAR_BUFFER, 0, length));
            }
        }
        String query = stringBuilder.toString();
        httpPost.setEntity(new StringEntity(query));
        httpPosts[i] = httpPost;
    }

    final AtomicInteger counter = new AtomicInteger(0);
    final AtomicLong totalResponseTime = new AtomicLong(0L);
    final ExecutorService executorService = Executors.newFixedThreadPool(numClients);

    for (int i = 0; i < numClients; i++) {
        executorService.submit(new Runnable() {
            @Override
            public void run() {
                try (CloseableHttpClient httpClient = HttpClients.createDefault()) {
                    while (System.currentTimeMillis() < endTime) {
                        long startTime = System.currentTimeMillis();
                        CloseableHttpResponse httpResponse = httpClient
                                .execute(httpPosts[RANDOM.nextInt(numQueries)]);
                        httpResponse.close();
                        long responseTime = System.currentTimeMillis() - startTime;
                        counter.getAndIncrement();
                        totalResponseTime.getAndAdd(responseTime);
                    }
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        });
    }
    executorService.shutdown();

    long startTime = System.currentTimeMillis();
    while (System.currentTimeMillis() < endTime) {
        Thread.sleep(REPORT_INTERVAL_MILLIS);
        double timePassedSeconds = ((double) (System.currentTimeMillis() - startTime)) / MILLIS_PER_SECOND;
        int count = counter.get();
        double avgResponseTime = ((double) totalResponseTime.get()) / count;
        System.out.println("Time Passed: " + timePassedSeconds + "s, Query Executed: " + count + ", QPS: "
                + count / timePassedSeconds + ", Avg Response Time: " + avgResponseTime + "ms");
    }
}