Example usage for java.util.concurrent.atomic AtomicInteger incrementAndGet

List of usage examples for java.util.concurrent.atomic AtomicInteger incrementAndGet

Introduction

On this page you can find example usages of java.util.concurrent.atomic.AtomicInteger#incrementAndGet.

Prototype

public final int incrementAndGet() 

Document

Atomically increments the current value, with memory effects as specified by VarHandle#getAndAdd.
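
Before the project excerpts below, here is a minimal, self-contained sketch (a hypothetical CounterDemo class, not drawn from any of the listed projects) of why the atomicity matters: several threads bump one shared counter, and incrementAndGet performs each read-modify-write as a single atomic step, which a plain int++ does not.

import java.util.concurrent.atomic.AtomicInteger;

public class CounterDemo {
    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger counter = new AtomicInteger(0);
        Thread[] workers = new Thread[4];
        for (int t = 0; t < workers.length; t++) {
            workers[t] = new Thread(() -> {
                for (int i = 0; i < 1000; i++) {
                    counter.incrementAndGet(); // atomic read-modify-write; no lock needed
                }
            });
            workers[t].start();
        }
        for (Thread w : workers) {
            w.join();
        }
        // Always prints 4000; with a plain int and counter++, updates could be lost.
        System.out.println(counter.get());
    }
}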

Usage

From source file: org.apache.hadoop.hbase.test.MultiThreadedMultiClusterWithCmApiTest.java
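
A multi-threaded HBase load test. Each worker thread bumps the shared threadFinishCounter in its finally block, and the main thread polls the counter until every worker has reported in.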

public static void main(String[] args) throws Exception {
    if (args.length == 0) {
        System.out.println("RunMultiClusterTest " + "<CM-Host-1> " + "<UserName> " + "<Password> "
                + "<Cluster-1> " + "<HBase-Service-1> " + "<CM-Host-2> " + "<UserName-2> " + "<Password-2> "
                + "<Cluster-2> " + "<HBase-Service-2> " + "<tableName> " + "<familyName> " + "<numberOfPuts> "
                + "<millisecond of wait> " + "<numberOfThreads> " + "<outputCsvFile>");
        return; // without this, the argument parsing below would throw ArrayIndexOutOfBoundsException
    }

    final String cmHost1 = args[0];
    final String username1 = args[1];
    final String password1 = args[2];
    final String cluster1 = args[3];
    final String hbaseService1 = args[4];
    final String cmHost2 = args[5];
    final String username2 = args[6];
    final String password2 = args[7];
    final String cluster2 = args[8];
    final String hbaseService2 = args[9];

    LOG.info("--Getting Configurations");

    Configuration config = HBaseMultiClusterConfigUtil.combineConfigurations(cmHost1, username1, password1,
            cluster1, hbaseService1, cmHost2, username2, password2, cluster2, hbaseService2);

    LOG.info("--Got Configuration");

    final String tableName = args[10];
    final String familyName = args[11];
    final int numberOfPuts = Integer.parseInt(args[12]);
    final int millisecondToWait = Integer.parseInt(args[13]);
    final int numberOfThreads = Integer.parseInt(args[14]);
    final String outputCsvFile = args[15];

    LOG.info("Getting HAdmin");

    LOG.info(ConfigConst.HBASE_FAILOVER_CLUSTERS_CONFIG + ": "
            + config.get(ConfigConst.HBASE_FAILOVER_CLUSTERS_CONFIG));
    LOG.info("hbase.zookeeper.quorum: " + config.get("hbase.zookeeper.quorum"));
    LOG.info("hbase.failover.cluster.fail1.hbase.hstore.compaction.max: "
            + config.get("hbase.failover.cluster.fail1.hbase.hstore.compaction.max"));

    HBaseAdmin admin = new HBaseAdminMultiCluster(config);

    try {
        if (admin.tableExists(TableName.valueOf(tableName))) {
            try {
                admin.disableTable(TableName.valueOf(tableName));
            } catch (Exception e) {
                //nothing
            }
            admin.deleteTable(TableName.valueOf(tableName));
        }
    } catch (Exception e) {
        e.printStackTrace();
    }

    LOG.info(" - Got HAdmin:" + admin.getClass());

    HTableDescriptor tableD = new HTableDescriptor(TableName.valueOf(tableName));
    HColumnDescriptor columnD = new HColumnDescriptor(Bytes.toBytes(familyName));
    tableD.addFamily(columnD);

    byte[][] splitKeys = new byte[10][1];
    for (int i = 0; i < 10; i++) {
        splitKeys[i][0] = (byte) ('0' + i); // one split point per leading digit '0'..'9'
    }

    LOG.info(" - About to create Table " + tableD.getName());

    admin.createTable(tableD, splitKeys);

    LOG.info(" - Created Table " + tableD.getName());

    LOG.info("Getting HConnection");

    config.set("hbase.client.retries.number", "1");
    config.set("hbase.client.pause", "1");

    final HConnection connection = HConnectionManagerMultiClusterWrapper.createConnection(config);

    LOG.info(" - Got HConnection: " + connection.getClass());

    LOG.info("Getting HTable");

    // bumped by each worker thread in its finally block; the main thread polls it below
    final AtomicInteger threadFinishCounter = new AtomicInteger(0);

    //Make sure the output folder exists
    File outputFolder = new File(outputCsvFile);
    if (!outputFolder.exists()) {
        outputFolder.mkdirs();
    }

    for (int threadNum = 0; threadNum < numberOfThreads; threadNum++) {

        final BufferedWriter writer = new BufferedWriter(
                new FileWriter(new File(outputCsvFile + "/thread-" + threadNum + ".csv")));

        final int threadFinalNum = threadNum;

        Thread t = new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    Random r = new Random();
                    HTableInterface table = connection.getTable(tableName);
                    HTableStats stats = ((HTableMultiCluster) table).getStats();
                    stats.printStats(writer, 5000);

                    for (int i = 1; i <= numberOfPuts; i++) {

                        int hash = r.nextInt(10);

                        Put put = new Put(Bytes.toBytes(hash + ".key." + i + "."
                                + StringUtils.leftPad(String.valueOf(i * threadFinalNum), 12)));
                        put.add(Bytes.toBytes(familyName), Bytes.toBytes("C"),
                                Bytes.toBytes("Value:" + i * threadFinalNum));
                        table.put(put);

                        Thread.sleep(millisecondToWait);

                        // read back the row written above (same row key as the Put)
                        Get get = new Get(Bytes.toBytes(hash + ".key." + i + "."
                                + StringUtils.leftPad(String.valueOf(i * threadFinalNum), 12)));
                        table.get(get);

                        Thread.sleep(millisecondToWait);

                        //Delete delete = new Delete(Bytes.toBytes(hash + ".key." + StringUtils.leftPad(String.valueOf(i * threadFinalNum), 12)));
                        //table.delete(delete);

                        Thread.sleep(millisecondToWait);

                        if (i % 10 == 0) {
                            writeToSystemOut("{thread:" + threadFinalNum + ",count=" + i + "}", true);
                        } else if (numberOfPuts % 1000 == 0) {
                            writeToSystemOut(".", false);
                        }
                    }
                    stats.stopPrintingStats();
                } catch (Exception e) {
                    e.printStackTrace();
                } finally {
                    threadFinishCounter.incrementAndGet();
                    try {
                        writer.close();
                    } catch (IOException e) {
                        e.printStackTrace();
                    }
                }

            }
        });
        t.start();
    }

    while (threadFinishCounter.get() < numberOfThreads) {
        Thread.sleep(millisecondToWait * 10);
    }

    //admin.disableTable(TableName.valueOf(tableName));
    //admin.deleteTable(TableName.valueOf(tableName));

    System.out.println("close connection");
    connection.close();
    System.out.println("close admin");
    admin.close();
    System.out.println("done");
    System.exit(0);
}

From source file: com.streamsets.pipeline.stage.origin.jdbc.table.AllTypesIT.java
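
A test helper that draws the next sequential record id from a shared AtomicInteger whenever an id field is created.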

private static void createIdField(Map<String, Field> fields, AtomicInteger id_field) {
    fields.put("p_id", Field.create(Field.Type.INTEGER, id_field.incrementAndGet()));
}

From source file: org.apache.ranger.common.TestTimedExecutor.java
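
Per-key counting in a ConcurrentMap<String, AtomicInteger>. The putIfAbsent branch ensures that when two threads race to create the counter for a new key, the loser still applies its increment to the winner's counter.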

static void recordResult(ConcurrentMap<String, AtomicInteger> results, String key) {
    if (results.containsKey(key)) {
        results.get(key).incrementAndGet();
    } else {
        AtomicInteger previous = results.putIfAbsent(key, new AtomicInteger(1));
        if (previous != null) { // a value was already associated with the key
            previous.incrementAndGet();
        }
    }
}

From source file: com.opinionlab.woa.WallOfAwesome.java
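
Tracks how many SockJS sockets are open: incrementAndGet when a socket opens, decrementAndGet when it closes, logging the returned count each time.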

private static SockJSHandler makeEventStream(Vertx vertx) {
    final SockJSHandlerOptions options = new SockJSHandlerOptions().setHeartbeatInterval(2000);
    final SockJSHandler sockJSHandler = SockJSHandler.create(vertx, options);

    sockJSHandler.socketHandler(socket -> {
        final AtomicInteger openCount = new AtomicInteger();
        final AtomicBoolean running = new AtomicBoolean(true);
        LOGGER.info(format("[OPEN] Sockets: %d", openCount.incrementAndGet()));

        socket.endHandler(aVoid -> {
            running.set(false);
            LOGGER.info(format("[CLOSE] Sockets: %d", openCount.decrementAndGet()));
        });

        socket.handler(buffer -> {
            String command = buffer.toString();
            if ("purge".equals(command)) {
                EXECUTOR.execute(() -> {
                    try {
                        AwesomeImap.purge(s -> socket.write(buffer(objectToJson(
                                HashTreePMap.empty().plus("deleted", true).plus("id", s.getId()), NO_TYPES))));
                    } catch (NoSuchProviderException e) {
                        LOGGER.error("Could not purge messages", e);
                    }
                });
            } else {
                LOGGER.error(format("Unknown command: %s", command));
            }
        });

        try {
            final AtomicReference<Date> latestDate = new AtomicReference<>(new Date(0));

            Consumer<Awesome> publishAwesome = awesome -> {
                socket.write(buffer(objectToJson(awesome, NO_TYPES)));

                final Date receivedDate = awesome.getReceivedDate();
                if (latestDate.get().before(receivedDate)) {
                    latestDate.set(receivedDate);
                }
            };
            AwesomeImap.fetchAwesome().forEach(publishAwesome);

            EXECUTOR.execute(() -> {
                LOGGER.info("Polling started.");
                try {
                    while (running.get()) {
                        AwesomeImap.fetchAwesomeSince(latestDate.get()).forEach(publishAwesome);
                        Thread.sleep(1000);
                    }
                } catch (Throwable t) {
                    running.set(false);
                    socket.close();
                    LOGGER.error("Polling ended ABNORMALLY", t);
                } finally {
                    LOGGER.info("Polling ended normally.");
                }
            });
        } catch (MessagingException e) {
            LOGGER.error("Unable to fetch messages.", e);
        }
    });
    return sockJSHandler;
}

From source file: com.khartec.waltz.jobs.sample.MeasurablesGenerator.java
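
Counts rows inserted from inside nested forEach lambdas, where a plain local int would not compile because captured locals must be effectively final; addAndGet folds in the size of the batch insert.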

private static void generateRegions(DSLContext dsl) throws IOException {
    List<String> lines = readLines(OrgUnitGenerator.class.getResourceAsStream("/regions.csv"));

    System.out.println("Deleting existing Regions & Countries ...");
    int deletedCount = dsl.deleteFrom(MEASURABLE)
            .where(MEASURABLE.MEASURABLE_CATEGORY_ID
                    .in(DSL.select(MEASURABLE_CATEGORY.ID).from(MEASURABLE_CATEGORY)
                            .where(MEASURABLE_CATEGORY.EXTERNAL_ID.eq(REGION_CATEGORY_EXTERNAL_ID))))
            .and(MEASURABLE.PROVENANCE.eq("demo")).execute();
    System.out.println("Deleted: " + deletedCount + " existing Regions & Countries");

    Map<String, Map<String, Set<String>>> regionHierarchy = lines.stream().skip(1)
            .map(line -> StringUtils.splitPreserveAllTokens(line, ","))
            .filter(cells -> notEmpty(cells[0]) && notEmpty(cells[6]) && notEmpty(cells[5]))
            .map(cells -> Tuple.tuple(cells[0], cells[6], cells[5]))
            .collect(groupingBy(t -> t.v3, groupingBy(t -> t.v2, mapping(t -> t.v1, toSet()))));

    final long measurableCategoryId = dsl.select(MEASURABLE_CATEGORY.ID).from(MEASURABLE_CATEGORY)
            .where(MEASURABLE_CATEGORY.EXTERNAL_ID.eq(REGION_CATEGORY_EXTERNAL_ID)).fetchAny().value1();

    AtomicInteger insertCount = new AtomicInteger(0);
    regionHierarchy.forEach((region, subRegionMap) -> {
        final long regionId = dsl.insertInto(MEASURABLE)
                .set(createRegion(null, region, measurableCategoryId, false)).returning(MEASURABLE.ID)
                .fetchOne().getId();
        insertCount.incrementAndGet();

        subRegionMap.forEach((subRegion, countries) -> {
            final long subRegionId = dsl.insertInto(MEASURABLE)
                    .set(createRegion(regionId, subRegion, measurableCategoryId, true)).returning(MEASURABLE.ID)
                    .fetchOne().getId();
            insertCount.incrementAndGet();

            insertCount.addAndGet(dsl.batchInsert(countries.stream()
                    .map(country -> createRegion(subRegionId, country, measurableCategoryId, true))
                    .collect(toList())).execute().length);
        });
    });

    System.out.println("Inserted: " + insertCount + " Regions & Countries");
}

From source file: com.khartec.waltz.jobs.sample.BusinessRegionProductHierarchyGenerator.java
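
The same counting pattern as the previous example: one AtomicInteger accumulates insert counts across three levels of nested lambdas while a business/region/product hierarchy is generated.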

private static void generateHierarchy(DSLContext dsl) throws IOException {
    System.out.println("Deleting existing  hierarchy ...");
    int deletedCount = dsl.deleteFrom(MEASURABLE)
            .where(MEASURABLE.MEASURABLE_CATEGORY_ID.in(DSL.select(MEASURABLE_CATEGORY.ID)
                    .from(MEASURABLE_CATEGORY).where(MEASURABLE_CATEGORY.EXTERNAL_ID.eq(CATEGORY_EXTERNAL_ID))))
            .and(MEASURABLE.PROVENANCE.eq("demo")).execute();
    System.out.println("Deleted: " + deletedCount + " existing hierarchy");

    Set<String> topLevelRegions = readRegions();
    Set<String> products = readProducts();

    insertCategoryIfNotExists(dsl);

    final long measurableCategoryId = dsl.select(MEASURABLE_CATEGORY.ID).from(MEASURABLE_CATEGORY)
            .where(MEASURABLE_CATEGORY.EXTERNAL_ID.eq(CATEGORY_EXTERNAL_ID)).fetchAny().value1();

    AtomicInteger insertCount = new AtomicInteger(0);

    Stream.of(businesses).forEach(business -> {
        final long businessId = dsl.insertInto(MEASURABLE)
                .set(createMeasurable(null, business, measurableCategoryId, true)).returning(MEASURABLE.ID)
                .fetchOne().getId();

        insertCount.incrementAndGet();

        topLevelRegions.forEach((region) -> {
            final long regionId = dsl.insertInto(MEASURABLE)
                    .set(createMeasurable(businessId, region, measurableCategoryId, true))
                    .returning(MEASURABLE.ID).fetchOne().getId();
            insertCount.incrementAndGet();

            products.forEach(product -> {
                dsl.insertInto(MEASURABLE).set(createMeasurable(regionId, product, measurableCategoryId, true))
                        .execute();

                insertCount.incrementAndGet();
            });
        });
    });

    System.out.println("Inserted: " + insertCount + " new nodes for hierarchy");
}

From source file: jurls.core.becca.DefaultZiptie.java
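
Accumulates an element count (alongside a running sum in an AtomicDouble) from a matrix-visitor callback, then uses both to compute a generalized mean.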

public static double getGeneralizedMean(RealMatrix c, double exponent, int rowStart, int rowEnd, int colStart,
        int colEnd) {
    AtomicDouble s = new AtomicDouble(0);
    AtomicInteger n = new AtomicInteger(0);
    c.walkInOptimizedOrder(new DefaultRealMatrixPreservingVisitor() {
        @Override
        public void visit(int row, int column, double value) {
            s.addAndGet(Math.pow(value, exponent));
            n.incrementAndGet();
        }
    }, rowStart, rowEnd, colStart, colEnd);

    // generalized (power) mean: (sum(x^p) / n)^(1/p)
    return Math.pow(s.doubleValue() / n.doubleValue(), 1.0 / exponent);
}

From source file: edu.umd.umiacs.clip.tools.math.StatisticalTests.java
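
A randomization test: three counters tally, inside an IntStream range(...).forEach lambda, how often resampled F1, recall, and precision differences exceed the observed deltas.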

private static Triple<Double, Double, Double> pRecPrecF1(int pos, int tpboth, int tp0only, int tp1only,
        int fpboth, int fp0only, int fp1only) {
    double delta_f1 = Math.abs(f1(tp0only + tpboth, fp0only + fpboth, pos - (tp0only + tpboth))
            - f1(tp1only + tpboth, fp1only + fpboth, pos - (tp1only + tpboth)));

    double delta_rec = Math.abs(
            rec(tp0only + tpboth, pos - (tp0only + tpboth)) - rec(tp1only + tpboth, pos - (tp1only + tpboth)));

    double delta_prec = Math
            .abs(prec(tp0only + tpboth, fp0only + fpboth) - prec(tp1only + tpboth, fp1only + fpboth));

    AtomicInteger nc_f1 = new AtomicInteger();
    AtomicInteger nc_rec = new AtomicInteger();
    AtomicInteger nc_prec = new AtomicInteger();

    range(0, NT).forEach(j -> {
        int tp[] = nextPair(tp0only, tp1only, tpboth);
        int fp[] = nextPair(fp0only, fp1only, fpboth);

        if (Math.abs(f1(tp[0], fp[0], pos - tp[0]) - f1(tp[1], fp[1], pos - tp[1])) > delta_f1) {
            nc_f1.incrementAndGet();
        }
        if (Math.abs(rec(tp[0], pos - tp[0]) - rec(tp[1], pos - tp[1])) > delta_rec) {
            nc_rec.incrementAndGet();
        }
        if (Math.abs(prec(tp[0], fp[0]) - prec(tp[1], fp[1])) > delta_prec) {
            nc_prec.incrementAndGet();
        }
    });

    return Triple.of((nc_rec.get() + 1.) / (NT + 1), (nc_prec.get() + 1.) / (NT + 1),
            (nc_f1.get() + 1.) / (NT + 1));
}

From source file: com.msd.gin.halyard.tools.HalyardExportTest.java
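
Counts RDF statements from an RDFHandler callback. Because the handler is an anonymous inner class, the counter it mutates must be an effectively final holder such as an AtomicInteger.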

private static int getTriplesCount(String uri, String compression, RDFFormat format) throws Exception {
    InputStream in = FileSystem.get(URI.create(uri), HBaseServerTestInstance.getInstanceConfig())
            .open(new Path(uri));
    try {
        if (compression != null) {
            in = new CompressorStreamFactory().createCompressorInputStream(compression, in);
        }
        RDFParser parser = Rio.createParser(format);
        final AtomicInteger i = new AtomicInteger();
        parser.setRDFHandler(new AbstractRDFHandler() {
            @Override
            public void handleStatement(Statement st) throws RDFHandlerException {
                i.incrementAndGet();
            }
        });
        parser.parse(in, uri);
        return i.get();
    } finally {
        in.close();
    }
}

From source file: com.example.jendrik.moerder.FCM.MyFcmListenerService.java
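
Uses incrementAndGet to stamp each upstream Firebase Cloud Messaging send with a unique, monotonically increasing message id.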

private static void sendData(JSONObject json, AtomicInteger msgId, String code) {

    fm.send(new RemoteMessage.Builder(SENDER_ID + "@gcm.googleapis.com")
            .setMessageId(Integer.toString(msgId.incrementAndGet())).addData("message", code)
            .addData("data", json.toString()).build());

}