Example usage for java.util.concurrent.atomic AtomicInteger AtomicInteger

List of usage examples for java.util.concurrent.atomic AtomicInteger AtomicInteger

Introduction

In this page you can find the example usage for java.util.concurrent.atomic AtomicInteger AtomicInteger.

Prototype

public AtomicInteger() 

Source Link

Document

Creates a new AtomicInteger with initial value 0.

Usage

From source file:com.btoddb.fastpersitentqueue.flume.FpqChannelTest.java

@Test
public void testThreading() throws Exception {
    final int numEntries = 1000;
    final int numPushers = 4;
    final int numPoppers = 4;
    final int entrySize = 1000;
    channel.setMaxTransactionSize(2000);
    final int popBatchSize = 100;
    channel.setMaxMemorySegmentSizeInBytes(10000000);
    channel.setMaxJournalFileSize(10000000);
    channel.setMaxJournalDurationInMs(30000);
    channel.setFlushPeriodInMs(1000);
    channel.setNumberOfFlushWorkers(4);

    // Fixed seeds keep the sleep jitter reproducible across runs.
    final Random pushRand = new Random(1000L);
    final Random popRand = new Random(1000000L);
    final AtomicInteger pusherFinishCount = new AtomicInteger();
    final AtomicInteger numPops = new AtomicInteger();
    // Each pushed event carries a unique sequence number; pushSum/popSum must
    // agree at the end, proving no event was lost or duplicated.
    final AtomicLong counter = new AtomicLong();
    final AtomicLong pushSum = new AtomicLong();
    final AtomicLong popSum = new AtomicLong();

    channel.start();

    ExecutorService execSrvc = Executors.newFixedThreadPool(numPushers + numPoppers);

    Set<Future> futures = new HashSet<Future>();

    // start pushing
    for (int i = 0; i < numPushers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                for (int i = 0; i < numEntries; i++) {
                    try {
                        long x = counter.getAndIncrement();
                        pushSum.addAndGet(x);

                        Transaction tx = channel.getTransaction();
                        tx.begin();
                        MyEvent event1 = new MyEvent();
                        // Body sized so header ("x" = sequence number) plus body
                        // approximates entrySize; subtract 8 for the size of a long.
                        // BUGFIX: was "numEntries - 8", which only worked because
                        // numEntries happened to equal entrySize.
                        event1.addHeader("x", String.valueOf(x)).setBody(new byte[entrySize - 8]);
                        channel.put(event1);
                        tx.commit();
                        tx.close();

                        Thread.sleep(pushRand.nextInt(5));
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
                pusherFinishCount.incrementAndGet();
            }
        });
        futures.add(future);
    }

    // start popping
    for (int i = 0; i < numPoppers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                // Keep draining until all pushers are done AND the channel is empty.
                while (pusherFinishCount.get() < numPushers || !channel.isEmpty()) {
                    try {
                        Transaction tx = channel.getTransaction();
                        tx.begin();

                        Event event;
                        int count = popBatchSize;
                        // BUGFIX: check the batch budget BEFORE taking from the
                        // channel. The original took an event first and then
                        // evaluated "count-- > 0", so the event taken on the
                        // batch boundary was silently dropped (never counted,
                        // never summed).
                        while (count-- > 0 && null != (event = channel.take())) {
                            popSum.addAndGet(Long.valueOf(event.getHeaders().get("x")));
                            numPops.incrementAndGet();
                        }

                        tx.commit();
                        tx.close();

                        Thread.sleep(popRand.nextInt(10));
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
            }
        });
        futures.add(future);
    }

    // Wait for all workers; tolerate spurious interrupts.
    boolean finished = false;
    while (!finished) {
        try {
            for (Future f : futures) {
                f.get();
            }
            finished = true;
        } catch (InterruptedException e) {
            // ignore and retry; clear the interrupt flag
            Thread.interrupted();
        }
    }

    assertThat(numPops.get(), is(numEntries * numPushers));
    assertThat(channel.isEmpty(), is(true));
    assertThat(pushSum.get(), is(popSum.get()));
}

From source file:com.squarespace.template.CompilerTest.java

@Test
public void testLoggingHook() throws CodeException {
    // Counts how many times the hook fires; exactly one NPE is expected.
    final AtomicInteger hookInvocations = new AtomicInteger();
    LoggingHook loggingHook = new LoggingHook() {
        @Override
        public void log(Exception e) {
            hookInvocations.incrementAndGet();
            assertTrue(e instanceof NullPointerException);
        }
    };
    Context ctx = COMPILER.newExecutor()
            .template("{@|npe}")
            .json("123")
            .safeExecution(true)
            .loggingHook(loggingHook)
            .execute();
    assertEquals(hookInvocations.get(), 1);
    assertEquals(ctx.getErrors().size(), 1);
    assertEquals(ctx.getErrors().get(0).getType(), ExecuteErrorType.UNEXPECTED_ERROR);
}

From source file:com.squarespace.template.HardSoftCodeLimiterTest.java

@Test
public void testBothLimits() throws CodeException {
    // One counter per limit kind; each should trip exactly once.
    final AtomicInteger softHits = new AtomicInteger();
    final AtomicInteger hardHits = new AtomicInteger();
    HardSoftCodeLimiter.Handler handler = new HardSoftCodeLimiter.Handler() {

        @Override
        public void onLimit(Limit limit, HardSoftCodeLimiter limiter) throws CodeExecuteException {
            if (limit.equals(Limit.SOFT)) {
                softHits.incrementAndGet();
            }
            if (limit.equals(Limit.HARD)) {
                hardHits.incrementAndGet();
            }
        }
    };

    CodeLimiter codeLimiter = HardSoftCodeLimiter.builder()
            .setSoftLimit(5)
            .setHardLimit(10)
            .setResolution(1)
            .setHandler(handler)
            .build();

    compiler().newExecutor()
            .template("{.repeated section @}{.even?}{@}{.or}#{.end}{.end}")
            .json("[0,1,2,3,4,5,6,7,8,9]")
            .codeLimiter(codeLimiter)
            .execute();

    assertEquals(softHits.get(), 1);
    assertEquals(hardHits.get(), 1);
}

From source file:org.waarp.openr66.protocol.http.rest.test.HttpTestResponseHandler.java

@Override
protected boolean afterDbOptions(Channel channel, RestArgument ra) throws HttpInvalidAuthenticationException {
    // Global tally of OPTIONS responses handled by the test client.
    HttpTestRestR66Client.count.incrementAndGet();
    boolean newMessage = false;
    AtomicInteger counter = null;
    RestFuture future = channel.attr(HttpRestClientSimpleResponseHandler.RESTARGUMENT).get();
    if (future.getOtherObject() == null) {
        // First response on this channel: fan out one OPTIONS request per
        // handler advertised in the detailed allow-options, and stash an
        // outstanding-reply counter on the future so later responses know
        // when the channel can be closed.
        counter = new AtomicInteger();
        future.setOtherObject(counter);
        JsonNode node = ra.getDetailedAllowOption();
        if (!node.isMissingNode()) {
            for (JsonNode jsonNode : node) {
                Iterator<String> iterator = jsonNode.fieldNames();
                while (iterator.hasNext()) {
                    String name = iterator.next();
                    // NOTE(review): a field carrying a JSON_PATH entry aborts
                    // the scan of this node — presumably it marks a detailed
                    // command description rather than a handler name; confirm.
                    if (!jsonNode.path(name).path(RestArgument.REST_FIELD.JSON_PATH.field).isMissingNode()) {
                        break;
                    }
                    if (name.equals(RootOptionsRestMethodHandler.ROOT)) {
                        // Root handler already queried; skip it.
                        continue;
                    }
                    counter.incrementAndGet();
                    HttpTestRestR66Client.options(channel, name);
                    newMessage = true;
                }
            }
        }
    }
    if (!newMessage) {
        // Follow-up response (or no fan-out happened): consume one pending
        // reply from the counter stored on this channel's future.
        counter = (AtomicInteger) future.getOtherObject();
        newMessage = counter.decrementAndGet() > 0;
        if (!newMessage) {
            // All replies received; clear the marker for this channel.
            future.setOtherObject(null);
        }
    }
    if (!newMessage) {
        // Nothing further expected on this channel: close it.
        WaarpSslUtility.closingSslChannel(channel);
    }
    return newMessage;
}

From source file:net.sf.nutchcontentexporter.NutchToWARCConverter.java

/**
 * Creates a WARC writer for the next output file in the sequence.
 * The file is named {@code <fileName>_<NN><fileExtension>} inside
 * {@code outputDir}, and a warcinfo record is written before returning.
 *
 * @param outputDir         directory the WARC file is created in
 * @param fileName          base name (sequence number and extension appended)
 * @param totalFilesWritten zero-padded sequence number embedded in the name
 * @param fileExtension     extension, including any leading dot
 * @param compressBz2       if true, wrap the stream in bzip2 compression and
 *                          disable the writer's own (gzip) compression
 * @return a writer positioned after its warcinfo record
 * @throws IOException if the file cannot be created or the record written
 */
public WARCWriter prepareOutputWarcFile(File outputDir, String fileName, int totalFilesWritten,
        String fileExtension, boolean compressBz2) throws IOException {
    File warc = new File(outputDir,
            fileName + String.format(Locale.ENGLISH, "_%02d%s", totalFilesWritten, fileExtension));

    System.out.println("Writing to " + warc);

    OutputStream outputStream;
    if (compressBz2) {
        // Bypass the writer's built-in GZ support and compress with bz2 instead.
        outputStream = new BZip2CompressorOutputStream(new BufferedOutputStream(new FileOutputStream(warc)));
    } else {
        // Plain stream; compression (if any) is the writer's responsibility,
        // controlled by the !compressBz2 flag passed in the settings below.
        outputStream = new FileOutputStream(warc);
    }

    // BUGFIX: close the stream if writer construction or the warcinfo record
    // fails — previously the FileOutputStream leaked on any exception here.
    try {
        WARCWriter writer = new WARCWriter(new AtomicInteger(), outputStream, warc,
                new WARCWriterPoolSettingsData("", "", -1, !compressBz2, null, null, generator));

        // warc info record
        writer.writeWarcinfoRecord(warc.getName(), "Made by " + this.getClass().getName() + "/" + getRevision());

        return writer;
    } catch (IOException | RuntimeException e) {
        try {
            outputStream.close();
        } catch (IOException closeFailure) {
            e.addSuppressed(closeFailure);
        }
        throw e;
    }
}

From source file:com.esri.UC.MainActivity.java

@Override
public void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    // NOTE(review): licenseResult is never inspected; setClientId is called
    // only for its side effect. Consider checking or dropping the local.
    LicenseResult licenseResult = ArcGISRuntime.setClientId(AGO_CLIENT_ID);

    setContentView(R.layout.activity_main);

    mGeotriggerBroadcastReceiver = new GeotriggerBroadcastReceiver();
    mShouldCreateTrigger = false;
    mShouldSendNotification = true;
    mMapView = (MapView) findViewById(R.id.map);

    // Tap on the map and show popups for selected features.
    mMapView.setOnSingleTapListener(new OnSingleTapListener() {
        private static final long serialVersionUID = 1L;

        public void onSingleTap(float x, float y) {
            if (mMapView.isLoaded()) {
                // Instantiate a PopupContainer
                popupContainer = new PopupContainer(mMapView);
                int id = popupContainer.hashCode();
                popupDialog = null;
                // Display spinner.
                if (progressDialog == null || !progressDialog.isShowing())
                    progressDialog = ProgressDialog.show(mMapView.getContext(), "", "Querying...");

                // Loop through each layer in the webmap
                int tolerance = 20;
                // NOTE(review): 'env' is never used below, and the envelope is
                // built with a hard-coded 20 instead of 'tolerance' — likely
                // leftover code; confirm before removing.
                Envelope env = new Envelope(mMapView.toMapPoint(x, y), 20 * mMapView.getResolution(),
                        20 * mMapView.getResolution());
                Layer[] layers = mMapView.getLayers();
                // Counts in-flight layer queries so results can be collated.
                count = new AtomicInteger();
                for (Layer layer : layers) {
                    // If the layer has not been initialized or is invisible, do nothing.
                    if (!layer.isInitialized() || !layer.isVisible())
                        continue;

                    if (layer instanceof ArcGISFeatureLayer) {
                        Log.d("querying", "a featurelayer");
                        // Query feature layer and display popups
                        ArcGISFeatureLayer featureLayer = (ArcGISFeatureLayer) layer;
                        if (featureLayer.getPopupInfo() != null) {
                            // Query feature layer which is associated with a popup definition.
                            count.incrementAndGet();
                            new RunQueryFeatureLayerTask(x, y, tolerance, id).execute(featureLayer);
                        }
                    }
                }
            }
        }
    });

    SharedPreferences prefs = getSharedPreferences(MainActivity.class.getSimpleName(), Context.MODE_PRIVATE);
    String registrationId = prefs.getString("registration_id", "");
    Log.d("registration", registrationId);

}

From source file:eu.itesla_project.modules.validation.OverloadValidationTool.java

/**
 * Writes two CSV reports into {@code outputDir}:
 * "comparison.csv" — one row per base case with the load-flow and
 * offline-rule status for every contingency, and
 * "synthesis.csv" — one row per contingency counting how many base cases
 * fell into each (load-flow, offline-rule) OK/NOK combination.
 */
private static void writeCsv(Set<String> contingencyIds,
        Map<String, Map<String, OverloadStatus>> statusPerContingencyPerCase, Path outputDir)
        throws IOException {
    try (BufferedWriter writer = Files.newBufferedWriter(outputDir.resolve("comparison.csv"),
            StandardCharsets.UTF_8)) {
        // Header: two columns per contingency.
        StringBuilder header = new StringBuilder("base case");
        for (String contingencyId : contingencyIds) {
            header.append(CSV_SEPARATOR).append(contingencyId).append(" load flow");
            header.append(CSV_SEPARATOR).append(contingencyId).append(" offline rule");
        }
        writer.write(header.toString());
        writer.newLine();

        for (Map.Entry<String, Map<String, OverloadStatus>> entry : statusPerContingencyPerCase.entrySet()) {
            Map<String, OverloadStatus> statusPerContingency = entry.getValue();
            StringBuilder row = new StringBuilder(entry.getKey());
            for (String contingencyId : contingencyIds) {
                OverloadStatus status = statusPerContingency.get(contingencyId);
                row.append(CSV_SEPARATOR).append(Boolean.toString(status.isLfOk()));
                row.append(CSV_SEPARATOR).append(Boolean.toString(status.isOfflineRuleOk()));
            }
            writer.write(row.toString());
            writer.newLine();
        }
    }

    List<String> categories = Arrays.asList("OK_OK", "NOK_NOK", "OK_NOK", "NOK_OK");

    // Per-contingency counters, one per category, all starting at zero.
    Map<String, Map<String, AtomicInteger>> synthesisPerContingency = new HashMap<>();
    for (String contingencyId : contingencyIds) {
        Map<String, AtomicInteger> counters = new HashMap<>();
        for (String category : categories) {
            counters.put(category, new AtomicInteger());
        }
        synthesisPerContingency.put(contingencyId, counters);
    }

    // Tally each base case into the matching "<lf>_<rule>" bucket.
    for (Map.Entry<String, Map<String, OverloadStatus>> entry : statusPerContingencyPerCase.entrySet()) {
        Map<String, OverloadStatus> statusPerContingency = entry.getValue();
        for (String contingencyId : contingencyIds) {
            OverloadStatus status = statusPerContingency.get(contingencyId);
            String bucket = okToString(status.isLfOk()) + "_" + okToString(status.isOfflineRuleOk());
            synthesisPerContingency.get(contingencyId).get(bucket).incrementAndGet();
        }
    }

    try (BufferedWriter writer = Files.newBufferedWriter(outputDir.resolve("synthesis.csv"),
            StandardCharsets.UTF_8)) {
        StringBuilder header = new StringBuilder("contingency");
        for (String category : categories) {
            header.append(CSV_SEPARATOR).append(category);
        }
        writer.write(header.toString());
        writer.newLine();
        for (Map.Entry<String, Map<String, AtomicInteger>> entry : synthesisPerContingency.entrySet()) {
            Map<String, AtomicInteger> counters = entry.getValue();
            StringBuilder row = new StringBuilder(entry.getKey());
            for (String category : categories) {
                row.append(CSV_SEPARATOR).append(Integer.toString(counters.get(category).get()));
            }
            writer.write(row.toString());
            writer.newLine();
        }
    }
}

From source file:com.microsoft.wake.contrib.grouper.impl.AdaptiveSnowshovelGrouper.java

@Inject
public AdaptiveSnowshovelGrouper(Combiner<OutType, K, V> c, Partitioner<K> p, Extractor<InType, K, V> ext,
        @Parameter(StageConfiguration.StageObserver.class) Observer<Tuple<Integer, OutType>> o,
        @Parameter(StageConfiguration.StageName.class) String stageName,
        @Parameter(InitialPeriod.class) long initialPeriod, @Parameter(MaxPeriod.class) long maxPeriod,
        @Parameter(MinPeriod.class) long minPeriod, @Parameter(Interval.class) long interval)
        throws InjectionException {
    super(stageName);

    // Wire up the grouping pipeline: combiner, partitioner, extractor, and
    // the downstream observer that receives combined tuples.
    this.c = c;
    this.p = p;
    this.ext = ext;
    this.o = o;
    // Output side: a delayed stage drives periodic flushes; its done-handler
    // is captured so input completion can be propagated.
    this.outputHandler = new OutputImpl<Long>();
    this.outputDriver = new InitialDelayStage<Long>(outputHandler, 1, stageName + "-output");
    this.doneHandler = ((InitialDelayStage<Long>) outputDriver).getDoneHandler();
    register = new ConcurrentSkipListMap<>();
    inputDone = false;
    this.inputObserver = this.new InputImpl();
    this.sleeping = new AtomicInteger();
    this.combiningMeter = new Meter(stageName);

    // there is no dependence from input finish to output start
    // The alternative placement of this event is in the first call to onNext,
    // but Output onNext already provides blocking

    // Kick off the first flush after the initial period.
    outputDriver.onNext(new Long(initialPeriod));
    // Bookkeeping for the adaptive flushing-period feedback loop: the period
    // is adjusted every 'interval' based on the observed combining rate,
    // clamped to [minPeriod, maxPeriod].
    prevAggregatedCount = 0;
    prevCombiningRate = currCombiningRate = 0.0;
    prevFlushingPeriod = 0;
    currFlushingPeriod = initialPeriod;
    prevAdjustedTime = startTime = System.nanoTime();

    flushingPeriodInterval = interval;
    this.minPeriod = minPeriod;
    this.maxPeriod = maxPeriod;

}

From source file:com.cloudera.livy.rsc.rpc.TestRpc.java

@Test
public void testCloseListener() throws Exception {
    RpcServer server = autoClose(new RpcServer(emptyConfig));
    Rpc[] rpcs = createRpcConnection(server);
    Rpc client = rpcs[1];

    // The close future must fire exactly once, even if close() is called twice.
    final AtomicInteger closedEvents = new AtomicInteger();
    Utils.addListener(client.getChannel().closeFuture(), new FutureListener<Void>() {
        @Override
        public void onSuccess(Void unused) {
            closedEvents.incrementAndGet();
        }
    });

    client.close();
    client.close();
    assertEquals(1, closedEvents.get());
}

From source file:com.cloudera.livy.client.local.rpc.TestRpc.java

@Test
public void testCloseListener() throws Exception {
    RpcServer server = autoClose(new RpcServer(emptyConfig));
    Rpc[] rpcs = createRpcConnection(server);
    Rpc client = rpcs[1];

    // The listener must fire exactly once, even if close() is called twice.
    final AtomicInteger closedEvents = new AtomicInteger();
    client.addListener(new Rpc.Listener() {
        @Override
        public void rpcClosed(Rpc rpc) {
            closedEvents.incrementAndGet();
        }
    });

    client.close();
    client.close();
    assertEquals(1, closedEvents.get());
}