Example usage for java.util.concurrent.atomic AtomicInteger getAndIncrement

Introduction

On this page you can find example usage for java.util.concurrent.atomic.AtomicInteger.getAndIncrement().

Prototype

public final int getAndIncrement() 

Document

Atomically increments the current value, with memory effects as specified by VarHandle#getAndAdd. Equivalent to getAndAdd(1); the return value is the value the counter held before the increment.
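
Before the project examples below, here is a minimal, self-contained sketch of the method's contract: getAndIncrement() atomically bumps the counter and returns the value it held before the increment, which is what makes it suitable for handing out unique sequence numbers. The class name CounterDemo is illustrative only.

import java.util.concurrent.atomic.AtomicInteger;

public class CounterDemo {
    public static void main(String[] args) {
        AtomicInteger counter = new AtomicInteger(0);

        // getAndIncrement() returns the OLD value, then increments.
        int first = counter.getAndIncrement();  // first == 0, counter is now 1
        int second = counter.getAndIncrement(); // second == 1, counter is now 2

        System.out.println(first);         // 0
        System.out.println(second);        // 1
        System.out.println(counter.get()); // 2
    }
}

Contrast this with incrementAndGet(), which returns the new value; mixing the two up is a common source of off-by-one errors.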

Usage

From source file:org.openhab.binding.russound.internal.rio.source.RioSourceProtocol.java

/**
 * Helper method to handle any media management change. If the channel is the INFO text channel, we delegate to
 * {@link #handleMMInfoText(String)} instead. Otherwise this helper gets the next MM identifier and sends the
 * JSON representation out for the channel change (this ensures a unique message for each MM notification).
 *
 * @param channelId a non-null, non-empty channelId
 * @param value the value for the channel
 * @throws IllegalArgumentException if channelID is null or empty
 */
private void handleMMChange(String channelId, String value) {
    if (StringUtils.isEmpty(channelId)) {
        throw new NullArgumentException("channelId cannot be null or empty");
    }

    final AtomicInteger ai = mmSeqNbrs.get(channelId);
    if (ai == null) {
        logger.error("Channel {} does not have an ID configuration - programmer error!", channelId);
    } else {

        if (channelId.equals(RioConstants.CHANNEL_SOURCEMMINFOTEXT)) {
            value = handleMMInfoText(value);
            if (value == null) {
                return;
            }
        }

        final int id = ai.getAndIncrement();

        final String json = gson.toJson(new IdValue(id, value));
        stateChanged(channelId, new StringType(json));
    }
}

From source file:org.eclipse.wb.tests.designer.core.model.ObjectInfoTest.java

public void test_endEdit_aboutToRefresh() throws Exception {
    final AtomicInteger saveCount = new AtomicInteger();
    final ObjectInfo object = new TestObjectInfo() {
        @Override
        protected void saveEdit() throws Exception {
            saveCount.getAndIncrement();
        }
    };
    //
    final AtomicBoolean wasFired = new AtomicBoolean();
    object.addBroadcastListener(new ObjectEventListener() {
        @Override
        public void endEdit_aboutToRefresh() throws Exception {
            wasFired.set(true);
            // perform inner edit operation
            object.startEdit();
            try {
            } finally {
                object.endEdit();
            }
        }
    });
    // do edit operation
    object.startEdit();
    try {
    } finally {
        object.endEdit();
    }
    // verify, only one saveEdit() should be done
    assertTrue(wasFired.get());
    assertEquals(1, saveCount.get());
}

From source file:spade.utility.BitcoinTools.java

public void writeBlocksToCSV(int startIndex, int endIndex) {
    // Block block, int lastBlockId
    int lastBlockId = -1;
    final BitcoinTools bitcoinTools = new BitcoinTools();

    String pattern = "#.##";
    DecimalFormat decimalFormat = new DecimalFormat(pattern);

    final ConcurrentHashMap<Integer, Block> blockMap = new ConcurrentHashMap<Integer, Block>();
    final AtomicInteger currentBlock = new AtomicInteger(startIndex);
    final int stopIndex = endIndex;
    final int totalThreads = Runtime.getRuntime().availableProcessors();

    class BlockFetcher implements Runnable {

        public void run() {

            while (true) {
                if (blockMap.size() > totalThreads * 5) { // cap the blocks held in memory (roughly 1 MB each) at totalThreads * 5
                    try {
                        Thread.sleep(100);
                        continue;
                    } catch (Exception exception) {
                        // interrupted during backoff; fall through and fetch the next block
                    }
                }

                int blockToFetch = currentBlock.getAndIncrement();
                try {
                    blockMap.put(blockToFetch, bitcoinTools.getBlock(blockToFetch));
                } catch (JSONException exception) {
                    Bitcoin.log(Level.SEVERE, "Block " + blockToFetch + " has invalid json. Redownloading.",
                            exception);
                    try {
                        blockMap.put(blockToFetch, bitcoinTools.getBlock(blockToFetch));
                    } catch (JSONException ex) {
                        Bitcoin.log(Level.SEVERE, "Block " + blockToFetch + " couldn't be included in CSV.",
                                ex);
                    }
                }
                if (blockToFetch >= stopIndex) {
                    break;
                }
            }
        }
    }

    ArrayList<Thread> workers = new ArrayList<Thread>();
    for (int i = 0; i < totalThreads; i++) {
        Thread th = new Thread(new BlockFetcher());
        workers.add(th);
        th.start();
    }

    int percentageCompleted = 0;

    for (int i = startIndex; i < endIndex; i++) {

        try {

            Block block;
            // busy-wait until one of the fetcher threads has delivered block i
            while (!blockMap.containsKey(i)) {

            }
            block = blockMap.get(i);
            blockMap.remove(i);
            lastBlockId = writeBlockToCSV(block, lastBlockId);

            if ((((i - startIndex + 1) * 100) / (endIndex - startIndex)) > percentageCompleted) {
                Runtime rt = Runtime.getRuntime();
                long totalMemory = rt.totalMemory() / 1024 / 1024;
                long freeMemory = rt.freeMemory() / 1024 / 1024;
                long usedMemory = totalMemory - freeMemory;
                System.out.print("| Cores: " + rt.availableProcessors() + " | Threads: " + totalThreads
                        + " | Heap (MB) - total: " + totalMemory + ", %age free: "
                        + (freeMemory * 100) / totalMemory + " | At Block: " + (i - startIndex + 1) + " / "
                        + (endIndex - startIndex) + " | Percentage Completed: " + percentageCompleted
                        // + " |\r");
                        + " |\n");
            }

            percentageCompleted = ((i - startIndex + 1) * 100) / (endIndex - startIndex);

        } catch (IOException ex) {
            Bitcoin.log(Level.SEVERE, "Unexpected IOException. Stopping CSV creation.", ex);
            break;
        }
    }

    for (int i = 0; i < totalThreads; i++) {
        try {
            workers.get(i).interrupt();
            workers.get(i).join();
        } catch (InterruptedException exception) {
        }
    }

    System.out.println("\n\ndone with creating CSVes!");
}

From source file:org.apache.hadoop.hbase.tool.TestLoadIncrementalHFilesSplitRecovery.java

/**
 * Test showing that an exception thrown from the RS side results in the expected number of
 * retries, as set by {@link HConstants#HBASE_CLIENT_RETRIES_NUMBER}, when
 * {@link LoadIncrementalHFiles#RETRY_ON_IO_EXCEPTION} is set.
 */
@Test
public void testRetryOnIOException() throws Exception {
    final TableName table = TableName.valueOf(name.getMethodName());
    final AtomicInteger calls = new AtomicInteger(1);
    final Connection conn = ConnectionFactory.createConnection(util.getConfiguration());
    util.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
    util.getConfiguration().setBoolean(LoadIncrementalHFiles.RETRY_ON_IO_EXCEPTION, true);
    final LoadIncrementalHFiles lih = new LoadIncrementalHFiles(util.getConfiguration()) {
        @Override
        protected List<LoadQueueItem> tryAtomicRegionLoad(ClientServiceCallable<byte[]> serverCallable,
                TableName tableName, final byte[] first, Collection<LoadQueueItem> lqis) throws IOException {
            if (calls.getAndIncrement() < util.getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
                    HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER) - 1) {
                ClientServiceCallable<byte[]> newServerCallable = new ClientServiceCallable<byte[]>(conn,
                        tableName, first, new RpcControllerFactory(util.getConfiguration()).newController(),
                        HConstants.PRIORITY_UNSET) {
                    @Override
                    public byte[] rpcCall() throws Exception {
                        throw new IOException("Error calling something on RegionServer");
                    }
                };
                return super.tryAtomicRegionLoad(newServerCallable, tableName, first, lqis);
            } else {
                return super.tryAtomicRegionLoad(serverCallable, tableName, first, lqis);
            }
        }
    };
    setupTable(conn, table, 10);
    Path dir = buildBulkFiles(table, 1);
    lih.doBulkLoad(dir, conn.getAdmin(), conn.getTable(table), conn.getRegionLocator(table));
    util.getConfiguration().setBoolean(LoadIncrementalHFiles.RETRY_ON_IO_EXCEPTION, false);

}

From source file:org.mitre.dsmiley.httpproxy.ProxyServletTest.java

/**
 * If we're proxying a remote service that tries to set cookies, we need to make sure the cookies are not captured
 * by the HttpClient in the ProxyServlet; otherwise later requests from ALL users would hit the remote service
 * with the same cookie as the first user.
 */
@Test
public void testMultipleRequestsWithDiffCookies() throws Exception {
    final AtomicInteger requestCounter = new AtomicInteger(1);
    final StringBuffer captureCookieValue = new StringBuffer();
    localTestServer.register("/targetPath*", new RequestInfoHandler() {
        public void handle(HttpRequest request, HttpResponse response, HttpContext context)
                throws HttpException, IOException {
            // there shouldn't be a cookie sent since each user request in this test is logging in for the first time
            if (request.getFirstHeader("Cookie") != null) {
                captureCookieValue.append(request.getFirstHeader("Cookie"));
            } else {
                response.setHeader("Set-Cookie",
                        "JSESSIONID=USER_" + requestCounter.getAndIncrement() + "_SESSION");
            }
            super.handle(request, response, context);
        }
    });

    // user one logs in for the first time to a proxied web service
    GetMethodWebRequest req = makeGetMethodRequest(sourceBaseUri);
    WebResponse rsp = execAndAssert(req, "");
    assertEquals("", captureCookieValue.toString());
    assertEquals("USER_1_SESSION",
            sc.getCookieJar().getCookie("!Proxy!" + servletName + "JSESSIONID").getValue());

    // user two logs in for the first time to a proxied web service
    sc.clearContents(); // clear httpunit cookies since we want to login as a different user
    req = makeGetMethodRequest(sourceBaseUri);
    rsp = execAndAssert(req, "");
    assertEquals("", captureCookieValue.toString());
    assertEquals("USER_2_SESSION",
            sc.getCookieJar().getCookie("!Proxy!" + servletName + "JSESSIONID").getValue());
}

From source file:com.streamsets.pipeline.stage.origin.spooldir.TestSpoolDirSource.java

@Test
public void testMultipleFilesSameTimeStamp() throws Exception {
    File f = new File("target", UUID.randomUUID().toString());
    f.mkdir();

    SpoolDirConfigBean conf = new SpoolDirConfigBean();
    conf.dataFormat = DataFormat.DELIMITED;
    conf.useLastModified = FileOrdering.TIMESTAMP;
    conf.spoolDir = f.getAbsolutePath();
    conf.batchSize = 10;
    conf.overrunLimit = 100;
    conf.poolingTimeoutSecs = 1;
    conf.filePattern = "*";
    conf.pathMatcherMode = PathMatcherMode.GLOB;
    conf.maxSpoolFiles = 10;
    conf.dataFormatConfig.compression = Compression.NONE;
    conf.dataFormatConfig.filePatternInArchive = "*";
    conf.dataFormatConfig.csvHeader = CsvHeader.WITH_HEADER;
    conf.errorArchiveDir = null;
    conf.postProcessing = PostProcessingOptions.NONE;
    conf.retentionTimeMins = 10;
    conf.allowLateDirectory = false;
    conf.dataFormatConfig.textMaxLineLen = 10;
    conf.dataFormatConfig.onParseError = OnParseError.ERROR;
    conf.dataFormatConfig.maxStackTraceLines = 0;
    long timestamp = System.currentTimeMillis() - 100000;

    for (int i = 0; i < 8; i++) {
        File current = new File(conf.spoolDir, Utils.format("file-{}.log", i));
        try (FileOutputStream outputStream = new FileOutputStream(current)) {
            IOUtils.writeLines(ImmutableList.of("A,B", Utils.format("a-{},b-{}", i, i), "a,b"), "\n",
                    outputStream);
        }
        Assert.assertTrue(current.setLastModified(timestamp));
    }

    // sleep so the new file gets a later timestamp; there's no way to set ctime (the change timestamp) explicitly
    Thread.sleep(5000L);

    File current = new File(conf.spoolDir, "a.log");
    try (FileOutputStream outputStream = new FileOutputStream(current)) {
        IOUtils.writeLines(ImmutableList.of("A,B", "Gollum,Sauron", "Aragorn,Boromir"), "\n", outputStream);
    }
    Assert.assertTrue(current.setLastModified(System.currentTimeMillis()));

    SpoolDirSource source = new SpoolDirSource(conf);
    PushSourceRunner runner = new PushSourceRunner.Builder(SpoolDirDSource.class, source)
            .setOnRecordError(OnRecordError.TO_ERROR).addOutputLane("lane").build();

    AtomicInteger batchCount = new AtomicInteger(0);
    runner.runInit();

    Assert.assertEquals(0, runner.getErrors().size());

    try {
        runner.runProduce(new HashMap<>(), 10, output -> {
            int i = batchCount.getAndIncrement();

            if (i < 8) {
                List<Record> records = output.getRecords().get("lane");
                Assert.assertNotNull(records);
                Assert.assertTrue(!records.isEmpty());
                Assert.assertEquals(2, records.size());

                Assert.assertEquals(Utils.format("file-{}.log", i),
                        records.get(0).getHeader().getAttribute(HeaderAttributeConstants.FILE_NAME));

                try {
                    Assert.assertEquals(
                            String.valueOf(Files
                                    .getLastModifiedTime(
                                            Paths.get(f.getAbsolutePath(), Utils.format("file-{}.log", i)))
                                    .toMillis()),
                            records.get(0).getHeader()
                                    .getAttribute(HeaderAttributeConstants.LAST_MODIFIED_TIME));
                } catch (IOException ex) {
                    Assert.fail(ex.toString());
                }
                Assert.assertEquals("a-" + i, records.get(0).get("/A").getValueAsString());
                Assert.assertEquals("b-" + i, records.get(0).get("/B").getValueAsString());

                Assert.assertEquals("a", records.get(1).get("/A").getValueAsString());
                Assert.assertEquals("b", records.get(1).get("/B").getValueAsString());

                // And error record
                List<Record> errorRecords = runner.getErrorRecords();
                Assert.assertEquals(0, errorRecords.size());

            } else if (i < 9) {
                List<Record> records = output.getRecords().get("lane");
                Assert.assertNotNull(records);
                Assert.assertTrue(!records.isEmpty());
                Assert.assertEquals(2, records.size());

                Assert.assertEquals("a.log",
                        records.get(0).getHeader().getAttribute(HeaderAttributeConstants.FILE_NAME));
                Assert.assertEquals("Gollum", records.get(0).get("/A").getValueAsString());
                Assert.assertEquals("Sauron", records.get(0).get("/B").getValueAsString());

                Assert.assertEquals("Aragorn", records.get(1).get("/A").getValueAsString());
                Assert.assertEquals("Boromir", records.get(1).get("/B").getValueAsString());
            } else if (i < 10) {
                List<Record> records = output.getRecords().get("lane");
                Assert.assertTrue(records.isEmpty());

                // And error record
                records = runner.getErrorRecords();
                Assert.assertEquals(0, records.size());
            } else if (i < 11) {
                // And a bunch of event records...
                // new-file event, finished-file event for each file.
                // file-0.log through file-7.log and a.log  (9 files)
                // two no-more-data events.
                Assert.assertEquals(20, runner.getEventRecords().size());
                Map<String, Integer> map = new HashMap<>();
                for (EventRecord rec : runner.getEventRecords()) {
                    if (map.get(rec.getEventType()) != null) {
                        map.put(rec.getEventType(), map.get(rec.getEventType()) + 1);
                    } else {
                        map.put(rec.getEventType(), 1);
                    }
                }

                Assert.assertNotNull(map.get("new-file"));
                Assert.assertNotNull(map.get("finished-file"));
                Assert.assertNotNull(map.get("no-more-data"));

                int numEvents = map.get("new-file");
                Assert.assertEquals(9, numEvents);

                numEvents = map.get("finished-file");
                Assert.assertEquals(9, numEvents);

                numEvents = map.get("no-more-data");
                Assert.assertEquals(2, numEvents);
            } else {
                runner.setStop();
            }
        });

        runner.waitOnProduce();

        Assert.assertEquals(12, batchCount.get());

    } finally {
        runner.runDestroy();
    }
}

From source file:org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryParsing.java

@Test
public void testMultipleFailedTasks() throws Exception {
    JobHistoryParser parser = new JobHistoryParser(Mockito.mock(FSDataInputStream.class));
    EventReader reader = Mockito.mock(EventReader.class);
    final AtomicInteger numEventsRead = new AtomicInteger(0); // Hack!
    final org.apache.hadoop.mapreduce.TaskType taskType = org.apache.hadoop.mapreduce.TaskType.MAP;
    final TaskID[] tids = new TaskID[2];
    final JobID jid = new JobID("1", 1);
    tids[0] = new TaskID(jid, taskType, 0);
    tids[1] = new TaskID(jid, taskType, 1);
    Mockito.when(reader.getNextEvent()).thenAnswer(new Answer<HistoryEvent>() {
        public HistoryEvent answer(InvocationOnMock invocation) throws IOException {
            // send two task start and two task fail events for tasks 0 and 1
            int eventId = numEventsRead.getAndIncrement();
            TaskID tid = tids[eventId & 0x1];
            if (eventId < 2) {
                return new TaskStartedEvent(tid, 0, taskType, "");
            }
            if (eventId < 4) {
                TaskFailedEvent tfe = new TaskFailedEvent(tid, 0, taskType, "failed", "FAILED", null,
                        new Counters());
                tfe.setDatum(tfe.getDatum());
                return tfe;
            }
            if (eventId < 5) {
                JobUnsuccessfulCompletionEvent juce = new JobUnsuccessfulCompletionEvent(jid, 100L, 2, 0,
                        "JOB_FAILED", Collections.singletonList("Task failed: " + tids[0].toString()));
                return juce;
            }
            return null;
        }
    });
    JobInfo info = parser.parse(reader);
    assertTrue("Task 0 not implicated", info.getErrorInfo().contains(tids[0].toString()));
}

From source file:de.fosd.jdime.matcher.cost_model.CostModelMatcher.java

/**
 * Sets the bounds ({@link CMMatching#setCostBounds(Bounds)}) for the cost of all current matchings.
 *
 * @param currentMatchings
 *         the current <code>CMMatching</code>s being considered
 * @param parameters
 *         the <code>CMParameters</code> to use
 */
private void boundCost(CMMatchings<T> currentMatchings, CMParameters<T> parameters) {
    LOG.finer(() -> "Bounding " + currentMatchings.size() + " matchings.");

    AtomicInteger mCount = LOG.isLoggable(FINEST) ? new AtomicInteger() : null;
    Consumer<CMMatching<T>> mPeek = m -> LOG
            .finest(() -> "Done with matching " + mCount.getAndIncrement() + " " + m);

    if (parameters.parallel) {
        currentMatchings.parallelStream().peek(mPeek).forEach(m -> boundCost(m, currentMatchings, parameters));
    } else {
        currentMatchings.stream().peek(mPeek).forEach(m -> boundCost(m, currentMatchings, parameters));
    }

    parameters.clearBoundCaches();
}

From source file:org.batoo.jpa.benchmark.BenchmarkTest.java

private ThreadPoolExecutor createExecutor(BlockingQueue<Runnable> workQueue) {
    final AtomicInteger nextThreadNo = new AtomicInteger(0);

    final ThreadPoolExecutor executor = new ThreadPoolExecutor(//
            BenchmarkTest.THREAD_COUNT, BenchmarkTest.THREAD_COUNT, // min max threads
            0L, TimeUnit.MILLISECONDS, // the keep alive time - hold it forever
            workQueue, new ThreadFactory() {

                @Override
                public Thread newThread(Runnable r) {
                    final Thread t = new Thread(r);
                    t.setDaemon(true);
                    t.setPriority(Thread.NORM_PRIORITY);
                    t.setName("Benchmark-" + nextThreadNo.get());

                    BenchmarkTest.this.threadIds[nextThreadNo.getAndIncrement()] = t.getId();

                    return t;
                }
            });

    executor.prestartAllCoreThreads();

    return executor;
}

From source file:lenscorrection.Distortion_Correction.java

protected String correctImages() {
    if (!sp.applyCorrection) {
        sp.target_dir = System.getProperty("user.dir").replace('\\', '/') + "/distCorr_tmp/";
        System.out.println("Tmp target directory: " + sp.target_dir);

        if (new File(sp.target_dir).exists()) {
            System.out.println("removing old tmp directory!");

            final String[] filesToDelete = new File(sp.target_dir).list();
            for (int i = 0; i < filesToDelete.length; i++) {
                System.out.println(filesToDelete[i]);
                final boolean deleted = new File(sp.target_dir + filesToDelete[i]).delete();
                if (!deleted)
                    IJ.log("Error: Could not remove temporary directory!");
            }
            new File(sp.target_dir).delete();
        }
        try {
            // Create one directory
            final boolean success = (new File(sp.target_dir)).mkdir();
            if (success)
                new File(sp.target_dir).deleteOnExit();
        } catch (final Exception e) {
            IJ.showMessage("Error! Could not create temporary directory. " + e.getMessage());
        }
    }
    if (sp.target_dir == null || sp.target_dir.isEmpty()) {
        final DirectoryChooser dc = new DirectoryChooser("Target Directory");
        sp.target_dir = dc.getDirectory();
        if (null == sp.target_dir)
            return null;
        sp.target_dir = sp.target_dir.replace('\\', '/');
        if (!sp.target_dir.endsWith("/"))
            sp.target_dir += "/";
    }

    final String[] namesTarget = new File(sp.target_dir).list(new FilenameFilter() {
        @Override
        public boolean accept(final File dir, final String namesTarget) {
            final int idot = namesTarget.lastIndexOf('.');
            if (-1 == idot)
                return false;
            return namesTarget.contains(namesTarget.substring(idot).toLowerCase());
        }
    });

    if (namesTarget.length > 0)
        IJ.showMessage("Overwrite Message",
                "There  are already images in that directory. These will be used for evaluation.");
    else {

        IJ.showStatus("Correcting Images");

        final Thread[] threads = MultiThreading.newThreads();
        final AtomicInteger ai = new AtomicInteger(sp.applyCorrection ? 0 : sp.firstImageIndex);

        for (int ithread = 0; ithread < threads.length; ++ithread) {
            threads[ithread] = new Thread() {
                @Override
                public void run() {
                    setPriority(Thread.NORM_PRIORITY);

                    for (int i = ai.getAndIncrement(); i < (sp.applyCorrection ? sp.names.length
                            : (sp.firstImageIndex + sp.numberOfImages)); i = ai.getAndIncrement()) {
                        IJ.log("Correcting image " + sp.names[i]);
                        final ImagePlus imps = new Opener().openImage(sp.source_dir + sp.names[i]);
                        imps.setProcessor(imps.getTitle(), imps.getProcessor().convertToShort(false));
                        final ImageProcessor[] transErg = nlt.transform(imps.getProcessor());
                        imps.setProcessor(imps.getTitle(), transErg[0]);
                        if (!sp.applyCorrection)
                            new File(sp.target_dir + sp.names[i]).deleteOnExit();
                        new FileSaver(imps).saveAsTiff(sp.target_dir + sp.names[i]);
                    }
                }
            };
        }
        MultiThreading.startAndJoin(threads);
    }
    return sp.target_dir;
}