List of usage examples for java.util.concurrent.atomic AtomicInteger incrementAndGet
public final int incrementAndGet()
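
incrementAndGet() atomically adds one to the current value and returns the updated value (getAndIncrement() is the companion method that returns the previous value instead). Before the real-world examples below, here is a minimal, self-contained sketch of the basic semantics, assuming nothing beyond the JDK:

import java.util.concurrent.atomic.AtomicInteger;

public class IncrementAndGetDemo {
    public static void main(String[] args) throws InterruptedException {
        AtomicInteger counter = new AtomicInteger(0);

        // incrementAndGet() returns the value *after* the increment.
        int first = counter.incrementAndGet();   // 1
        int second = counter.incrementAndGet();  // 2
        System.out.println(first + ", " + second + ", current=" + counter.get());

        // The increment is atomic, so concurrent callers never lose updates.
        Thread[] threads = new Thread[4];
        for (int i = 0; i < threads.length; i++) {
            threads[i] = new Thread(() -> {
                for (int j = 0; j < 1_000; j++) {
                    counter.incrementAndGet();
                }
            });
            threads[i].start();
        }
        for (Thread t : threads) {
            t.join();
        }
        System.out.println("final=" + counter.get()); // 2 + 4 * 1000 = 4002
    }
}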
From source file:com.google.cloud.bigtable.hbase.TestBatchExecutor.java
@Test
public void testBatchBulkGets() throws Exception {
    // Test 10 gets, but return only 9 to test the row not found case.
    final List<Get> gets = new ArrayList<>(10);
    gets.add(new Get(Bytes.toBytes("key0")));
    for (int i = 1; i < 10; i++) {
        byte[] row_key = randomBytes(8);
        gets.add(new Get(row_key));
    }
    ResultScanner<Row> mockScanner = Mockito.mock(ResultScanner.class);
    when(mockClient.readRows(any(ReadRowsRequest.class))).thenReturn(mockScanner);
    final AtomicInteger counter = new AtomicInteger();
    when(mockScanner.next()).then(new Answer<Row>() {
        @Override
        public Row answer(InvocationOnMock invocation) throws Throwable {
            int current = counter.incrementAndGet();
            if (current == 10) {
                return null;
            }
            ByteString key = BigtableZeroCopyByteStringUtil.wrap(gets.get(current).getRow());
            ByteString cellValue = ByteString.copyFrom(randomBytes(8));
            com.google.bigtable.v2.Cell cell = Cell.newBuilder()
                    .setTimestampMicros(System.nanoTime() / 1000)
                    .setValue(cellValue)
                    .build();
            Family family = Family.newBuilder()
                    .setName("family")
                    .addColumns(Column.newBuilder().addCells(cell))
                    .build();
            return Row.newBuilder().setKey(key).addFamilies(family).build();
        }
    });
    BulkOptions bulkOptions = new BulkOptions.Builder().setUseBulkApi(true).build();
    BigtableOptions options = new BigtableOptions.Builder().setBulkOptions(bulkOptions).build();
    BatchExecutor underTest = createExecutor(options);
    Result[] results = underTest.batch(gets);
    verify(mockClient, times(1)).readRows(any(ReadRowsRequest.class));
    Assert.assertTrue(matchesRow(Result.EMPTY_RESULT).matches(results[0]));
    for (int i = 1; i < results.length; i++) {
        Assert.assertTrue(Bytes.equals(results[i].getRow(), gets.get(i).getRow()));
    }
}
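
The test above keeps an AtomicInteger inside a Mockito Answer so each call to the mocked scanner advances the counter and yields a different row, returning null once the rows are exhausted. A stripped-down sketch of the same pattern, assuming only Mockito and a hypothetical RowScanner interface (not part of the example's API):

import java.util.concurrent.atomic.AtomicInteger;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class CountingAnswerSketch {
    // Hypothetical interface standing in for the mocked scanner.
    interface RowScanner {
        String next();
    }

    public static void main(String[] args) {
        RowScanner scanner = mock(RowScanner.class);
        AtomicInteger counter = new AtomicInteger();

        // Each invocation advances the counter; return null after three results.
        when(scanner.next()).thenAnswer(invocation -> {
            int current = counter.incrementAndGet();
            return current > 3 ? null : "row-" + current;
        });

        String row;
        while ((row = scanner.next()) != null) {
            System.out.println(row); // row-1, row-2, row-3
        }
    }
}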
From source file:org.apache.hadoop.gateway.rm.dispatch.RMHaBaseDispatcher.java
private void failoverRequest(HttpUriRequest outboundRequest, HttpServletRequest inboundRequest,
        HttpServletResponse outboundResponse, HttpResponse inboundResponse, Exception exception)
        throws IOException {
    LOG.failingOverRequest(outboundRequest.getURI().toString());
    URI uri;
    String outboundURIs;
    AtomicInteger counter = (AtomicInteger) inboundRequest.getAttribute(FAILOVER_COUNTER_ATTRIBUTE);
    if (counter == null) {
        counter = new AtomicInteger(0);
    }
    inboundRequest.setAttribute(FAILOVER_COUNTER_ATTRIBUTE, counter);
    outboundURIs = outboundRequest.getURI().toString();
    if (counter.incrementAndGet() <= maxFailoverAttempts) {
        // null out target url so that rewriters run again
        inboundRequest.setAttribute(AbstractGatewayFilter.TARGET_REQUEST_URL_ATTRIBUTE_NAME, null);
        uri = getUriFromInbound(inboundRequest, inboundResponse, outboundURIs);
        ((HttpRequestBase) outboundRequest).setURI(uri);
        if (failoverSleep > 0) {
            try {
                Thread.sleep(failoverSleep);
            } catch (InterruptedException e) {
                LOG.failoverSleepFailed(this.resourceRole, e);
            }
        }
        executeRequest(outboundRequest, inboundRequest, outboundResponse);
    } else {
        LOG.maxFailoverAttemptsReached(maxFailoverAttempts, this.resourceRole);
        if (inboundResponse != null) {
            writeOutboundResponse(outboundRequest, inboundRequest, outboundResponse, inboundResponse);
        } else {
            throw new IOException(exception);
        }
    }
}
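
The dispatcher above stores the counter as a request attribute so repeated failovers of the same inbound request share it, then compares incrementAndGet() against a maximum to decide between retrying and giving up; the comparison and the increment happen in one atomic step. A stripped-down sketch of that check-and-retry pattern, with MAX_ATTEMPTS and the retry method as illustrative placeholders rather than anything from the gateway API:

import java.util.concurrent.atomic.AtomicInteger;

public class RetryCounterSketch {
    private static final int MAX_ATTEMPTS = 3;

    public static void main(String[] args) {
        AtomicInteger attempts = new AtomicInteger(0);
        retry(attempts);
    }

    static void retry(AtomicInteger attempts) {
        // incrementAndGet() records this attempt and returns the new total.
        if (attempts.incrementAndGet() <= MAX_ATTEMPTS) {
            System.out.println("attempt " + attempts.get());
            retry(attempts); // simulate another failure and try again
        } else {
            System.out.println("giving up after " + MAX_ATTEMPTS + " attempts");
        }
    }
}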
From source file:org.apache.hadoop.raid.RaidHistogram.java
public synchronized void put(String path, long value, String taskId) {
    Point p;
    int last = windowNum - 1;
    if (value == RECOVERY_FAIL) {
        p = new Point(value, path, System.currentTimeMillis(), taskId);
        AtomicInteger counter = failedRecoveredFiles.get(path);
        if (counter == null) {
            counter = new AtomicInteger(0);
            failedRecoveredFiles.put(path, counter);
        }
        if (counter.incrementAndGet() == 1) {
            totalFailedPaths.get(last).incrementAndGet();
        }
    } else {
        value /= dividend;
        p = new Point(value, path, System.currentTimeMillis(), taskId);
        CounterArray counters = histo.get(value);
        if (counters == null) {
            counters = new CounterArray(windowNum);
            histo.put(value, counters);
        }
        counters.incrementAndGet(last);
        totalPoints.incrementAndGet(last);
    }
    points.add(p);
    InjectionHandler.processEvent(InjectionEvent.RAID_SEND_RECOVERY_TIME, this, path, value, taskId);
}
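
Note the check counter.incrementAndGet() == 1 above: only the first failure recorded for a path returns 1, so the distinct-failed-paths total is bumped exactly once per path while the per-path counter keeps growing. A small sketch of that first-occurrence pattern, using a ConcurrentHashMap as an assumed stand-in for the class's own map:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;

public class FirstFailureSketch {
    private final ConcurrentMap<String, AtomicInteger> failuresPerPath = new ConcurrentHashMap<>();
    private final AtomicInteger distinctFailedPaths = new AtomicInteger();

    void recordFailure(String path) {
        AtomicInteger counter = failuresPerPath.computeIfAbsent(path, k -> new AtomicInteger(0));
        // Only the very first increment for a path returns 1,
        // so the distinct-path counter is bumped exactly once per path.
        if (counter.incrementAndGet() == 1) {
            distinctFailedPaths.incrementAndGet();
        }
    }

    public static void main(String[] args) {
        FirstFailureSketch sketch = new FirstFailureSketch();
        sketch.recordFailure("/a");
        sketch.recordFailure("/a");
        sketch.recordFailure("/b");
        System.out.println(sketch.distinctFailedPaths.get()); // 2
    }
}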
From source file:com.cronutils.model.time.TimeNode.java
/**
 * We return the same reference value if it matches, or the next one if it does not match.
 * Then we start applying shifts.
 * This way we ensure the same value is returned if no shift is requested.
 * @param reference - reference value
 * @param shiftsToApply - shifts to apply
 * @return NearestValue instance, never null. Holds information on the nearest (forward) value and shifts performed.
 */
@VisibleForTesting
NearestValue getNearestForwardValue(int reference, int shiftsToApply) {
    List<Integer> values = new ArrayList<Integer>(this.values);
    int index = 0;
    boolean foundGreater = false;
    AtomicInteger shift = new AtomicInteger(0);
    if (!values.contains(reference)) {
        for (Integer value : values) {
            if (value > reference) {
                index = values.indexOf(value);
                shiftsToApply--; // we just moved a position!
                foundGreater = true;
                break;
            }
        }
        if (!foundGreater) {
            shift.incrementAndGet();
        }
    } else {
        index = values.indexOf(reference);
    }
    int value = values.get(index);
    for (int j = 0; j < shiftsToApply; j++) {
        value = getValueFromList(values, index + 1, shift);
        index = values.indexOf(value);
    }
    return new NearestValue(value, shift.get());
}
From source file:com.cronutils.model.time.TimeNode.java
/**
 * We return the same reference value if it matches, or the previous one if it does not match.
 * Then we start applying shifts.
 * This way we ensure the same value is returned if no shift is requested.
 * @param reference - reference value
 * @param shiftsToApply - shifts to apply
 * @return NearestValue instance, never null. Holds information on the nearest (backward) value and shifts performed.
 */
@VisibleForTesting
NearestValue getNearestBackwardValue(int reference, int shiftsToApply) {
    List<Integer> values = new ArrayList<Integer>(this.values);
    Collections.reverse(values);
    int index = 0;
    boolean foundSmaller = false;
    AtomicInteger shift = new AtomicInteger(0);
    if (!values.contains(reference)) {
        for (Integer value : values) {
            if (value < reference) {
                index = values.indexOf(value);
                shiftsToApply--; // we just moved a position!
                foundSmaller = true;
                break;
            }
        }
        if (!foundSmaller) {
            shift.incrementAndGet();
        }
    } else {
        index = values.indexOf(reference);
    }
    int value = values.get(index);
    for (int j = 0; j < shiftsToApply; j++) {
        value = getValueFromList(values, index + 1, shift);
        index = values.indexOf(value);
    }
    return new NearestValue(value, shift.get());
}
From source file:org.apache.sling.distribution.servlet.DistributionPackageExporterServlet.java
private void exportOnePackage(final SlingHttpServletRequest request, final SlingHttpServletResponse response,
        final boolean delete) throws ServletException, IOException {
    DistributionPackageExporter distributionPackageExporter = request.getResource()
            .adaptTo(DistributionPackageExporter.class);
    final long start = System.currentTimeMillis();
    response.setContentType(ContentType.APPLICATION_OCTET_STREAM.toString());
    DistributionRequest distributionRequest = RequestUtils.fromServletRequest(request);
    ResourceResolver resourceResolver = request.getResourceResolver();
    final AtomicInteger fetched = new AtomicInteger(0);
    try {
        // get all items
        distributionPackageExporter.exportPackages(resourceResolver, distributionRequest,
                new DistributionPackageProcessor() {
                    @Override
                    public void process(DistributionPackage distributionPackage) {
                        fetched.incrementAndGet();
                        InputStream inputStream = null;
                        int bytesCopied = -1;
                        try {
                            inputStream = DistributionPackageUtils.createStreamWithHeader(distributionPackage);
                            bytesCopied = IOUtils.copy(inputStream, response.getOutputStream());
                        } catch (IOException e) {
                            log.error("cannot process package", e);
                        } finally {
                            IOUtils.closeQuietly(inputStream);
                        }
                        String packageId = distributionPackage.getId();
                        if (delete) {
                            // delete the package permanently
                            distributionPackage.delete();
                        }
                        // everything ok
                        response.setStatus(200);
                        log.debug("exported package {} was sent (and deleted={}), bytes written {}",
                                new Object[] { packageId, delete, bytesCopied });
                    }
                });
        if (fetched.get() > 0) {
            long end = System.currentTimeMillis();
            log.info("Processed distribution export request in {} ms: : fetched {}",
                    new Object[] { end - start, fetched });
        } else {
            response.setStatus(204);
            log.debug("nothing to fetch");
        }
    } catch (Throwable e) {
        response.setStatus(503);
        log.error("error while exporting package", e);
    }
}
From source file:jurls.core.becca.DefaultZiptie.java
public double getRowGeneralizedMean(RealMatrix c, Function<Integer, Double> rowEntryMultiplier,
        double exponent, int rowStart, int rowEnd, int column) {
    AtomicDouble s = new AtomicDouble(0);
    AtomicInteger n = new AtomicInteger(0);
    c.walkInOptimizedOrder(new DefaultRealMatrixPreservingVisitor() {
        @Override
        public void visit(int row, int column, double value) {
            double a = Math.pow(value, exponent);
            double b = rowEntryMultiplier.apply(row);
            s.addAndGet(a * b);
            n.incrementAndGet();
        }
    }, rowStart, rowEnd, column, column);
    return (1.0 / n.doubleValue()) * Math.pow(s.doubleValue(), 1.0 / exponent);
}
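
Because the matrix visitor callback cannot assign to local variables of the enclosing method, the example above uses AtomicInteger (and Guava's AtomicDouble) as mutable accumulators, then divides by n at the end. A compact sketch of the same accumulator idiom over a plain array, with the JDK's DoubleAdder assumed as a stand-in for AtomicDouble and no Commons Math visitor involved:

import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.DoubleAdder;
import java.util.stream.DoubleStream;

public class AccumulatorSketch {
    public static void main(String[] args) {
        double[] values = { 1.0, 2.0, 3.0, 4.0 };
        double exponent = 2.0;

        // Effectively-final holders that a lambda or anonymous callback may update.
        DoubleAdder sum = new DoubleAdder();
        AtomicInteger n = new AtomicInteger();

        DoubleStream.of(values).forEach(v -> {
            sum.add(Math.pow(v, exponent));
            n.incrementAndGet();
        });

        // Same shape as the return expression in the example above.
        double mean = (1.0 / n.doubleValue()) * Math.pow(sum.doubleValue(), 1.0 / exponent);
        System.out.println(mean);
    }
}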
From source file:org.apache.blur.shell.QueryCommand.java
private void count(Column column, AtomicInteger columnCount, AtomicInteger columnSize) {
    columnCount.incrementAndGet();
    String name = column.getName();
    String value = column.getValue();
    columnSize.addAndGet(name.length() * 2);
    columnSize.addAndGet(value.length() * 2);
}
From source file:com.squarespace.template.HardSoftCodeLimiterTest.java
@Test
public void testBothLimits() throws CodeException {
    final AtomicInteger softCount = new AtomicInteger();
    final AtomicInteger hardCount = new AtomicInteger();
    HardSoftCodeLimiter.Handler handler = new HardSoftCodeLimiter.Handler() {
        @Override
        public void onLimit(Limit limit, HardSoftCodeLimiter limiter) throws CodeExecuteException {
            if (limit.equals(Limit.SOFT)) {
                softCount.incrementAndGet();
            }
            if (limit.equals(Limit.HARD)) {
                hardCount.incrementAndGet();
            }
        }
    };
    CodeLimiter codeLimiter = HardSoftCodeLimiter.builder()
            .setSoftLimit(5).setHardLimit(10).setResolution(1)
            .setHandler(handler).build();
    compiler().newExecutor()
            .template("{.repeated section @}{.even?}{@}{.or}#{.end}{.end}")
            .json("[0,1,2,3,4,5,6,7,8,9]")
            .codeLimiter(codeLimiter)
            .execute();
    assertEquals(softCount.get(), 1);
    assertEquals(hardCount.get(), 1);
}
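
The test above counts how often each limit handler fires and then asserts the exact counts, a common way to verify that a callback was invoked the expected number of times. A generic sketch of that assertion pattern, using JUnit and a hypothetical Listener interface rather than the HardSoftCodeLimiter API:

import java.util.concurrent.atomic.AtomicInteger;
import org.junit.Test;
import static org.junit.Assert.assertEquals;

public class CallbackCountTest {
    // Hypothetical callback interface for the sketch.
    interface Listener {
        void onEvent();
    }

    // Fires the listener once per item processed.
    static void process(int items, Listener listener) {
        for (int i = 0; i < items; i++) {
            listener.onEvent();
        }
    }

    @Test
    public void listenerIsInvokedOncePerItem() {
        AtomicInteger invocations = new AtomicInteger();

        // The counter captures how many times the callback actually ran.
        process(10, invocations::incrementAndGet);

        assertEquals(10, invocations.get());
    }
}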
From source file:com.asakusafw.lang.compiler.cli.BatchCompilerCliTest.java
/**
 * minimal args.
 * @throws Exception if failed
 */
@Test
public void execute_minimal() throws Exception {
    File output = deployer.newFolder();
    String[] args = strings(new Object[] {
            "--explore", files(ResourceUtil.findLibraryByClass(DummyBatch.class)),
            "--output", output,
            "--classAnalyzer", classes(DummyClassAnalyzer.class),
            "--batchCompiler", classes(DelegateBatchCompiler.class),
            "--include", classes(DummyBatch.class),
            "--externalPortProcessors", classes(DummyExternalPortProcessor.class),
    });
    AtomicInteger count = new AtomicInteger();
    int status = execute(args, (context, batch) -> {
        count.incrementAndGet();
        assertThat(batch.getBatchId(), is("DummyBatch"));
        assertThat(batch.getDescriptionClass(), is(classOf(DummyBatch.class)));
        assertThat(context.getOutput().getBasePath(), is(new File(output, batch.getBatchId())));
    });
    assertThat(status, is(0));
    assertThat(count.get(), is(1));
}