List of usage examples for java.util.concurrent.atomic.AtomicInteger.get()
public final int get()
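Gets the current value. Before the project-specific examples below, here is a minimal self-contained sketch (plain JDK only; the class name AtomicIntegerGetDemo is made up for illustration) of the typical pattern: several threads update the counter with incrementAndGet(), and get() then reads the latest value with volatile visibility.

import java.util.concurrent.atomic.AtomicInteger;

public class AtomicIntegerGetDemo {
    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger counter = new AtomicInteger(); // starts at 0

        // Ten threads each increment the counter once.
        Thread[] workers = new Thread[10];
        for (int i = 0; i < workers.length; i++) {
            workers[i] = new Thread(() -> counter.incrementAndGet());
            workers[i].start();
        }
        for (Thread t : workers) {
            t.join();
        }

        // get() reads the current value with volatile semantics,
        // so all ten increments are visible here.
        System.out.println(counter.get()); // prints 10
    }
}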
From source file:com.netflix.curator.framework.imps.TestCompression.java
@Test
public void testCompressionProvider() throws Exception {
    final byte[] data = "here's a string".getBytes();

    final AtomicInteger compressCounter = new AtomicInteger();
    final AtomicInteger decompressCounter = new AtomicInteger();

    CompressionProvider compressionProvider = new CompressionProvider() {
        @Override
        public byte[] compress(String path, byte[] data) throws Exception {
            compressCounter.incrementAndGet();
            byte[] bytes = new byte[data.length * 2];
            System.arraycopy(data, 0, bytes, 0, data.length);
            System.arraycopy(data, 0, bytes, data.length, data.length);
            return bytes;
        }

        @Override
        public byte[] decompress(String path, byte[] compressedData) throws Exception {
            decompressCounter.incrementAndGet();
            byte[] bytes = new byte[compressedData.length / 2];
            System.arraycopy(compressedData, 0, bytes, 0, bytes.length);
            return bytes;
        }
    };

    CuratorFramework client = CuratorFrameworkFactory.builder()
            .compressionProvider(compressionProvider)
            .connectString(server.getConnectString())
            .retryPolicy(new RetryOneTime(1))
            .build();
    try {
        client.start();
        client.create().compressed().creatingParentsIfNeeded().forPath("/a/b/c", data);

        Assert.assertNotEquals(data, client.getData().forPath("/a/b/c"));
        Assert.assertEquals(data.length, client.getData().decompressed().forPath("/a/b/c").length);
    } finally {
        IOUtils.closeQuietly(client);
    }

    Assert.assertEquals(compressCounter.get(), 1);
    Assert.assertEquals(decompressCounter.get(), 1);
}
From source file:com.netflix.spinnaker.igor.gitlabci.GitlabCiBuildMonitor.java
@Override
protected BuildPollingDelta generateDelta(PollContext ctx) {
    final String master = ctx.partitionName;
    log.info("Checking for new builds for {}", kv("master", master));

    final AtomicInteger updatedBuilds = new AtomicInteger();
    final GitlabCiService gitlabCiService = (GitlabCiService) buildMasters.getMap().get(master);
    long startTime = System.currentTimeMillis();

    final List<Project> projects = gitlabCiService.getProjects();
    log.info("Took {} ms to retrieve {} repositories (master: {})",
            System.currentTimeMillis() - startTime, projects.size(), kv("master", master));

    List<BuildDelta> delta = new ArrayList<>();
    projects.parallelStream().forEach(project -> {
        List<Pipeline> pipelines = filterOldPipelines(
                gitlabCiService.getPipelines(project, MAX_NUMBER_OF_PIPELINES));
        for (Pipeline pipeline : pipelines) {
            String branchedRepoSlug = GitlabCiPipelineUtis.getBranchedPipelineSlug(project, pipeline);
            boolean isPipelineRunning = GitlabCiResultConverter.running(pipeline.getStatus());
            int cachedBuildId = buildCache.getLastBuild(master, branchedRepoSlug, isPipelineRunning);
            // In case of Gitlab CI the pipeline ids are increasing so we can use it for ordering
            if (pipeline.getId() > cachedBuildId) {
                updatedBuilds.incrementAndGet();
                delta.add(new BuildDelta(branchedRepoSlug, project, pipeline, isPipelineRunning));
            }
        }
    });

    if (!delta.isEmpty()) {
        log.info("Found {} new builds (master: {})", updatedBuilds.get(), kv("master", master));
    }

    return new BuildPollingDelta(delta, master, startTime);
}
From source file:org.eclipse.hono.deviceregistry.FileBasedCredentialsService.java
Future<Void> saveToFile() {
    if (!getConfig().isSaveToFile()) {
        return Future.succeededFuture();
    } else if (dirty) {
        return checkFileExists(true).compose(s -> {
            final AtomicInteger idCount = new AtomicInteger();
            final JsonArray tenants = new JsonArray();
            for (final Entry<String, Map<String, JsonArray>> entry : credentials.entrySet()) {
                final JsonArray credentialsArray = new JsonArray();
                for (final JsonArray singleAuthIdCredentials : entry.getValue().values()) {
                    credentialsArray.addAll(singleAuthIdCredentials.copy());
                    idCount.incrementAndGet();
                }
                tenants.add(new JsonObject()
                        .put(FIELD_TENANT, entry.getKey())
                        .put(ARRAY_CREDENTIALS, credentialsArray));
            }
            final Future<Void> writeHandler = Future.future();
            vertx.fileSystem().writeFile(getConfig().getFilename(),
                    Buffer.buffer(tenants.encodePrettily(), StandardCharsets.UTF_8.name()),
                    writeHandler.completer());
            return writeHandler.map(ok -> {
                dirty = false;
                log.trace("successfully wrote {} credentials to file {}",
                        idCount.get(), getConfig().getFilename());
                return (Void) null;
            }).otherwise(t -> {
                log.warn("could not write credentials to file {}", getConfig().getFilename(), t);
                return (Void) null;
            });
        });
    } else {
        log.trace("credentials registry does not need to be persisted");
        return Future.succeededFuture();
    }
}
From source file:org.duracloud.snapshot.service.impl.SpaceItemWriterTest.java
/**
 * Distributes the items into sublists and runs them in separate threads.
 *
 * @param items
 * @param threads
 * @throws IOException
 */
private void writeItems(List<ContentItem> items, int threads) throws IOException {
    int itemCount = items.size();
    final CountDownLatch countdownLatch = new CountDownLatch(threads);
    int bottomIndex = 0;
    int itemsPerThread = itemCount / threads;
    int remainder = itemCount % threads;
    int thread = 0;
    final AtomicInteger processed = new AtomicInteger(0);

    while (bottomIndex < itemCount) {
        thread++;
        final int fromIndex = bottomIndex;
        int topIndex = fromIndex + itemsPerThread;
        if (thread == threads) {
            topIndex += remainder;
        }
        final int toIndex = topIndex;
        final List<ContentItem> contents = items.subList(fromIndex, toIndex);

        new Thread(new Runnable() {
            /* (non-Javadoc)
             * @see java.lang.Runnable#run()
             */
            @Override
            public void run() {
                try {
                    writer.beforeWrite(contents);
                    writer.write(contents);
                    writer.afterWrite(contents);
                    processed.addAndGet(contents.size());
                    countdownLatch.countDown();
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            }
        }).start();

        bottomIndex = topIndex;
    }

    try {
        assertTrue(countdownLatch.await(20, TimeUnit.SECONDS));
    } catch (InterruptedException e) {
        e.printStackTrace();
    }

    assertEquals(items.size(), processed.get());
}
From source file:com.github.gfx.android.orma.example.fragment.BenchmarkFragment.java
Single<Result> startSelectAllWithHandWritten() {
    return Single.fromCallable(() -> {
        long result = runWithBenchmark(() -> {
            AtomicInteger count = new AtomicInteger();
            Database db = hw.getReadableDatabase();
            Cursor cursor = db.query("todo",
                    new String[] { "id, title, content, done, createdTime" },
                    null, null, null, null, "createdTime ASC", null
                    // whereClause, whereArgs, groupBy, having, orderBy, limit
            );

            if (cursor.moveToFirst()) {
                int titleIndex = cursor.getColumnIndexOrThrow("title");
                int contentIndex = cursor.getColumnIndexOrThrow("content");
                int createdTimeIndex = cursor.getColumnIndexOrThrow("createdTime");
                do {
                    @SuppressWarnings("unused")
                    String title = cursor.getString(titleIndex);
                    @SuppressWarnings("unused")
                    String content = cursor.getString(contentIndex);
                    @SuppressWarnings("unused")
                    Date createdTime = new Date(cursor.getLong(createdTimeIndex));
                    count.incrementAndGet();
                } while (cursor.moveToNext());
            }
            cursor.close();

            long dbCount = longForQuery(db, "SELECT COUNT(*) FROM todo", null);
            if (dbCount != count.get()) {
                throw new AssertionError("unexpected get: " + count.get() + " != " + dbCount);
            }
            Log.d(TAG, "HandWritten/forEachAll count: " + count);
        });
        return new Result("HandWritten/forEachAll", result);
    }).subscribeOn(Schedulers.io()).observeOn(AndroidSchedulers.mainThread());
}
From source file:com.github.brandtg.switchboard.TestMysqlLogServer.java
@Test
public void testMysqlEventListener() throws Exception {
    try (Connection conn = DriverManager.getConnection(jdbc, "root", "")) {
        // Write some rows, so we have binlog entries
        PreparedStatement pstmt = conn.prepareStatement("INSERT INTO simple VALUES(?, ?)");
        for (int i = 0; i < 10; i++) {
            pstmt.setInt(1, i);
            pstmt.setInt(2, i);
            pstmt.execute();
        }
    }

    final AtomicInteger insertCount = new AtomicInteger();
    final AtomicInteger beginCount = new AtomicInteger();
    final AtomicInteger commitCount = new AtomicInteger();
    final AtomicInteger rollbackCount = new AtomicInteger();

    InetSocketAddress sourceAddress = new InetSocketAddress(8080);
    InetSocketAddress sinkAddress = new InetSocketAddress(9090);
    MysqlEventListener eventListener = new MysqlEventListener("test", sourceAddress, sinkAddress) {
        @Override
        public void onBegin(UUID sourceId, long transactionId) {
            beginCount.incrementAndGet();
        }

        @Override
        public void onInsert(List<Row> rows) {
            insertCount.incrementAndGet();
        }

        @Override
        public void onUpdate(List<Pair<Row>> rows) {
        }

        @Override
        public void onDelete(List<Row> rows) {
        }

        @Override
        public void onCommit() {
            commitCount.incrementAndGet();
        }

        @Override
        public void onRollback() {
            rollbackCount.incrementAndGet();
        }
    };

    try {
        eventListener.start();

        long startTime = System.currentTimeMillis();
        long currentTime = startTime;
        do {
            // Once we've seen all writes, check expected state
            if (insertCount.get() == 10) {
                Assert.assertEquals(beginCount.get(), 10);
                Assert.assertEquals(commitCount.get(), 10);
                Assert.assertEquals(rollbackCount.get(), 0);
                return;
            }
            Thread.sleep(pollMillis);
            currentTime = System.currentTimeMillis();
        } while (currentTime - startTime < timeoutMillis);
    } finally {
        eventListener.shutdown();
    }

    Assert.fail("Timed out while polling");
}
From source file:com.couchbase.client.core.endpoint.view.ViewHandlerTest.java
@Test
public void shouldFireKeepAlive() throws Exception {
    final AtomicInteger keepAliveEventCounter = new AtomicInteger();
    final AtomicReference<ChannelHandlerContext> ctxRef = new AtomicReference();

    ViewHandler testHandler = new ViewHandler(endpoint, responseRingBuffer, queue, false) {
        @Override
        public void channelRegistered(ChannelHandlerContext ctx) throws Exception {
            super.channelRegistered(ctx);
            ctxRef.compareAndSet(null, ctx);
        }

        @Override
        protected void onKeepAliveFired(ChannelHandlerContext ctx, CouchbaseRequest keepAliveRequest) {
            assertEquals(1, keepAliveEventCounter.incrementAndGet());
        }

        @Override
        protected void onKeepAliveResponse(ChannelHandlerContext ctx, CouchbaseResponse keepAliveResponse) {
            assertEquals(2, keepAliveEventCounter.incrementAndGet());
        }
    };
    EmbeddedChannel channel = new EmbeddedChannel(testHandler);

    // test idle event triggers a view keepAlive request and hook is called
    testHandler.userEventTriggered(ctxRef.get(), IdleStateEvent.FIRST_ALL_IDLE_STATE_EVENT);
    assertEquals(1, keepAliveEventCounter.get());
    assertTrue(queue.peek() instanceof ViewHandler.KeepAliveRequest);
    ViewHandler.KeepAliveRequest keepAliveRequest = (ViewHandler.KeepAliveRequest) queue.peek();

    // test responding to the request with http response is interpreted into a KeepAliveResponse and hook is called
    HttpResponse response = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.NOT_FOUND);
    channel.writeInbound(response);
    ViewHandler.KeepAliveResponse keepAliveResponse = keepAliveRequest.observable()
            .cast(ViewHandler.KeepAliveResponse.class)
            .timeout(1, TimeUnit.SECONDS)
            .toBlocking()
            .single();

    assertEquals(2, keepAliveEventCounter.get());
    assertEquals(ResponseStatus.NOT_EXISTS, keepAliveResponse.status());
}
From source file:com.android.sdklib.repository.legacy.remote.internal.DownloadCache.java
/**
 * Downloads a small file, typically XML manifests.
 * The current {@link Strategy} governs whether the file is served as-is
 * from the cache, potentially updated first or directly downloaded.
 * <p>
 * For large downloads (e.g. installable archives) please do not invoke the
 * cache and instead use the {@link #openDirectUrl} method.
 * <p>
 * For details on realm authentication and user/password handling,
 * see {@link HttpConfigurable#openHttpConnection(String)}.
 *
 * @param urlString the URL string to be opened.
 * @param monitor {@link ITaskMonitor} which is related to this URL
 *            fetching.
 * @return Returns an {@link InputStream} holding the URL content.
 *         Returns null if there's no content (e.g. resource not found.)
 *         Returns null if the document is not cached and strategy is {@link Strategy#ONLY_CACHE}.
 * @throws IOException Exception thrown when there are problems retrieving
 *             the URL or its content.
 * @throws ProcessCanceledException Exception thrown if the user cancels the
 *             authentication dialog.
 */
@NonNull
public InputStream openCachedUrl(@NonNull String urlString, @NonNull ITaskMonitor monitor)
        throws IOException {
    // Don't cache in direct mode.
    if (mStrategy == Strategy.DIRECT) {
        Pair<InputStream, URLConnection> result = openUrl(urlString, true /*needsMarkResetSupport*/,
                monitor, null /*headers*/);
        return result.getFirst();
    }

    File cached = new File(mCacheRoot, getCacheFilename(urlString));
    File info = new File(mCacheRoot, getInfoFilename(cached.getName()));

    boolean useCached = mFileOp.exists(cached);

    if (useCached && mStrategy == Strategy.FRESH_CACHE) {
        // Check whether the file should be served from the cache or
        // refreshed first.
        long cacheModifiedMs = mFileOp.lastModified(cached); /* last mod time in epoch/millis */
        boolean checkCache = true;

        Properties props = readInfo(info);
        if (props == null) {
            // No properties, no chocolate for you.
            useCached = false;
        } else {
            long minExpiration = System.currentTimeMillis() - MIN_TIME_EXPIRED_MS;
            checkCache = cacheModifiedMs < minExpiration;

            if (!checkCache && DEBUG) {
                System.out.println(String.format(
                        "%s : Too fresh [%,d ms], not checking yet.",       //$NON-NLS-1$
                        urlString, cacheModifiedMs - minExpiration));
            }
        }

        if (useCached && checkCache) {
            assert props != null;

            // Right now we only support 200 codes and will requery all 404s.
            String code = props.getProperty(KEY_STATUS_CODE, "");          //$NON-NLS-1$
            useCached = Integer.toString(HttpStatus.SC_OK).equals(code);

            if (!useCached && DEBUG) {
                System.out.println(String.format(
                        "%s : cache disabled by code %s",                   //$NON-NLS-1$
                        urlString, code));
            }

            if (useCached) {
                // Do we have a valid Content-Length? If so, it should match the file size.
                try {
                    long length = Long.parseLong(props.getProperty(HttpHeaders.CONTENT_LENGTH, "-1")); //$NON-NLS-1$
                    if (length >= 0) {
                        useCached = length == mFileOp.length(cached);

                        if (!useCached && DEBUG) {
                            System.out.println(String.format(
                                    "%s : cache disabled by length mismatch %d, expected %d", //$NON-NLS-1$
                                    urlString, length, cached.length()));
                        }
                    }
                } catch (NumberFormatException ignore) {
                }
            }

            if (useCached) {
                // Do we have an ETag and/or a Last-Modified?
                String etag = props.getProperty(HttpHeaders.ETAG);
                String lastMod = props.getProperty(HttpHeaders.LAST_MODIFIED);

                if (etag != null || lastMod != null) {
                    // Details on how to use them is defined at
                    // http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec13.3.4
                    // Bottom line:
                    // - if there's an ETag, it should be used first with an
                    //   If-None-Match header. That's a strong comparison for HTTP/1.1 servers.
                    // - otherwise use a Last-Modified if an If-Modified-Since header exists.
                    //   In this case, we place both and the rules indicates a spec-abiding
                    //   server should strongly match ETag and weakly the Modified-Since.

                    // TODO there are some servers out there which report ETag/Last-Mod
                    // yet don't honor them when presented with a precondition. In this
                    // case we should identify it in the reply and invalidate ETag support
                    // for these servers and instead fallback on the pure-timeout case below.

                    AtomicInteger statusCode = new AtomicInteger(0);
                    InputStream is = null;
                    List<Header> headers = new ArrayList<Header>(2);

                    if (etag != null) {
                        headers.add(new BasicHeader(HttpHeaders.IF_NONE_MATCH, etag));
                    }

                    if (lastMod != null) {
                        headers.add(new BasicHeader(HttpHeaders.IF_MODIFIED_SINCE, lastMod));
                    }

                    if (!headers.isEmpty()) {
                        is = downloadAndCache(urlString, monitor, cached, info,
                                headers.toArray(new Header[headers.size()]), statusCode);
                    }

                    if (is != null && statusCode.get() == HttpStatus.SC_OK) {
                        // The resource was modified, the server said there was something
                        // new, which has been cached. We can return that to the caller.
                        return is;
                    }

                    // If we get here, we should have is == null and code
                    // could be:
                    // - 304 for not-modified -- same resource, still available, in
                    //       which case we'll use the cached one.
                    // - 404 -- resource doesn't exist anymore in which case there's
                    //       no point in retrying.
                    // - For any other code, just retry a download.

                    if (is != null) {
                        try {
                            is.close();
                        } catch (Exception ignore) {
                        }
                        is = null;
                    }

                    if (statusCode.get() == HttpStatus.SC_NOT_MODIFIED) {
                        // Cached file was not modified.
                        // Change its timestamp for the next MIN_TIME_EXPIRED_MS check.
                        cached.setLastModified(System.currentTimeMillis());

                        // At this point useCached==true so we'll return
                        // the cached file below.
                    } else {
                        // URL fetch returned something other than 200 or 304.
                        // For 404, we're done, no need to check the server again.
                        // For all other codes, we'll retry a download below.
                        useCached = false;
                        if (statusCode.get() == HttpStatus.SC_NOT_FOUND) {
                            return null;
                        }
                    }
                } else {
                    // If we don't have an Etag nor Last-Modified, let's use a
                    // basic file timestamp and compare to a 1 hour threshold.
                    long maxExpiration = System.currentTimeMillis() - MAX_TIME_EXPIRED_MS;
                    useCached = cacheModifiedMs >= maxExpiration;

                    if (!useCached && DEBUG) {
                        System.out.println(String.format(
                                "[%1$s] cache disabled by timestamp %2$tD %2$tT < %3$tD %3$tT", //$NON-NLS-1$
                                urlString, cacheModifiedMs, maxExpiration));
                    }
                }
            }
        }
    }

    if (useCached) {
        // The caller needs an InputStream that supports the reset() operation.
        // The default FileInputStream does not, so load the file into a byte
        // array and return that.
        try {
            InputStream is = readCachedFile(cached);
            if (is != null) {
                if (DEBUG) {
                    System.out.println(String.format("%s : Use cached file", urlString)); //$NON-NLS-1$
                }
                return is;
            }
        } catch (IOException ignore) {
        }
    }

    if (!useCached && mStrategy == Strategy.ONLY_CACHE) {
        // We don't have a document to serve from the cache.
        if (DEBUG) {
            System.out.println(String.format("%s : file not in cache", urlString)); //$NON-NLS-1$
        }
        return null;
    }

    // If we're not using the cache, try to remove the cache and download again.
    try {
        mFileOp.delete(cached);
        mFileOp.delete(info);
    } catch (SecurityException ignore) {
    }

    return downloadAndCache(urlString, monitor, cached, info, null /*headers*/, null /*statusCode*/);
}
From source file:com.android.sdklib.internal.repository.DownloadCache.java
/**
 * Downloads a small file, typically XML manifests.
 * The current {@link Strategy} governs whether the file is served as-is
 * from the cache, potentially updated first or directly downloaded.
 * <p/>
 * For large downloads (e.g. installable archives) please do not invoke the
 * cache and instead use the {@link #openDirectUrl} method.
 * <p/>
 * For details on realm authentication and user/password handling,
 * check the underlying {@link UrlOpener#openUrl(String, boolean, ITaskMonitor, Header[])}
 * documentation.
 *
 * @param urlString the URL string to be opened.
 * @param monitor {@link ITaskMonitor} which is related to this URL
 *            fetching.
 * @return Returns an {@link InputStream} holding the URL content.
 *         Returns null if there's no content (e.g. resource not found.)
 *         Returns null if the document is not cached and strategy is {@link Strategy#ONLY_CACHE}.
 * @throws IOException Exception thrown when there are problems retrieving
 *             the URL or its content.
 * @throws CanceledByUserException Exception thrown if the user cancels the
 *             authentication dialog.
 */
@NonNull
public InputStream openCachedUrl(@NonNull String urlString, @NonNull ITaskMonitor monitor)
        throws IOException, CanceledByUserException {
    // Don't cache in direct mode.
    if (mStrategy == Strategy.DIRECT) {
        Pair<InputStream, HttpResponse> result = openUrl(urlString, true /*needsMarkResetSupport*/,
                monitor, null /*headers*/);
        return result.getFirst();
    }

    File cached = new File(mCacheRoot, getCacheFilename(urlString));
    File info = new File(mCacheRoot, getInfoFilename(cached.getName()));

    boolean useCached = mFileOp.exists(cached);

    if (useCached && mStrategy == Strategy.FRESH_CACHE) {
        // Check whether the file should be served from the cache or
        // refreshed first.
        long cacheModifiedMs = mFileOp.lastModified(cached); /* last mod time in epoch/millis */
        boolean checkCache = true;

        Properties props = readInfo(info);
        if (props == null) {
            // No properties, no chocolate for you.
            useCached = false;
        } else {
            long minExpiration = System.currentTimeMillis() - MIN_TIME_EXPIRED_MS;
            checkCache = cacheModifiedMs < minExpiration;

            if (!checkCache && DEBUG) {
                System.out.println(String.format(
                        "%s : Too fresh [%,d ms], not checking yet.",       //$NON-NLS-1$
                        urlString, cacheModifiedMs - minExpiration));
            }
        }

        if (useCached && checkCache) {
            assert props != null;

            // Right now we only support 200 codes and will requery all 404s.
            String code = props.getProperty(KEY_STATUS_CODE, "");          //$NON-NLS-1$
            useCached = Integer.toString(HttpStatus.SC_OK).equals(code);

            if (!useCached && DEBUG) {
                System.out.println(String.format(
                        "%s : cache disabled by code %s",                   //$NON-NLS-1$
                        urlString, code));
            }

            if (useCached) {
                // Do we have a valid Content-Length? If so, it should match the file size.
                try {
                    long length = Long.parseLong(props.getProperty(HttpHeaders.CONTENT_LENGTH, "-1")); //$NON-NLS-1$
                    if (length >= 0) {
                        useCached = length == mFileOp.length(cached);

                        if (!useCached && DEBUG) {
                            System.out.println(String.format(
                                    "%s : cache disabled by length mismatch %d, expected %d", //$NON-NLS-1$
                                    urlString, length, cached.length()));
                        }
                    }
                } catch (NumberFormatException ignore) {
                }
            }

            if (useCached) {
                // Do we have an ETag and/or a Last-Modified?
                String etag = props.getProperty(HttpHeaders.ETAG);
                String lastMod = props.getProperty(HttpHeaders.LAST_MODIFIED);

                if (etag != null || lastMod != null) {
                    // Details on how to use them is defined at
                    // http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec13.3.4
                    // Bottom line:
                    // - if there's an ETag, it should be used first with an
                    //   If-None-Match header. That's a strong comparison for HTTP/1.1 servers.
                    // - otherwise use a Last-Modified if an If-Modified-Since header exists.
                    //   In this case, we place both and the rules indicates a spec-abiding
                    //   server should strongly match ETag and weakly the Modified-Since.

                    // TODO there are some servers out there which report ETag/Last-Mod
                    // yet don't honor them when presented with a precondition. In this
                    // case we should identify it in the reply and invalidate ETag support
                    // for these servers and instead fallback on the pure-timeout case below.

                    AtomicInteger statusCode = new AtomicInteger(0);
                    InputStream is = null;
                    List<Header> headers = new ArrayList<Header>(2);

                    if (etag != null) {
                        headers.add(new BasicHeader(HttpHeaders.IF_NONE_MATCH, etag));
                    }

                    if (lastMod != null) {
                        headers.add(new BasicHeader(HttpHeaders.IF_MODIFIED_SINCE, lastMod));
                    }

                    if (!headers.isEmpty()) {
                        is = downloadAndCache(urlString, monitor, cached, info,
                                headers.toArray(new Header[headers.size()]), statusCode);
                    }

                    if (is != null && statusCode.get() == HttpStatus.SC_OK) {
                        // The resource was modified, the server said there was something
                        // new, which has been cached. We can return that to the caller.
                        return is;
                    }

                    // If we get here, we should have is == null and code
                    // could be:
                    // - 304 for not-modified -- same resource, still available, in
                    //       which case we'll use the cached one.
                    // - 404 -- resource doesn't exist anymore in which case there's
                    //       no point in retrying.
                    // - For any other code, just retry a download.

                    if (is != null) {
                        try {
                            is.close();
                        } catch (Exception ignore) {
                        }
                        is = null;
                    }

                    if (statusCode.get() == HttpStatus.SC_NOT_MODIFIED) {
                        // Cached file was not modified.
                        // Change its timestamp for the next MIN_TIME_EXPIRED_MS check.
                        cached.setLastModified(System.currentTimeMillis());

                        // At this point useCached==true so we'll return
                        // the cached file below.
                    } else {
                        // URL fetch returned something other than 200 or 304.
                        // For 404, we're done, no need to check the server again.
                        // For all other codes, we'll retry a download below.
                        useCached = false;
                        if (statusCode.get() == HttpStatus.SC_NOT_FOUND) {
                            return null;
                        }
                    }
                } else {
                    // If we don't have an Etag nor Last-Modified, let's use a
                    // basic file timestamp and compare to a 1 hour threshold.
                    long maxExpiration = System.currentTimeMillis() - MAX_TIME_EXPIRED_MS;
                    useCached = cacheModifiedMs >= maxExpiration;

                    if (!useCached && DEBUG) {
                        System.out.println(String.format(
                                "[%1$s] cache disabled by timestamp %2$tD %2$tT < %3$tD %3$tT", //$NON-NLS-1$
                                urlString, cacheModifiedMs, maxExpiration));
                    }
                }
            }
        }
    }

    if (useCached) {
        // The caller needs an InputStream that supports the reset() operation.
        // The default FileInputStream does not, so load the file into a byte
        // array and return that.
        try {
            InputStream is = readCachedFile(cached);
            if (is != null) {
                if (DEBUG) {
                    System.out.println(String.format("%s : Use cached file", urlString)); //$NON-NLS-1$
                }
                return is;
            }
        } catch (IOException ignore) {
        }
    }

    if (!useCached && mStrategy == Strategy.ONLY_CACHE) {
        // We don't have a document to serve from the cache.
        if (DEBUG) {
            System.out.println(String.format("%s : file not in cache", urlString)); //$NON-NLS-1$
        }
        return null;
    }

    // If we're not using the cache, try to remove the cache and download again.
    try {
        mFileOp.delete(cached);
        mFileOp.delete(info);
    } catch (SecurityException ignore) {
    }

    return downloadAndCache(urlString, monitor, cached, info, null /*headers*/, null /*statusCode*/);
}
From source file:org.dasein.cloud.azurepack.tests.compute.AzurePackVirtualMachineSupportTest.java
@Test
public void terminateShouldSendCorrectRequest() throws CloudException, InternalException {
    final AtomicInteger deleteCount = new AtomicInteger(0);

    new GetOrListVirtualMachinesRequestExecutorMockUp() {
        @Mock
        public void $init(CloudProvider provider, HttpClientBuilder clientBuilder, HttpUriRequest request,
                ResponseHandler handler) {
            String requestUri = request.getURI().toString();
            if (request.getMethod().equals("DELETE") && requestUri
                    .equals(String.format(VM_RESOURCES, ENDPOINT, ACCOUNT_NO, DATACENTER_ID, VM_1_ID))) {
                requestResourceType = 11;
            } else {
                super.$init(provider, clientBuilder, request, handler);
            }
        }

        @Mock
        public Object execute() {
            if (requestResourceType == 11) {
                deleteCount.incrementAndGet();
                return "";
            } else {
                return super.execute();
            }
        }
    };

    azurePackVirtualMachineSupport.terminate(VM_1_ID, "no reason");

    assertEquals("terminate doesn't send DELETE request", 1, deleteCount.get());
}