List of usage examples for java.util.concurrent.atomic AtomicInteger incrementAndGet
public final int incrementAndGet()
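Atomically increments the current value by one and returns the updated value (unlike getAndIncrement(), which returns the previous value). Before the source-file examples, here is a minimal self-contained sketch of the typical shared-counter idiom that recurs below; the class and variable names are illustrative only:

import java.util.concurrent.atomic.AtomicInteger;

public class IncrementAndGetExample {

    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger counter = new AtomicInteger(0);

        // several threads bump the shared counter without any locking
        Runnable task = () -> {
            for (int i = 0; i < 1_000; i++) {
                int updated = counter.incrementAndGet();
                // incrementAndGet returns the new value, so "== 1" is true
                // for exactly one increment: a common "first event" idiom
                if (updated == 1) {
                    System.out.println("first increment observed");
                }
            }
        };

        Thread t1 = new Thread(task);
        Thread t2 = new Thread(task);
        t1.start();
        t2.start();
        t1.join();
        t2.join();

        System.out.println("final count: " + counter.get()); // always 2000
    }
}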
From source file:org.dswarm.graph.resources.GDMResource.java
/**
 * multipart/mixed payload contains two body parts:<br/>
 * - first body part is the metadata (i.e. a JSON object with mandatory and optional properties for processing the
 * content):<br/>
 * - "data_model_URI" (mandatory)<br/>
 * - "content_schema" (optional)<br/>
 * - "deprecate_missing_records" (optional)<br/>
 * - "record_class_uri" (mandatory for "deprecate_missing_records")<br/>
 * - second body part is the content (i.e. the real data)
 *
 * @param multiPart
 * @param database
 * @param requestHeaders
 * @return
 * @throws DMPGraphException
 * @throws IOException
 */
@POST
@Path("/put")
@Consumes("multipart/mixed")
public Response writeGDM(final MultiPart multiPart, @Context final GraphDatabaseService database,
        @Context final HttpHeaders requestHeaders) throws DMPGraphException, IOException {

    LOG.debug("try to process GDM statements and write them into graph db");

    final String headers = readHeaders(requestHeaders);

    GDMResource.LOG.debug("try to process GDM statements and write them into graph db with\n{}", headers);

    final List<BodyPart> bodyParts = getBodyParts(multiPart);
    final ObjectNode metadata = getMetadata(bodyParts);
    final InputStream content = getContent(bodyParts);

    final Optional<String> optionalDataModelURI = getMetadataPart(DMPStatics.DATA_MODEL_URI_IDENTIFIER,
            metadata, true);
    final String dataModelURI = optionalDataModelURI.get();

    final Optional<Boolean> optionalEnableVersioning = getEnableVersioningFlag(metadata);

    final boolean enableVersioning;

    if (optionalEnableVersioning.isPresent()) {
        enableVersioning = optionalEnableVersioning.get();
    } else {
        // versioning is enabled by default
        enableVersioning = true;
    }

    final AtomicInteger counter = new AtomicInteger(0);

    final Tuple<Observable<Resource>, BufferedInputStream> modelTuple = getModel(content);
    final ConnectableObservable<Resource> model = modelTuple.v1()
            .doOnSubscribe(() -> LOG.debug("subscribed to model observable"))
            .doOnNext(record -> {
                // incrementAndGet returns the updated value, so "== 1" fires exactly once,
                // for the first record
                if (counter.incrementAndGet() == 1) {
                    LOG.debug("read first records from model observable");
                }
            })
            .doOnCompleted(() -> LOG.debug("read '{}' records from model observable", counter.get()))
            .onBackpressureBuffer(10000)
            .publish();
    final BufferedInputStream bis = modelTuple.v2();

    LOG.debug("deserialized GDM statements that were serialised as JSON");
    LOG.debug("try to write GDM statements into graph db");

    final TransactionHandler tx = new Neo4jTransactionHandler(database);
    final NamespaceIndex namespaceIndex = new NamespaceIndex(database, tx);

    final String prefixedDataModelURI = namespaceIndex.createPrefixedURI(dataModelURI);

    final GDMNeo4jProcessor processor = new DataModelGDMNeo4jProcessor(database, tx, namespaceIndex,
            prefixedDataModelURI);

    LOG.info("process GDM statements and write them into graph db for data model '{}' ('{}')", dataModelURI,
            prefixedDataModelURI);

    try {

        final GDMNeo4jHandler handler = new DataModelGDMNeo4jHandler(processor, enableVersioning);

        final Observable<Resource> newModel;
        final Observable<Boolean> deprecateRecordsObservable;

        // note: versioning is enabled by default
        if (enableVersioning) {

            LOG.info("do versioning with GDM statements for data model '{}' ('{}')", dataModelURI,
                    prefixedDataModelURI);

            final Optional<ContentSchema> optionalPrefixedContentSchema = getPrefixedContentSchema(metadata,
                    namespaceIndex);

            // = new resources model, since existing, modified resources were already written to the DB
            final Tuple<Observable<Resource>, Observable<Long>> result = calculateDeltaForDataModel(model,
                    optionalPrefixedContentSchema, prefixedDataModelURI, database, handler, namespaceIndex);

            final Observable<Resource> deltaModel = result.v1().onBackpressureBuffer(10000);

            final Optional<Boolean> optionalDeprecateMissingRecords = getDeprecateMissingRecordsFlag(metadata);

            if (optionalDeprecateMissingRecords.isPresent() && optionalDeprecateMissingRecords.get()) {

                final Optional<String> optionalRecordClassURI = getMetadataPart(
                        DMPStatics.RECORD_CLASS_URI_IDENTIFIER, metadata, false);

                if (!optionalRecordClassURI.isPresent()) {

                    throw new DMPGraphException(
                            "could not deprecate missing records, because no record class uri is given");
                }

                // deprecate missing records in DB
                final Observable<Long> processedResources = result.v2();

                deprecateRecordsObservable = deprecateMissingRecords(processedResources,
                        optionalRecordClassURI.get(), dataModelURI,
                        ((Neo4jUpdateHandler) handler.getHandler()).getVersionHandler().getLatestVersion(),
                        processor);
            } else {

                deprecateRecordsObservable = Observable.empty();
            }

            newModel = deltaModel;

            LOG.info("finished versioning with GDM statements for data model '{}' ('{}')", dataModelURI,
                    prefixedDataModelURI);
        } else {

            newModel = model;
            deprecateRecordsObservable = Observable.empty();
        }

        final AtomicInteger counter2 = new AtomicInteger(0);

        final ConnectableObservable<Resource> newModelLogged = newModel
                .doOnSubscribe(() -> LOG.debug("subscribed to new model observable"))
                .doOnNext(record -> {
                    if (counter2.incrementAndGet() == 1) {
                        LOG.debug("read first records from new model observable");
                    }
                })
                .doOnCompleted(() -> LOG.debug("read '{}' records from new model observable", counter2.get()))
                .onBackpressureBuffer(10000)
                .publish();

        //if (deltaModel.size() > 0) {
        // parse model only, when model contains some resources

        final AtomicInteger counter3 = new AtomicInteger(0);

        final GDMParser parser = new GDMModelParser(newModelLogged);
        parser.setGDMHandler(handler);

        final Observable<Boolean> newResourcesObservable = parser.parse()
                .doOnSubscribe(() -> LOG.debug("subscribed to new resources observable"))
                .doOnNext(record -> {
                    if (counter3.incrementAndGet() == 1) {
                        LOG.debug("read first records from new resources observable");
                    }
                })
                .doOnCompleted(
                        () -> LOG.debug("read '{}' records from new resources observable", counter3.get()));

        try {

            final Observable<Boolean> connectedObservable = deprecateRecordsObservable
                    .concatWith(newResourcesObservable);

            final BlockingObservable<Boolean> blockingObservable = connectedObservable.toBlocking();
            final Iterator<Boolean> iterator = blockingObservable.getIterator();

            newModelLogged.connect();

            if (!enableVersioning) {
                model.connect();
            }

            if (!iterator.hasNext()) {
                LOG.debug("model contains no resources, i.e., nothing needs to be written to the DB");
            }

            while (iterator.hasNext()) {
                iterator.next();
            }
        } catch (final RuntimeException e) {
            throw new DMPGraphException(e.getMessage(), e.getCause());
        }

        final Long size = handler.getHandler().getCountedStatements();

        if (enableVersioning && size > 0) {
            // update data model version only when some statements are written to the DB
            ((Neo4jUpdateHandler) handler.getHandler()).getVersionHandler().updateLatestVersion();
        }

        handler.getHandler().closeTransaction();
        bis.close();
        content.close();

        LOG.info(
                "finished writing {} resources with {} GDM statements (added {} relationships, added {} nodes (resources + bnodes + literals), added {} literals) into graph db for data model URI '{}' ('{}')",
                parser.parsedResources(), handler.getHandler().getCountedStatements(),
                handler.getHandler().getRelationshipsAdded(), handler.getHandler().getNodesAdded(),
                handler.getHandler().getCountedLiterals(), dataModelURI, prefixedDataModelURI);

        return Response.ok().build();
    } catch (final Exception e) {

        processor.getProcessor().failTx();
        bis.close();
        content.close();

        LOG.error("couldn't write GDM statements into graph db: {}", e.getMessage(), e);

        throw e;
    }
}
From source file:de.bund.bfr.math.LeastSquaresOptimization.java
@Override
public Result optimize(int nParameterSpace, int nOptimizations, boolean stopWhenSuccessful,
        Map<String, Double> minStartValues, Map<String, Double> maxStartValues, int maxIterations,
        DoubleConsumer progressListener, ExecutionContext exec) throws CanceledExecutionException {
    if (exec != null) {
        exec.checkCanceled();
    }

    progressListener.accept(0.0);

    List<ParamRange> ranges = MathUtils.getParamRanges(parameters, minStartValues, maxStartValues,
            nParameterSpace);
    RealVector targetVector = new ArrayRealVector(Doubles.toArray(targetValues));
    List<StartValues> startValuesList = MathUtils.createStartValuesList(ranges, nOptimizations,
            values -> targetVector
                    .getDistance(new ArrayRealVector(optimizerFunction.value(Doubles.toArray(values)))),
            progress -> progressListener.accept(0.5 * progress), exec);
    LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
    Result result = new Result();
    AtomicInteger count = new AtomicInteger(0);

    for (StartValues startValues : startValuesList) {
        if (exec != null) {
            exec.checkCanceled();
        }

        progressListener.accept(0.5 * count.get() / startValuesList.size() + 0.5);

        try {
            LeastSquaresBuilder builder = createLeastSquaresBuilder(startValues.getValues(), maxIterations);

            builder.checker((iteration, previous, current) -> {
                double currentProgress = (double) iteration / (double) maxIterations;

                if (exec != null) {
                    try {
                        exec.checkCanceled();
                    } catch (CanceledExecutionException e) {
                        return true;
                    }
                }

                progressListener.accept(0.5 * (count.get() + currentProgress) / startValuesList.size() + 0.5);
                return iteration == maxIterations;
            });

            LeastSquaresOptimizer.Optimum optimizerResults = optimizer.optimize(builder.build());

            if (exec != null) {
                exec.checkCanceled();
            }

            double cost = optimizerResults.getCost();

            if (result.sse == null || cost * cost < result.sse) {
                result = getResults(optimizerResults);

                if (result.sse == 0.0) {
                    break;
                }

                if (result.r2 != null && result.r2 > 0.0 && stopWhenSuccessful) {
                    break;
                }
            }
        } catch (TooManyEvaluationsException | TooManyIterationsException | ConvergenceException e) {
            // ignore convergence failures and continue with the next start values
        }

        count.incrementAndGet();
    }

    return result;
}
From source file:org.dasein.cloud.azure.tests.network.AzureVlanSupportTest.java
@Test
public void removeVlanShouldPostCorrectRequest() throws CloudException, InternalException {
    final AtomicInteger putCount = new AtomicInteger(0);

    new MockUp<CloseableHttpClient>() {
        @Mock
        public CloseableHttpResponse execute(Invocation inv, HttpUriRequest request) throws IOException {
            if (request.getMethod().equals("GET")
                    && VIRTUAL_NETWORK_SITES_URL.equals(request.getURI().toString())) {
                DaseinObjectToXmlEntity<VirtualNetworkSitesModel> daseinEntity = new DaseinObjectToXmlEntity<VirtualNetworkSitesModel>(
                        createVirtualNetworkSitesModel(ID, NAME, REGION, CIDR, "Updating"));
                return getHttpResponseMock(getStatusLineMock(HttpServletResponse.SC_OK), daseinEntity,
                        new Header[] { new BasicHeader("x-ms-request-id", UUID.randomUUID().toString()) });
            } else if ("GET".equals(request.getMethod())
                    && NETWORK_CONFIG_URL.equals(request.getURI().toString())) {
                DaseinObjectToXmlEntity<NetworkConfigurationModel> daseinEntity = new DaseinObjectToXmlEntity<NetworkConfigurationModel>(
                        createNetworkConfigurationModel(NAME, REGION, CIDR));
                return getHttpResponseMock(getStatusLineMock(HttpServletResponse.SC_OK), daseinEntity,
                        new Header[] { new BasicHeader("x-ms-request-id", UUID.randomUUID().toString()) });
            } else if ("PUT".equals(request.getMethod())) {
                // count every PUT so the test can assert the network config is written exactly once
                putCount.incrementAndGet();
                NetworkConfigurationModel networkConfigurationModel = createNetworkConfigurationModel(null,
                        null, null);
                assertPut(request, NETWORK_CONFIG_URL,
                        new Header[] { new BasicHeader("x-ms-version", "2012-03-01") },
                        networkConfigurationModel);
                return getHttpResponseMock(getStatusLineMock(HttpServletResponse.SC_OK), null,
                        new Header[] { new BasicHeader("x-ms-request-id", UUID.randomUUID().toString()) });
            } else {
                throw new IOException("Request is not mocked");
            }
        }
    };

    vlanSupport.removeVlan(ID);
    assertEquals("removeVlan should PUT the network config exactly once", 1, putCount.get());
}
From source file:de.interactive_instruments.ShapeChange.Model.EA.EADocument.java
private void saveDiagrams(AtomicInteger imgIdCounter, String imgIdPrefix, java.io.File targetFolder,
        String relPathWithTargetFolder, PackageInfo pi) {

    if (!targetFolder.exists()) {
        targetFolder.mkdir();
    }

    java.io.File pi_folder = new java.io.File(targetFolder, escapeFileName(pi.name()));
    if (!pi_folder.mkdir()) {
        result.addWarning(null, 32, pi_folder.getAbsolutePath());
    }

    String newRelPathWithTargetFolder = relPathWithTargetFolder + "/" + escapeFileName(pi.name());

    Project projectInterface = repository.GetProjectInterface();

    PackageInfoEA piEa = fPackageById.get(pi.id());

    List<Diagram> diagramList = getDiagramsOfPackage(piEa);

    String packageDiagramRegex = options.parameter("packageDiagramRegex");
    if (packageDiagramRegex == null) {
        packageDiagramRegex = Options.IMAGE_INCLUSION_PACKAGE_REGEX;
    }
    String classDiagramRegex = options.parameter("classDiagramRegex");
    if (classDiagramRegex == null) {
        classDiagramRegex = Options.IMAGE_INCLUSION_CLASS_REGEX;
    }

    String elementNameKeyForMatching = Options.ELEMENT_NAME_KEY_FOR_DIAGRAM_MATCHING;
    String regexForModelElement;

    for (Diagram d : diagramList) {

        // incrementAndGet yields a unique, monotonically increasing image id
        // across the whole package recursion
        String imgId = imgIdPrefix + imgIdCounter.incrementAndGet();
        String imgFileName = escapeFileName(imgId + ".jpg");
        String imgName = d.GetName();

        java.io.File img = new java.io.File(pi_folder, imgFileName);
        String relPathToFile = newRelPathWithTargetFolder + "/" + imgFileName;
        String type = d.GetType();

        /*
         * before saving the diagram, ensure that it is relevant for at
         * least one model element
         */
        boolean relevantDiagram = false;

        if (type.equalsIgnoreCase("Package")) {
            regexForModelElement = packageDiagramRegex.replaceAll(elementNameKeyForMatching, pi.name());
            if (imgName.matches(regexForModelElement)) {
                relevantDiagram = true;
            }
        } else if (type.equalsIgnoreCase("Logical")) {
            regexForModelElement = packageDiagramRegex.replaceAll(elementNameKeyForMatching, pi.name());
            if (imgName.matches(regexForModelElement)) {
                relevantDiagram = true;
            }

            SortedSet<ClassInfo> clTmp = this.classes(pi);

            if (clTmp == null || clTmp.isEmpty()) {
                // no classes in package, thus the logical diagram cannot be relevant
            } else if (relevantDiagram) {
                // we have already established that this is a relevant diagram
            } else {
                for (ClassInfo ci : clTmp) {
                    // only process classes from this package
                    if (ci.pkg() == pi) {
                        regexForModelElement = classDiagramRegex.replaceAll(elementNameKeyForMatching,
                                ci.name());
                        if (imgName.matches(regexForModelElement)) {
                            // we established that this is a relevant diagram
                            relevantDiagram = true;
                            break;
                        }
                    }
                }
            }
        } else {
            // unsupported diagram type -> irrelevant
        }

        if (!relevantDiagram) {
            continue;
        }

        repository.OpenDiagram(d.GetDiagramID());
        projectInterface.SaveDiagramImageToFile(img.getAbsolutePath());
        repository.CloseDiagram(d.GetDiagramID());

        BufferedImage bimg;
        int width = 400;
        int height = 400;

        try {
            bimg = ImageIO.read(img);
            width = bimg.getWidth();
            height = bimg.getHeight();
        } catch (IOException e) {
            result.addError(null, 33, imgName, pi.name());
            e.printStackTrace(System.err);
            continue;
        }

        ImageMetadata imgMeta = new ImageMetadata(imgId, imgName, img, relPathToFile, width, height);

        if (type.equalsIgnoreCase("Package")) {
            // we already checked that the diagram is relevant for this package
            addDiagramToPackage(pi, imgMeta);
        } else if (type.equalsIgnoreCase("Logical")) {
            regexForModelElement = packageDiagramRegex.replaceAll(elementNameKeyForMatching, pi.name());
            if (imgName.matches(regexForModelElement)) {
                addDiagramToPackage(pi, imgMeta);
            }

            SortedSet<ClassInfo> clTmp = this.classes(pi);

            if (clTmp == null || clTmp.isEmpty()) {
                continue;
            } else {
                for (ClassInfo ci : clTmp) {
                    // only process classes from this package
                    if (ci.pkg() == pi) {
                        regexForModelElement = classDiagramRegex.replaceAll(elementNameKeyForMatching,
                                ci.name());
                        if (imgName.matches(regexForModelElement)) {
                            addDiagramToClass(ci, imgMeta);
                        }
                    }
                }
            }
        } else {
            // unsupported diagram type -> irrelevant
        }
    }

    SortedSet<PackageInfo> children = pi.containedPackages();

    if (children != null) {
        for (PackageInfo piChild : children) {
            if (piChild.targetNamespace().equals(pi.targetNamespace())) {
                saveDiagrams(imgIdCounter, imgIdPrefix, pi_folder, newRelPathWithTargetFolder, piChild);
            }
        }
    }
}
From source file:com.netflix.curator.framework.recipes.locks.TestInterProcessSemaphoreCluster.java
@Test
public void testKilledServerWithEnsembleProvider() throws Exception {
    final int CLIENT_QTY = 10;
    final Timing timing = new Timing();
    final String PATH = "/foo/bar/lock";

    ExecutorService executorService = Executors.newFixedThreadPool(CLIENT_QTY);
    ExecutorCompletionService<Void> completionService = new ExecutorCompletionService<Void>(executorService);
    TestingCluster cluster = new TestingCluster(3);
    try {
        cluster.start();

        final AtomicReference<String> connectionString = new AtomicReference<String>(
                cluster.getConnectString());
        final EnsembleProvider provider = new EnsembleProvider() {
            @Override
            public void start() throws Exception {
            }

            @Override
            public String getConnectionString() {
                return connectionString.get();
            }

            @Override
            public void close() throws IOException {
            }
        };

        final Semaphore acquiredSemaphore = new Semaphore(0);
        final AtomicInteger acquireCount = new AtomicInteger(0);
        final CountDownLatch suspendedLatch = new CountDownLatch(CLIENT_QTY);
        for (int i = 0; i < CLIENT_QTY; ++i) {
            completionService.submit(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    CuratorFramework client = CuratorFrameworkFactory.builder().ensembleProvider(provider)
                            .sessionTimeoutMs(timing.session()).connectionTimeoutMs(timing.connection())
                            .retryPolicy(new ExponentialBackoffRetry(100, 3)).build();
                    try {
                        final Semaphore suspendedSemaphore = new Semaphore(0);
                        client.getConnectionStateListenable().addListener(new ConnectionStateListener() {
                            @Override
                            public void stateChanged(CuratorFramework client, ConnectionState newState) {
                                if ((newState == ConnectionState.SUSPENDED)
                                        || (newState == ConnectionState.LOST)) {
                                    suspendedLatch.countDown();
                                    suspendedSemaphore.release();
                                }
                            }
                        });

                        client.start();

                        InterProcessSemaphoreV2 semaphore = new InterProcessSemaphoreV2(client, PATH, 1);

                        while (!Thread.currentThread().isInterrupted()) {
                            Lease lease = null;
                            try {
                                lease = semaphore.acquire();
                                acquiredSemaphore.release();
                                acquireCount.incrementAndGet();
                                suspendedSemaphore.acquire();
                            } catch (Exception e) {
                                // just retry
                            } finally {
                                if (lease != null) {
                                    acquireCount.decrementAndGet();
                                    IOUtils.closeQuietly(lease);
                                }
                            }
                        }
                    } finally {
                        IOUtils.closeQuietly(client);
                    }
                    return null;
                }
            });
        }

        Assert.assertTrue(timing.acquireSemaphore(acquiredSemaphore));
        Assert.assertEquals(1, acquireCount.get());

        cluster.close();
        timing.awaitLatch(suspendedLatch);
        timing.forWaiting().sleepABit();

        Assert.assertEquals(0, acquireCount.get());

        cluster = new TestingCluster(3);
        cluster.start();

        connectionString.set(cluster.getConnectString());
        timing.forWaiting().sleepABit();

        Assert.assertTrue(timing.acquireSemaphore(acquiredSemaphore));
        timing.forWaiting().sleepABit();

        Assert.assertEquals(1, acquireCount.get());
    } finally {
        executorService.shutdown();
        executorService.awaitTermination(10, TimeUnit.SECONDS);
        executorService.shutdownNow();
        IOUtils.closeQuietly(cluster);
    }
}
From source file:com.nearinfinity.blur.thrift.AsyncClientPool.java
private TAsyncClient newClient(Class<?> c, Connection connection) throws InterruptedException {
    BlockingQueue<TAsyncClient> blockingQueue = getQueue(connection);
    TAsyncClient client = blockingQueue.poll();
    if (client != null) {
        return client;
    }

    AtomicInteger counter;
    synchronized (_numberOfConnections) {
        counter = _numberOfConnections.get(connection.getHost());
        if (counter == null) {
            counter = new AtomicInteger();
            _numberOfConnections.put(connection.getHost(), counter);
        }
    }

    synchronized (counter) {
        int numOfConnections = counter.get();
        while (numOfConnections >= _maxConnectionsPerHost) {
            client = blockingQueue.poll(_pollTime, TimeUnit.MILLISECONDS);
            if (client != null) {
                return client;
            }
            LOG.debug("Waiting for client, number of connections [" + numOfConnections
                    + "], max connections per host [" + _maxConnectionsPerHost + "]");
            numOfConnections = counter.get();
        }

        LOG.info("Creating a new client for [" + connection + "]");
        String name = c.getName();
        Constructor<?> constructor = _constructorCache.get(name);
        if (constructor == null) {
            String clientClassName = name.replace("$AsyncIface", "$AsyncClient");
            try {
                Class<?> clazz = Class.forName(clientClassName);
                constructor = clazz.getConstructor(new Class[] { TProtocolFactory.class,
                        TAsyncClientManager.class, TNonblockingTransport.class });
                _constructorCache.put(name, constructor);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
        try {
            TNonblockingSocket transport = newTransport(connection);
            client = (TAsyncClient) constructor
                    .newInstance(new Object[] { _protocolFactory, _clientManager, transport });
            client.setTimeout(_timeout);
            counter.incrementAndGet();
            return client;
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
}
From source file:automenta.climatenet.ImportKML.java
public void transformKML(String layer, String urlString, ElasticSpacetime st, final GISVisitor visitor)
        throws Exception {
    URL url = new URL(urlString);
    this.layer = layer;

    KmlReader reader = new KmlReader(url, proxy);
    reader.setRewriteStyleUrls(true);

    SimpleField layerfield = new SimpleField("layer", Type.STRING);
    layerfield.setLength(32);

    final AtomicInteger exceptions = new AtomicInteger();
    final Set<Class> exceptionClass = new HashSet<>();

    serial = 1;

    path.clear();

    visitor.start(layer);

    do {
        // read the next gis object; e.g. check for placemark, NetworkLink, etc.
        try {
            IGISObject go = reader.read();
            if (go == null) {
                break;
            }
            if (go instanceof DocumentStart) {
                DocumentStart ds = (DocumentStart) go;
                path.add(layer);
            }
            if (go instanceof ContainerEnd) {
                path.removeLast();
            }
            if ((go instanceof ContainerStart) || (go instanceof Feature)) {
                serial++;
            }

            if (!visitor.on(go, getPath(path))) {
                break;
            }

            // add to the path after container start is processed
            if (go instanceof ContainerStart) {
                ContainerStart cs = (ContainerStart) go;
                //TODO startTime?
                String i = getSerial(layer, serial);
                path.add(i);
            }
        } catch (Throwable t) {
            System.err.println(t);
            exceptions.incrementAndGet();
            exceptionClass.add(t.getClass());
        }
    } while (true);

    // get list of network links that were retrieved from the step above
    Set<URI> networkLinks = new HashSet<>(reader.getNetworkLinks());
    reader.close();

    if (!networkLinks.isEmpty()) {
        // Now import features from all referenced network links.
        // If network links have nested network links then they will be added to the end
        // of the list and processed one after another. The handleEvent() callback method
        // below will be called with each feature (i.e. Placemark, GroundOverlay, etc.)
        // as it is processed in the target KML resources.
        reader.importFromNetworkLinks(new KmlReader.ImportEventHandler() {
            public boolean handleEvent(UrlRef ref, IGISObject gisObj) {
                // if gisObj instanceOf Feature, GroundOverlay, etc.
                // do something with the gisObj
                // return false to abort the recursive network link parsing
                /*if (visited.contains(ref))
                    return false;*/

                //System.out.println("Loading NetworkLink: " + ref + " " + gisObj);
                String r = ref.toString();

                boolean pathChanged = false;
                // only push the ref if it is not already the last path element;
                // the original condition negated the wrong term and would call
                // getLast() on an empty deque
                if (path.isEmpty() || !path.getLast().equals(r)) {
                    path.add(r);
                    pathChanged = true;
                }
                serial++;

                try {
                    visitor.on(gisObj, getPath(path));
                } catch (Throwable t) {
                    System.err.println(t);
                }

                if (pathChanged) {
                    path.removeLast();
                }

                return true;
            }

            @Override
            public void handleError(URI uri, Exception excptn) {
                exceptions.incrementAndGet();
                exceptionClass.add(excptn.getClass());
            }
        });
    }

    if (exceptions.get() > 0) {
        System.err.println(" Exceptions: " + exceptions + " of " + exceptionClass);
    }

    visitor.end();
}
From source file:org.apache.distributedlog.BKLogHandler.java
protected void readLogSegmentsFromStore(final Versioned<List<String>> logSegmentNames,
        final Comparator<LogSegmentMetadata> comparator, final LogSegmentFilter segmentFilter,
        final CompletableFuture<Versioned<List<LogSegmentMetadata>>> readResult) {
    Set<String> segmentsReceived = new HashSet<String>();
    segmentsReceived.addAll(segmentFilter.filter(logSegmentNames.getValue()));
    Set<String> segmentsAdded;
    final Set<String> removedSegments = Collections.synchronizedSet(new HashSet<String>());
    final Map<String, LogSegmentMetadata> addedSegments = Collections
            .synchronizedMap(new HashMap<String, LogSegmentMetadata>());
    Pair<Set<String>, Set<String>> segmentChanges = logSegmentCache.diff(segmentsReceived);
    segmentsAdded = segmentChanges.getLeft();
    removedSegments.addAll(segmentChanges.getRight());

    if (segmentsAdded.isEmpty()) {
        if (LOG.isTraceEnabled()) {
            LOG.trace("No segments added for {}.", getFullyQualifiedName());
        }

        // update the cache before #getCachedLogSegments to return
        updateLogSegmentCache(removedSegments, addedSegments);

        List<LogSegmentMetadata> segmentList;
        try {
            segmentList = getCachedLogSegments(comparator);
        } catch (UnexpectedException e) {
            readResult.completeExceptionally(e);
            return;
        }

        readResult.complete(
                new Versioned<List<LogSegmentMetadata>>(segmentList, logSegmentNames.getVersion()));
        return;
    }

    final AtomicInteger numChildren = new AtomicInteger(segmentsAdded.size());
    final AtomicInteger numFailures = new AtomicInteger(0);
    for (final String segment : segmentsAdded) {
        String logSegmentPath = logMetadata.getLogSegmentPath(segment);
        LogSegmentMetadata cachedSegment = metadataCache.get(logSegmentPath);
        if (null != cachedSegment) {
            addedSegments.put(segment, cachedSegment);
            completeReadLogSegmentsFromStore(removedSegments, addedSegments, comparator, readResult,
                    logSegmentNames.getVersion(), numChildren, numFailures);
            continue;
        }
        metadataStore.getLogSegment(logSegmentPath)
                .whenComplete(new FutureEventListener<LogSegmentMetadata>() {

                    @Override
                    public void onSuccess(LogSegmentMetadata result) {
                        addedSegments.put(segment, result);
                        complete();
                    }

                    @Override
                    public void onFailure(Throwable cause) {
                        // LogSegmentNotFoundException is possible in two cases:
                        // 1. A log segment was deleted by truncation between the call to getChildren
                        //    and the read attempt on the znode corresponding to the segment
                        // 2. An in-progress segment has been completed => the inprogress znode does not exist
                        if (cause instanceof LogSegmentNotFoundException) {
                            removedSegments.add(segment);
                            complete();
                        } else {
                            // fail fast: only the first failure (incrementAndGet() == 1) completes the result
                            if (1 == numFailures.incrementAndGet()) {
                                readResult.completeExceptionally(cause);
                                return;
                            }
                        }
                    }

                    private void complete() {
                        completeReadLogSegmentsFromStore(removedSegments, addedSegments, comparator,
                                readResult, logSegmentNames.getVersion(), numChildren, numFailures);
                    }
                });
    }
}
From source file:org.dasein.cloud.azurepack.tests.compute.AzurePackVirtualMachineSupportTest.java
@Test(expected = InternalException.class)
public void lauchShouldThrowExceptionIfLaunchFromVHDWithDefaultProduct()
        throws CloudException, InternalException {
    final AtomicInteger postCount = new AtomicInteger(0);
    new StartOrStopVirtualMachinesRequestExecutorMockUp("Start") {
        @Mock
        public void $init(CloudProvider provider, HttpClientBuilder clientBuilder, HttpUriRequest request,
                ResponseHandler handler) {
            String requestUri = request.getURI().toString();
            if (request.getMethod().equals("POST")
                    && requestUri.equals(String.format(LIST_VM_RESOURCES, ENDPOINT, ACCOUNT_NO))) {
                requestResourceType = 21;

                WAPVirtualMachineModel wapVirtualMachineModel = new WAPVirtualMachineModel();
                wapVirtualMachineModel.setName(VM_1_NAME);
                wapVirtualMachineModel.setCloudId(REGION);
                wapVirtualMachineModel.setStampId(DATACENTER_ID);
                wapVirtualMachineModel.setVirtualHardDiskId(VHD_1_ID);
                wapVirtualMachineModel.setHardwareProfileId(HWP_1_ID);

                List<WAPNewAdapterModel> adapters = new ArrayList<>();
                WAPNewAdapterModel newAdapterModel = new WAPNewAdapterModel();
                newAdapterModel.setVmNetworkName(VM_1_NETWORK_NAME);
                adapters.add(newAdapterModel);
                wapVirtualMachineModel.setNewVirtualNetworkAdapterInput(adapters);
            } else {
                super.$init(provider, clientBuilder, request, handler);
            }
            responseHandler = handler;
        }

        @Mock
        public Object execute() {
            if (requestResourceType == 21) {
                postCount.incrementAndGet();
                return mapFromModel(this.responseHandler, createWAPVirtualMachineModel());
            } else {
                return super.execute();
            }
        }
    };

    VMLaunchOptions vmLaunchOptions = VMLaunchOptions.getInstance("default", VHD_1_ID, VM_1_NAME,
            VM_1_DESCRIPTION);
    vmLaunchOptions.inVlan(null, DATACENTER_ID, VM_1_NETWORK_ID);
    azurePackVirtualMachineSupport.launch(vmLaunchOptions);
}