List of usage examples for the `java.util.concurrent.atomic.AtomicReference` no-argument constructor:
public AtomicReference()
From source file:com.microsoft.tfs.core.clients.versioncontrol.internal.WebServiceLayerLocalWorkspaces.java
/**
 * Pends the given change requests. For a local workspace, ADD/EDIT/DELETE/RENAME/PROPERTY
 * requests that carry no lock request, no item attribute change, and no property change
 * beyond the executable/symlink bits are processed entirely offline through
 * {@code LocalDataAccessLayer} inside a {@code LocalWorkspaceTransaction}. Anything else
 * (or an offline attempt that required the server) is sent to the server — repository 5
 * when the service level is at least TFS 2012 QU1, otherwise repository 4 — and for a
 * non-local workspace the call is delegated to {@code super.pendChanges}.
 *
 * Out-parameters: {@code failures} receives any failures; {@code onlineOperation} is set
 * to {@code true} only when the server was actually called; {@code changePendedFlags}
 * receives the server's flags (it stays {@code ChangePendedFlags.NONE} on the offline path).
 */
@Override
public GetOperation[] pendChanges(final String workspaceName, final String ownerName, final ChangeRequest[] changes, final PendChangesOptions pendChangesOptions, final SupportedFeatures supportedFeatures, final AtomicReference<Failure[]> failures, final String[] itemPropertyFilters, final String[] itemAttributeFilters, final boolean updateDisk, final AtomicBoolean onlineOperation, final AtomicReference<ChangePendedFlags> changePendedFlags) {
    onlineOperation.set(false);
    // set this to none for local workspaces, if the call reaches the server
    // the flag will get overwritten
    changePendedFlags.set(ChangePendedFlags.NONE);
    int unlockCount = 0;
    final Workspace localWorkspace = getLocalWorkspace(workspaceName, ownerName);
    if (localWorkspace != null) {
        boolean attributeChange = false;
        boolean nonExecuteSymlinkBitPropertyChange = false;
        if (null != itemAttributeFilters && itemAttributeFilters.length > 0) {
            attributeChange = true;
        }
        // If the property filters are only for the executable bit, we can
        // handle that locally, otherwise we must go to the server.
        if (null != itemPropertyFilters && itemPropertyFilters.length > 0) {
            for (final String filter : itemPropertyFilters) {
                /*
                 * Not using wildcard matching here: just because a wildcard
                 * _does_ match the executable key _doesn't_ mean it
                 * wouldn't match others on the server. So only consider a
                 * direct match against the executable key to keep
                 * processing locally.
                 */
                if (PropertyValue.comparePropertyNames(PropertyConstants.EXECUTABLE_KEY, filter) != 0 && PropertyValue.comparePropertyNames(PropertyConstants.SYMBOLIC_KEY, filter) != 0) {
                    nonExecuteSymlinkBitPropertyChange = true;
                    break;
                }
            }
        }
        RequestType requestType = RequestType.NONE;
        boolean requestingLock = false;
        for (final ChangeRequest changeRequest : changes) {
            if (RequestType.NONE == requestType) {
                requestType = changeRequest.getRequestType();
            } else if (requestType != changeRequest.getRequestType()) {
                // TODO: Move string from server assembly
                throw new VersionControlException("Not all changes had the same request type"); //$NON-NLS-1$
            }
            // If the caller is requesting a lock, then the call is a server
            // call, unless the user is performing an add and the LockLevel
            // is None.
            // Is it possible to have different locklevels on different
            // ChangeRequest objects?
            if (changeRequest.getLockLevel() != LockLevel.UNCHANGED && !(changeRequest.getLockLevel() == LockLevel.NONE && changeRequest.getRequestType() == RequestType.ADD)) {
                requestingLock = true;
            }
            if (changeRequest.getLockLevel() == LockLevel.NONE && changeRequest.getRequestType().equals(RequestType.LOCK)) {
                unlockCount++;
            }
        }
        final boolean silent = pendChangesOptions.contains(PendChangesOptions.SILENT);
        if (!requestingLock && !attributeChange && !nonExecuteSymlinkBitPropertyChange) {
            if (requestType == RequestType.ADD || requestType == RequestType.EDIT || requestType == RequestType.DELETE || requestType == RequestType.RENAME || requestType == RequestType.PROPERTY) {
                final LocalWorkspaceTransaction transaction = new LocalWorkspaceTransaction(localWorkspace);
                try {
                    final AtomicReference<Failure[]> delegateFailures = new AtomicReference<Failure[]>();
                    final AtomicReference<GetOperation[]> toReturn = new AtomicReference<GetOperation[]>();
                    final RequestType transactionRequestType = requestType;
                    transaction.execute(new AllTablesTransaction() {
                        @Override
                        public void invoke(final LocalWorkspaceProperties wp, final WorkspaceVersionTable lv, final LocalPendingChangesTable pc) {
                            if (transactionRequestType == RequestType.ADD) {
                                toReturn.set(LocalDataAccessLayer.pendAdd(localWorkspace, wp, lv, pc, changes, silent, delegateFailures, itemPropertyFilters));
                            } else if (transactionRequestType == RequestType.EDIT) {
                                toReturn.set(LocalDataAccessLayer.pendEdit(localWorkspace, wp, lv, pc, changes, silent, delegateFailures, itemPropertyFilters));
                            } else if (transactionRequestType == RequestType.DELETE) {
                                toReturn.set(LocalDataAccessLayer.pendDelete(localWorkspace, wp, lv, pc, changes, silent, delegateFailures, itemPropertyFilters));
                            } else if (transactionRequestType == RequestType.RENAME) {
                                final AtomicBoolean onlineOperationRequired = new AtomicBoolean(false);
                                toReturn.set(LocalDataAccessLayer.pendRename(localWorkspace, wp, lv, pc, changes, silent, delegateFailures, onlineOperationRequired, itemPropertyFilters));
                                if (onlineOperationRequired.get()) {
                                    toReturn.set(null);
                                    transaction.abort();
                                } else if (updateDisk) {
                                    // we don't want to file a conflict
                                    // while offline, so we check up front.
                                    for (final GetOperation getOp : toReturn.get()) {
                                        if (getOp.getTargetLocalItem() != null && !LocalPath.equals(getOp.getSourceLocalItem(), getOp.getTargetLocalItem()) && new File(getOp.getTargetLocalItem()).exists()) {
                                            throw new VersionControlException(MessageFormat.format(
                                                //@formatter:off
                                                Messages.getString("WebServiceLayerLocalWorkspaces.FileExistsFormat"), //$NON-NLS-1$
                                                //@formatter:on
                                                getOp.getTargetLocalItem()));
                                        }
                                    }
                                }
                            }
                            if (transactionRequestType == RequestType.PROPERTY) {
                                final AtomicBoolean onlineOperationRequired = new AtomicBoolean(false);
                                toReturn.set(LocalDataAccessLayer.pendPropertyChange(localWorkspace, wp, lv, pc, changes, silent, delegateFailures, onlineOperationRequired, itemPropertyFilters));
                                if (onlineOperationRequired.get()) {
                                    toReturn.set(null);
                                    transaction.abort();
                                }
                            }
                        }
                    });
                    if (toReturn.get() != null) {
                        // Offline operation successfully completed.
                        failures.set(delegateFailures.get());
                        return toReturn.get();
                    }
                } finally {
                    try {
                        transaction.close();
                    } catch (final IOException e) {
                        throw new VersionControlException(e);
                    }
                }
            } else if (requestType == RequestType.BRANCH || requestType == RequestType.UNDELETE || requestType == RequestType.LOCK) {
                // Forward to server
            } else {
                // TODO: Remove this when all RequestTypes are supported
                // here.
                throw new VersionControlException("Not currently implemented for local workspaces"); //$NON-NLS-1$
            }
        }
    }
    if (null != localWorkspace) {
        // if we only have requests for unlocking, move on if the reconcile
        // fails this is needed for unlock other
        final Workspace w = reconcileIfLocal(workspaceName, ownerName, false, false, unlockCount == changes.length, null);
        // Lock the workspace which will receive the pending changes
        final WorkspaceLock lock = lockIfLocal(w);
        try {
            final GetOperation[] toReturn;
            try {
                if (getServiceLevel().getValue() >= WebServiceLevel.TFS_2012_QU1.getValue()) {
                    final _Repository5Soap_PendChangesInLocalWorkspaceResponse response = getRepository5().pendChangesInLocalWorkspace(workspaceName, ownerName, (_ChangeRequest[]) WrapperUtils.unwrap(_ChangeRequest.class, changes), pendChangesOptions.toIntFlags(), supportedFeatures.toIntFlags(), itemPropertyFilters, itemAttributeFilters, VersionControlConstants.MAX_SERVER_PATH_SIZE);
                    toReturn = (GetOperation[]) WrapperUtils.wrap(GetOperation.class, response.getPendChangesInLocalWorkspaceResult());
                    failures.set((Failure[]) WrapperUtils.wrap(Failure.class, response.getFailures()));
                    changePendedFlags.set(new ChangePendedFlags(response.getChangePendedFlags()));
                } else {
                    final _Repository4Soap_PendChangesInLocalWorkspaceResponse response = getRepository4().pendChangesInLocalWorkspace(workspaceName, ownerName, (_ChangeRequest[]) WrapperUtils.unwrap(_ChangeRequest.class, changes), pendChangesOptions.toIntFlags(), supportedFeatures.toIntFlags(), itemPropertyFilters, itemAttributeFilters);
                    toReturn = (GetOperation[]) WrapperUtils.wrap(GetOperation.class, response.getPendChangesInLocalWorkspaceResult());
                    failures.set((Failure[]) WrapperUtils.wrap(Failure.class, response.getFailures()));
                    changePendedFlags.set(new ChangePendedFlags(response.getChangePendedFlags()));
                }
            } catch (final ProxyException e) {
                throw VersionControlExceptionMapper.map(e);
            }
            syncWorkingFoldersIfNecessary(w, changePendedFlags.get());
            syncPendingChangesIfLocal(w, toReturn, itemPropertyFilters);
            if (RequestType.ADD == changes[0].getRequestType()) {
                // The client does not process the getops returned from a
                // PendAdd call. Because the server has created local
                // version rows for us, we need to update the local version
                // table to contain these rows too.
                LocalDataAccessLayer.afterAdd(localWorkspace, toReturn);
                // When a pending add is created, the item on disk is not
                // touched; so we need to inform the scanner that the item
                // is invalidated so it is re-scanned. Rather than go
                // through the local paths on which adds were pended, we'll
                // invalidate the workspace. This is not a common code path.
                localWorkspace.getWorkspaceWatcher().markPathChanged(""); //$NON-NLS-1$
            }
            onlineOperation.set(true);
            return toReturn;
        } finally {
            if (lock != null) {
                lock.close();
            }
        }
    } else {
        return super.pendChanges(workspaceName, ownerName, changes, pendChangesOptions, supportedFeatures, failures, itemPropertyFilters, itemAttributeFilters, updateDisk, onlineOperation, changePendedFlags);
    }
}
From source file:android.webkit.cts.WebViewTest.java
private Picture waitForPictureToHaveColor(int color, final TestPictureListener listener) throws Throwable { final int MAX_ON_NEW_PICTURE_ITERATIONS = 5; final AtomicReference<Picture> pictureRef = new AtomicReference<Picture>(); for (int i = 0; i < MAX_ON_NEW_PICTURE_ITERATIONS; i++) { final int oldCallCount = listener.callCount; runTestOnUiThread(new Runnable() { @Override//from w w w . j av a2 s . c o m public void run() { pictureRef.set(mWebView.capturePicture()); } }); if (isPictureFilledWithColor(pictureRef.get(), color)) break; new PollingCheck(TEST_TIMEOUT) { @Override protected boolean check() { return listener.callCount > oldCallCount; } }.run(); } return pictureRef.get(); }
From source file:de.schildbach.pte.AbstractHafasLegacyProvider.java
/**
 * Queries trips via the legacy Hafas "ext XML" interface and parses the ResC
 * response into a {@code QueryTripsResult}. Known Hafas error codes are mapped
 * to {@code QueryTripsResult.Status} values; unknown codes raise
 * {@code IllegalStateException}. The parsed result is handed out of the HTTP
 * callback through an {@code AtomicReference}.
 *
 * @param previousContext pagination context from an earlier query, or null for a fresh query
 * @param later whether to page forward (true) or backward (false) relative to previousContext
 */
private QueryTripsResult queryTripsXml(final Context previousContext, final boolean later, final CharSequence conReq, final Location from, final @Nullable Location via, final Location to) throws IOException {
    final String request = wrapReqC(conReq, null);
    final HttpUrl endpoint = extXmlEndpoint != null ? extXmlEndpoint : queryEndpoint.newBuilder().addPathSegment(apiLanguage).build();
    final AtomicReference<QueryTripsResult> result = new AtomicReference<>();
    httpClient.getInputStream(new HttpClient.Callback() {
        @Override
        public void onSuccessful(final CharSequence bodyPeek, final ResponseBody body) throws IOException {
            try {
                final XmlPullParserFactory factory = XmlPullParserFactory.newInstance(System.getProperty(XmlPullParserFactory.PROPERTY_NAME), null);
                final XmlPullParser pp = factory.newPullParser();
                pp.setInput(body.charStream());
                XmlPullUtil.require(pp, "ResC");
                final String product = XmlPullUtil.attr(pp, "prod").split(" ")[0];
                final ResultHeader header = new ResultHeader(network, SERVER_PRODUCT, product, null, 0, null);
                XmlPullUtil.enter(pp, "ResC");
                if (XmlPullUtil.test(pp, "Err")) {
                    final String code = XmlPullUtil.attr(pp, "code");
                    if (code.equals("I3")) {
                        result.set(new QueryTripsResult(header, QueryTripsResult.Status.INVALID_DATE));
                        return;
                    }
                    if (code.equals("F1")) {
                        result.set(new QueryTripsResult(header, QueryTripsResult.Status.SERVICE_DOWN));
                        return;
                    }
                    throw new IllegalStateException("error " + code + " " + XmlPullUtil.attr(pp, "text"));
                }
                XmlPullUtil.enter(pp, "ConRes");
                if (XmlPullUtil.test(pp, "Err")) {
                    final String code = XmlPullUtil.attr(pp, "code");
                    log.debug("Hafas error: {}", code);
                    if (code.equals("K9260")) {
                        // Unknown departure station
                        result.set(new QueryTripsResult(header, QueryTripsResult.Status.UNKNOWN_FROM));
                        return;
                    }
                    if (code.equals("K9280")) {
                        // Unknown intermediate station
                        result.set(new QueryTripsResult(header, QueryTripsResult.Status.UNKNOWN_VIA));
                        return;
                    }
                    if (code.equals("K9300")) {
                        // Unknown arrival station
                        result.set(new QueryTripsResult(header, QueryTripsResult.Status.UNKNOWN_TO));
                        return;
                    }
                    if (code.equals("K9360")) {
                        // Date outside of the timetable period
                        result.set(new QueryTripsResult(header, QueryTripsResult.Status.INVALID_DATE));
                        return;
                    }
                    if (code.equals("K9380")) {
                        // Dep./Arr./Intermed. or equivalent station defined more than once
                        result.set(new QueryTripsResult(header, QueryTripsResult.Status.TOO_CLOSE));
                        return;
                    }
                    if (code.equals("K895")) {
                        // Departure/Arrival are too near
                        result.set(new QueryTripsResult(header, QueryTripsResult.Status.TOO_CLOSE));
                        return;
                    }
                    if (code.equals("K9220")) {
                        // Nearby to the given address stations could not be found
                        result.set(new QueryTripsResult(header, QueryTripsResult.Status.UNRESOLVABLE_ADDRESS));
                        return;
                    }
                    if (code.equals("K9240")) {
                        // Internal error
                        result.set(new QueryTripsResult(header, QueryTripsResult.Status.SERVICE_DOWN));
                        return;
                    }
                    if (code.equals("K890")) {
                        // No connections found
                        result.set(new QueryTripsResult(header, QueryTripsResult.Status.NO_TRIPS));
                        return;
                    }
                    if (code.equals("K891")) {
                        // No route found (try entering an intermediate station)
                        result.set(new QueryTripsResult(header, QueryTripsResult.Status.NO_TRIPS));
                        return;
                    }
                    if (code.equals("K899")) {
                        // An error occurred
                        result.set(new QueryTripsResult(header, QueryTripsResult.Status.SERVICE_DOWN));
                        return;
                    }
                    if (code.equals("K1:890")) {
                        // Unsuccessful or incomplete search (direction: forward)
                        result.set(new QueryTripsResult(header, QueryTripsResult.Status.NO_TRIPS));
                        return;
                    }
                    if (code.equals("K2:890")) {
                        // Unsuccessful or incomplete search (direction: backward)
                        result.set(new QueryTripsResult(header, QueryTripsResult.Status.NO_TRIPS));
                        return;
                    }
                    throw new IllegalStateException("error " + code + " " + XmlPullUtil.attr(pp, "text"));
                }
                // H9380 Dep./Arr./Intermed. or equivalent stations defined more than once
                // H9360 Error in data field
                // H9320 The input is incorrect or incomplete
                // H9300 Unknown arrival station
                // H9280 Unknown intermediate station
                // H9260 Unknown departure station
                // H9250 Part inquiry interrupted
                // H9240 Unsuccessful search
                // H9230 An internal error occurred
                // H9220 Nearby to the given address stations could not be found
                // H900 Unsuccessful or incomplete search (timetable change)
                // H892 Inquiry too complex (try entering less intermediate stations)
                // H891 No route found (try entering an intermediate station)
                // H890 Unsuccessful search.
                // H500 Because of too many trains the connection is not complete
                // H460 One or more stops are passed through multiple times.
                // H455 Prolonged stop
                // H410 Display may be incomplete due to change of timetable
                // H390 Departure/Arrival replaced by an equivalent station
                // H895 Departure/Arrival are too near
                // H899 Unsuccessful or incomplete search (timetable change)
                final String c = XmlPullUtil.optValueTag(pp, "ConResCtxt", null);
                final Context context;
                if (previousContext == null)
                    context = new Context(c, c, 0);
                else if (later)
                    context = new Context(c, previousContext.earlierContext, previousContext.sequence + 1);
                else
                    context = new Context(previousContext.laterContext, c, previousContext.sequence + 1);
                XmlPullUtil.enter(pp, "ConnectionList");
                final List<Trip> trips = new ArrayList<>();
                while (XmlPullUtil.test(pp, "Connection")) {
                    final String id = context.sequence + "/" + XmlPullUtil.attr(pp, "id");
                    XmlPullUtil.enter(pp, "Connection");
                    while (pp.getName().equals("RtStateList"))
                        XmlPullUtil.next(pp);
                    XmlPullUtil.enter(pp, "Overview");
                    final Calendar currentDate = new GregorianCalendar(timeZone);
                    currentDate.clear();
                    parseDate(currentDate, XmlPullUtil.valueTag(pp, "Date"));
                    XmlPullUtil.enter(pp, "Departure");
                    XmlPullUtil.enter(pp, "BasicStop");
                    while (pp.getName().equals("StAttrList"))
                        XmlPullUtil.next(pp);
                    final Location departureLocation = parseLocation(pp);
                    XmlPullUtil.enter(pp, "Dep");
                    XmlPullUtil.skipExit(pp, "Dep");
                    final int[] capacity;
                    if (XmlPullUtil.test(pp, "StopPrognosis")) {
                        XmlPullUtil.enter(pp, "StopPrognosis");
                        XmlPullUtil.optSkip(pp, "Arr");
                        XmlPullUtil.optSkip(pp, "Dep");
                        XmlPullUtil.enter(pp, "Status");
                        XmlPullUtil.skipExit(pp, "Status");
                        final int capacity1st = Integer.parseInt(XmlPullUtil.optValueTag(pp, "Capacity1st", "0"));
                        final int capacity2nd = Integer.parseInt(XmlPullUtil.optValueTag(pp, "Capacity2nd", "0"));
                        if (capacity1st > 0 || capacity2nd > 0)
                            capacity = new int[] { capacity1st, capacity2nd };
                        else
                            capacity = null;
                        XmlPullUtil.skipExit(pp, "StopPrognosis");
                    } else {
                        capacity = null;
                    }
                    XmlPullUtil.skipExit(pp, "BasicStop");
                    XmlPullUtil.skipExit(pp, "Departure");
                    XmlPullUtil.enter(pp, "Arrival");
                    XmlPullUtil.enter(pp, "BasicStop");
                    while (pp.getName().equals("StAttrList"))
                        XmlPullUtil.next(pp);
                    final Location arrivalLocation = parseLocation(pp);
                    XmlPullUtil.skipExit(pp, "BasicStop");
                    XmlPullUtil.skipExit(pp, "Arrival");
                    final int numTransfers = Integer.parseInt(XmlPullUtil.valueTag(pp, "Transfers"));
                    XmlPullUtil.skipExit(pp, "Overview");
                    final List<Trip.Leg> legs = new ArrayList<>(4);
                    XmlPullUtil.enter(pp, "ConSectionList");
                    final Calendar time = new GregorianCalendar(timeZone);
                    while (XmlPullUtil.test(pp, "ConSection")) {
                        XmlPullUtil.enter(pp, "ConSection");
                        // departure
                        XmlPullUtil.enter(pp, "Departure");
                        XmlPullUtil.enter(pp, "BasicStop");
                        while (pp.getName().equals("StAttrList"))
                            XmlPullUtil.next(pp);
                        final Location sectionDepartureLocation = parseLocation(pp);
                        XmlPullUtil.optSkip(pp, "Arr");
                        XmlPullUtil.enter(pp, "Dep");
                        time.setTimeInMillis(currentDate.getTimeInMillis());
                        parseTime(time, XmlPullUtil.valueTag(pp, "Time"));
                        final Date departureTime = time.getTime();
                        final Position departurePos = parsePlatform(pp);
                        XmlPullUtil.skipExit(pp, "Dep");
                        XmlPullUtil.skipExit(pp, "BasicStop");
                        XmlPullUtil.skipExit(pp, "Departure");
                        // journey
                        final Line line;
                        Location destination = null;
                        List<Stop> intermediateStops = null;
                        final String tag = pp.getName();
                        if (tag.equals("Journey")) {
                            XmlPullUtil.enter(pp, "Journey");
                            while (pp.getName().equals("JHandle"))
                                XmlPullUtil.next(pp);
                            XmlPullUtil.enter(pp, "JourneyAttributeList");
                            boolean wheelchairAccess = false;
                            String name = null;
                            String category = null;
                            String shortCategory = null;
                            while (XmlPullUtil.test(pp, "JourneyAttribute")) {
                                XmlPullUtil.enter(pp, "JourneyAttribute");
                                XmlPullUtil.require(pp, "Attribute");
                                final String attrName = XmlPullUtil.attr(pp, "type");
                                final String code = XmlPullUtil.optAttr(pp, "code", null);
                                XmlPullUtil.enter(pp, "Attribute");
                                final Map<String, String> attributeVariants = parseAttributeVariants(pp);
                                XmlPullUtil.skipExit(pp, "Attribute");
                                XmlPullUtil.skipExit(pp, "JourneyAttribute");
                                if ("bf".equals(code)) {
                                    wheelchairAccess = true;
                                } else if ("NAME".equals(attrName)) {
                                    name = attributeVariants.get("NORMAL");
                                } else if ("CATEGORY".equals(attrName)) {
                                    shortCategory = attributeVariants.get("SHORT");
                                    category = attributeVariants.get("NORMAL");
                                    // longCategory = attributeVariants.get("LONG");
                                } else if ("DIRECTION".equals(attrName)) {
                                    final String[] destinationPlaceAndName = splitStationName(attributeVariants.get("NORMAL"));
                                    destination = new Location(LocationType.ANY, null, destinationPlaceAndName[0], destinationPlaceAndName[1]);
                                }
                            }
                            XmlPullUtil.skipExit(pp, "JourneyAttributeList");
                            if (XmlPullUtil.test(pp, "PassList")) {
                                intermediateStops = new LinkedList<>();
                                XmlPullUtil.enter(pp, "PassList");
                                while (XmlPullUtil.test(pp, "BasicStop")) {
                                    XmlPullUtil.enter(pp, "BasicStop");
                                    while (XmlPullUtil.test(pp, "StAttrList"))
                                        XmlPullUtil.next(pp);
                                    final Location location = parseLocation(pp);
                                    // NOTE(review): this is a reference (!=) comparison of the two ids,
                                    // not equals() — confirm the ids are identity-comparable here.
                                    if (location.id != sectionDepartureLocation.id) {
                                        Date stopArrivalTime = null;
                                        Date stopDepartureTime = null;
                                        Position stopArrivalPosition = null;
                                        Position stopDeparturePosition = null;
                                        if (XmlPullUtil.test(pp, "Arr")) {
                                            XmlPullUtil.enter(pp, "Arr");
                                            time.setTimeInMillis(currentDate.getTimeInMillis());
                                            parseTime(time, XmlPullUtil.valueTag(pp, "Time"));
                                            stopArrivalTime = time.getTime();
                                            stopArrivalPosition = parsePlatform(pp);
                                            XmlPullUtil.skipExit(pp, "Arr");
                                        }
                                        if (XmlPullUtil.test(pp, "Dep")) {
                                            XmlPullUtil.enter(pp, "Dep");
                                            time.setTimeInMillis(currentDate.getTimeInMillis());
                                            parseTime(time, XmlPullUtil.valueTag(pp, "Time"));
                                            stopDepartureTime = time.getTime();
                                            stopDeparturePosition = parsePlatform(pp);
                                            XmlPullUtil.skipExit(pp, "Dep");
                                        }
                                        intermediateStops.add(new Stop(location, stopArrivalTime, stopArrivalPosition, stopDepartureTime, stopDeparturePosition));
                                    }
                                    XmlPullUtil.skipExit(pp, "BasicStop");
                                }
                                XmlPullUtil.skipExit(pp, "PassList");
                            }
                            XmlPullUtil.skipExit(pp, "Journey");
                            if (category == null)
                                category = shortCategory;
                            line = parseLine(category, name, wheelchairAccess);
                        } else if (tag.equals("Walk") || tag.equals("Transfer") || tag.equals("GisRoute")) {
                            XmlPullUtil.enter(pp);
                            XmlPullUtil.enter(pp, "Duration");
                            XmlPullUtil.skipExit(pp, "Duration");
                            XmlPullUtil.skipExit(pp);
                            line = null;
                        } else {
                            throw new IllegalStateException("cannot handle: " + pp.getName());
                        }
                        // polyline
                        final List<Point> path;
                        if (XmlPullUtil.test(pp, "Polyline")) {
                            path = new LinkedList<>();
                            XmlPullUtil.enter(pp, "Polyline");
                            while (XmlPullUtil.test(pp, "Point")) {
                                final int x = XmlPullUtil.intAttr(pp, "x");
                                final int y = XmlPullUtil.intAttr(pp, "y");
                                path.add(new Point(y, x));
                                XmlPullUtil.next(pp);
                            }
                            XmlPullUtil.skipExit(pp, "Polyline");
                        } else {
                            path = null;
                        }
                        // arrival
                        XmlPullUtil.enter(pp, "Arrival");
                        XmlPullUtil.enter(pp, "BasicStop");
                        while (pp.getName().equals("StAttrList"))
                            XmlPullUtil.next(pp);
                        final Location sectionArrivalLocation = parseLocation(pp);
                        XmlPullUtil.enter(pp, "Arr");
                        time.setTimeInMillis(currentDate.getTimeInMillis());
                        parseTime(time, XmlPullUtil.valueTag(pp, "Time"));
                        final Date arrivalTime = time.getTime();
                        final Position arrivalPos = parsePlatform(pp);
                        XmlPullUtil.skipExit(pp, "Arr");
                        XmlPullUtil.skipExit(pp, "BasicStop");
                        XmlPullUtil.skipExit(pp, "Arrival");
                        // remove last intermediate
                        if (intermediateStops != null)
                            if (!intermediateStops.isEmpty())
                                if (!intermediateStops.get(intermediateStops.size() - 1).location.equals(sectionArrivalLocation))
                                    intermediateStops.remove(intermediateStops.size() - 1);
                        XmlPullUtil.skipExit(pp, "ConSection");
                        if (line != null) {
                            final Stop departure = new Stop(sectionDepartureLocation, true, departureTime, null, departurePos, null);
                            final Stop arrival = new Stop(sectionArrivalLocation, false, arrivalTime, null, arrivalPos, null);
                            legs.add(new Trip.Public(line, destination, departure, arrival, intermediateStops, path, null));
                        } else {
                            if (legs.size() > 0 && legs.get(legs.size() - 1) instanceof Trip.Individual) {
                                final Trip.Individual lastIndividualLeg = (Trip.Individual) legs.remove(legs.size() - 1);
                                legs.add(new Trip.Individual(Trip.Individual.Type.WALK, lastIndividualLeg.departure, lastIndividualLeg.departureTime, sectionArrivalLocation, arrivalTime, null, 0));
                            } else {
                                legs.add(new Trip.Individual(Trip.Individual.Type.WALK, sectionDepartureLocation, departureTime, sectionArrivalLocation, arrivalTime, null, 0));
                            }
                        }
                    }
                    XmlPullUtil.skipExit(pp, "ConSectionList");
                    XmlPullUtil.skipExit(pp, "Connection");
                    trips.add(new Trip(id, departureLocation, arrivalLocation, legs, null, capacity, numTransfers));
                }
                XmlPullUtil.skipExit(pp, "ConnectionList");
                result.set(new QueryTripsResult(header, null, from, via, to, context, trips));
            } catch (final XmlPullParserException x) {
                throw new ParserException("cannot parse xml: " + bodyPeek, x);
            }
        }
    }, endpoint, request, "application/xml", null);
    return result.get();
}
From source file:de.schildbach.pte.AbstractEfaProvider.java
private NearbyLocationsResult nearbyStationsRequest(final String stationId, final int maxLocations) throws IOException { final HttpUrl.Builder url = departureMonitorEndpoint.newBuilder(); appendCommonRequestParams(url, "XML"); url.addEncodedQueryParameter("type_dm", "stop"); url.addEncodedQueryParameter("name_dm", ParserUtils.urlEncode(normalizeStationId(stationId), requestUrlEncoding)); url.addEncodedQueryParameter("itOptionsActive", "1"); url.addEncodedQueryParameter("ptOptionsActive", "1"); if (useProxFootSearch) url.addEncodedQueryParameter("useProxFootSearch", "1"); url.addEncodedQueryParameter("mergeDep", "1"); url.addEncodedQueryParameter("useAllStops", "1"); url.addEncodedQueryParameter("mode", "direct"); final AtomicReference<NearbyLocationsResult> result = new AtomicReference<>(); final HttpClient.Callback callback = new HttpClient.Callback() { @Override/*from w w w .jav a 2 s . c o m*/ public void onSuccessful(final CharSequence bodyPeek, final ResponseBody body) throws IOException { try { final XmlPullParser pp = parserFactory.newPullParser(); pp.setInput(body.byteStream(), null); // Read encoding from XML declaration final ResultHeader header = enterItdRequest(pp); XmlPullUtil.enter(pp, "itdDepartureMonitorRequest"); final AtomicReference<Location> ownStation = new AtomicReference<>(); final List<Location> stations = new ArrayList<>(); final String nameState = processItdOdv(pp, "dm", new ProcessItdOdvCallback() { @Override public void location(final String nameState, final Location location, final int matchQuality) { if (location.type == LocationType.STATION) { if ("identified".equals(nameState)) ownStation.set(location); else if ("assigned".equals(nameState)) stations.add(location); } else { throw new IllegalStateException("cannot handle: " + location.type); } } }); if ("notidentified".equals(nameState)) { result.set(new NearbyLocationsResult(header, NearbyLocationsResult.Status.INVALID_ID)); return; } if (ownStation.get() != null && 
!stations.contains(ownStation)) stations.add(ownStation.get()); if (maxLocations == 0 || maxLocations >= stations.size()) result.set(new NearbyLocationsResult(header, stations)); else result.set(new NearbyLocationsResult(header, stations.subList(0, maxLocations))); } catch (final XmlPullParserException x) { throw new ParserException("cannot parse xml: " + bodyPeek, x); } } }; if (httpPost) httpClient.getInputStream(callback, url.build(), url.build().encodedQuery(), "application/x-www-form-urlencoded", httpReferer); else httpClient.getInputStream(callback, url.build(), httpReferer); return result.get(); }
From source file:blusunrize.immersiveengineering.api.ApiUtils.java
public static Connection raytraceWires(World world, Vec3d start, Vec3d end, @Nullable Connection ignored) { Map<BlockPos, ImmersiveNetHandler.BlockWireInfo> inDim = ImmersiveNetHandler.INSTANCE.blockWireMap .lookup(world.provider.getDimension()); AtomicReference<Connection> ret = new AtomicReference<>(); AtomicDouble minDistSq = new AtomicDouble(Double.POSITIVE_INFINITY); if (inDim != null) { Utils.rayTrace(start, end, world, (pos) -> { if (inDim.containsKey(pos)) { ImmersiveNetHandler.BlockWireInfo info = inDim.get(pos); for (int i = 0; i < 2; i++) { Set<Triple<Connection, Vec3d, Vec3d>> conns = i == 0 ? info.in : info.near; for (Triple<Connection, Vec3d, Vec3d> conn : conns) { Connection c = conn.getLeft(); if (ignored == null || !c.hasSameConnectors(ignored)) { Vec3d startRelative = start.add(-pos.getX(), -pos.getY(), -pos.getZ()); Vec3d across = conn.getRight().subtract(conn.getMiddle()); double t = Utils.getCoeffForMinDistance(startRelative, conn.getMiddle(), across); t = MathHelper.clamp(0, t, 1); Vec3d closest = conn.getMiddle().add(t * across.x, t * across.y, t * across.z); double distSq = closest.squareDistanceTo(startRelative); if (distSq < minDistSq.get()) { ret.set(c); minDistSq.set(distSq); }// ww w .j a v a 2 s . c om } } } } }); } return ret.get(); }
From source file:io.pravega.segmentstore.server.writer.SegmentAggregatorTests.java
/**
 * Tests the flush() method with Append and StreamSegmentSealOperations when there are Storage errors.
 */
@Test
public void testSealWithStorageErrors() throws Exception {
    // Add some appends and seal, and then flush together. Verify that everything got flushed in one go.
    final int appendCount = 1000;
    final WriterConfig config = WriterConfig.builder()
            .with(WriterConfig.FLUSH_THRESHOLD_BYTES, appendCount * 50) // Extra high length threshold.
            .with(WriterConfig.FLUSH_THRESHOLD_MILLIS, 1000L)
            .with(WriterConfig.MAX_FLUSH_SIZE_BYTES, 10000)
            .with(WriterConfig.MIN_READ_TIMEOUT_MILLIS, 10L)
            .build();
    @Cleanup
    TestContext context = new TestContext(config);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT, executorService()).join();
    @Cleanup
    ByteArrayOutputStream writtenData = new ByteArrayOutputStream();
    // Part 1: flush triggered by accumulated size.
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length (not bothering with flushing here; testFlushSeal() covers that).
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
    }
    // Generate and add a Seal Operation.
    StorageOperation sealOp = generateSealAndUpdateMetadata(SEGMENT_ID, context);
    context.segmentAggregator.add(sealOp);
    // Have the writes fail every few attempts with a well known exception.
    AtomicBoolean generateSyncException = new AtomicBoolean(true);
    AtomicBoolean generateAsyncException = new AtomicBoolean(true);
    AtomicReference<IntentionalException> setException = new AtomicReference<>();
    Supplier<Exception> exceptionSupplier = () -> {
        IntentionalException ex = new IntentionalException(Long.toString(context.timer.getElapsedMillis()));
        setException.set(ex);
        return ex;
    };
    context.storage.setSealSyncErrorInjector(new ErrorInjector<>(count -> generateSyncException.getAndSet(false), exceptionSupplier));
    context.storage.setSealAsyncErrorInjector(new ErrorInjector<>(count -> generateAsyncException.getAndSet(false), exceptionSupplier));
    // Call flush and verify that the entire Aggregator got flushed and the Seal got persisted to Storage.
    int attemptCount = 4;
    for (int i = 0; i < attemptCount; i++) {
        // Repeat a number of times, at least once should work.
        setException.set(null);
        try {
            FlushResult flushResult = context.segmentAggregator.flush(TIMEOUT, executorService()).join();
            Assert.assertNull("An exception was expected, but none was thrown.", setException.get());
            Assert.assertNotNull("No FlushResult provided.", flushResult);
        } catch (Exception ex) {
            if (setException.get() != null) {
                Assert.assertEquals("Unexpected exception thrown.", setException.get(), ExceptionHelpers.getRealException(ex));
            } else {
                // Not expecting any exception this time.
                throw ex;
            }
        }
        if (!generateAsyncException.get() && !generateSyncException.get() && setException.get() == null) {
            // We are done. We got at least one through.
            break;
        }
    }
    // Verify data.
    byte[] expectedData = writtenData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    SegmentProperties storageInfo = context.storage.getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length, storageInfo.getLength());
    Assert.assertTrue("Segment is not sealed in storage post flush.", storageInfo.isSealed());
    Assert.assertTrue("Segment is not marked in metadata as sealed in storage post flush.", context.segmentAggregator.getMetadata().isSealedInStorage());
    context.storage.read(readHandle(context.segmentAggregator.getMetadata().getName()), 0, actualData, 0, actualData.length, TIMEOUT).join();
    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}
From source file:com.microsoft.tfs.core.clients.versioncontrol.Workstation.java
/**
 * Adds a workspace to the Workstation cache. Persisting the updated cache to
 * disk is left to the caller. Any conflicting-workspace information produced
 * by the insert is routed to onNonFatalError().
 */
public WorkspaceInfo insertWorkspaceIntoCache(final Workspace localWorkspace) {
    final AtomicReference<InternalWorkspaceConflictInfo[]> conflicts =
            new AtomicReference<InternalWorkspaceConflictInfo[]>();
    final WorkspaceInfo inserted = getCache().insertWorkspace(localWorkspace, conflicts);
    onNonFatalError(conflicts.get());
    return inserted;
}
From source file:com.microsoft.tfs.core.clients.versioncontrol.Workstation.java
/**
 * Saves the workspace cache to disk if it has unsaved modifications; does
 * nothing when the cache is disabled. Conflicting-workspace information
 * reported by the save is forwarded to onNonFatalError().
 */
public void saveConfigIfDirty() {
    if (!isCacheEnabled()) {
        return;
    }
    final AtomicReference<InternalWorkspaceConflictInfo[]> conflicts =
            new AtomicReference<InternalWorkspaceConflictInfo[]>();
    InternalCacheLoader.saveConfigIfDirty(getCache(), conflicts, cacheMutex, workspaceCacheFile);
    onNonFatalError(conflicts.get());
}
From source file:com.microsoft.tfs.core.clients.versioncontrol.Workstation.java
/** * Internal method to get the {@link InternalCache}. *//*from w ww. java 2 s . c o m*/ public InternalCache getCache() { boolean cacheReloaded = false; InternalCache ret; final AtomicReference<InternalWorkspaceConflictInfo[]> outConflictingWorkspaces = new AtomicReference<InternalWorkspaceConflictInfo[]>(); synchronized (cacheMutex) { cacheReloaded = ensureCacheLoaded(outConflictingWorkspaces); ret = workspaceCache; } if (cacheReloaded) { onNonFatalError(outConflictingWorkspaces.get()); // Let the listeners (Client objects) know. onCacheFileReloaded(); } return ret; }
From source file:com.microsoft.tfs.core.clients.versioncontrol.Workstation.java
/** * Call Workstation.Current.ReloadCache() to force a reload of the cache * file from disk.//from w ww . ja v a 2 s.c o m */ public void reloadCache() { boolean cacheReloaded = false; final AtomicReference<InternalWorkspaceConflictInfo[]> outConflictingWorkspaces = new AtomicReference<InternalWorkspaceConflictInfo[]>(); synchronized (cacheMutex) { if (cacheEnabled) { // Force a reload of the cache file before raising the // notification. cacheFileChanged = true; cacheReloaded = ensureCacheLoaded(outConflictingWorkspaces); } } if (cacheReloaded) { onNonFatalError(outConflictingWorkspaces.get()); // Let the listeners (Client objects) know. onCacheFileReloaded(); } }