List of usage examples for java.util.concurrent.atomic.AtomicBoolean: the AtomicBoolean() constructor
public AtomicBoolean()
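Before the project examples below, a minimal sketch of what the no-argument constructor gives you: the initial value is false, and the common operations are get, set, getAndSet, and the atomic compareAndSet.

import java.util.concurrent.atomic.AtomicBoolean;

public class AtomicBooleanBasics {
    public static void main(String[] args) {
        AtomicBoolean flag = new AtomicBoolean(); // no-arg constructor: initial value is false

        System.out.println(flag.get());                      // false
        System.out.println(flag.compareAndSet(false, true)); // true: flipped atomically
        System.out.println(flag.compareAndSet(false, true)); // false: already true
        System.out.println(flag.getAndSet(false));           // true, and resets the flag to false
    }
}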
From source file:com.microsoft.tfs.core.clients.versioncontrol.localworkspace.LocalWorkspaceScanner.java
private void scanPartTwo() {
    // The items in this set may be folders or files. They have local
    // version rows, but no local item on disk. We will mark the local
    // version row as MissingOnDisk. The row will persist until the
    // reconcile before the next Get. If it is still MissingOnDisk then we
    // will remove the row and reconcile that delete to the server so that
    // the item will come back.
    for (final WorkspaceLocalItem lvEntry : markForRemoval) {
        // If the item is in the exclusion list for this scanner, do not
        // process the item.
        if (skippedItems.contains(lvEntry.getLocalItem())) {
            continue;
        }

        final LocalPendingChange pcEntry = pc.getByLocalVersion(lvEntry);

        // Pending adds do not have their local version row removed or
        // marked as MissingOnDisk.
        if (null == pcEntry || !pcEntry.isAdd()) {
            if (!lvEntry.isMissingOnDisk()) {
                lv.removeByServerItem(lvEntry.getServerItem(), lvEntry.isCommitted(), false);
                lvEntry.setMissingOnDisk(true);
                lv.add(lvEntry);
            }

            final String targetServerItem = pc.getTargetServerItemForLocalVersion(lvEntry);

            if (lvEntry.isCommitted()
                    && null == pcEntry
                    && null != pc.getByTargetServerItem(targetServerItem)) {
                // If we don't have a pending change against the item, but
                // have a pending change against the target (i.e. add, branch,
                // or rename), we don't want to mark this as a candidate: there
                // is no way to actually pend the delete without moving the
                // item out of the way. We will let check-in take care of it
                // with a namespace conflict.
                continue;
            }

            final LocalPendingChange newChange =
                    new LocalPendingChange(lvEntry, targetServerItem, ChangeType.DELETE);
            newChange.setCandidate(true);
            addCandidateChange(newChange);
        }
    }

    // The items in this set may be folders or files. They have local
    // version rows, and were previously marked as MissingOnDisk. However,
    // their local item has reappeared. We will remove the MissingOnDisk
    // bit.
    for (final WorkspaceLocalItem lvEntry : reappearedOnDisk) {
        // If the item is in the exclusion list for this scanner, do not
        // process the item.
        if (skippedItems.contains(lvEntry.getLocalItem())) {
            continue;
        }

        lv.removeByServerItem(lvEntry.getServerItem(), lvEntry.isCommitted(), false);
        lvEntry.setMissingOnDisk(false);
        lv.add(lvEntry);
    }

    // The items in this set are all files. They were identified as having
    // identical content as the workspace version, but a different
    // timestamp. We will update the local version table to contain the new
    // timestamp. Additionally, if the item has a pending edit, we will
    // selective-undo the edit.
    for (final KeyValuePair<String, Long> undoOp : toUndo) {
        final String localItem = undoOp.getKey();
        final long onDiskModifiedTime = undoOp.getValue();

        if (skippedItems.contains(localItem)) {
            continue;
        }

        final WorkspaceLocalItem lvEntry = lv.getByLocalItem(localItem);

        // Bring the last-modified time on the item forward to match the
        // latest scan.
        if (lvEntry.getLastModifiedTime() != onDiskModifiedTime) {
            lvEntry.setLastModifiedTime(onDiskModifiedTime);
            lv.setDirty(true);
        }

        final LocalPendingChange pcEntry = pc.getByLocalVersion(lvEntry);

        // If the item has a pending edit, undo the pending edit, because
        // the content is identical to the workspace version. The only
        // uncommitted server items which can have their pending change
        // undone are those with changetype "branch, edit".
        if (null != pcEntry
                && pcEntry.isEdit()
                && !pcEntry.isAdd()
                && (!pcEntry.isEncoding() || pcEntry.getEncoding() == lvEntry.getEncoding())) {
            final AtomicReference<Failure[]> outFailures = new AtomicReference<Failure[]>();
            final AtomicBoolean outOnlineOperationRequired = new AtomicBoolean();

            final List<LocalPendingChange> pcEntries = new ArrayList<LocalPendingChange>(1);
            pcEntries.add(pcEntry);

            // The GetOperations returned are not processed.
            final GetOperation[] getOps = LocalDataAccessLayer.undoPendingChanges(
                    LocalWorkspaceTransaction.getCurrent().getWorkspace(), wp, lv, pc, pcEntries,
                    ChangeType.EDIT, outFailures, outOnlineOperationRequired);

            // No renames or locks are being undone.
            Check.isTrue(!outOnlineOperationRequired.get() && (1 == getOps.length),
                    "!outOnlineOperationRequired.get() && (1 == getOps.length)"); //$NON-NLS-1$

            // Since we've modified the pending changes table in a silent
            // way, we want to set the flag on the transaction we're a part
            // of that indicates the PendingChangesChanged event should be
            // raised for this workspace, once the transaction completes.
            LocalWorkspaceTransaction.getCurrent().setRaisePendingChangesChanged(true);
        }
    }
}
From source file:io.vertx.config.git.GitConfigStoreTest.java
@Test
public void testConfigurationUpdateWithMergeIssue_Edit(TestContext tc) throws IOException, GitAPIException {
    add(git, root, new File("src/test/resources/files/a.json"), "dir");
    push(git);

    retriever = ConfigRetriever.create(vertx,
            new ConfigRetrieverOptions().setScanPeriod(1000).addStore(new ConfigStoreOptions().setType("git")
                    .setConfig(new JsonObject().put("url", bareRoot.getAbsolutePath())
                            .put("path", "target/junk/work").put("filesets",
                                    new JsonArray().add(new JsonObject().put("pattern", "dir/*.json"))))));

    AtomicBoolean done = new AtomicBoolean();
    retriever.getConfig(ar -> {
        assertThat(ar.succeeded()).isTrue();
        assertThat(ar.result().getString("a.name")).isEqualTo("A");
        done.set(true);
    });
    await().untilAtomic(done, is(true));

    // Edit the file in the work dir
    File a = new File("target/junk/work/dir/a.json");
    assertThat(a).isFile();
    FileUtils.write(a, new JsonObject().put("a.name", "A-modified").put("conflict", "A").encodePrettily(),
            StandardCharsets.UTF_8);

    done.set(false);
    retriever.getConfig(ar -> {
        assertThat(ar.succeeded()).isTrue();
        assertThat(ar.result().getString("a.name")).isEqualTo("A-modified");
        done.set(true);
    });
    await().untilAtomic(done, is(true));

    updateA();

    Async async = tc.async();
    retriever.getConfig(ar -> {
        assertThat(ar.succeeded()).isFalse();
        assertThat(ar.cause().getMessage()).containsIgnoringCase("conflict");
        async.complete();
    });
}
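A recurring pattern in the Vert.x config tests in this list is a fresh AtomicBoolean() serving as a completion flag for an asynchronous callback, polled with Awaitility's untilAtomic instead of sleeping. A minimal self-contained sketch of the pattern (the executor and the simulated work are illustrative, not from the test):

import static org.awaitility.Awaitility.await;
import static org.hamcrest.Matchers.is;

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicBoolean;

public class CompletionFlagSketch {
    public static void main(String[] args) {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        AtomicBoolean done = new AtomicBoolean(); // starts false

        executor.submit(() -> {
            // ... asynchronous work would happen here ...
            done.set(true); // signal completion; the write is visible to the polling thread
        });

        // Poll until the flag flips rather than sleeping for a fixed time.
        await().untilAtomic(done, is(true));
        executor.shutdown();
    }
}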
From source file:com.datamelt.nifi.processors.ExecuteRuleEngine.java
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    // map used to store the attribute name and its value from the content of the flow file
    final Map<String, String> propertyMap = new HashMap<>();

    // get a logger instance
    final ComponentLog logger = getLogger();

    // a header from the content, if present
    final AtomicReference<HeaderRow> header = new AtomicReference<>();

    AtomicBoolean error = new AtomicBoolean();

    // get the flow file
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    // list of rows from splitting the original flow file content
    ArrayList<RuleEngineRow> flowFileRows = new ArrayList<RuleEngineRow>();

    // list of rows containing the detailed results of the ruleengine
    ArrayList<RuleEngineRow> flowFileDetails = new ArrayList<RuleEngineRow>();

    boolean headerPresent = context.getProperty(ATTRIBUTE_HEADER_PRESENT).getValue().equals("true");

    // put the name of the ruleengine zip file in the list of properties
    propertyMap.put(PROPERTY_RULEENGINE_ZIPFILE_NAME,
            context.getProperty(ATTRIBUTE_RULEENGINE_ZIPFILE).getValue());

    final int batchSize = Integer.parseInt(context.getProperty(BATCH_SIZE_NAME).getValue());

    // read flow file into input stream
    session.read(flowFile, new InputStreamCallback() {
        public void process(InputStream in) throws IOException {
            try {
                // iterator over the lines from the input stream
                LineIterator iterator = IOUtils.lineIterator(in, "utf-8");

                // check if configuration indicates that a header row is present in the flow file content
                if (headerPresent) {
                    logger.debug("configuration indicates a header row is present in flow file content");

                    // if there is at least one row of data and the header is not defined yet
                    if (iterator.hasNext() && header.get() == null) {
                        // set the header from the content
                        header.set(new HeaderRow(iterator.nextLine(), separator));
                    }
                }
                // if no header row is present in the flow file content
                else {
                    logger.debug("configuration indicates no header row is present in flow file content");

                    // use the header from the field names
                    header.set(headerFromFieldNames);
                }

                // loop over all rows of data
                while (iterator.hasNext()) {
                    // we handle the error per row of data
                    error.set(false);

                    // get a row to process
                    String row = iterator.nextLine();

                    // check that we have data
                    if (row != null && !row.trim().equals("")) {
                        RowFieldCollection rowFieldCollection = null;
                        try {
                            rowFieldCollection = getRowFieldCollection(row, header.get());

                            logger.debug("RowFieldCollection header contains: "
                                    + rowFieldCollection.getHeader().getNumberOfFields() + " fields");
                            logger.debug("RowFieldCollection contains: "
                                    + rowFieldCollection.getNumberOfFields() + " fields");

                            // run the ruleengine with the given data from the flow file
                            logger.debug("running business ruleengine...");

                            // run the business logic/rules against the data
                            ruleEngine.run("flowfile", rowFieldCollection);

                            // add some debugging output that might be useful
                            logger.debug("number of rulegroups: " + ruleEngine.getNumberOfGroups());
                            logger.debug("number of rulegroups passed: " + ruleEngine.getNumberOfGroupsPassed());
                            logger.debug("number of rulegroups failed: " + ruleEngine.getNumberOfGroupsFailed());
                            logger.debug("number of rulegroups skipped: " + ruleEngine.getNumberOfGroupsSkipped());
                            logger.debug("number of rules: " + ruleEngine.getNumberOfRules());
                            logger.debug("number of rules passed: " + ruleEngine.getNumberOfRulesPassed());
                            logger.debug("number of rules failed: " + ruleEngine.getNumberOfRulesFailed());
                            logger.debug("number of actions: " + ruleEngine.getNumberOfActions());

                            // add some properties of the ruleengine execution to the map
                            addRuleEngineProperties(propertyMap);
                        } catch (Exception ex) {
                            error.set(true);
                            logger.error(ex.getMessage(), ex);
                        }

                        // if no error occurred, we save the data for the creation of the flow files
                        if (!error.get()) {
                            // process only if the collection of fields was changed by
                            // a ruleengine action. this means the data was updated so
                            // we will have to re-write/re-create the flow file content.
                            if (rowFieldCollection.isCollectionUpdated()) {
                                // put an indicator that the data was modified by the ruleengine
                                propertyMap.put(PROPERTY_RULEENGINE_CONTENT_MODIFIED, "true");

                                logger.debug("data was modified - updating flow file content with ruleengine results");

                                // the RuleEngineRow instance will contain the row of data and the map of properties
                                // and will later be used when the flow files are created
                                flowFileRows.add(new RuleEngineRow(getResultRow(rowFieldCollection), propertyMap));
                            } else {
                                // put an indicator that the data was NOT modified by the ruleengine
                                propertyMap.put(PROPERTY_RULEENGINE_CONTENT_MODIFIED, "false");

                                logger.debug("data was not modified - using original content");

                                // the RuleEngineRow instance will contain the row of data and the map of properties
                                // and will later be used when the flow files are created
                                flowFileRows.add(new RuleEngineRow(row, propertyMap));
                            }

                            if (flowFileRows.size() >= batchSize) {
                                // generate flow files from the individual rows
                                List<FlowFile> splitFlowFiles = generateFlowFileSplits(context, session,
                                        flowFileRows, header.get(), headerPresent);

                                // transfer all individual rows to the success relationship
                                if (splitFlowFiles.size() > 0) {
                                    session.transfer(splitFlowFiles, SUCCESS);
                                }
                            }

                            // if the user configured detailed results
                            if (context.getProperty(ATTRIBUTE_OUTPUT_DETAILED_RESULTS).getValue()
                                    .equals("true")) {
                                // get the configured output type
                                String outputType = context.getProperty(ATTRIBUTE_OUTPUT_DETAILED_RESULTS_TYPE)
                                        .getValue();
                                logger.debug("configuration set to output detailed results with type ["
                                        + outputType + "]");

                                // we need to create a flow file only if the ruleengine results match the output type settings
                                if (outputType.equals(OUTPUT_TYPE_ALL_GROUPS_ALL_RULES)
                                        || (outputType.equals(OUTPUT_TYPE_FAILED_GROUPS_ALL_RULES)
                                                && ruleEngine.getNumberOfGroupsFailed() > 0)
                                        || (outputType.equals(OUTPUT_TYPE_FAILED_GROUPS_FAILED_RULES)
                                                && ruleEngine.getNumberOfGroupsFailed() > 0)
                                        || (outputType.equals(OUTPUT_TYPE_FAILED_GROUPS_PASSED_RULES)
                                                && ruleEngine.getNumberOfGroupsFailed() > 0)
                                        || (outputType.equals(OUTPUT_TYPE_PASSED_GROUPS_ALL_RULES)
                                                && ruleEngine.getNumberOfGroupsPassed() > 0)
                                        || (outputType.equals(OUTPUT_TYPE_PASSED_GROUPS_FAILED_RULES)
                                                && ruleEngine.getNumberOfGroupsPassed() > 0
                                                || (outputType.equals(OUTPUT_TYPE_PASSED_GROUPS_PASSED_RULES)
                                                        && ruleEngine.getNumberOfGroupsPassed() > 0))) {
                                    // create the content for the flow file
                                    String content = getFlowFileRuleEngineDetailsContent(header.get(),
                                            headerPresent, outputType, row);

                                    // add results to the list
                                    flowFileDetails.add(new RuleEngineRow(content, propertyMap));

                                    if (flowFileDetails.size() >= batchSize) {
                                        List<FlowFile> detailsFlowFiles = generateFlowFilesRuleEngineDetails(
                                                context, session, flowFileDetails, header.get(), headerPresent);

                                        // transfer all detailed results to the detailed results relationship
                                        if (detailsFlowFiles.size() > 0) {
                                            session.transfer(detailsFlowFiles, DETAILED_RESULTS);
                                        }
                                    }
                                }
                            }

                            // clear the collections of ruleengine results
                            ruleEngine.getRuleExecutionCollection().clear();
                        }
                        // if we have an error, we create a flow file from the current row of data and send it to the failure relationship
                        else {
                            FlowFile failureFlowFile = generateFailureFlowFile(context, session, row,
                                    header.get(), headerPresent);
                            session.transfer(failureFlowFile, FAILURE);
                        }
                    }
                }

                LineIterator.closeQuietly(iterator);
            } catch (Exception ex) {
                ex.printStackTrace();
                logger.error("error running the business ruleengine", ex);
            }
        }
    });

    // generate flow files from the remaining individual rows
    List<FlowFile> splitFlowFiles = generateFlowFileSplits(context, session, flowFileRows, header.get(),
            headerPresent);

    // generate flow files from the remaining detailed ruleengine results
    List<FlowFile> detailsFlowFiles = generateFlowFilesRuleEngineDetails(context, session, flowFileDetails,
            header.get(), headerPresent);

    // transfer the original flow file
    session.transfer(flowFile, ORIGINAL);

    // transfer all individual rows to the success relationship
    if (splitFlowFiles.size() > 0) {
        session.transfer(splitFlowFiles, SUCCESS);
    }

    // transfer all detailed results to the detailed results relationship
    if (detailsFlowFiles.size() > 0) {
        session.transfer(detailsFlowFiles, DETAILED_RESULTS);
    }
}
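One reason the processor above holds its per-row error state in an AtomicBoolean is likely that the row loop runs inside an anonymous InputStreamCallback: locals captured by an inner class must be effectively final, so a plain boolean could not be reassigned there, while a final AtomicBoolean holder can be mutated freely. A minimal sketch of that capture idiom (the row data and failure condition are illustrative):

import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;

public class CapturedFlagSketch {
    public static void main(String[] args) {
        // A plain boolean local could not be reassigned inside the lambda;
        // the final AtomicBoolean holder can be mutated through its methods.
        final AtomicBoolean error = new AtomicBoolean();

        List.of("a", "bad", "c").forEach(row -> {
            error.set(false); // handle the error per row of data
            try {
                if (row.equals("bad")) {
                    throw new IllegalArgumentException(row);
                }
            } catch (Exception ex) {
                error.set(true);
            }
            System.out.println(row + " -> error=" + error.get());
        });
    }
}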
From source file:io.vertx.config.git.GitConfigStoreTest.java
@Test
public void testUsingAnExistingRepo() throws IOException, GitAPIException {
    git.close();
    root = new File("target/junk/work");
    git = connect(bareRoot, root);
    add(git, root, new File("src/test/resources/files/a.json"), "dir");
    push(git);

    retriever = ConfigRetriever.create(vertx,
            new ConfigRetrieverOptions().setScanPeriod(1000).addStore(new ConfigStoreOptions().setType("git")
                    .setConfig(new JsonObject().put("url", bareRoot.getAbsolutePath())
                            .put("path", "target/junk/work").put("filesets",
                                    new JsonArray().add(new JsonObject().put("pattern", "dir/*.json"))))));

    AtomicBoolean done = new AtomicBoolean();
    retriever.getConfig(ar -> {
        assertThat(ar.succeeded()).isTrue();
        assertThat(ar.result().getString("a.name")).isEqualTo("A");
        done.set(true);
    });
    await().untilAtomic(done, is(true));

    updateA();

    await().until(() -> "A2".equals(retriever.getCachedConfig().getString("a.name"))
            && "B".equalsIgnoreCase(retriever.getCachedConfig().getString("b.name")));
}
From source file:io.vertx.config.git.GitConfigStoreTest.java
@Test
public void testWithExistingRepoOnTheWrongBranch() throws Exception {
    git.close();
    root = new File("target/junk/work");
    git = connect(bareRoot, root);
    add(git, root, new File("src/test/resources/files/a.json"), "dir");
    push(git);

    branch = "dev";
    add(git, root, new File("src/test/resources/files/b.json"), "dir");
    push(git);

    retriever = ConfigRetriever.create(vertx,
            new ConfigRetrieverOptions().setScanPeriod(1000).addStore(new ConfigStoreOptions().setType("git")
                    .setConfig(new JsonObject().put("url", bareRoot.getAbsolutePath())
                            .put("path", "target/junk/work").put("filesets",
                                    new JsonArray().add(new JsonObject().put("pattern", "dir/*.json"))))));

    AtomicBoolean done = new AtomicBoolean();
    retriever.getConfig(ar -> {
        assertThat(ar.succeeded()).isTrue();
        assertThat(ar.result().getString("a.name")).isEqualTo("A");
        done.set(true);
    });
    await().untilAtomic(done, is(true));

    updateA();

    await().until(() -> "A2".equals(retriever.getCachedConfig().getString("a.name"))
            && "B".equalsIgnoreCase(retriever.getCachedConfig().getString("b.name")));
}
From source file:com.inqool.dcap.office.indexer.indexer.SolrBulkIndexer.java
protected SolrInputDocument modelToSolrInputDoc(ZdoModel model) {
    logger.debug("Constructing new SolrInputDocument...");
    final Map<String, SolrInputField> fields = new HashMap<>();

    // Add all Dublin Core terms
    for (String property : DCTools.getDcTermList()) {
        SolrInputField field = new SolrInputField(property);
        List<String> values = model.getAll(new PropertyImpl("http://purl.org/dc/terms/" + property));
        if (values.isEmpty())
            continue;

        // Skip fields that were not ticked to be published
        String visible = model.get(new PropertyImpl("http://purl.org/dc/terms/" + property + "_visibility"));
        if ("false".equals(visible) || "0".equals(visible)) { // 0 should not occur any more
            continue;
        }

        if ("isPartOf".equals(property)) { // remove ip address from isPartOf
            values.set(0, store.getOnlyIdFromUrl(values.get(0)));
        }
        if ("".equals(values.get(0))) {
            values.set(0, "unknown");
        }
        field.addValue(values, INDEX_TIME_BOOST);
        fields.put(property, field);

        // Suggester data
        if ("title".equals(property) || "creator".equals(property)) {
            SolrInputDocument suggesterDoc = new SolrInputDocument();
            String suggestVal = values.get(0).trim();
            if (!suggestVal.isEmpty() && !suggestVal.equals("unknown")) {
                suggesterDoc.addField("suggesterData", values.get(0).trim());
                dataForSuggester.add(suggesterDoc);
            }
        }
    }

    // Add system fields
    SolrInputField field = new SolrInputField("id");
    field.addValue(store.getOnlyIdFromUrl(model.getUrl()), INDEX_TIME_BOOST);
    fields.put("id", field);

    addSolrFieldFromFedoraProperty("inventoryId", ZdoTerms.inventoryId, model, fields);
    addSolrFieldFromFedoraProperty("zdoType", ZdoTerms.zdoType, model, fields);
    addSolrFieldFromFedoraProperty("zdoGroup", ZdoTerms.group, model, fields);
    addSolrFieldFromFedoraProperty("orgIdmId", ZdoTerms.organization, model, fields);
    addSolrFieldFromFedoraProperty("allowContentPublicly", ZdoTerms.allowContentPublicly, model, fields);
    addSolrFieldFromFedoraProperty("allowPdfExport", ZdoTerms.allowPdfExport, model, fields);
    addSolrFieldFromFedoraProperty("allowEpubExport", ZdoTerms.allowEpubExport, model, fields);
    addSolrFieldFromFedoraProperty("watermark", ZdoTerms.watermark, model, fields);
    addSolrFieldFromFedoraProperty("watermarkPosition", ZdoTerms.watermarkPosition, model, fields);
    addSolrFieldFromFedoraProperty("imgThumb", ZdoTerms.imgThumb, model, fields);
    addSolrFieldFromFedoraProperty("imgNormal", ZdoTerms.imgNormal, model, fields);

    String publishFromStr = model.get(ZdoTerms.publishFrom);
    if (publishFromStr != null) {
        String publishFromUtc = ZonedDateTime
                .ofInstant(Instant.ofEpochSecond(Long.valueOf(publishFromStr)), ZoneId.systemDefault())
                .withZoneSameInstant(ZoneOffset.UTC).format(DateTimeFormatter.ISO_OFFSET_DATE_TIME);
        addSolrField("publishFrom", publishFromUtc, fields);
    }
    String publishToStr = model.get(ZdoTerms.publishTo);
    if (publishToStr != null) {
        String publishToUtc = ZonedDateTime
                .ofInstant(Instant.ofEpochSecond(Long.valueOf(publishToStr)), ZoneId.systemDefault())
                .withZoneSameInstant(ZoneOffset.UTC).format(DateTimeFormatter.ISO_OFFSET_DATE_TIME);
        addSolrField("publishTo", publishToUtc, fields);
    }

    String created = model.get(DCTerms.created);
    if (created != null) {
        AtomicInteger yearStart = new AtomicInteger();
        AtomicInteger yearEnd = new AtomicInteger();
        AtomicBoolean startValid = new AtomicBoolean();
        AtomicBoolean endValid = new AtomicBoolean();
        YearNormalizer.normalizeCreatedYear(created, yearStart, startValid, yearEnd, endValid);
        if (startValid.get()) {
            addSolrField("yearStart", yearStart.get(), fields);
        } else {
            logger.warn("Year could not be normalized for input string " + created);
        }
        if (endValid.get()) {
            addSolrField("yearEnd", yearEnd.get(), fields);
        }
    }

    String orgName = orgNameMapping.get(model.get(ZdoTerms.organization));
    if (orgName == null) {
        orgName = "Neznámá"; // Czech for "unknown"
    }
    addSolrField("organization", orgName, fields);

    String documentTypeId = model.get(ZdoTerms.documentType); // type and subtype names must be found for id
    String documentSubTypeId = model.get(ZdoTerms.documentSubType);
    if (documentTypeId != null) {
        addSolrField("documentType", documentTypeAccess.getTypeNameForId(Integer.valueOf(documentTypeId)),
                fields);
    }
    if (documentSubTypeId != null) {
        addSolrField("documentSubType",
                documentTypeAccess.getSubTypeNameForId(Integer.valueOf(documentSubTypeId)), fields);
    }

    // Add customFields
    int fieldIndex = 0; // we actually start from 1
    do {
        fieldIndex++;
        String fieldName = model
                .get(new PropertyImpl("http://inqool.cz/zdo/1.0/customField_" + fieldIndex + "_name"));
        if (fieldName == null)
            break;
        fieldName = "customField_" + fieldName;
        String visible = model
                .get(new PropertyImpl("http://inqool.cz/zdo/1.0/customField_" + fieldIndex + "_visibility"));
        if ("false".equals(visible) || "0".equals(visible))
            continue;
        List<String> fieldValues = model
                .getAll(new PropertyImpl("http://inqool.cz/zdo/1.0/customField_" + fieldIndex));
        if ("".equals(fieldValues.get(0))) {
            fieldValues.set(0, "unknown");
        }
        SolrInputField customField = new SolrInputField(fieldName);
        customField.addValue(fieldValues, INDEX_TIME_BOOST);
        fields.put(fieldName, customField);
    } while (true);

    SolrInputDocument solrInputDocument = new SolrInputDocument(fields);
    return solrInputDocument;
}
From source file:com.google.dart.java2dart.SyntaxTranslator.java
@Override
public boolean visit(org.eclipse.jdt.core.dom.ClassInstanceCreation node) {
    IMethodBinding binding = node.resolveConstructorBinding();
    String signature = JavaUtils.getJdtSignature(binding);
    TypeName typeNameNode = (TypeName) translate(node.getType());
    final List<Expression> arguments = translateArguments(binding, node.arguments());
    final ClassDeclaration innerClass;
    {
        AnonymousClassDeclaration anoDeclaration = node.getAnonymousClassDeclaration();
        if (anoDeclaration != null) {
            ITypeBinding superclass = anoDeclaration.resolveBinding().getSuperclass();
            signature = superclass.getKey() + StringUtils.substringAfter(signature, ";");
            String name = typeNameNode.getName().getName().replace('.', '_');
            name = name + "_" + context.generateTechnicalAnonymousClassIndex();
            innerClass = declareInnerClass(binding, anoDeclaration, name, ArrayUtils.EMPTY_STRING_ARRAY);
            typeNameNode = typeName(name);
            // prepare enclosing type
            final ITypeBinding enclosingTypeBinding = getEnclosingTypeBinding(node);
            final SimpleIdentifier enclosingTypeRef;
            final AtomicBoolean addEnclosingTypeRef = new AtomicBoolean();
            {
                if (enclosingTypeBinding != null) {
                    enclosingTypeRef = identifier(enclosingTypeBinding.getName() + "_this");
                    // add enclosing class references
                    innerClass.accept(new RecursiveASTVisitor<Void>() {
                        @Override
                        public Void visitMethodInvocation(MethodInvocation node) {
                            if (node.getTarget() == null) {
                                IMethodBinding methodBinding = (IMethodBinding) context.getNodeBinding(node);
                                if (methodBinding != null
                                        && methodBinding.getDeclaringClass() == enclosingTypeBinding) {
                                    addEnclosingTypeRef.set(true);
                                    node.setTarget(enclosingTypeRef);
                                }
                            }
                            return super.visitMethodInvocation(node);
                        }

                        @Override
                        public Void visitSimpleIdentifier(SimpleIdentifier node) {
                            if (!(node.getParent() instanceof PropertyAccess)
                                    && !(node.getParent() instanceof PrefixedIdentifier)) {
                                Object binding = context.getNodeBinding(node);
                                if (binding instanceof IVariableBinding) {
                                    IVariableBinding variableBinding = (IVariableBinding) binding;
                                    if (variableBinding.isField()
                                            && variableBinding.getDeclaringClass() == enclosingTypeBinding) {
                                        addEnclosingTypeRef.set(true);
                                        replaceNode(node.getParent(), node,
                                                propertyAccess(enclosingTypeRef, node));
                                    }
                                }
                            }
                            return super.visitSimpleIdentifier(node);
                        }
                    });
                } else {
                    enclosingTypeRef = null;
                }
            }
            // declare referenced final variables XXX
            final String finalName = name;
            anoDeclaration.accept(new ASTVisitor() {
                final Set<org.eclipse.jdt.core.dom.IVariableBinding> addedParameters = Sets.newHashSet();
                final List<FormalParameter> constructorParameters = Lists.newArrayList();
                int index;

                @Override
                public void endVisit(AnonymousClassDeclaration node) {
                    if (!constructorParameters.isEmpty()) {
                        // add parameters to the existing "inner" constructor XXX
                        for (ClassMember classMember : innerClass.getMembers()) {
                            if (classMember instanceof ConstructorDeclaration) {
                                ConstructorDeclaration innerConstructor = (ConstructorDeclaration) classMember;
                                innerConstructor.getParameters().getParameters().addAll(constructorParameters);
                                return;
                            }
                        }
                        // create new "inner" constructor
                        innerClass.getMembers().add(index, constructorDeclaration(identifier(finalName), null,
                                formalParameterList(constructorParameters), null));
                    }
                    super.endVisit(node);
                }

                @Override
                public void endVisit(SimpleName node) {
                    IBinding nameBinding = node.resolveBinding();
                    if (nameBinding instanceof org.eclipse.jdt.core.dom.IVariableBinding) {
                        org.eclipse.jdt.core.dom.IVariableBinding variableBinding = (org.eclipse.jdt.core.dom.IVariableBinding) nameBinding;
                        org.eclipse.jdt.core.dom.MethodDeclaration enclosingMethod = getEnclosingMethod(node);
                        if (!variableBinding.isField() && enclosingMethod != null
                                && variableBinding.getDeclaringMethod() != enclosingMethod.resolveBinding()
                                && addedParameters.add(variableBinding)) {
                            TypeName parameterTypeName = translateTypeName(variableBinding.getType());
                            String parameterName = variableBinding.getName();
                            SimpleIdentifier parameterNameNode = identifier(parameterName);
                            innerClass.getMembers().add(index++,
                                    fieldDeclaration(parameterTypeName, variableDeclaration(parameterNameNode)));
                            constructorParameters.add(fieldFormalParameter(null, null, parameterNameNode));
                            arguments.add(parameterNameNode);
                            context.putReference(parameterNameNode, variableBinding, null);
                        }
                    }
                    super.endVisit(node);
                }

                @Override
                public boolean visit(AnonymousClassDeclaration node) {
                    if (addEnclosingTypeRef.get()) {
                        TypeName parameterTypeName = translateTypeName(enclosingTypeBinding);
                        innerClass.getMembers().add(index++, fieldDeclaration(false, Keyword.FINAL,
                                parameterTypeName, variableDeclaration(enclosingTypeRef)));
                        constructorParameters.add(fieldFormalParameter(null, null, enclosingTypeRef));
                        arguments.add(thisExpression());
                    }
                    return super.visit(node);
                }
            });
        } else {
            innerClass = null;
        }
    }
    InstanceCreationExpression creation = instanceCreationExpression(Keyword.NEW, typeNameNode, null, arguments);
    context.putNodeBinding(creation, binding);
    context.putAnonymousDeclaration(creation, innerClass);
    context.getConstructorDescription(binding).instanceCreations.add(creation);
    return done(creation);
}
From source file:com.microsoft.tfs.core.clients.versioncontrol.localworkspace.LocalWorkspaceScanner.java
private void diffItem(final EnumeratedLocalItem fromDisk, final WorkspaceLocalItem lvEntry) {
    if (fromDisk.isDirectory()) {
        if (!lvEntry.isDirectory()) {
            // Item is a directory on disk, but a file in the local version
            // table. Delete the local version row.
            markForRemoval.add(lvEntry);
        } else {
            if (lvEntry.isMissingOnDisk()) {
                reappearedOnDisk.add(lvEntry);
            }
        }
    } else {
        if (lvEntry.isDirectory()) {
            // Item is a file on disk, but a directory in the local version
            // table. Delete the local version row.
            markForRemoval.add(lvEntry);
        } else {
            if (lvEntry.isMissingOnDisk()) {
                reappearedOnDisk.add(lvEntry);
            }

            boolean pendEdit = false;
            boolean symlink = false;

            if (lvEntry.isSymbolicLink() || fromDisk.isSymbolicLink()) {
                symlink = true;
            }

            if (-1 == lvEntry.getLength() || 0 == lvEntry.getHashValue().length) {
                // The local version row does not contain the data we need
                // to compare against.
                pendEdit = false;
            } else if (lvEntry.getLength() != fromDisk.getFileSize() && !symlink) {
                // File size has changed. This is a pending edit.
                pendEdit = true;
            } else {
                final long onDiskModifiedTime = fromDisk.getLastWriteTime();

                // 1. check content if modify time changed for normal file
                // 2. check whether link target has changed for symlink
                if (symlink || lvEntry.getLastModifiedTime() != onDiskModifiedTime) {
                    // Last modified date has changed. Hash the file to see
                    // if it has changed.
                    pendEdit = true;

                    // If MD5 is a banned algorithm then the array will come
                    // back zero-length
                    byte[] onDiskHash = new byte[0];
                    try {
                        onDiskHash = CheckinEngine.computeMD5Hash(lvEntry.getLocalItem(), null);
                    } catch (final CoreCancelException e) {
                        // Won't happen because we passed a null TaskMonitor
                    }

                    if (onDiskHash.length > 0 && Arrays.equals(onDiskHash, lvEntry.getHashValue())) {
                        pendEdit = false;

                        // We will update the local version row to reflect
                        // the new last-modified time. Additionally, if the
                        // item has a pending edit, we will selective-undo
                        // that pending edit.
                        toUndo.add(new KeyValuePair<String, Long>(lvEntry.getLocalItem(), onDiskModifiedTime));
                    }
                }
            }

            if (pendEdit && !skippedItems.contains(lvEntry.getLocalItem())) {
                final LocalPendingChange pcEntry = pc.getByLocalVersion(lvEntry);

                if (null == pcEntry || !pcEntry.isEdit()) {
                    final ChangeRequest changeRequest = new ChangeRequest(
                            new ItemSpec(lvEntry.getLocalItem(), RecursionType.NONE),
                            LatestVersionSpec.INSTANCE, RequestType.EDIT, ItemType.FILE,
                            VersionControlConstants.ENCODING_UNCHANGED, LockLevel.UNCHANGED, 0, null, false);

                    final AtomicReference<Failure[]> outDummy = new AtomicReference<Failure[]>();
                    final ChangeRequest[] changeRequests = new ChangeRequest[1];
                    changeRequests[0] = changeRequest;

                    LocalDataAccessLayer.pendEdit(LocalWorkspaceTransaction.getCurrent().getWorkspace(), wp,
                            lv, pc, changeRequests, true, outDummy, null);

                    // Since we've modified the pending changes table in a
                    // silent way, we want to set the flag on the
                    // transaction we're a part of that indicates the
                    // PendingChangesChanged event should be raised for this
                    // workspace, once the transaction completes.
                    LocalWorkspaceTransaction.getCurrent().setRaisePendingChangesChanged(true);
                }
            }

            /*
             * TEE-specific code to detect Unix symbolic links and execute
             * bit.
             */
            if (Platform.isCurrentPlatform(Platform.GENERIC_UNIX)
                    && LocalWorkspaceTransaction.getCurrent().getWorkspace().getClient().getServiceLevel()
                            .getValue() >= WebServiceLevel.TFS_2012.getValue()) {
                PropertyValue pendProperty = null;

                if (PlatformMiscUtils.getInstance()
                        .getEnvironmentVariable(EnvironmentVariables.DISABLE_SYMBOLIC_LINK_PROP) == null) {
                    final boolean isSymlink = PropertyConstants.IS_SYMLINK.equals(PropertyUtils
                            .selectMatching(lvEntry.getPropertyValues(), PropertyConstants.SYMBOLIC_KEY));

                    if (isSymlink != fromDisk.isSymbolicLink()) {
                        pendProperty = fromDisk.isSymbolicLink() ? PropertyConstants.IS_SYMLINK
                                : PropertyConstants.NOT_SYMLINK;
                    }

                    if (pendProperty != null && !skippedItems.contains(lvEntry.getLocalItem())) {
                        final ChangeRequest changeRequest = new ChangeRequest(
                                new ItemSpec(lvEntry.getLocalItem(), RecursionType.NONE),
                                LatestVersionSpec.INSTANCE, RequestType.PROPERTY, ItemType.FILE,
                                VersionControlConstants.ENCODING_UNCHANGED, LockLevel.UNCHANGED, 0, null,
                                false);
                        changeRequest.setProperties(new PropertyValue[] { pendProperty });

                        final AtomicBoolean outOnlineOperationRequired = new AtomicBoolean();
                        final AtomicReference<Failure[]> outDummy = new AtomicReference<Failure[]>();
                        final ChangeRequest[] changeRequests = new ChangeRequest[1];
                        changeRequests[0] = changeRequest;

                        // Include property filters so any listeners have them
                        LocalDataAccessLayer.pendPropertyChange(
                                LocalWorkspaceTransaction.getCurrent().getWorkspace(), wp, lv, pc,
                                changeRequests, true, outDummy, outOnlineOperationRequired,
                                new String[] { PropertyConstants.SYMBOLIC_KEY });

                        LocalWorkspaceTransaction.getCurrent().setRaisePendingChangesChanged(true);
                    }
                }

                if (pendProperty == null && PlatformMiscUtils.getInstance()
                        .getEnvironmentVariable(EnvironmentVariables.DISABLE_DETECT_EXECUTABLE_PROP) == null) {
                    final boolean lvExecutable = PropertyConstants.EXECUTABLE_ENABLED_VALUE.equals(PropertyUtils
                            .selectMatching(lvEntry.getPropertyValues(), PropertyConstants.EXECUTABLE_KEY));

                    if (lvExecutable != fromDisk.isExecutable()) {
                        pendProperty = fromDisk.isExecutable() ? PropertyConstants.EXECUTABLE_ENABLED_VALUE
                                : PropertyConstants.EXECUTABLE_DISABLED_VALUE;
                    }

                    if (pendProperty != null && !skippedItems.contains(lvEntry.getLocalItem())) {
                        final ChangeRequest changeRequest = new ChangeRequest(
                                new ItemSpec(lvEntry.getLocalItem(), RecursionType.NONE),
                                LatestVersionSpec.INSTANCE, RequestType.PROPERTY, ItemType.FILE,
                                VersionControlConstants.ENCODING_UNCHANGED, LockLevel.UNCHANGED, 0, null,
                                false);
                        changeRequest.setProperties(new PropertyValue[] { pendProperty });

                        final AtomicBoolean outOnlineOperationRequired = new AtomicBoolean();
                        final AtomicReference<Failure[]> outDummy = new AtomicReference<Failure[]>();
                        final ChangeRequest[] changeRequests = new ChangeRequest[1];
                        changeRequests[0] = changeRequest;

                        // Include property filters so any listeners have them
                        LocalDataAccessLayer.pendPropertyChange(
                                LocalWorkspaceTransaction.getCurrent().getWorkspace(), wp, lv, pc,
                                changeRequests, true, outDummy, outOnlineOperationRequired,
                                new String[] { PropertyConstants.EXECUTABLE_KEY });

                        /*
                         * Since we've modified the pending changes table in
                         * a silent way, we want to set the flag on the
                         * transaction we're a part of that indicates the
                         * PendingChangesChanged event should be raised for
                         * this workspace, once the transaction completes.
                         */
                        LocalWorkspaceTransaction.getCurrent().setRaisePendingChangesChanged(true);
                    }
                }
            }
        }
    }
}
From source file:io.atomix.protocols.gossip.map.AntiEntropyMapDelegate.java
/**
 * Requests all updates from each peer in the provided list of peers.
 * <p>
 * The returned future will be completed once at least one peer bootstraps this map or bootstrap requests to
 * all peers fail.
 *
 * @param peers the list of peers from which to request updates
 * @return a future to be completed once updates have been received from at least one peer
 */
private CompletableFuture<Void> requestBootstrapFromPeers(List<MemberId> peers) {
    if (peers.isEmpty()) {
        return CompletableFuture.completedFuture(null);
    }

    CompletableFuture<Void> future = new CompletableFuture<>();
    final int totalPeers = peers.size();
    AtomicBoolean successful = new AtomicBoolean();
    AtomicInteger totalCount = new AtomicInteger();
    AtomicReference<Throwable> lastError = new AtomicReference<>();

    // Iterate through all of the peers and send a bootstrap request. On the first peer that returns
    // a successful bootstrap response, complete the future. Otherwise, if no peers respond with any
    // successful bootstrap response, the future will be completed with the last exception.
    for (MemberId peer : peers) {
        requestBootstrapFromPeer(peer).whenComplete((result, error) -> {
            if (error == null) {
                if (successful.compareAndSet(false, true)) {
                    future.complete(null);
                } else if (totalCount.incrementAndGet() == totalPeers) {
                    Throwable e = lastError.get();
                    if (e != null) {
                        future.completeExceptionally(e);
                    }
                }
            } else {
                if (!successful.get() && totalCount.incrementAndGet() == totalPeers) {
                    future.completeExceptionally(error);
                } else {
                    lastError.set(error);
                }
            }
        });
    }
    return future;
}
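The heart of the method above is the first-winner idiom: successful.compareAndSet(false, true) succeeds for exactly one of the racing callbacks, so the future is completed exactly once. A minimal sketch of that idiom in isolation (the racing tasks are illustrative):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicBoolean;

public class FirstWinnerSketch {
    public static void main(String[] args) {
        CompletableFuture<String> first = new CompletableFuture<>();
        AtomicBoolean won = new AtomicBoolean(); // false until someone wins

        // Several racing tasks; compareAndSet lets only one complete the future.
        for (int i = 0; i < 4; i++) {
            final int id = i;
            CompletableFuture.runAsync(() -> {
                if (won.compareAndSet(false, true)) {
                    first.complete("task-" + id);
                }
            });
        }

        System.out.println("winner: " + first.join());
    }
}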
From source file:com.microsoft.tfs.core.clients.versioncontrol.internal.WebServiceLayerLocalWorkspaces.java
@Override
public GetOperation[][] get(final String workspaceName, final String ownerName, final GetRequest[] requests,
        final int maxResults, final GetOptions options, final String[] itemAttributeFilters,
        final String[] itemPropertyFilters, final boolean noGet) {
    final Workspace localWorkspace = getLocalWorkspace(workspaceName, ownerName);

    if (localWorkspace != null) {
        // The change #5491 has been rolled back as not related to the bug
        // it tried to fix:
        // Bug 6191: When using local workspaces, get latest does not get a
        // file that has been deleted from disk.
        boolean reconcileMissingLocalItems = false;

        for (final GetRequest getRequest : requests) {
            if (null == getRequest) {
                continue;
            }

            final VersionSpec versionSpec = getRequest.getVersionSpec();

            if (null == versionSpec || (versionSpec instanceof WorkspaceVersionSpec
                    && (!Workspace.matchName(localWorkspace.getName(),
                            ((WorkspaceVersionSpec) versionSpec).getName())
                            || !localWorkspace
                                    .ownerNameMatches(((WorkspaceVersionSpec) versionSpec).getOwner())))) {
                reconcileMissingLocalItems = true;
                break;
            }
        }

        final AtomicBoolean reconciled = new AtomicBoolean();
        reconcileIfLocal(workspaceName, ownerName, false /* unscannedReconcile */,
                reconcileMissingLocalItems /* reconcileMissingLocalItems */, false /* skipIfAccessDenied */,
                reconciled);

        /*
         * If the workspace is marked with the SetFileTimeToCheckin flag,
         * then we should take this opportunity to ensure that the file time
         * on items without a pending edit matches the checkin date. This
         * operation is scoped to the provided GetRequest objects.
         */
        if (localWorkspace.getOptions().contains(WorkspaceOptions.SET_FILE_TO_CHECKIN)) {
            LocalDataAccessLayer.snapBackToCheckinDate(localWorkspace, requests);
        }
    }

    final GetOperation[][] result = super.get(workspaceName, ownerName, requests, maxResults, options,
            itemAttributeFilters, itemPropertyFilters, noGet);
    return result;
}