List of usage examples for java.util.concurrent.atomic AtomicBoolean set
public final void set(boolean newValue)
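set(boolean newValue) unconditionally stores newValue with volatile write semantics, so the change is immediately visible to other threads that read the flag. A minimal, self-contained sketch (names are illustrative and not taken from the examples below):

import java.util.concurrent.atomic.AtomicBoolean;

public class SetExample {
    public static void main(String[] args) throws InterruptedException {
        // Shared flag; the worker thread polls it and stops once it is set.
        final AtomicBoolean shutdownRequested = new AtomicBoolean(false);

        Thread worker = new Thread(() -> {
            while (!shutdownRequested.get()) {
                // ... do a unit of work ...
            }
            System.out.println("worker stopped");
        });
        worker.start();

        Thread.sleep(100);
        shutdownRequested.set(true); // publish the stop request to the worker thread
        worker.join();
    }
}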
From source file:com.microsoft.alm.plugin.idea.tfvc.ui.checkout.TfvcCheckoutModel.java
@Override
public void doCheckout(final Project project, final CheckoutProvider.Listener listener,
        final ServerContext context, final VirtualFile destinationParent, final String directoryName,
        final String parentDirectory, final boolean isAdvancedChecked) {
    final String workspaceName = directoryName;
    final String teamProjectName = getRepositoryName(context);
    final String localPath = Path.combine(parentDirectory, directoryName);
    final AtomicBoolean checkoutResult = new AtomicBoolean();

    (new Task.Backgroundable(project, TfPluginBundle.message(TfPluginBundle.KEY_CHECKOUT_TFVC_CREATING_WORKSPACE),
            true, PerformInBackgroundOption.DEAF) {
        public void run(@NotNull final ProgressIndicator indicator) {
            IdeaHelper.setProgress(indicator, 0.10,
                    TfPluginBundle.message(TfPluginBundle.KEY_CHECKOUT_TFVC_PROGRESS_CREATING));
            try {
                // Create the workspace with default values
                final CreateWorkspaceCommand command = new CreateWorkspaceCommand(context, workspaceName,
                        TfPluginBundle.message(TfPluginBundle.KEY_CHECKOUT_TFVC_WORKSPACE_COMMENT), null, null);
                command.runSynchronously();
            } catch (final WorkspaceAlreadyExistsException e) {
                logger.warn("Error creating workspace: "
                        + LocalizationServiceImpl.getInstance().getExceptionMessage(e));
                // TODO: allow user to change name in the flow instead of starting over
                IdeaHelper.runOnUIThread(new Runnable() {
                    @Override
                    public void run() {
                        Messages.showErrorDialog(project,
                                LocalizationServiceImpl.getInstance().getExceptionMessage(e),
                                TfPluginBundle.message(TfPluginBundle.KEY_CHECKOUT_TFVC_FAILED_TITLE));
                    }
                });
                // returning since the workspace failed to create so we can't proceed with the next steps
                return;
            }

            IdeaHelper.setProgress(indicator, 0.20,
                    TfPluginBundle.message(TfPluginBundle.KEY_CHECKOUT_TFVC_PROGRESS_ADD_ROOT));
            // Map the project root to the local folder
            final String serverPath = VcsHelper.TFVC_ROOT + teamProjectName;
            final UpdateWorkspaceMappingCommand mappingCommand = new UpdateWorkspaceMappingCommand(context,
                    workspaceName, new Workspace.Mapping(serverPath, localPath, false), false);
            mappingCommand.runSynchronously();

            IdeaHelper.setProgress(indicator, 0.30,
                    TfPluginBundle.message(TfPluginBundle.KEY_CHECKOUT_TFVC_PROGRESS_CREATE_FOLDER));
            // Ensure that the local folder exists
            final File file = new File(localPath);
            if (!file.mkdirs()) {
                //TODO should we throw here?
            }

            // if advanced is set, then sync just some of the files (those that we need for IntelliJ)
            // Otherwise, sync all the files for the team project
            if (!isAdvancedChecked) {
                IdeaHelper.setProgress(indicator, 0.50,
                        TfPluginBundle.message(TfPluginBundle.KEY_CHECKOUT_TFVC_PROGRESS_SYNC));
                // Sync all files recursively
                CommandUtils.syncWorkspace(context, localPath);
            }

            IdeaHelper.setProgress(indicator, 1.00, "", true);

            // No exception means that it was successful
            checkoutResult.set(true);
        }

        public void onSuccess() {
            if (checkoutResult.get()) {
                // Check the isAdvanced flag
                if (isAdvancedChecked) {
                    // The user wants to edit the workspace before syncing...
                    final RepositoryContext repositoryContext = RepositoryContext.createTfvcContext(localPath,
                            workspaceName, teamProjectName, context.getServerUri().toString());
                    final WorkspaceController controller = new WorkspaceController(project, repositoryContext,
                            workspaceName);
                    if (controller.showModalDialog(false)) {
                        // Save and Sync the workspace (this will be backgrounded)
                        controller.saveWorkspace(localPath, true, new Runnable() {
                            @Override
                            public void run() {
                                // Files are all synchronized, so trigger the VCS update
                                UpdateVersionControlSystem(project, parentDirectory, directoryName,
                                        destinationParent, listener);
                            }
                        });
                    }
                } else {
                    // We don't have to wait for the workspace to be updated, so just trigger the VCS update
                    UpdateVersionControlSystem(project, parentDirectory, directoryName, destinationParent,
                            listener);
                }
            }
        }
    }).queue();
}
From source file:org.apache.hadoop.hbase.wal.TestWALSplit.java
/**
 * Simulates splitting a WAL out from under a regionserver that is still trying to write it.
 * Ensures we do not lose edits.
 * @throws IOException
 * @throws InterruptedException
 */
@Test(timeout = 300000)
public void testLogCannotBeWrittenOnceParsed() throws IOException, InterruptedException {
    final AtomicLong counter = new AtomicLong(0);
    AtomicBoolean stop = new AtomicBoolean(false);
    // Region we'll write edits too and then later examine to make sure they all made it in.
    final String region = REGIONS.get(0);
    final int numWriters = 3;
    Thread zombie = new ZombieLastLogWriterRegionServer(counter, stop, region, numWriters);
    try {
        long startCount = counter.get();
        zombie.start();
        // Wait till writer starts going.
        while (startCount == counter.get())
            Threads.sleep(1);
        // Give it a second to write a few appends.
        Threads.sleep(1000);
        final Configuration conf2 = HBaseConfiguration.create(this.conf);
        final User robber = User.createUserForTesting(conf2, ROBBER, GROUP);
        int count = robber.runAs(new PrivilegedExceptionAction<Integer>() {
            @Override
            public Integer run() throws Exception {
                StringBuilder ls = new StringBuilder("Contents of WALDIR (").append(WALDIR).append("):\n");
                for (FileStatus status : fs.listStatus(WALDIR)) {
                    ls.append("\t").append(status.toString()).append("\n");
                }
                LOG.debug(ls);
                LOG.info("Splitting WALs out from under zombie. Expecting " + numWriters + " files.");
                WALSplitter.split(HBASEDIR, WALDIR, OLDLOGDIR, fs, conf2, wals);
                LOG.info("Finished splitting out from under zombie.");
                Path[] logfiles = getLogForRegion(HBASEDIR, TABLE_NAME, region);
                assertEquals("wrong number of split files for region", numWriters, logfiles.length);
                int count = 0;
                for (Path logfile : logfiles) {
                    count += countWAL(logfile);
                }
                return count;
            }
        });
        LOG.info("zombie=" + counter.get() + ", robber=" + count);
        assertTrue("The log file could have at most 1 extra log entry, but can't have less. "
                + "Zombie could write " + counter.get() + " and logfile had only " + count,
                counter.get() == count || counter.get() + 1 == count);
    } finally {
        stop.set(true);
        zombie.interrupt();
        Threads.threadDumpingIsAlive(zombie);
    }
}
From source file:org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.java
/**
 * Create ledger async and schedule a timeout task to check ledger-creation is complete else it fails the callback
 * with TimeoutException.
 *
 * @param bookKeeper
 * @param config
 * @param digestType
 * @param cb
 * @param metadata
 */
protected void asyncCreateLedger(BookKeeper bookKeeper, ManagedLedgerConfig config, DigestType digestType,
        CreateCallback cb, Map<String, byte[]> metadata) {
    AtomicBoolean ledgerCreated = new AtomicBoolean(false);
    Map<String, byte[]> finalMetadata = new HashMap<>();
    finalMetadata.putAll(ledgerMetadata);
    finalMetadata.putAll(metadata);
    if (log.isDebugEnabled()) {
        log.debug("creating ledger, metadata: " + finalMetadata);
    }
    bookKeeper.asyncCreateLedger(config.getEnsembleSize(), config.getWriteQuorumSize(),
            config.getAckQuorumSize(), digestType, config.getPassword(), cb, ledgerCreated, finalMetadata);
    scheduledExecutor.schedule(() -> {
        if (!ledgerCreated.get()) {
            ledgerCreated.set(true);
            cb.createComplete(BKException.Code.TimeoutException, null, null);
        }
    }, config.getMetadataOperationsTimeoutSeconds(), TimeUnit.SECONDS);
}
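The example above shares the AtomicBoolean between the create callback and a timeout task as an "operation already completed" guard. The same first-one-wins pattern is often written with compareAndSet so the check and the update happen atomically; the sketch below is a generic illustration with invented names, not BookKeeper's API:

import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicBoolean;

public class TimeoutGuardExample {
    // Completes the future either from the "operation finished" path or from the
    // timeout path, whichever wins the compareAndSet race; the loser is a no-op.
    static CompletableFuture<String> withTimeout(ScheduledExecutorService scheduler,
                                                 long timeout, TimeUnit unit) {
        CompletableFuture<String> result = new CompletableFuture<>();
        AtomicBoolean completed = new AtomicBoolean(false);

        scheduler.schedule(() -> {
            if (completed.compareAndSet(false, true)) {
                result.completeExceptionally(new TimeoutException("operation timed out"));
            }
        }, timeout, unit);

        // Stands in for the real asynchronous callback, which would arrive from elsewhere.
        scheduler.schedule(() -> {
            if (completed.compareAndSet(false, true)) {
                result.complete("operation finished");
            }
        }, timeout / 2, unit);

        return result;
    }

    public static void main(String[] args) throws Exception {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        System.out.println(withTimeout(scheduler, 2, TimeUnit.SECONDS).get());
        scheduler.shutdown();
    }
}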
From source file:org.apache.hadoop.mapreduce.server.tasktracker.Localizer.java
/**
 * Initialize the local directories for a particular user on this TT. This
 * involves creation and setting permissions of the following directories
 * <ul>
 * <li>$mapred.local.dir/taskTracker/$user</li>
 * <li>$mapred.local.dir/taskTracker/$user/jobcache</li>
 * <li>$mapred.local.dir/taskTracker/$user/distcache</li>
 * </ul>
 *
 * @param user
 * @throws IOException
 */
public void initializeUserDirs(String user) throws IOException {

    if (user == null) {
        // This shouldn't happen in general
        throw new IOException("User is null. Cannot initialized user-directories.");
    }

    AtomicBoolean localizedUser;
    synchronized (localizedUsers) {
        if (!localizedUsers.containsKey(user)) {
            localizedUsers.put(user, new AtomicBoolean(false));
        }
        localizedUser = localizedUsers.get(user);
    }

    synchronized (localizedUser) {

        if (localizedUser.get()) {
            // User-directories are already localized for this user.
            LOG.info("User-directories for the user " + user
                    + " are already initialized on this TT. Not doing anything.");
            return;
        }

        LOG.info("Initializing user " + user + " on this TT.");

        boolean userDirStatus = false;
        boolean jobCacheDirStatus = false;
        boolean distributedCacheDirStatus = false;

        for (String localDir : localDirs) {
            Path userDir = new Path(localDir, TaskTracker.getUserDir(user));

            // Set up the user-directory.
            if (fs.exists(userDir) || fs.mkdirs(userDir)) {

                // Set permissions on the user-directory
                FsPermission userOnly = new FsPermission((short) 0700);
                FileUtil.setPermission(new File(userDir.toUri().getPath()), userOnly);
                userDirStatus = true;

                // Set up the jobcache directory
                File jobCacheDir = new File(localDir, TaskTracker.getJobCacheSubdir(user));
                if (jobCacheDir.exists() || jobCacheDir.mkdirs()) {
                    // Set permissions on the jobcache-directory
                    FileUtil.setPermission(jobCacheDir, userOnly);
                    jobCacheDirStatus = true;
                } else {
                    LOG.warn("Unable to create job cache directory : " + jobCacheDir);
                }

                // Set up the cache directory used for distributed cache files
                File distributedCacheDir = new File(localDir, TaskTracker.getPrivateDistributedCacheDir(user));
                if (distributedCacheDir.exists() || distributedCacheDir.mkdirs()) {
                    // Set permissions on the distcache-directory
                    FileUtil.setPermission(distributedCacheDir, userOnly);
                    distributedCacheDirStatus = true;
                } else {
                    LOG.warn("Unable to create distributed-cache directory : " + distributedCacheDir);
                }
            } else {
                LOG.warn("Unable to create the user directory : " + userDir);
            }
        }

        if (!userDirStatus) {
            throw new IOException("Not able to initialize user directories "
                    + "in any of the configured local directories for user " + user);
        }
        if (!jobCacheDirStatus) {
            throw new IOException("Not able to initialize job-cache directories "
                    + "in any of the configured local directories for user " + user);
        }
        if (!distributedCacheDirStatus) {
            throw new IOException("Not able to initialize distributed-cache directories "
                    + "in any of the configured local directories for user " + user);
        }

        // Localization of the user is done
        localizedUser.set(true);
    }
}
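Here the per-user AtomicBoolean acts as an "initialized" flag that is double-checked under a lock so directory setup runs at most once per user. On newer JDKs the same initialize-once-per-key idea is commonly expressed with ConcurrentHashMap.computeIfAbsent; the sketch below uses invented names and is not Hadoop's code:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicBoolean;

public class PerKeyInitializer {
    private final ConcurrentMap<String, AtomicBoolean> initialized = new ConcurrentHashMap<>();

    public void initializeOnce(String user, Runnable setup) {
        // One flag per user; computeIfAbsent creates it atomically on first use.
        AtomicBoolean flag = initialized.computeIfAbsent(user, u -> new AtomicBoolean(false));
        synchronized (flag) {               // serialize initializers for the same user
            if (flag.get()) {
                return;                     // already initialized, nothing to do
            }
            setup.run();                    // may throw; flag stays false so a retry is possible
            flag.set(true);                 // mark the user as initialized
        }
    }

    public static void main(String[] args) {
        PerKeyInitializer init = new PerKeyInitializer();
        init.initializeOnce("alice", () -> System.out.println("setting up dirs for alice"));
        init.initializeOnce("alice", () -> System.out.println("never runs twice"));
    }
}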
From source file:org.apache.tinkerpop.gremlin.structure.IoTest.java
@Test
@FeatureRequirement(featureClass = Graph.Features.EdgeFeatures.class, feature = Graph.Features.EdgeFeatures.FEATURE_ADD_EDGES)
@FeatureRequirement(featureClass = Graph.Features.VertexFeatures.class, feature = Graph.Features.VertexFeatures.FEATURE_ADD_VERTICES)
@FeatureRequirement(featureClass = EdgePropertyFeatures.class, feature = EdgePropertyFeatures.FEATURE_DOUBLE_VALUES)
public void shouldReadWriteDetachedEdgeAsReferenceToGryo() throws Exception {
    final Vertex v1 = g.addVertex(T.label, "person");
    final Vertex v2 = g.addVertex(T.label, "person");
    final Edge e = DetachedFactory.detach(v1.addEdge("friend", v2, "weight", 0.5d, "acl", "rw"), false);

    try (final ByteArrayOutputStream os = new ByteArrayOutputStream()) {
        final GryoWriter writer = g.io().gryoWriter().create();
        writer.writeEdge(os, e);

        final AtomicBoolean called = new AtomicBoolean(false);
        final GryoReader reader = g.io().gryoReader().workingDirectory(File.separator + "tmp").create();
        try (final ByteArrayInputStream bais = new ByteArrayInputStream(os.toByteArray())) {
            reader.readEdge(bais, detachedEdge -> {
                assertEquals(e.id(), detachedEdge.id());
                assertEquals(v1.id(), detachedEdge.iterators().vertexIterator(Direction.OUT).next().id());
                assertEquals(v2.id(), detachedEdge.iterators().vertexIterator(Direction.IN).next().id());
                assertEquals(v1.label(), detachedEdge.iterators().vertexIterator(Direction.OUT).next().label());
                assertEquals(v2.label(), detachedEdge.iterators().vertexIterator(Direction.IN).next().label());
                assertEquals(e.label(), detachedEdge.label());
                assertEquals(e.keys().size(),
                        StreamFactory.stream(detachedEdge.iterators().propertyIterator()).count());
                called.set(true);
                return null;
            });
        }
        assertTrue(called.get());
    }
}
From source file:org.apache.tinkerpop.gremlin.structure.IoTest.java
@Test
@FeatureRequirement(featureClass = Graph.Features.EdgeFeatures.class, feature = Graph.Features.EdgeFeatures.FEATURE_ADD_EDGES)
@FeatureRequirement(featureClass = Graph.Features.VertexFeatures.class, feature = Graph.Features.VertexFeatures.FEATURE_ADD_VERTICES)
@FeatureRequirement(featureClass = EdgePropertyFeatures.class, feature = EdgePropertyFeatures.FEATURE_DOUBLE_VALUES)
public void shouldReadWriteEdgeToGryo() throws Exception {
    final Vertex v1 = g.addVertex(T.label, "person");
    final Vertex v2 = g.addVertex(T.label, "person");
    final Edge e = v1.addEdge("friend", v2, "weight", 0.5d, "acl", "rw");

    try (final ByteArrayOutputStream os = new ByteArrayOutputStream()) {
        final GryoWriter writer = g.io().gryoWriter().create();
        writer.writeEdge(os, e);

        final AtomicBoolean called = new AtomicBoolean(false);
        final GryoReader reader = g.io().gryoReader().workingDirectory(File.separator + "tmp").create();
        try (final ByteArrayInputStream bais = new ByteArrayInputStream(os.toByteArray())) {
            reader.readEdge(bais, detachedEdge -> {
                assertEquals(e.id(), detachedEdge.id());
                assertEquals(v1.id(), detachedEdge.iterators().vertexIterator(Direction.OUT).next().id());
                assertEquals(v2.id(), detachedEdge.iterators().vertexIterator(Direction.IN).next().id());
                assertEquals(v1.label(), detachedEdge.iterators().vertexIterator(Direction.OUT).next().label());
                assertEquals(v2.label(), detachedEdge.iterators().vertexIterator(Direction.IN).next().label());
                assertEquals(e.label(), detachedEdge.label());
                assertEquals(0.5d, e.iterators().propertyIterator("weight").next().value());
                assertEquals("rw", e.iterators().propertyIterator("acl").next().value());
                called.set(true);
                return null;
            });
        }
        assertTrue(called.get());
    }
}
From source file:org.apache.tinkerpop.gremlin.structure.IoTest.java
@Test
@FeatureRequirement(featureClass = Graph.Features.EdgeFeatures.class, feature = Graph.Features.EdgeFeatures.FEATURE_ADD_EDGES)
@FeatureRequirement(featureClass = Graph.Features.VertexFeatures.class, feature = Graph.Features.VertexFeatures.FEATURE_ADD_VERTICES)
@FeatureRequirement(featureClass = EdgePropertyFeatures.class, feature = EdgePropertyFeatures.FEATURE_DOUBLE_VALUES)
public void shouldReadWriteDetachedEdgeToGryo() throws Exception {
    final Vertex v1 = g.addVertex(T.label, "person");
    final Vertex v2 = g.addVertex(T.label, "person");
    final Edge e = DetachedFactory.detach(v1.addEdge("friend", v2, "weight", 0.5d, "acl", "rw"), true);

    try (final ByteArrayOutputStream os = new ByteArrayOutputStream()) {
        final GryoWriter writer = g.io().gryoWriter().create();
        writer.writeEdge(os, e);

        final AtomicBoolean called = new AtomicBoolean(false);
        final GryoReader reader = g.io().gryoReader().workingDirectory(File.separator + "tmp").create();
        try (final ByteArrayInputStream bais = new ByteArrayInputStream(os.toByteArray())) {
            reader.readEdge(bais, detachedEdge -> {
                assertEquals(e.id(), detachedEdge.id());
                assertEquals(v1.id(), detachedEdge.iterators().vertexIterator(Direction.OUT).next().id());
                assertEquals(v2.id(), detachedEdge.iterators().vertexIterator(Direction.IN).next().id());
                assertEquals(v1.label(), detachedEdge.iterators().vertexIterator(Direction.OUT).next().label());
                assertEquals(v2.label(), detachedEdge.iterators().vertexIterator(Direction.IN).next().label());
                assertEquals(e.label(), detachedEdge.label());
                assertEquals(0.5d, detachedEdge.iterators().propertyIterator("weight").next().value());
                assertEquals("rw", detachedEdge.iterators().propertyIterator("acl").next().value());
                called.set(true);
                return null;
            });
        }
        assertTrue(called.get());
    }
}
From source file:org.apache.tinkerpop.gremlin.structure.IoTest.java
@Test
@FeatureRequirement(featureClass = Graph.Features.EdgeFeatures.class, feature = Graph.Features.EdgeFeatures.FEATURE_ADD_EDGES)
@FeatureRequirement(featureClass = Graph.Features.VertexFeatures.class, feature = Graph.Features.VertexFeatures.FEATURE_ADD_VERTICES)
@FeatureRequirement(featureClass = EdgePropertyFeatures.class, feature = EdgePropertyFeatures.FEATURE_FLOAT_VALUES)
public void shouldReadWriteEdgeToGryoUsingFloatProperty() throws Exception {
    final Vertex v1 = g.addVertex(T.label, "person");
    final Vertex v2 = g.addVertex(T.label, "person");
    final Edge e = v1.addEdge("friend", v2, "weight", 0.5f, "acl", "rw");

    try (final ByteArrayOutputStream os = new ByteArrayOutputStream()) {
        final GryoWriter writer = g.io().gryoWriter().create();
        writer.writeEdge(os, e);

        final AtomicBoolean called = new AtomicBoolean(false);
        final GryoReader reader = g.io().gryoReader().workingDirectory(File.separator + "tmp").create();
        try (final ByteArrayInputStream bais = new ByteArrayInputStream(os.toByteArray())) {
            reader.readEdge(bais, detachedEdge -> {
                assertEquals(e.id(), detachedEdge.id());
                assertEquals(v1.id(), detachedEdge.iterators().vertexIterator(Direction.OUT).next().id());
                assertEquals(v2.id(), detachedEdge.iterators().vertexIterator(Direction.IN).next().id());
                assertEquals(v1.label(), detachedEdge.iterators().vertexIterator(Direction.OUT).next().label());
                assertEquals(v2.label(), detachedEdge.iterators().vertexIterator(Direction.IN).next().label());
                assertEquals(e.label(), detachedEdge.label());
                assertEquals(0.5f, detachedEdge.iterators().propertyIterator("weight").next().value());
                assertEquals("rw", detachedEdge.iterators().propertyIterator("acl").next().value());
                called.set(true);
                return null;
            });
        }
        assertTrue(called.get());
    }
}
From source file:com.microsoft.tfs.core.clients.versioncontrol.internal.localworkspace.LocalDataAccessLayer.java
public static boolean reconcileLocalWorkspace(final Workspace workspace, final WebServiceLayer webServiceLayer,
        final boolean unscannedReconcile, final boolean reconcileMissingFromDisk,
        final AtomicReference<Failure[]> failures, final AtomicBoolean pendingChangesUpdatedByServer) {
    int previousProjectRevisionId = -1;
    boolean processedProjectRenames = false;

    while (true) {
        try {
            final boolean reconciled = reconcileLocalWorkspaceHelper(workspace, webServiceLayer,
                    unscannedReconcile, reconcileMissingFromDisk, failures, pendingChangesUpdatedByServer);

            pendingChangesUpdatedByServer.set(pendingChangesUpdatedByServer.get() || processedProjectRenames);

            return reconciled;
        } catch (final ReconcileBlockedByProjectRenameException renameEx) {
            // Did we just ACK a new project revision ID, but the server sent us another instruction
            // to move to the same revision ID we just moved to? Rather than loop forever we will throw
            // in this case.
            if (previousProjectRevisionId >= 0
                    && previousProjectRevisionId == renameEx.getNewProjectRevisionId()) {
                final Failure failure = new Failure(MessageFormat.format(
                        Messages.getString("LocalDataAccessLayer.RepeatedProjectRevisionIdFormat"), //$NON-NLS-1$
                        renameEx.getNewProjectRevisionId()), null, SeverityType.ERROR, null);

                throw new ReconcileFailedException(new Failure[] { failure });
            }

            final List<KeyValuePair<String, String>> projectRenames = new ArrayList<KeyValuePair<String, String>>();

            for (int i = 0; i < renameEx.getOldProjectNames().length; i++) {
                projectRenames.add(new KeyValuePair<String, String>(renameEx.getOldProjectNames()[i],
                        renameEx.getNewProjectNames()[i]));
            }

            ProcessProjectRenames(workspace, webServiceLayer, projectRenames,
                    renameEx.getNewProjectRevisionId());

            previousProjectRevisionId = renameEx.getNewProjectRevisionId();
            processedProjectRenames = true;

            continue;
        }
    }

    /*
     * Unreachable code in C#
     */
    // throw new ReconcileFailedException(new Failure[]
    // {
    //     new Failure("Could not drain all renames", null, SeverityType.ERROR, null) //$NON-NLS-1$
    // });
}
From source file:org.apache.tinkerpop.gremlin.structure.IoTest.java
@Test
@FeatureRequirement(featureClass = Graph.Features.EdgeFeatures.class, feature = Graph.Features.EdgeFeatures.FEATURE_ADD_EDGES)
@FeatureRequirement(featureClass = Graph.Features.VertexFeatures.class, feature = Graph.Features.VertexFeatures.FEATURE_ADD_VERTICES)
@FeatureRequirement(featureClass = EdgePropertyFeatures.class, feature = EdgePropertyFeatures.FEATURE_FLOAT_VALUES)
@FeatureRequirement(featureClass = Graph.Features.VertexFeatures.class, feature = Graph.Features.VertexFeatures.FEATURE_NUMERIC_IDS)
@FeatureRequirement(featureClass = Graph.Features.VertexFeatures.class, feature = FEATURE_USER_SUPPLIED_IDS)
public void shouldReadWriteEdgeToGraphSONNonLossy() throws Exception {
    final Vertex v1 = g.addVertex(T.id, 1l, T.label, "person");
    final Vertex v2 = g.addVertex(T.id, 2l, T.label, "person");
    final Edge e = v1.addEdge("friend", v2, "weight", 0.5f, "acl", "rw");

    try (final ByteArrayOutputStream os = new ByteArrayOutputStream()) {
        final GraphSONWriter writer = g.io().graphSONWriter()
                .mapper(g.io().graphSONMapper().embedTypes(true).create()).create();
        writer.writeEdge(os, e);

        final AtomicBoolean called = new AtomicBoolean(false);
        final GraphSONReader reader = g.io().graphSONReader()
                .mapper(g.io().graphSONMapper().embedTypes(true).create()).create();
        try (final ByteArrayInputStream bais = new ByteArrayInputStream(os.toByteArray())) {
            reader.readEdge(bais, detachedEdge -> {
                assertEquals(e.id(), detachedEdge.id());
                assertEquals(v1.id(), detachedEdge.iterators().vertexIterator(Direction.OUT).next().id());
                assertEquals(v2.id(), detachedEdge.iterators().vertexIterator(Direction.IN).next().id());
                assertEquals(v1.label(), detachedEdge.iterators().vertexIterator(Direction.OUT).next().label());
                assertEquals(v2.label(), detachedEdge.iterators().vertexIterator(Direction.IN).next().label());
                assertEquals(e.label(), detachedEdge.label());
                assertEquals(0.5f, detachedEdge.iterators().propertyIterator("weight").next().value());
                assertEquals("rw", detachedEdge.iterators().propertyIterator("acl").next().value());
                called.set(true);
                return null;
            });
        }
        assertTrue(called.get());
    }
}