Example usage for java.util.concurrent.atomic AtomicReference set

List of usage examples for java.util.concurrent.atomic AtomicReference set

Introduction

On this page you can find example usage for java.util.concurrent.atomic AtomicReference set.

Prototype

public final void set(V newValue) 

Document

Sets the value to newValue, with memory effects as specified by VarHandle#setVolatile.
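
For quick reference, here is a minimal, self-contained sketch of the basic pattern (the class and variable names are purely illustrative and do not come from any of the projects listed below): one thread publishes a value with set, and another thread observes it with get.

import java.util.concurrent.atomic.AtomicReference;

public class AtomicReferenceSetExample {
    public static void main(String[] args) throws InterruptedException {
        // Holds the latest status message published by the worker thread.
        AtomicReference<String> status = new AtomicReference<>("starting");

        Thread worker = new Thread(() -> {
            // set(...) performs a volatile write, so the new value is
            // immediately visible to any thread that subsequently calls get().
            status.set("running");
            status.set("done");
        });

        worker.start();
        worker.join();

        System.out.println("Final status: " + status.get()); // prints "done"
    }
}

Many of the examples below also use AtomicReference as a mutable "out parameter" or result slot written from inside a lambda or anonymous class, since local variables captured there must be effectively final.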

Usage

From source file:org.commonjava.indy.ftest.core.fixture.ThreadDumper.java

public static TestRule timeoutRule(int timeout, TimeUnit units) {
    return (base, description) -> new Statement() {
        public void evaluate() throws Throwable {
            System.out.printf("Setting up timeout: %d %s to wrap: %s\n", timeout, units, base);
            AtomicReference<Throwable> error = new AtomicReference<>();
            CountDownLatch latch = new CountDownLatch(1);
            FutureTask<Void> task = new FutureTask<>(() -> {
                try {
                    latch.countDown();
                    base.evaluate();
                } catch (Throwable t) {
                    error.set(t);
                }

                return null;
            });

            ThreadGroup tg = new ThreadGroup("Test Timeout Group");
            Thread t = new Thread(tg, task, "Test Timeout Thread");
            t.setDaemon(true);
            t.start();

            try {
                System.out.println("Waiting for test to start.");
                latch.await();
            } catch (InterruptedException e) {
                error.set(e);
            }

            if (error.get() == null) {
                try {
                    System.out.println("Waiting for test to complete (or timeout)");
                    task.get(timeout, units);
                } catch (InterruptedException e) {
                    error.set(e);
                } catch (ExecutionException e) {
                    error.set(e.getCause());
                } catch (TimeoutException e) {
                    System.out.printf("Test timeout %d %s expired!\n", timeout, units.name());
                    dumpThreads();
                    StackTraceElement[] stackTrace = t.getStackTrace();
                    Exception currThreadException = new TestTimedOutException(timeout, units);
                    if (stackTrace != null) {
                        currThreadException.setStackTrace(stackTrace);
                        t.interrupt();
                    }

                    throw currThreadException;
                }
            }

            Throwable throwable = error.get();
            if (throwable != null) {
                throw throwable;
            }
        }
    };
}

From source file:com.opinionlab.woa.WallOfAwesome.java

private static SockJSHandler makeEventStream(Vertx vertx) {
    final SockJSHandlerOptions options = new SockJSHandlerOptions().setHeartbeatInterval(2000);
    final SockJSHandler sockJSHandler = SockJSHandler.create(vertx, options);

    sockJSHandler.socketHandler(socket -> {
        final AtomicInteger openCount = new AtomicInteger();
        final AtomicBoolean running = new AtomicBoolean(true);
        LOGGER.info(format("[OPEN] Sockets: %d", openCount.incrementAndGet()));

        socket.endHandler(aVoid -> {
            running.set(false);
            LOGGER.info(format("[CLOSE] Sockets: %d", openCount.decrementAndGet()));
        });

        socket.handler(buffer -> {
            String command = buffer.toString();
            if ("purge".equals(command)) {
                EXECUTOR.execute(() -> {
                    try {
                        AwesomeImap.purge(s -> socket.write(buffer(objectToJson(
                                HashTreePMap.empty().plus("deleted", true).plus("id", s.getId()), NO_TYPES))));
                    } catch (NoSuchProviderException e) {
                        LOGGER.error("Could not purge messages", e);
                    }
                });
            } else {
                LOGGER.error(format("Unknown command: %s", command));
            }
        });

        try {
            final AtomicReference<Date> latestDate = new AtomicReference<>(new Date(0));

            Consumer<Awesome> publishAwesome = awesome -> {
                socket.write(buffer(objectToJson(awesome, NO_TYPES)));

                final Date receivedDate = awesome.getReceivedDate();
                if (latestDate.get().before(receivedDate)) {
                    latestDate.set(receivedDate);
                }
            };
            AwesomeImap.fetchAwesome().forEach(publishAwesome);

            EXECUTOR.execute(() -> {
                LOGGER.info("Polling started.");
                try {
                    while (running.get()) {
                        AwesomeImap.fetchAwesomeSince(latestDate.get()).forEach(publishAwesome);
                        Thread.sleep(1000);
                    }
                } catch (Throwable t) {
                    running.set(false);
                    socket.close();
                    LOGGER.error("Polling ended ABNORMALLY", t);
                } finally {
                    LOGGER.info("Polling ended normally.");
                }
            });
        } catch (MessagingException e) {
            LOGGER.error("Unable to fetch messages.", e);
        }
    });
    return sockJSHandler;
}

From source file:com.splout.db.common.TestUtils.java

/**
 * Returns a QNode instance if, after a maximum of X trials, we can find a port to bind it to.
 * The configuration instance that is passed in may be modified accordingly.
 */
public static QNode getTestQNode(final SploutConfiguration testConfig, final IQNodeHandler handler)
        throws Throwable {
    final AtomicReference<QNode> reference = new AtomicReference<QNode>();
    CatchAndRetry qNodeInit = new CatchAndRetry(java.net.BindException.class, 50) {

        @Override
        public void businessLogic() throws Throwable {
            QNode qNode = new QNode();
            qNode.start(testConfig, handler);
            reference.set(qNode);
        }

        @Override
        public void retryLogic() {
            testConfig.setProperty(QNodeProperties.PORT, testConfig.getInt(QNodeProperties.PORT) + 1);
        }
    };
    qNodeInit.catchAndRetry();
    return reference.get();
}

From source file:com.igormaznitsa.ideamindmap.utils.IdeaUtils.java

@Nullable
public static VirtualFile findKnowledgeFolderForModule(@Nullable final Module module,
        final boolean createIfMissing) {
    final VirtualFile rootFolder = IdeaUtils.findPotentialRootFolderForModule(module);
    final AtomicReference<VirtualFile> result = new AtomicReference<VirtualFile>();
    if (rootFolder != null) {
        result.set(rootFolder.findChild(PROJECT_KNOWLEDGE_FOLDER_NAME));
        if (result.get() == null || !result.get().isDirectory()) {
            if (createIfMissing) {
                CommandProcessor.getInstance().executeCommand(module.getProject(), new Runnable() {
                    @Override
                    public void run() {
                        ApplicationManager.getApplication().runWriteAction(new Runnable() {
                            @Override
                            public void run() {
                                try {
                                    result.set(VfsUtil.createDirectoryIfMissing(rootFolder,
                                            PROJECT_KNOWLEDGE_FOLDER_NAME));
                                    LOGGER.info("Created knowledge folder for " + module);
                                } catch (IOException ex) {
                                    LOGGER.error("Can't create knowledge folder for " + module, ex);
                                }
                            }
                        });
                    }
                }, null, null);
            } else {
                result.set(null);
            }
        }
    }
    return result.get();
}

From source file:com.microsoft.tfs.core.clients.versioncontrol.localworkspace.BaselineFolder.java

/**
 * Given the root baseline folder and a baseline file GUID, calculates the
 * path that this baseline file GUID would have in that root folder, without
 * the extension (.rw or .gz).
 *
 * Example values for baselineFolderRootPath: "D:\workspace\$tf" (from the
 * instance method GetPathFromGuid immediately above) and
 * "C:\ProgramData\TFS\Offline\<guid>\<workspace>".
 *
 * @param baselineFolderRootPath
 *        Root folder of the baseline folder structure
 * @param baselineFileGuid
 *        Baseline file GUID whose path should be computed
 * @param individualBaselineFolder
 *        [out] A value equal to Path.GetDirectoryName(retval)
 * @return the full path (without extension) that the baseline file GUID
 *         would have under the given root folder
 */
public static String getPathFromGUID(final String baselineFolderRootPath, final byte[] baselineFileGuid,
        final AtomicReference<String> individualBaselineFolder) {
    checkForValidBaselineFileGUID(baselineFileGuid);

    // i.e. @"D:\workspace\$tf\1"
    individualBaselineFolder.set(baselineFolderRootPath + File.separator
            + PARTITIONING_FOLDERS[((char) baselineFileGuid[0]) % PARTITIONING_FOLDER_COUNT]);

    // i.e. @"D:\workspace\$tf\1\408bed21-9023-47c3-8280-b1ec3ffacd94"
    return individualBaselineFolder.get() + File.separator + new GUID(baselineFileGuid).getGUIDString();
}

From source file:kn.uni.gis.foxhunt.context.GameContext.java

/**
 * Creates a new fox game.
 *
 * @param playerName the name of the player creating the game
 * @throws GameException if the game could not be created on the server
 */
public static void newFoxGame(String playerName) throws GameException {

    final AtomicReference<String> ref = new AtomicReference<String>();
    final AtomicReference<Exception> exc = new AtomicReference<Exception>();
    HttpContext.getInstance().put(SettingsContext.getInstance().getServerUrl(), null,
            new EntityHandlerAdapter() {
                @Override
                public void handleEntity(HttpEntity entity, int statusCode) {
                    if (statusCode == HttpStatus.SC_OK) {
                        try {
                            ref.set(EntityUtils.toString(entity));

                        } catch (ParseException e) {
                            exc.set(e);
                        } catch (IOException e) {
                            exc.set(e);
                        }
                    } else {
                        exc.set(new GameException("bad status code: " + statusCode));
                    }
                }

                @Override
                public void handleException(Exception exception) {
                    exc.set(exception);
                }
            });

    if (ref.get() == null) {
        throw exc.get() != null ? new GameException(exc.get())
                : new GameException("unrecognized error code from server");
    }
    currentGame = new Game(ref.get(), playerName, Util.createFoxUrl(ref.get()), Util.createGameUrl(ref.get()));
}

From source file:org.elasticsearch.xpack.test.rest.XPackRestTestHelper.java

/**
 * Waits for the Machine Learning templates to be created
 * and checks that the version is up to date.
 */
public static void waitForMlTemplates(RestClient client) throws InterruptedException {
    AtomicReference<Version> masterNodeVersion = new AtomicReference<>();
    ESTestCase.awaitBusy(() -> {
        String response;
        try {
            response = EntityUtils.toString(client
                    .performRequest("GET", "/_cat/nodes", singletonMap("h", "master,version")).getEntity());
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
        for (String line : response.split("\n")) {
            if (line.startsWith("*")) {
                masterNodeVersion.set(Version.fromString(line.substring(2).trim()));
                return true;
            }
        }
        return false;
    });

    final List<String> templateNames = Arrays.asList(AuditorField.NOTIFICATIONS_INDEX, MlMetaIndex.INDEX_NAME,
            AnomalyDetectorsIndex.jobStateIndexName(), AnomalyDetectorsIndex.jobResultsIndexPrefix());
    for (String template : templateNames) {
        ESTestCase.awaitBusy(() -> {
            Map<?, ?> response;
            try {
                String string = EntityUtils
                        .toString(client.performRequest("GET", "/_template/" + template).getEntity());
                response = XContentHelper.convertToMap(JsonXContent.jsonXContent, string, false);
            } catch (ResponseException e) {
                if (e.getResponse().getStatusLine().getStatusCode() == 404) {
                    return false;
                }
                throw new RuntimeException(e);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
            Map<?, ?> templateDefinition = (Map<?, ?>) response.get(template);
            return Version.fromId((Integer) templateDefinition.get("version")).equals(masterNodeVersion.get());
        });
    }
}

From source file:com.microsoft.tfs.core.clients.versioncontrol.workspacecache.internal.InternalCacheLoader.java

/**
 * Load the config data. This can be called more than once. When it is
 * called a second time, it means to discard all current config data and
 * reload.
 *
 * @param currentCache
 *        the current cache or <code>null</code>
 * @param dataDirectoryExists
 *        true if the data directory exists, false if it does not exist
 * @param conflictingWorkspaces
 *        list of workspaces that were removed from the cache due to
 *        conflicts (must not be <code>null</code>)
 * @param workstationMutex
 *        the synchronization object for the new instance to use (see
 *        {@link #InternalCache(Object)}) (must not be <code>null</code>)
 * @param file
 *        the file to read data from (must not be <code>null</code>)
 * @return the loaded {@link InternalCache} (never <code>null</code>)
 */
public static InternalCache loadConfig(InternalCache currentCache, final boolean dataDirectoryExists,
        final AtomicReference<InternalWorkspaceConflictInfo[]> conflictingWorkspaces,
        final Object workstationMutex, final File file) {
    Check.notNull(conflictingWorkspaces, "conflictingWorkspaces"); //$NON-NLS-1$
    Check.notNull(workstationMutex, "workstationMutex"); //$NON-NLS-1$
    Check.notNull(file, "file"); //$NON-NLS-1$

    conflictingWorkspaces.set(InternalWorkspaceConflictInfo.EMPTY_ARRAY);

    // If the cache directory is inaccessible, we're done.
    if (!dataDirectoryExists) {
        // There is no cache file, so create an empty cache.
        return new InternalCache(workstationMutex);
    }

    final TFSFileLock lock = acquireLockOrThrow(file);

    Document config = null;
    try {
        config = readCacheAsDocument(file);

        if (config == null) {
            // There is no cache file, so create an empty cache.
            currentCache = new InternalCache(workstationMutex);
        } else {
            if (currentCache == null) {
                // No cache has been loaded, so load what's on disk.
                currentCache = new InternalCache(workstationMutex);
                currentCache.load(DOMUtils.getFirstChildElement(config.getDocumentElement(), XML_SERVERS));
            } else {
                // We have already loaded a cache, so we need to merge
                // with what's on disk.
                currentCache.merge(DOMUtils.getFirstChildElement(config.getDocumentElement(), XML_SERVERS),
                        conflictingWorkspaces);
            }
        }
    } catch (final XMLException e) {
        log.warn(MessageFormat.format(Messages.getString("InternalCacheLoader.InvalidCacheFileFormat"), //$NON-NLS-1$
                file), e);

        throw new VersionControlException(
                MessageFormat.format(Messages.getString("InternalCacheLoader.InvalidCacheFileFormat"), file), //$NON-NLS-1$
                e);
    } finally {
        lock.release();
        lock.close();
    }

    return currentCache;
}

From source file:com.microsoft.tfs.core.clients.versioncontrol.workspacecache.internal.InternalCacheLoader.java

/**
 * Save the local configuration data if it is dirty and the
 * {@link Workstation}'s cache is enabled.
 *
 * @param internalCache
 *        the cache to save (must not be <code>null</code>)
 * @param conflictingWorkspaces
 *        list of workspaces that were removed from the cache due to
 *        conflicts (must not be <code>null</code>)
 * @param workstationMutex
 *        the {@link Workstation}'s mutex object (must not be
 *        <code>null</code>), which is held during the save
 * @param file
 *        the file to save to (must not be <code>null</code>)
 */
public static void saveConfigIfDirty(final InternalCache internalCache,
        final AtomicReference<InternalWorkspaceConflictInfo[]> conflictingWorkspaces,
        final Object workstationMutex, final File file) {
    Check.notNull(conflictingWorkspaces, "conflictingWorkspaces"); //$NON-NLS-1$
    Check.notNull(workstationMutex, "workstationMutex"); //$NON-NLS-1$
    Check.notNull(file, "file"); //$NON-NLS-1$

    conflictingWorkspaces.set(InternalWorkspaceConflictInfo.EMPTY_ARRAY);

    // If the cached data hasn't changed in RAM or the cache directory is
    // inaccessible, we're done.
    if (!internalCache.isDirty()) {
        return;
    }

    // Create the document and the root node.
    final Document config = DOMCreateUtils.newDocument(XML_VERSION_CONTROL_SERVER);
    final Element rootNode = config.getDocumentElement();

    /*
     * We need to lock the cache file, if it exists, during the entire save
     * process. To prevent a deadlock where one thread grabs the file lock
     * and the other grabs the workstation lock, grab the workstation lock
     * first and hold it until the cache is marked clean (MarkClean()).
     */

    synchronized (workstationMutex) {
        final TFSFileLock lock = acquireLockOrThrow(file);

        try {
            // Read in the existing cache file, if any.
            final Document oldConfig = readCacheAsDocument(file);

            Element oldCacheNode = null;
            if (oldConfig != null) {
                oldCacheNode = DOMUtils.getFirstChildElement(oldConfig.getDocumentElement(), XML_SERVERS);
            }

            // Save the cached workspace data.
            final Element cacheNode = DOMUtils.appendChild(rootNode, XML_SERVERS);

            internalCache.save(oldCacheNode, cacheNode, conflictingWorkspaces);

            // Save the file.

            // Ensure the directories exist
            if (!file.exists() && !file.getParentFile().exists()) {
                file.getParentFile().mkdirs();
            }

            OutputStream stream;
            try {
                stream = new FileOutputStream(file);

                DOMSerializeUtils.serializeToStream(config, stream, DOMSerializeUtils.ENCODING_UTF8,
                        DOMSerializeUtils.INDENT);
            } catch (final FileNotFoundException e) {
                // from FileOutputStream

                // We tried to create the directories above, so this may be
                // a permissions problem. Ignore the error and mark the
                // cache clean (below) during a normal exit.
            }
        } finally {
            lock.release();
            lock.close();
        }

        /*
         * The file has been saved and is no longer dirty. NOTE: This must
         * be inside the (synchronized) lock to prevent the race condition
         * where the cache gets marked clean right after another thread
         * modifies it (lock must be taken here and where the cache is
         * modified).
         */
        internalCache.markClean();
    }
}

From source file:org.debux.webmotion.netbeans.javacc.lexer.impl.LexerUtils.java

public static TokenSequence<? extends TokenId> getMostEmbeddedTokenSequence(final Document doc,
        final int offset, boolean runUnderLock) {
    final AtomicReference<TokenSequence<? extends TokenId>> ref = new AtomicReference<TokenSequence<? extends TokenId>>();
    Runnable r = new Runnable() {
        @Override
        public void run() {
            TokenHierarchy th = TokenHierarchy.get(doc);
            List<TokenSequence<? extends TokenId>> sequences = th.embeddedTokenSequences(offset, false);
            if (sequences.isEmpty()) {
                //no embedding, return top level sequence;
                ref.set(th.tokenSequence());
            } else {
                ref.set(sequences.get(sequences.size() - 1)); //return the most embedded one
            }
        }
    };

    if (runUnderLock) {
        doc.render(r);
    } else {
        r.run();
    }

    return ref.get();
}