List of usage examples for java.util.concurrent.atomic.AtomicBoolean.set
public final void set(boolean newValue)
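Before the project examples, here is a minimal self-contained sketch of the method's contract (the demo class and variable names are illustrative, not taken from any source below): set(boolean) unconditionally stores the new value with volatile-write semantics, so other threads observe the update immediately; unlike compareAndSet, it cannot fail. A common use is a cooperative shutdown flag:

import java.util.concurrent.atomic.AtomicBoolean;

public class AtomicBooleanSetDemo {
    public static void main(String[] args) throws InterruptedException {
        // A shared shutdown flag; set(...) publishes the new value with
        // volatile semantics, so the worker sees it without any locking.
        final AtomicBoolean shutdownRequested = new AtomicBoolean(false);

        Thread worker = new Thread(() -> {
            while (!shutdownRequested.get()) {
                // ... do a unit of work ...
            }
            System.out.println("Worker observed the flag and stopped.");
        });
        worker.start();

        Thread.sleep(100);
        shutdownRequested.set(true); // unconditional write, unlike compareAndSet
        worker.join();
    }
}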
From source file:org.apache.solr.handler.dataimport.processor.XPathEntityProcessor.java
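Two AtomicBoolean flags coordinate a producer thread streaming XPath records with a consumer iterator: isEnd.set(true) signals end-of-data or consumer abort, and throwExp.set(false) suppresses the deliberate "BREAK" RuntimeException used to exit the streaming loop, so it is not recorded as a real error.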
private Iterator<Map<String, Object>> getRowIterator(final Reader data, final String s) {
    // Nothing atomic about it. I just needed a StrongReference.
    final AtomicReference<Exception> exp = new AtomicReference<Exception>();
    final BlockingQueue<Map<String, Object>> blockingQueue =
            new ArrayBlockingQueue<Map<String, Object>>(blockingQueueSize);
    final AtomicBoolean isEnd = new AtomicBoolean(false);
    final AtomicBoolean throwExp = new AtomicBoolean(true);
    publisherThread = new Thread() {
        @Override
        public void run() {
            try {
                xpathReader.streamRecords(data, new XPathRecordReader.Handler() {
                    @Override
                    @SuppressWarnings("unchecked")
                    public void handle(Map<String, Object> record, String xpath) {
                        if (isEnd.get()) {
                            throwExp.set(false);
                            // To end the streaming; otherwise the parsing will go on forever
                            // even though the consumer has gone away.
                            throw new RuntimeException("BREAK");
                        }
                        Map<String, Object> row;
                        try {
                            row = readRow(record, xpath);
                        } catch (final Exception e) {
                            isEnd.set(true);
                            return;
                        }
                        offer(row);
                    }
                });
            } catch (final Exception e) {
                if (throwExp.get())
                    exp.set(e);
            } finally {
                closeIt(data);
                if (!isEnd.get()) {
                    offer(END_MARKER);
                }
            }
        }

        private void offer(Map<String, Object> row) {
            try {
                while (!blockingQueue.offer(row, blockingQueueTimeOut, blockingQueueTimeOutUnits)) {
                    if (isEnd.get())
                        return;
                    LOG.debug("Timeout elapsed writing records. Perhaps buffer size should be increased.");
                }
            } catch (final InterruptedException e) {
                return;
            } finally {
                synchronized (this) {
                    notifyAll();
                }
            }
        }
    };
    publisherThread.start();

    return new Iterator<Map<String, Object>>() {
        private Map<String, Object> lastRow;
        int count = 0;

        @Override
        public boolean hasNext() {
            return !isEnd.get();
        }

        @Override
        public Map<String, Object> next() {
            Map<String, Object> row;
            do {
                try {
                    row = blockingQueue.poll(blockingQueueTimeOut, blockingQueueTimeOutUnits);
                    if (row == null) {
                        LOG.debug("Timeout elapsed reading records.");
                    }
                } catch (final InterruptedException e) {
                    LOG.debug("Caught InterruptedException while waiting for row. Aborting.");
                    isEnd.set(true);
                    return null;
                }
            } while (row == null);

            if (row == END_MARKER) {
                isEnd.set(true);
                if (exp.get() != null) {
                    String msg = "Parsing failed for xml, url:" + s + " rows processed in this xml:" + count;
                    if (lastRow != null)
                        msg += " last row in this xml:" + lastRow;
                    if (ABORT.equals(onError)) {
                        wrapAndThrow(SEVERE, exp.get(), msg);
                    } else if (SKIP.equals(onError)) {
                        wrapAndThrow(DataImportHandlerException.SKIP, exp.get());
                    } else {
                        LOG.warn(msg, exp.get());
                    }
                }
                return null;
            }
            count++;
            return lastRow = row;
        }

        @Override
        public void remove() {
            /* no op */
        }
    };
}
From source file:com.facebook.AccessTokenManager.java
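The tokenRefreshInProgress flag is claimed with compareAndSet(false, true) and cleared with set(false) in a finally block so a failed refresh never leaves it stuck; permissionsCallSucceeded.set(true) records whether the permissions request completed before its results are trusted.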
private void refreshCurrentAccessTokenImpl(final AccessToken.AccessTokenRefreshCallback callback) {
    final AccessToken accessToken = currentAccessToken;
    if (accessToken == null) {
        if (callback != null) {
            callback.OnTokenRefreshFailed(new FacebookException("No current access token to refresh"));
        }
        return;
    }
    if (!tokenRefreshInProgress.compareAndSet(false, true)) {
        if (callback != null) {
            callback.OnTokenRefreshFailed(new FacebookException("Refresh already in progress"));
        }
        return;
    }

    lastAttemptedTokenExtendDate = new Date();

    final Set<String> permissions = new HashSet<>();
    final Set<String> declinedPermissions = new HashSet<>();
    final AtomicBoolean permissionsCallSucceeded = new AtomicBoolean(false);
    final RefreshResult refreshResult = new RefreshResult();

    GraphRequestBatch batch = new GraphRequestBatch(
            createGrantedPermissionsRequest(accessToken, new GraphRequest.Callback() {
                @Override
                public void onCompleted(GraphResponse response) {
                    JSONObject result = response.getJSONObject();
                    if (result == null) {
                        return;
                    }
                    JSONArray permissionsArray = result.optJSONArray("data");
                    if (permissionsArray == null) {
                        return;
                    }
                    permissionsCallSucceeded.set(true);
                    for (int i = 0; i < permissionsArray.length(); i++) {
                        JSONObject permissionEntry = permissionsArray.optJSONObject(i);
                        if (permissionEntry == null) {
                            continue;
                        }
                        String permission = permissionEntry.optString("permission");
                        String status = permissionEntry.optString("status");
                        if (!Utility.isNullOrEmpty(permission) && !Utility.isNullOrEmpty(status)) {
                            status = status.toLowerCase(Locale.US);
                            if (status.equals("granted")) {
                                permissions.add(permission);
                            } else if (status.equals("declined")) {
                                declinedPermissions.add(permission);
                            } else {
                                Log.w(TAG, "Unexpected status: " + status);
                            }
                        }
                    }
                }
            }),
            createExtendAccessTokenRequest(accessToken, new GraphRequest.Callback() {
                @Override
                public void onCompleted(GraphResponse response) {
                    JSONObject data = response.getJSONObject();
                    if (data == null) {
                        return;
                    }
                    refreshResult.accessToken = data.optString("access_token");
                    refreshResult.expiresAt = data.optInt("expires_at");
                }
            }));

    batch.addCallback(new GraphRequestBatch.Callback() {
        @Override
        public void onBatchCompleted(GraphRequestBatch batch) {
            AccessToken newAccessToken = null;
            try {
                // Compare user ids with equals(); the original used !=, which
                // compares String references and can spuriously mismatch.
                if (getInstance().getCurrentAccessToken() == null
                        || !getInstance().getCurrentAccessToken().getUserId()
                                .equals(accessToken.getUserId())) {
                    if (callback != null) {
                        callback.OnTokenRefreshFailed(
                                new FacebookException("No current access token to refresh"));
                    }
                    return;
                }
                if (!permissionsCallSucceeded.get() && refreshResult.accessToken == null
                        && refreshResult.expiresAt == 0) {
                    if (callback != null) {
                        callback.OnTokenRefreshFailed(new FacebookException("Failed to refresh access token"));
                    }
                    return;
                }
                newAccessToken = new AccessToken(
                        refreshResult.accessToken != null ? refreshResult.accessToken : accessToken.getToken(),
                        accessToken.getApplicationId(), accessToken.getUserId(),
                        permissionsCallSucceeded.get() ? permissions : accessToken.getPermissions(),
                        permissionsCallSucceeded.get() ? declinedPermissions
                                : accessToken.getDeclinedPermissions(),
                        accessToken.getSource(),
                        refreshResult.expiresAt != 0 ? new Date(refreshResult.expiresAt * 1000L)
                                : accessToken.getExpires(),
                        new Date());
                getInstance().setCurrentAccessToken(newAccessToken);
            } finally {
                // Always clear the in-progress flag, even if token construction throws.
                tokenRefreshInProgress.set(false);
                if (callback != null && newAccessToken != null) {
                    callback.OnTokenRefreshed(newAccessToken);
                }
            }
        }
    });
    batch.executeAsync();
}
From source file:org.opennms.newts.gsod.ImportRunner.java
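A failed flag is set from the Observer's onError callback so that, back on the subscribing thread, the subscription can be torn down if the import has already failed.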
public void execute(String... args) throws Exception {
    CmdLineParser parser = new CmdLineParser(this);
    try {
        parser.parseArgument(args);
    } catch (CmdLineException e) {
        // handle invalid arguments
        System.err.println(e.getMessage());
        parser.printUsage(System.err);
        return;
    }

    // Set up the slf4j metrics reporter
    MetricRegistry metrics = new MetricRegistry();
    final long start = System.currentTimeMillis();
    metrics.register("elapsed-seconds", new Gauge<Double>() {
        @Override
        public Double getValue() {
            return (System.currentTimeMillis() - start) / 1000.0;
        }
    });

    final ConsoleReporter reporter = ConsoleReporter.forRegistry(metrics).outputTo(System.err)
            .convertRatesTo(SECONDS).convertDurationsTo(MILLISECONDS).build();
    reporter.start(10, SECONDS);

    if (m_restUrl == null) {
        // we are using a direct importer, so use a NewtsReporter for storing metrics
        NewtsReporter newtsReporter = NewtsReporter.forRegistry(metrics).name("importer")
                .convertRatesTo(SECONDS).convertDurationsTo(MILLISECONDS).build(repository());
        newtsReporter.start(1, SECONDS);
    }

    LOG.debug("Scanning {} for GSOD data files...", m_source);

    // walk the files in the given directory
    Observable<Sample> samples = fileTreeWalker(m_source.toPath()).subscribeOn(Schedulers.io())
            // set up a meter for each file processed
            .map(meter(metrics.meter("files"), Path.class))
            // report file
            .map(reportFile())
            // read all the files and convert them into lines
            .mergeMap(lines())
            // excluding the header lines
            .filter(exclude("YEARMODA"))
            // turn each line into a list of samples
            .mergeMap(samples())
            // adjust time on samples according to arguments
            .map(adjustTime())
            // meter the samples
            .map(meter(metrics.meter("samples"), Sample.class));

    // collect the samples into batches of m_samplesPerBatch samples each
    Observable<List<Sample>> batches = samples.buffer(m_samplesPerBatch);

    Observable<Boolean> doImport = m_restUrl != null ? restPoster(batches, metrics)
            : directPoster(batches, metrics);

    System.err.println("doImport = " + doImport);

    // GO!!!
    final AtomicReference<Subscription> subscription = new AtomicReference<>();
    final AtomicBoolean failed = new AtomicBoolean(false);
    final CountDownLatch latch = new CountDownLatch(1);

    Subscription s = doImport.subscribe(new Observer<Boolean>() {
        @Override
        public void onCompleted() {
            System.err.println("Finished Importing Everything!");
            reporter.report();
            latch.countDown();
            System.exit(0);
        }

        @Override
        public void onError(Throwable e) {
            failed.set(true);
            System.err.println("Error importing!");
            e.printStackTrace();
            try {
                Subscription s = subscription.get();
                if (s != null)
                    s.unsubscribe();
            } catch (Exception ex) {
                System.err.println("Failed to close httpClient!");
                ex.printStackTrace();
            }
        }

        @Override
        public void onNext(Boolean t) {
            System.err.println("Received a boolean: " + t);
        }
    });

    subscription.set(s);

    if (failed.get()) {
        s.unsubscribe();
    }

    System.err.println("Return from Subscribe!");

    latch.await();
}
From source file:org.apache.any23.extractor.microdata.MicrodataParserTest.java
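Each worker thread sets foundFailure on a wrong date or an exception, and every thread polls the flag so all of them stop early as soon as one fails; the test then asserts the flag is still false.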
@Test
public void testGetDateConcurrent() throws Exception {
    final Date expectedDate = new GregorianCalendar(2009, Calendar.MAY, 10).getTime(); // 2009-05-10
    final byte[] content = IOUtils
            .toByteArray(getClass().getResourceAsStream("/microdata/microdata-basic.html"));

    final int threadCount = 10;
    final int attemptCount = 100;
    final List<Thread> threads = new ArrayList<Thread>();
    final CountDownLatch beforeLatch = new CountDownLatch(1);
    final CountDownLatch afterLatch = new CountDownLatch(threadCount);
    final AtomicBoolean foundFailure = new AtomicBoolean(false);

    for (int i = 0; i < threadCount; i++) {
        threads.add(new Thread("Test-thread-" + i) {
            @Override
            public void run() {
                try {
                    beforeLatch.await();
                    int counter = 0;
                    while (counter++ < attemptCount && !foundFailure.get()) {
                        final Document document = getDom(content);
                        final MicrodataParserReport report = MicrodataParser.getMicrodata(document);
                        final ItemScope target = report.getDetectedItemScopes()[4];
                        Date actualDate = target.getProperties().get("birthday").get(0).getValue()
                                .getAsDate();
                        if (!expectedDate.equals(actualDate)) {
                            foundFailure.set(true);
                        }
                    }
                } catch (Exception ex) {
                    ex.printStackTrace();
                    foundFailure.set(true);
                } finally {
                    afterLatch.countDown();
                }
            }
        });
    }

    for (Thread thread : threads) {
        thread.start();
    }

    // Let threads start computation
    beforeLatch.countDown();

    // Wait for all threads to complete
    afterLatch.await();

    assertFalse(foundFailure.get());
}
From source file:com.google.dart.engine.services.internal.refactoring.ExtractMethodRefactoringImpl.java
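An AST visitor sets the result flag when it finds a reference to the variable past the end of the selection range; the AtomicBoolean serves as a mutable boolean holder the anonymous visitor can write to.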
/**
 * @return <code>true</code> if the given {@link VariableElement} is referenced after the
 *         {@link #selectionRange}.
 */
private boolean isUsedAfterSelection(final VariableElement element) {
    final AtomicBoolean result = new AtomicBoolean();
    parentMember.accept(new GeneralizingASTVisitor<Void>() {
        @Override
        public Void visitSimpleIdentifier(SimpleIdentifier node) {
            VariableElement nodeElement = CorrectionUtils.getLocalVariableElement(node);
            if (nodeElement == element) {
                int nodeOffset = node.getOffset();
                if (nodeOffset > selectionRange.getEnd()) {
                    result.set(true);
                }
            }
            return null;
        }
    });
    return result.get();
}
From source file:org.alfresco.repo.activities.feed.FeedNotifierImpl.java
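A running flag backs the JobLockRefreshCallback: lockReleased() sets it to false, and the finally block clears it as well, so the refreshed lock self-releases once the job is done or aborted.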
public void execute(int repeatIntervalMins) {
    checkProperties();

    // Bypass if the system is in read-only mode
    if (transactionService.isReadOnly()) {
        if (logger.isDebugEnabled()) {
            logger.debug("Activities email notification bypassed; the system is read-only");
        }
        return;
    }

    String lockToken = null;

    // Use a flag to keep track of the running job
    final AtomicBoolean running = new AtomicBoolean(true);
    try {
        lockToken = jobLockService.getLock(LOCK_QNAME, LOCK_TTL);
        if (lockToken == null) {
            logger.info("Can't get lock. Assume multiple feed notifiers...");
            return;
        }

        if (logger.isTraceEnabled()) {
            logger.trace("Activities email notification started");
        }

        jobLockService.refreshLock(lockToken, LOCK_QNAME, LOCK_TTL, new JobLockRefreshCallback() {
            @Override
            public boolean isActive() {
                return running.get();
            }

            @Override
            public void lockReleased() {
                running.set(false);
            }
        });

        executeInternal(repeatIntervalMins);

        // Done
        if (logger.isTraceEnabled()) {
            logger.trace("Activities email notification completed");
        }
    } catch (LockAcquisitionException e) {
        // Job being done by another process
        if (logger.isDebugEnabled()) {
            logger.debug("Activities email notification already underway");
        }
    } catch (VmShutdownException e) {
        // Aborted
        if (logger.isDebugEnabled()) {
            logger.debug("Activities email notification aborted");
        }
    } finally {
        // The lock will self-release if isActive answers in the negative
        running.set(false);
        if (lockToken != null) {
            jobLockService.releaseLock(lockToken, LOCK_QNAME);
        }
    }
}
From source file:org.apache.camel.component.bean.MethodInfo.java
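Here the AtomicBoolean is an out-parameter: each routing branch calls doneSync.set(sync) so the caller learns whether the dynamic router, recipient list, or routing slip completed synchronously.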
public MethodInvocation createMethodInvocation(final Object pojo, final Exchange exchange) {
    final Object[] arguments = parametersExpression.evaluate(exchange, Object[].class);
    return new MethodInvocation() {
        public Method getMethod() {
            return method;
        }

        public Object[] getArguments() {
            return arguments;
        }

        public Object proceed(AsyncCallback callback, AtomicBoolean doneSync) throws Exception {
            // dynamic router should be invoked beforehand
            if (dynamicRouter != null) {
                if (!dynamicRouter.isStarted()) {
                    ServiceHelper.startService(dynamicRouter);
                }
                // use an expression which invokes the method to be used by the dynamic router
                Expression expression = new DynamicRouterExpression(pojo);
                boolean sync = dynamicRouter.doRoutingSlip(exchange, expression, callback);
                // must remember the done sync returned from the dynamic router
                doneSync.set(sync);
                return Void.TYPE;
            }

            // invoke pojo
            if (LOG.isTraceEnabled()) {
                LOG.trace(">>>> invoking: " + method + " on bean: " + pojo + " with arguments: "
                        + asString(arguments) + " for exchange: " + exchange);
            }
            Object result = invoke(method, pojo, arguments, exchange);

            if (recipientList != null) {
                // ensure it's started
                if (!recipientList.isStarted()) {
                    ServiceHelper.startService(recipientList);
                }
                boolean sync = recipientList.sendToRecipientList(exchange, result, callback);
                // must remember the done sync returned from the recipient list
                doneSync.set(sync);
                // we don't want to return the list of endpoints;
                // return Void to indicate to BeanProcessor that there is no reply
                return Void.TYPE;
            }
            if (routingSlip != null) {
                if (!routingSlip.isStarted()) {
                    ServiceHelper.startService(routingSlip);
                }
                boolean sync = routingSlip.doRoutingSlip(exchange, result, callback);
                // must remember the done sync returned from the routing slip
                doneSync.set(sync);
                return Void.TYPE;
            }
            return result;
        }

        public Object getThis() {
            return pojo;
        }

        public AccessibleObject getStaticPart() {
            return method;
        }
    };
}
From source file:org.apache.tinkerpop.gremlin.groovy.engine.GremlinExecutor.java
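hasErrors is set from several stream lambdas when an initialization script is missing, unreadable, or fails to evaluate; an AtomicBoolean is used because lambdas can only capture effectively final variables, and failures only log warnings rather than fail server initialization.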
private ScriptEngines createScriptEngines() {
    // plugins already on the path - ones static to the classpath
    final List<GremlinPlugin> globalPlugins = new ArrayList<>();
    ServiceLoader.load(GremlinPlugin.class).forEach(globalPlugins::add);

    return new ScriptEngines(se -> {
        // this first part initializes the script engines map
        for (Map.Entry<String, EngineSettings> config : settings.entrySet()) {
            final String language = config.getKey();
            se.reload(language, new HashSet<>(config.getValue().getImports()),
                    new HashSet<>(config.getValue().getStaticImports()), config.getValue().getConfig());
        }

        // "use" grabs dependencies and returns the plugins to load
        final List<GremlinPlugin> pluginsToLoad = new ArrayList<>(globalPlugins);
        use.forEach(u -> {
            if (u.size() != 3)
                logger.warn(
                        "Could not resolve dependencies for [{}]. Each entry for the 'use' configuration must include [groupId, artifactId, version]",
                        u);
            else {
                logger.info("Getting dependencies for [{}]", u);
                pluginsToLoad.addAll(se.use(u.get(0), u.get(1), u.get(2)));
            }
        });

        // now that all dependencies are in place, the imports can't get messed up if a plugin tries to
        // execute a script (as the script engine appends the import list to the top of all scripts
        // passed to the engine). only enable those plugins that are configured to be enabled.
        se.loadPlugins(pluginsToLoad.stream().filter(plugin -> enabledPlugins.contains(plugin.getName()))
                .collect(Collectors.toList()));

        // initialization scripts can now be evaluated, since the dependencies declared with "use" are present
        for (Map.Entry<String, EngineSettings> config : settings.entrySet()) {
            final String language = config.getKey();

            // script engine initialization files that fail will only log warnings - not fail server initialization
            final AtomicBoolean hasErrors = new AtomicBoolean(false);
            config.getValue().getScripts().stream().map(File::new).filter(f -> {
                if (!f.exists()) {
                    logger.warn("Could not initialize {} ScriptEngine with {} as file does not exist",
                            language, f);
                    hasErrors.set(true);
                }
                return f.exists();
            }).map(f -> {
                try {
                    return Pair.with(f, Optional.of(new FileReader(f)));
                } catch (IOException ioe) {
                    logger.warn("Could not initialize {} ScriptEngine with {} as file could not be read - {}",
                            language, f, ioe.getMessage());
                    hasErrors.set(true);
                    return Pair.with(f, Optional.<FileReader>empty());
                }
            }).filter(p -> p.getValue1().isPresent()).map(p -> Pair.with(p.getValue0(), p.getValue1().get()))
                    .forEachOrdered(p -> {
                        try {
                            final Bindings bindings = new SimpleBindings();
                            bindings.putAll(globalBindings);

                            // evaluate init scripts with a hard reference so as to ensure they
                            // don't get garbage collected
                            bindings.put(GremlinGroovyScriptEngine.KEY_REFERENCE_TYPE,
                                    GremlinGroovyScriptEngine.REFERENCE_TYPE_HARD);

                            // the returned object should be a Map of initialized global bindings
                            final Object initializedBindings = se.eval(p.getValue1(), bindings, language);
                            if (initializedBindings != null && initializedBindings instanceof Map)
                                globalBindings.putAll((Map) initializedBindings);
                            else
                                logger.warn(
                                        "Initialization script {} did not return a Map - no global bindings specified",
                                        p.getValue0());

                            logger.info("Initialized {} ScriptEngine with {}", language, p.getValue0());
                        } catch (ScriptException sx) {
                            hasErrors.set(true);
                            logger.warn(
                                    "Could not initialize {} ScriptEngine with {} as script could not be evaluated - {}",
                                    language, p.getValue0(), sx.getMessage());
                        }
                    });
        }
    });
}
From source file:org.apache.bookkeeper.bookie.CreateNewLogTest.java
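Parallel createNewLog calls record any IOException by setting receivedException from the worker threads; the test then asserts the flag is still false before checking the allocated log ids.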
public void testConcurrentCreateNewLog(boolean entryLogFilePreAllocationEnabled) throws Exception {
    ServerConfiguration conf = TestBKConfiguration.newServerConfiguration();

    // Creating a new configuration with a number of ledger directories.
    conf.setLedgerDirNames(ledgerDirs);
    conf.setEntryLogFilePreAllocationEnabled(entryLogFilePreAllocationEnabled);
    LedgerDirsManager ledgerDirsManager = new LedgerDirsManager(conf, conf.getLedgerDirs(),
            new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()));
    EntryLogger el = new EntryLogger(conf, ledgerDirsManager);
    EntryLogManagerBase entryLogManager = (EntryLogManagerBase) el.getEntryLogManager();
    // set same thread executor for entryLoggerAllocator's allocatorExecutor
    setSameThreadExecutorForEntryLoggerAllocator(el.getEntryLoggerAllocator());

    Assert.assertEquals("previousAllocatedEntryLogId after initialization", -1,
            el.getPreviousAllocatedEntryLogId());
    Assert.assertEquals("leastUnflushedLogId after initialization", 0, el.getLeastUnflushedLogId());

    int createNewLogNumOfTimes = 10;
    AtomicBoolean receivedException = new AtomicBoolean(false);

    IntStream.range(0, createNewLogNumOfTimes).parallel().forEach((i) -> {
        try {
            entryLogManager.createNewLog((long) i);
        } catch (IOException e) {
            LOG.error("Received exception while creating newLog", e);
            receivedException.set(true);
        }
    });

    Assert.assertFalse("There shouldn't be any exceptions while creating newlog", receivedException.get());
    int expectedPreviousAllocatedEntryLogId = createNewLogNumOfTimes - 1;
    if (entryLogFilePreAllocationEnabled) {
        expectedPreviousAllocatedEntryLogId = createNewLogNumOfTimes;
    }

    Assert.assertEquals(
            "previousAllocatedEntryLogId after " + createNewLogNumOfTimes
                    + " number of times createNewLog is called",
            expectedPreviousAllocatedEntryLogId, el.getPreviousAllocatedEntryLogId());
    Assert.assertEquals("Number of RotatedLogChannels", createNewLogNumOfTimes - 1,
            entryLogManager.getRotatedLogChannels().size());
}
From source file:com.netflix.curator.framework.recipes.atomic.TestDistributedAtomicLong.java
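The doIncrement flag lets the test inject a concurrent increment inside the overridden valueToBytes, forcing the DistributedAtomicLong's compareAndSet to fail with a bad-version conflict.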
@Test
public void testCompareAndSet() throws Exception {
    final CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(),
            new RetryOneTime(1));
    client.start();
    try {
        final AtomicBoolean doIncrement = new AtomicBoolean(false);
        DistributedAtomicLong dal = new DistributedAtomicLong(client, "/counter", new RetryOneTime(1)) {
            @Override
            public byte[] valueToBytes(Long newValue) {
                if (doIncrement.get()) {
                    DistributedAtomicLong inc = new DistributedAtomicLong(client, "/counter",
                            new RetryOneTime(1));
                    try {
                        // this will force a bad version exception
                        inc.increment();
                    } catch (Exception e) {
                        throw new Error(e);
                    }
                }
                return super.valueToBytes(newValue);
            }
        };
        dal.forceSet(1L);

        Assert.assertTrue(dal.compareAndSet(1L, 5L).succeeded());
        Assert.assertFalse(dal.compareAndSet(1L, 5L).succeeded());

        doIncrement.set(true);
        Assert.assertFalse(dal.compareAndSet(5L, 10L).succeeded());
    } finally {
        client.close();
    }
}