Usage examples for java.util.concurrent.atomic.AtomicBoolean#set, collected from open-source projects.

public final void set(boolean newValue)

Unconditionally sets the value to newValue. The write has volatile memory semantics, so it is immediately visible to any thread that subsequently reads the flag.
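Before the project-specific examples, a minimal self-contained sketch of the typical use: one thread sets the flag, another polls it. The class name DoneFlagDemo and the sleep-poll loop are illustrative, not taken from any of the source files below.

import java.util.concurrent.atomic.AtomicBoolean;

public class DoneFlagDemo {
    public static void main(String[] args) throws InterruptedException {
        final AtomicBoolean done = new AtomicBoolean(false);
        Thread worker = new Thread(() -> {
            // ... do some work ...
            done.set(true); // volatile write: visible to other threads
        });
        worker.start();
        while (!done.get()) { // volatile read: observes the worker's write
            Thread.sleep(10);
        }
        System.out.println("worker finished");
    }
}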
From source file:org.piraso.server.service.ResponseLoggerServiceImplTest.java
@Test
public void testLogging() throws IOException, TransformerConfigurationException,
        ParserConfigurationException, ExecutionException, InterruptedException, SAXException {
    final AtomicBoolean fail = new AtomicBoolean(false);
    ExecutorService executor = Executors.newFixedThreadPool(2);
    final List<MessageEntry> expectedEntries = new ArrayList<MessageEntry>() {
        {
            for (int i = 0; i < 1000; i++) {
                add(new MessageEntry(1l, "test_" + (i + 1)));
            }
        }
    };

    // stop the service when number of entries is reached.
    stopOnWriteTimes(expectedEntries.size());

    Runnable startServiceRunnable = new Runnable() {
        public void run() {
            try {
                service.start();
            } catch (Exception e) {
                fail.set(true);
                e.printStackTrace();
            }
        }
    };

    Runnable logMessagesRunnable = new Runnable() {
        public void run() {
            try {
                // this entry should be ignored since this will throw an exception
                service.log(new ExceptionThrowEntry(1l));

                // these entries should succeed
                for (MessageEntry entry : expectedEntries) {
                    service.log(entry);
                }
            } catch (IOException e) {
                fail.set(true);
                e.printStackTrace();
            }
        }
    };

    Future future = executor.submit(startServiceRunnable);
    executor.submit(logMessagesRunnable);

    future.get();
    executor.shutdown();

    if (fail.get()) {
        fail("failure see exception trace.");
    }

    final List<Entry> entriesRead = new ArrayList<Entry>();
    PirasoEntryReader reader = new PirasoEntryReader(
            new ByteArrayInputStream(response.getContentAsByteArray()));
    reader.addListener(new EntryReadAdapter() {
        @Override
        public void readEntry(EntryReadEvent evt) {
            entriesRead.add(evt.getEntry());
        }
    });

    // start reading
    reader.start();

    assertEquals(service.getId(), reader.getId());
    assertEquals(expectedEntries.size(), entriesRead.size());
}
From source file:com.google.dart.java2dart.SyntaxTranslator.java
private static boolean hasConstructorInvocation(org.eclipse.jdt.core.dom.ASTNode node) {
    final AtomicBoolean result = new AtomicBoolean();
    node.accept(new ASTVisitor() {
        @Override
        public boolean visit(ConstructorInvocation node) {
            result.set(true);
            return false;
        }
    });
    return result.get();
}
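The example above relies on a common Java idiom: a local captured by an anonymous class (or lambda) must be effectively final, so a final AtomicBoolean serves as a one-slot mutable container the callback can write through. A minimal sketch of the same idiom with a lambda (CaptureDemo and containsNegative are illustrative names, not part of the project above):

import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;

public class CaptureDemo {
    static boolean containsNegative(List<Integer> values) {
        // the reference is final (so it can be captured); the boolean inside stays mutable
        final AtomicBoolean found = new AtomicBoolean(false);
        values.forEach(v -> {
            if (v < 0) {
                found.set(true);
            }
        });
        return found.get();
    }
}

When no concurrency is involved, a single-element array works too (the NetBeans example further down uses an OffsetRange[1] the same way); AtomicBoolean simply makes the intent and the memory semantics explicit.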
From source file:ch.cyberduck.core.b2.B2LargeUploadServiceTest.java
@Test
public void testAppendSecondPart() throws Exception {
    final B2Session session = new B2Session(new Host(new B2Protocol(),
            new B2Protocol().getDefaultHostname(),
            new Credentials(System.getProperties().getProperty("b2.user"),
                    System.getProperties().getProperty("b2.key"))));
    session.open(new DisabledHostKeyCallback());
    session.login(new DisabledPasswordStore(), new DisabledLoginCallback(), new DisabledCancelCallback());
    final Path bucket = new Path("test-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final Path test = new Path(bucket, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
    final Local local = new Local(System.getProperty("java.io.tmpdir"), UUID.randomUUID().toString());
    final int length = 102 * 1024 * 1024;
    final byte[] content = RandomUtils.nextBytes(length);
    IOUtils.write(content, local.getOutputStream(false));
    final TransferStatus status = new TransferStatus();
    status.setLength(content.length);
    final AtomicBoolean interrupt = new AtomicBoolean();
    try {
        new B2LargeUploadService(session, new B2WriteFeature(session), 100L * 1024L * 1024L, 1).upload(test,
                local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED), new DisabledStreamListener() {
                    long count;

                    @Override
                    public void sent(final long bytes) {
                        count += bytes;
                        if (count >= 101L * 1024L * 1024L) {
                            throw new RuntimeException();
                        }
                    }
                }, status, new DisabledLoginCallback());
    } catch (BackgroundException e) {
        // Expected
        interrupt.set(true);
    }
    assertTrue(interrupt.get());
    assertEquals(100L * 1024L * 1024L, status.getOffset(), 0L);
    assertFalse(status.isComplete());

    final TransferStatus append = new TransferStatus().append(true).length(2L * 1024L * 1024L)
            .skip(100L * 1024L * 1024L);
    new B2LargeUploadService(session, new B2WriteFeature(session), 100L * 1024L * 1024L, 1).upload(test,
            local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED), new DisabledStreamListener(), append,
            new DisabledLoginCallback());
    assertEquals(102L * 1024L * 1024L, append.getOffset(), 0L);
    assertTrue(append.isComplete());
    assertTrue(new B2FindFeature(session).find(test));
    assertEquals(102L * 1024L * 1024L, new B2AttributesFinderFeature(session).find(test).getSize(), 0L);
    final byte[] buffer = new byte[content.length];
    final InputStream in = new B2ReadFeature(session).read(test, new TransferStatus(),
            new DisabledConnectionCallback());
    IOUtils.readFully(in, buffer);
    in.close();
    assertArrayEquals(content, buffer);
    new B2DeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(),
            new Delete.DisabledCallback());
    local.delete();
    session.close();
}
From source file:com.netflix.curator.framework.recipes.leader.TestLeaderLatch.java
@Test
public void testWaiting() throws Exception {
    final int PARTICIPANT_QTY = 10;

    ExecutorService executorService = Executors.newFixedThreadPool(PARTICIPANT_QTY);
    ExecutorCompletionService<Void> service = new ExecutorCompletionService<Void>(executorService);

    final Timing timing = new Timing();
    final CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(),
            timing.session(), timing.connection(), new RetryOneTime(1));
    try {
        client.start();

        final AtomicBoolean thereIsALeader = new AtomicBoolean(false);
        for (int i = 0; i < PARTICIPANT_QTY; ++i) {
            service.submit(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    LeaderLatch latch = new LeaderLatch(client, PATH_NAME);
                    try {
                        latch.start();
                        Assert.assertTrue(latch.await(timing.forWaiting().seconds(), TimeUnit.SECONDS));
                        // at most one participant may believe it is leader at any time
                        Assert.assertTrue(thereIsALeader.compareAndSet(false, true));
                        Thread.sleep((int) (10 * Math.random()));
                    } finally {
                        thereIsALeader.set(false);
                        latch.close();
                    }
                    return null;
                }
            });
        }

        for (int i = 0; i < PARTICIPANT_QTY; ++i) {
            service.take().get();
        }
    } finally {
        executorService.shutdown();
        IOUtils.closeQuietly(client);
    }
}
From source file:com.microsoft.tfs.core.clients.versioncontrol.internal.localworkspace.WorkspaceVersionTable.java
public ServerItemLocalVersionUpdate[] getUpdatesForReconcile(final LocalPendingChange[] pendingChanges,
        final boolean reconcileMissingOnDisk, final AtomicBoolean outClearLocalVersionTable) {
    // Start out by presuming we are going to clear the local version table
    outClearLocalVersionTable.set(true);

    final Set<ServerItemLocalVersionUpdate> updates = new HashSet<ServerItemLocalVersionUpdate>();

    // Add an update for every row in the table which is marked PendingReconcile.
    final Iterable<WorkspaceLocalItemPair> pairs = server.EnumSubTreeReferencedObjects(ServerPath.ROOT,
            EnumSubTreeOptions.ENUMERATE_SUB_TREE_ROOT, Integer.MAX_VALUE);

    for (final WorkspaceLocalItemPair pair : pairs) {
        if (pair.getCommitted() != null) {
            if (outClearLocalVersionTable.get() && !pair.getCommitted().isPendingReconcile()) {
                // There's a row in our local table which has been reconciled. So this
                // is not a recovery situation where the local version table has been
                // lost locally. Don't clear the local version table as a part of this
                // reconcile.
                outClearLocalVersionTable.set(false);
            }

            final ServerItemLocalVersionUpdate update = pair.getCommitted()
                    .getLocalVersionUpdate(reconcileMissingOnDisk);

            if (null != update) {
                updates.add(update);
            }
        }

        if (pair.getUncommitted() != null) {
            if (outClearLocalVersionTable.get() && !pair.getUncommitted().isPendingReconcile()) {
                // There's a row in our local table which has been reconciled. So this
                // is not a recovery situation where the local version table has been
                // lost locally. Don't clear the local version table as a part of this
                // reconcile.
                outClearLocalVersionTable.set(false);
            }

            final ServerItemLocalVersionUpdate update = pair.getUncommitted()
                    .getLocalVersionUpdate(reconcileMissingOnDisk);

            if (null != update) {
                updates.add(update);
            }
        }
    }

    // Next, add an update for every item which is in the removed items list.
    for (final WorkspaceLocalItem lvEntry : removedItems) {
        // Make sure that the item has not been resurrected in the local version table.
        if (null == getByServerItem(lvEntry.getServerItem(), lvEntry.isCommitted())) {
            updates.add(lvEntry.getLocalVersionUpdate());
        }
    }

    // For safety, always enqueue local version updates for pending changes.
    for (final LocalPendingChange pc : pendingChanges) {
        final WorkspaceLocalItem lvEntry = getByServerItem(
                pc.isCommitted() ? pc.getCommittedServerItem() : pc.getTargetServerItem(), pc.isCommitted());

        if (lvEntry != null) {
            if (!lvEntry.isPendingReconcile()) {
                lvEntry.setPendingReconcile(true);
                pendingReconcileCount++;
            }

            updates.add(lvEntry.getLocalVersionUpdate(reconcileMissingOnDisk));
        }
    }

    return updates.toArray(new ServerItemLocalVersionUpdate[updates.size()]);
}
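Unlike the other examples, this one uses AtomicBoolean as an out-parameter: Java passes references by value and has no ref/out booleans, so the caller hands in a mutable holder (outClearLocalVersionTable) for the callee to write. A stripped-down sketch of that calling convention, with hypothetical names (OutParamDemo, tryParseBoolean):

import java.util.concurrent.atomic.AtomicBoolean;

public class OutParamDemo {
    // returns whether parsing succeeded; the parsed value comes back via 'out'
    static boolean tryParseBoolean(String s, AtomicBoolean out) {
        if ("true".equalsIgnoreCase(s) || "false".equalsIgnoreCase(s)) {
            out.set(Boolean.parseBoolean(s));
            return true;
        }
        return false;
    }

    public static void main(String[] args) {
        AtomicBoolean value = new AtomicBoolean();
        if (tryParseBoolean("TRUE", value)) {
            System.out.println("parsed: " + value.get()); // parsed: true
        }
    }
}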
From source file:netbeanstypescript.TSDeclarationFinder.java
@Override
public OffsetRange getReferenceSpan(final Document doc, final int caretOffset) {
    final OffsetRange[] tokenRange = new OffsetRange[1];
    doc.render(new Runnable() {
        @Override
        public void run() {
            TokenSequence<?> ts = TokenHierarchy.get(doc).tokenSequence(JsTokenId.javascriptLanguage());
            int offsetWithinToken = ts.move(caretOffset);
            if (ts.moveNext()) {
                Token<?> tok = ts.token();
                if (possibleRefs.contains(tok.id().primaryCategory())) {
                    int start = caretOffset - offsetWithinToken;
                    tokenRange[0] = new OffsetRange(start, start + tok.length());
                    return;
                }
            }
            // If we're right between two tokens, check the previous
            if (offsetWithinToken == 0 && ts.movePrevious()) {
                Token<?> tok = ts.token();
                if (possibleRefs.contains(tok.id().primaryCategory())) {
                    tokenRange[0] = new OffsetRange(caretOffset - tok.length(), caretOffset);
                }
            }
        }
    });
    if (tokenRange[0] == null) {
        return OffsetRange.NONE;
    }

    // Now query the language service to see if this is actually a reference
    final AtomicBoolean isReference = new AtomicBoolean();
    class ReferenceSpanTask extends UserTask implements Runnable {
        @Override
        public void run() {
            try {
                ParserManager.parse(Collections.singleton(Source.create(doc)), this);
            } catch (ParseException e) {
                TSService.log.log(Level.WARNING, null, e);
            }
        }

        @Override
        public void run(ResultIterator ri) throws ParseException {
            // Calling ResultIterator#getParserResult() ensures latest snapshot pushed to server
            Object defs = TSService.call("getDefsAtPosition",
                    ri.getParserResult().getSnapshot().getSource().getFileObject(), caretOffset);
            isReference.set(defs != null);
        }
    }
    // Don't block the UI thread for too long in case server is busy
    RequestProcessor.Task task = RP.post(new ReferenceSpanTask());
    try {
        task.waitFinished(1000);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    } finally {
        task.cancel();
    }
    return isReference.get() ? tokenRange[0] : OffsetRange.NONE;
}
From source file:li.strolch.runtime.main.MainStarter.java
public int keepAlive() {
    final AtomicBoolean atomicBoolean = new AtomicBoolean();
    Runtime.getRuntime().addShutdownHook(new Thread() {
        @Override
        public void run() {
            synchronized (MainStarter.class) {
                System.out.println("VM Shutting down. Stopping Strolch...");
                System.out.println("");
                System.out.println("Strolch application " + MainStarter.this.getAgent().getApplicationName()
                        + " shutting down...");
                MainStarter.this.getAgent().stop();
                MainStarter.this.getAgent().destroy();
                System.out.println("Strolch application " + MainStarter.this.getAgent().getApplicationName()
                        + " shut down. Exiting VM.");
                atomicBoolean.set(true);
                MainStarter.class.notify();
            }
        }
    });

    logger.info("");
    logger.info("Strolch application " + getAgent().getApplicationName() + " started ");

    while (!atomicBoolean.get()) {
        synchronized (MainStarter.class) {
            try {
                MainStarter.class.wait();
            } catch (InterruptedException e) {
                logger.error("Interrupted.");
                return 1;
            }
        }
    }
    return 0;
}
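Note the shape of the wait: the AtomicBoolean is checked in a loop around MainStarter.class.wait(), and the shutdown hook sets the flag before calling notify() while holding the same monitor. The flag is what keeps the code safe against spurious wakeups and against a notify that fires before the main thread starts waiting. A stripped-down sketch of that guarded-wait idiom (GuardedShutdown and LOCK are illustrative names):

import java.util.concurrent.atomic.AtomicBoolean;

public class GuardedShutdown {
    private static final Object LOCK = new Object();
    private static final AtomicBoolean stopped = new AtomicBoolean(false);

    public static void main(String[] args) throws InterruptedException {
        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            synchronized (LOCK) {
                stopped.set(true); // set the condition before notifying
                LOCK.notify();
            }
        }));
        synchronized (LOCK) {
            while (!stopped.get()) { // loop guards against spurious wakeups
                LOCK.wait();
            }
        }
        System.out.println("shutdown signalled, exiting main");
    }
}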
From source file:org.apache.hadoop.hbase.master.procedure.TestMasterProcedureQueue.java
@Test
public void testConcurrentCreateDelete() throws Exception {
    final MasterProcedureQueue procQueue = queue;
    final TableName table = TableName.valueOf("testtb");
    final AtomicBoolean running = new AtomicBoolean(true);
    final AtomicBoolean failure = new AtomicBoolean(false);
    Thread createThread = new Thread() {
        @Override
        public void run() {
            try {
                while (running.get() && !failure.get()) {
                    if (procQueue.tryAcquireTableExclusiveLock(table, "create")) {
                        procQueue.releaseTableExclusiveLock(table);
                    }
                }
            } catch (Throwable e) {
                LOG.error("create failed", e);
                failure.set(true);
            }
        }
    };
    Thread deleteThread = new Thread() {
        @Override
        public void run() {
            try {
                while (running.get() && !failure.get()) {
                    if (procQueue.tryAcquireTableExclusiveLock(table, "delete")) {
                        procQueue.releaseTableExclusiveLock(table);
                    }
                    procQueue.markTableAsDeleted(table);
                }
            } catch (Throwable e) {
                LOG.error("delete failed", e);
                failure.set(true);
            }
        }
    };
    createThread.start();
    deleteThread.start();
    for (int i = 0; i < 100 && running.get() && !failure.get(); ++i) {
        Thread.sleep(100);
    }
    running.set(false);
    createThread.join();
    deleteThread.join();
    assertEquals(false, failure.get());
}
From source file:com.streamsets.pipeline.stage.bigquery.destination.BigQueryTarget.java
@Override
public void write(Batch batch) throws StageException {
    Map<TableId, List<Record>> tableIdToRecords = new LinkedHashMap<>();
    Map<Long, Record> requestIndexToRecords = new LinkedHashMap<>();

    if (batch.getRecords().hasNext()) {
        ELVars elVars = getContext().createELVars();
        batch.getRecords().forEachRemaining(record -> {
            RecordEL.setRecordInContext(elVars, record);
            try {
                String datasetName = dataSetEval.eval(elVars, conf.datasetEL, String.class);
                String tableName = tableNameELEval.eval(elVars, conf.tableNameEL, String.class);
                TableId tableId = TableId.of(datasetName, tableName);
                if (tableIdExistsCache.get(tableId)) {
                    List<Record> tableIdRecords = tableIdToRecords.computeIfAbsent(tableId,
                            t -> new ArrayList<>());
                    tableIdRecords.add(record);
                } else {
                    getContext().toError(record, Errors.BIGQUERY_17, datasetName, tableName,
                            conf.credentials.projectId);
                }
            } catch (ELEvalException e) {
                LOG.error("Error evaluating DataSet/TableName EL", e);
                getContext().toError(record, Errors.BIGQUERY_10, e);
            } catch (ExecutionException e) {
                LOG.error("Error when checking exists for tableId, Reason : {}", e);
                Throwable rootCause = Throwables.getRootCause(e);
                getContext().toError(record, Errors.BIGQUERY_13, rootCause);
            }
        });

        tableIdToRecords.forEach((tableId, records) -> {
            final AtomicLong index = new AtomicLong(0);
            final AtomicBoolean areThereRecordsToWrite = new AtomicBoolean(false);
            InsertAllRequest.Builder insertAllRequestBuilder = InsertAllRequest.newBuilder(tableId);
            records.forEach(record -> {
                try {
                    String insertId = getInsertIdForRecord(elVars, record);
                    Map<String, ?> rowContent = convertToRowObjectFromRecord(record);
                    if (rowContent.isEmpty()) {
                        throw new OnRecordErrorException(record, Errors.BIGQUERY_14);
                    }
                    insertAllRequestBuilder.addRow(insertId, rowContent);
                    areThereRecordsToWrite.set(true);
                    requestIndexToRecords.put(index.getAndIncrement(), record);
                } catch (OnRecordErrorException e) {
                    LOG.error("Error when converting record {} to row, Reason : {} ",
                            record.getHeader().getSourceId(), e.getMessage());
                    getContext().toError(record, e.getErrorCode(), e.getParams());
                }
            });

            if (areThereRecordsToWrite.get()) {
                insertAllRequestBuilder.setIgnoreUnknownValues(conf.ignoreInvalidColumn);
                insertAllRequestBuilder.setSkipInvalidRows(false);

                InsertAllRequest request = insertAllRequestBuilder.build();

                if (!request.getRows().isEmpty()) {
                    try {
                        InsertAllResponse response = bigQuery.insertAll(request);
                        if (response.hasErrors()) {
                            response.getInsertErrors().forEach((requestIdx, errors) -> {
                                Record record = requestIndexToRecords.get(requestIdx);
                                String messages = COMMA_JOINER.join(errors.stream()
                                        .map(BigQueryError::getMessage).collect(Collectors.toList()));
                                String reasons = COMMA_JOINER.join(errors.stream()
                                        .map(BigQueryError::getReason).collect(Collectors.toList()));
                                LOG.error("Error when inserting record {}, Reasons : {}, Messages : {}",
                                        record.getHeader().getSourceId(), reasons, messages);
                                getContext().toError(record, Errors.BIGQUERY_11, reasons, messages);
                            });
                        }
                    } catch (BigQueryException e) {
                        LOG.error(Errors.BIGQUERY_13.getMessage(), e);
                        // Put all records to error.
                        for (long i = 0; i < request.getRows().size(); i++) {
                            Record record = requestIndexToRecords.get(i);
                            getContext().toError(record, Errors.BIGQUERY_13, e);
                        }
                    }
                }
            }
        });
    }
}
From source file:org.apache.hadoop.yarn.applications.distributedshell.TestDistributedShell.java
public void testDSShell(boolean haveDomain) throws Exception {
    String[] args = { "--jar", APPMASTER_JAR, "--num_containers", "2", "--shell_command",
            Shell.WINDOWS ? "dir" : "ls", "--master_memory", "512", "--master_vcores", "2",
            "--container_memory", "128", "--container_vcores", "1" };
    if (haveDomain) {
        String[] domainArgs = { "--domain", "TEST_DOMAIN", "--view_acls", "reader_user reader_group",
                "--modify_acls", "writer_user writer_group", "--create" };
        List<String> argsList = new ArrayList<String>(Arrays.asList(args));
        argsList.addAll(Arrays.asList(domainArgs));
        args = argsList.toArray(new String[argsList.size()]);
    }

    LOG.info("Initializing DS Client");
    final Client client = new Client(new Configuration(yarnCluster.getConfig()));
    boolean initSuccess = client.init(args);
    Assert.assertTrue(initSuccess);
    LOG.info("Running DS Client");
    final AtomicBoolean result = new AtomicBoolean(false);
    Thread t = new Thread() {
        public void run() {
            try {
                result.set(client.run());
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    };
    t.start();

    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(new Configuration(yarnCluster.getConfig()));
    yarnClient.start();
    String hostName = NetUtils.getHostname();

    boolean verified = false;
    String errorMessage = "";
    while (!verified) {
        List<ApplicationReport> apps = yarnClient.getApplications();
        if (apps.size() == 0) {
            Thread.sleep(10);
            continue;
        }
        ApplicationReport appReport = apps.get(0);
        if (appReport.getHost().equals("N/A")) {
            Thread.sleep(10);
            continue;
        }
        errorMessage = "Expected host name to start with '" + hostName + "', was '" + appReport.getHost()
                + "'. Expected rpc port to be '-1', was '" + appReport.getRpcPort() + "'.";
        if (checkHostname(appReport.getHost()) && appReport.getRpcPort() == -1) {
            verified = true;
        }
        if (appReport.getYarnApplicationState() == YarnApplicationState.FINISHED) {
            break;
        }
    }
    Assert.assertTrue(errorMessage, verified);
    t.join();
    LOG.info("Client run completed. Result=" + result);
    Assert.assertTrue(result.get());

    if (timelineVersionWatcher.getTimelineVersion() == 1.5f) {
        long scanInterval = conf.getLong(
                YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_SCAN_INTERVAL_SECONDS,
                YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_SCAN_INTERVAL_SECONDS_DEFAULT);
        Path doneDir = new Path(YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_DONE_DIR_DEFAULT);
        // Wait till the data is moved to done dir, or timeout and fail
        while (true) {
            RemoteIterator<FileStatus> iterApps = fs.listStatusIterator(doneDir);
            if (iterApps.hasNext()) {
                break;
            }
            Thread.sleep(scanInterval * 2);
        }
    }

    TimelineDomain domain = null;
    if (haveDomain) {
        domain = yarnCluster.getApplicationHistoryServer().getTimelineStore().getDomain("TEST_DOMAIN");
        Assert.assertNotNull(domain);
        Assert.assertEquals("reader_user reader_group", domain.getReaders());
        Assert.assertEquals("writer_user writer_group", domain.getWriters());
    }
    TimelineEntities entitiesAttempts = yarnCluster.getApplicationHistoryServer().getTimelineStore()
            .getEntities(ApplicationMaster.DSEntity.DS_APP_ATTEMPT.toString(), null, null, null, null, null,
                    null, null, null, null);
    Assert.assertNotNull(entitiesAttempts);
    Assert.assertEquals(1, entitiesAttempts.getEntities().size());
    Assert.assertEquals(2, entitiesAttempts.getEntities().get(0).getEvents().size());
    Assert.assertEquals(entitiesAttempts.getEntities().get(0).getEntityType().toString(),
            ApplicationMaster.DSEntity.DS_APP_ATTEMPT.toString());
    if (haveDomain) {
        Assert.assertEquals(domain.getId(), entitiesAttempts.getEntities().get(0).getDomainId());
    } else {
        Assert.assertEquals("DEFAULT", entitiesAttempts.getEntities().get(0).getDomainId());
    }
    String currAttemptEntityId = entitiesAttempts.getEntities().get(0).getEntityId();
    ApplicationAttemptId attemptId = ApplicationAttemptId.fromString(currAttemptEntityId);
    NameValuePair primaryFilter = new NameValuePair(ApplicationMaster.APPID_TIMELINE_FILTER_NAME,
            attemptId.getApplicationId().toString());
    TimelineEntities entities = yarnCluster.getApplicationHistoryServer().getTimelineStore().getEntities(
            ApplicationMaster.DSEntity.DS_CONTAINER.toString(), null, null, null, null, null, primaryFilter,
            null, null, null);
    Assert.assertNotNull(entities);
    Assert.assertEquals(2, entities.getEntities().size());
    Assert.assertEquals(entities.getEntities().get(0).getEntityType().toString(),
            ApplicationMaster.DSEntity.DS_CONTAINER.toString());
    if (haveDomain) {
        Assert.assertEquals(domain.getId(), entities.getEntities().get(0).getDomainId());
    } else {
        Assert.assertEquals("DEFAULT", entities.getEntities().get(0).getDomainId());
    }
}