List of usage examples for java.util.concurrent.atomic.AtomicBoolean.get()
public final boolean get()
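Before the project-specific examples below, here is a minimal, self-contained sketch of the pattern most of them share: one thread publishes a result with set(true) on a shared AtomicBoolean while another thread observes it through get(), which reads the current value with volatile semantics. The class and variable names are illustrative only and do not come from any of the projects listed.

import java.util.concurrent.atomic.AtomicBoolean;

public class AtomicBooleanGetExample {
    public static void main(String[] args) throws InterruptedException {
        // Flag shared between the main thread and a worker thread.
        final AtomicBoolean done = new AtomicBoolean(false);

        Thread worker = new Thread(new Runnable() {
            @Override
            public void run() {
                // Simulate some work, then publish completion.
                try {
                    Thread.sleep(100);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
                done.set(true);
            }
        });
        worker.start();

        // get() returns the current value with volatile read semantics, so the
        // update made by the worker thread becomes visible to this thread.
        while (!done.get()) {
            Thread.sleep(10);
        }
        worker.join();
        System.out.println("done = " + done.get()); // prints: done = true
    }
}

AtomicBoolean is used instead of a plain boolean field in the tests below because the flag is written on a background thread and read on the coordinating thread; get() and set() guarantee visibility without explicit synchronization.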
From source file:org.apache.blur.command.ShardCommandManagerTest.java
@Test
public void testShardCommandManagerNormalWithCancel()
        throws IOException, TimeoutException, ExceptionCollector, BlurException, InterruptedException {
    String commandExecutionId = "TEST_COMMAND_ID1";
    BlurObjectSerDe serDe = new BlurObjectSerDe();
    WaitForSeconds waitForSeconds = new WaitForSeconds();
    waitForSeconds.setTable("test");
    waitForSeconds.setSeconds(5);
    waitForSeconds.setCommandExecutionId(commandExecutionId);
    Arguments arguments = CommandUtil.toArguments(waitForSeconds, serDe);
    BlurObject args = CommandUtil.toBlurObject(arguments);
    System.out.println(args.toString(1));
    final ArgumentOverlay argumentOverlay = new ArgumentOverlay(args, serDe);
    final AtomicBoolean fail = new AtomicBoolean();
    final AtomicBoolean running = new AtomicBoolean(true);
    new Thread(new Runnable() {
        @Override
        public void run() {
            TableContextFactory tableContextFactory = getTableContextFactory();
            Long instanceExecutionId = null;
            while (true) {
                try {
                    Response response;
                    if (instanceExecutionId == null) {
                        response = _manager.execute(tableContextFactory, "wait", argumentOverlay);
                    } else {
                        response = _manager.reconnect(instanceExecutionId);
                    }
                    fail.set(true);
                    System.out.println(response);
                    return;
                } catch (IOException e) {
                    if (e.getCause() instanceof CancellationException) {
                        return;
                    }
                    e.printStackTrace();
                    fail.set(true);
                    return;
                } catch (TimeoutException e) {
                    instanceExecutionId = e.getInstanceExecutionId();
                } catch (Exception e) {
                    e.printStackTrace();
                    fail.set(true);
                    return;
                } finally {
                    running.set(false);
                }
            }
        }
    }).start();
    Thread.sleep(1000);
    _manager.cancelCommand(commandExecutionId);
    Thread.sleep(5000);
    if (fail.get() || running.get()) {
        fail("Fail [" + fail.get() + "] Running [" + running.get() + "]");
    }
}
From source file:android.support.v7.widget.BaseRecyclerViewInstrumentationTest.java
void smoothScrollToPosition(final int position, boolean assertArrival) throws Throwable {
    if (mDebug) {
        Log.d(TAG, "SMOOTH scrolling to " + position);
    }
    final CountDownLatch viewAdded = new CountDownLatch(1);
    final RecyclerView.OnChildAttachStateChangeListener listener =
            new RecyclerView.OnChildAttachStateChangeListener() {
                @Override
                public void onChildViewAttachedToWindow(View view) {
                    if (position == mRecyclerView.getChildAdapterPosition(view)) {
                        viewAdded.countDown();
                    }
                }

                @Override
                public void onChildViewDetachedFromWindow(View view) {
                }
            };
    final AtomicBoolean addedListener = new AtomicBoolean(false);
    runTestOnUiThread(new Runnable() {
        @Override
        public void run() {
            RecyclerView.ViewHolder viewHolderForAdapterPosition = mRecyclerView
                    .findViewHolderForAdapterPosition(position);
            if (viewHolderForAdapterPosition != null) {
                viewAdded.countDown();
            } else {
                mRecyclerView.addOnChildAttachStateChangeListener(listener);
                addedListener.set(true);
            }
        }
    });
    runTestOnUiThread(new Runnable() {
        @Override
        public void run() {
            mRecyclerView.smoothScrollToPosition(position);
        }
    });
    getInstrumentation().waitForIdleSync();
    assertThat("should be able to scroll in 10 seconds",
            !assertArrival || viewAdded.await(10, TimeUnit.SECONDS), CoreMatchers.is(true));
    waitForIdleScroll(mRecyclerView);
    if (mDebug) {
        Log.d(TAG, "SMOOTH scrolling done");
    }
    if (addedListener.get()) {
        runTestOnUiThread(new Runnable() {
            @Override
            public void run() {
                mRecyclerView.removeOnChildAttachStateChangeListener(listener);
            }
        });
    }
    getInstrumentation().waitForIdleSync();
}
From source file:info.archinnov.achilles.it.TestDSLSimpleEntity.java
@Test
public void should_dsl_update_value_if_equal() throws Exception {
    //Given
    final long id = RandomUtils.nextLong(0L, Long.MAX_VALUE);
    final Date date = buildDateKey();
    final AtomicBoolean success = new AtomicBoolean(false);
    scriptExecutor.executeScriptTemplate("SimpleEntity/insert_single_row.cql",
            ImmutableMap.of("id", id, "table", "simple"));
    final CassandraLogAsserter logAsserter = new CassandraLogAsserter();
    logAsserter.prepareLogLevelForDriverConnection();

    //When
    manager.dsl().update().fromBaseTable().value_Set("new value").where().id_Eq(id).date_Eq(date)
            .ifValue_Eq("0 AM").withLwtResultListener(new LWTResultListener() {
                @Override
                public void onSuccess() {
                    success.getAndSet(true);
                }

                @Override
                public void onError(LWTResult lwtResult) {
                }
            }).withResultSetAsyncListener(rs -> {
                assertThat(rs.wasApplied()).isTrue();
                return rs;
            }).withSerialConsistencyLevel(SERIAL).execute();

    //Then
    final Row row = session.execute("SELECT value FROM simple WHERE id = " + id).one();
    assertThat(row).isNotNull();
    assertThat(row.getString("value")).isEqualTo("new value");
    assertThat(success.get()).isTrue();
    logAsserter.assertSerialConsistencyLevels(SERIAL);
}
From source file:de.acosix.alfresco.mtsupport.repo.sync.TenantAwareChainingUserRegistrySynchronizer.java
protected void synchronize(final boolean forceUpdate, final boolean isFullSync, final boolean splitTxns) {
    final String currentDomain = TenantUtil.getCurrentDomain();
    LOGGER.debug("Running {} sync with deletions {}allowed in tenant {}", forceUpdate ? "full" : "differential",
            this.allowDeletions ? "" : "not ",
            TenantService.DEFAULT_DOMAIN.equals(currentDomain) ? "-default-" : currentDomain);

    final QName lockQName = this.getLockQNameForCurrentTenant();
    String lockToken;
    try {
        // splitTxns = true: likely startup-triggered so give up immediately if lock not available
        // (startup of a cluster node already performs synchronisation)
        // splitTxns = false: likely login-triggered so wait for the lock and retry
        lockToken = this.jobLockService.getLock(lockQName, LOCK_TTL, splitTxns ? 0 : LOCK_TTL,
                splitTxns ? 1 : 10);
    } catch (final LockAcquisitionException laex) {
        LOGGER.warn(
                "User registry synchronization already running in another thread / on another cluster node. Synchronize aborted");
        lockToken = null;
    }

    if (lockToken != null) {
        final AtomicBoolean synchRunning = new AtomicBoolean(true);
        final AtomicBoolean lockReleased = new AtomicBoolean(false);
        try {
            // original class used complex setup with asynch refresher thread
            // this was legacy never adapted when JobLockRefreshCallback was introduced in 3.4
            this.jobLockService.refreshLock(lockToken, lockQName, LOCK_TTL, new JobLockRefreshCallback() {

                /**
                 * {@inheritDoc}
                 */
                @Override
                public void lockReleased() {
                    lockReleased.set(true);
                }

                @Override
                public boolean isActive() {
                    return synchRunning.get();
                }
            });

            final Map<String, UserRegistry> plugins = this.getPluginsToSync();
            final Set<String> visitedIds = new TreeSet<>();

            this.notifySyncStart(plugins.keySet());

            for (final Entry<String, UserRegistry> pluginEntry : plugins.entrySet()) {
                final String id = pluginEntry.getKey();
                final UserRegistry plugin = pluginEntry.getValue();

                if (LOGGER.isDebugEnabled() && this.mbeanServer != null) {
                    this.logPluginConfig(id);
                }

                LOGGER.info("Synchronizing users and groups with user registry {} in tenant {}", id,
                        TenantService.DEFAULT_DOMAIN.equals(currentDomain) ? "-default-" : currentDomain);
                if (isFullSync) {
                    LOGGER.info(
                            "Full synchronisation with user registry {} in tenant {} - deletions enabled: {} (if true, some users and groups previously created by synchronization with this user registry may be removed, otherwise users / groups removed from this registry will be logged only and remain in the repository while users previously found in a different registry will be moved in the repository rather than recreated)",
                            id, TenantService.DEFAULT_DOMAIN.equals(currentDomain) ? "-default-" : currentDomain,
                            this.allowDeletions);
                }

                final boolean requiresNew = splitTxns
                        || AlfrescoTransactionSupport.getTransactionReadState() == TxnReadState.TXN_READ_ONLY;

                this.syncWithPlugin(id, plugin, forceUpdate, isFullSync, requiresNew, visitedIds,
                        plugins.keySet());

                this.applicationEventPublisher.publishEvent(new SynchronizeDirectoryEndEvent(this, id));
            }

            this.notifySyncEnd();
        } catch (final RuntimeException re) {
            this.notifySyncEnd(re);
            LOGGER.error("Synchronization aborted due to error", re);
            throw re;
        } finally {
            synchRunning.set(false);
            this.jobLockService.releaseLock(lockToken, lockQName);
        }
    }
}
From source file:com.spotify.docker.client.DefaultDockerClientTest.java
@Test
public void testBuildWithPull() throws Exception {
    assumeTrue("We need Docker API >= v1.19 to run this test." + "This Docker API is "
            + sut.version().apiVersion(), compareVersion(sut.version().apiVersion(), "1.19") >= 0);

    final String dockerDirectory = Resources.getResource("dockerDirectory").getPath();
    final String pullMsg = "Pulling from";

    // Build once to make sure we have cached images.
    sut.build(Paths.get(dockerDirectory));

    // Build again with PULL set, and verify we pulled the base image
    final AtomicBoolean pulled = new AtomicBoolean(false);
    sut.build(Paths.get(dockerDirectory), "test", new ProgressHandler() {
        @Override
        public void progress(ProgressMessage message) throws DockerException {
            if (!isNullOrEmpty(message.status()) && message.status().contains(pullMsg)) {
                pulled.set(true);
            }
        }
    }, PULL_NEWER_IMAGE);
    assertTrue(pulled.get());
}
From source file:com.microsoft.tfs.core.clients.versioncontrol.internal.WebServiceLayerLocalWorkspaces.java
@Override
public GetOperation[] pendChanges(final String workspaceName, final String ownerName,
        final ChangeRequest[] changes, final PendChangesOptions pendChangesOptions,
        final SupportedFeatures supportedFeatures, final AtomicReference<Failure[]> failures,
        final String[] itemPropertyFilters, final String[] itemAttributeFilters, final boolean updateDisk,
        final AtomicBoolean onlineOperation, final AtomicReference<ChangePendedFlags> changePendedFlags) {
    onlineOperation.set(false);

    // set this to none for local workspaces, if the call reaches the server
    // the flag will get overwritten
    changePendedFlags.set(ChangePendedFlags.NONE);
    int unlockCount = 0;

    final Workspace localWorkspace = getLocalWorkspace(workspaceName, ownerName);
    if (localWorkspace != null) {
        boolean attributeChange = false;
        boolean nonExecuteSymlinkBitPropertyChange = false;

        if (null != itemAttributeFilters && itemAttributeFilters.length > 0) {
            attributeChange = true;
        }

        // If the property filters are only for the executable bit, we can
        // handle that locally, otherwise we must go to the server.
        if (null != itemPropertyFilters && itemPropertyFilters.length > 0) {
            for (final String filter : itemPropertyFilters) {
                /*
                 * Not using wildcard matching here: just because a wildcard
                 * _does_ match the executable key _doesn't_ mean it
                 * wouldn't match others on the server. So only consider a
                 * direct match against the executable key to keep
                 * processing locally.
                 */
                if (PropertyValue.comparePropertyNames(PropertyConstants.EXECUTABLE_KEY, filter) != 0
                        && PropertyValue.comparePropertyNames(PropertyConstants.SYMBOLIC_KEY, filter) != 0) {
                    nonExecuteSymlinkBitPropertyChange = true;
                    break;
                }
            }
        }

        RequestType requestType = RequestType.NONE;
        boolean requestingLock = false;

        for (final ChangeRequest changeRequest : changes) {
            if (RequestType.NONE == requestType) {
                requestType = changeRequest.getRequestType();
            } else if (requestType != changeRequest.getRequestType()) {
                // TODO: Move string from server assembly
                throw new VersionControlException("Not all changes had the same request type"); //$NON-NLS-1$
            }

            // If the caller is requesting a lock, then the call is a server
            // call, unless the user is performing an add and the LockLevel
            // is None.
            // Is it possible to have different locklevels on different
            // ChangeRequest objects?
            if (changeRequest.getLockLevel() != LockLevel.UNCHANGED
                    && !(changeRequest.getLockLevel() == LockLevel.NONE
                            && changeRequest.getRequestType() == RequestType.ADD)) {
                requestingLock = true;
            }

            if (changeRequest.getLockLevel() == LockLevel.NONE
                    && changeRequest.getRequestType().equals(RequestType.LOCK)) {
                unlockCount++;
            }
        }

        final boolean silent = pendChangesOptions.contains(PendChangesOptions.SILENT);

        if (!requestingLock && !attributeChange && !nonExecuteSymlinkBitPropertyChange) {
            if (requestType == RequestType.ADD || requestType == RequestType.EDIT
                    || requestType == RequestType.DELETE || requestType == RequestType.RENAME
                    || requestType == RequestType.PROPERTY) {
                final LocalWorkspaceTransaction transaction = new LocalWorkspaceTransaction(localWorkspace);
                try {
                    final AtomicReference<Failure[]> delegateFailures = new AtomicReference<Failure[]>();
                    final AtomicReference<GetOperation[]> toReturn = new AtomicReference<GetOperation[]>();

                    final RequestType transactionRequestType = requestType;
                    transaction.execute(new AllTablesTransaction() {
                        @Override
                        public void invoke(final LocalWorkspaceProperties wp, final WorkspaceVersionTable lv,
                                final LocalPendingChangesTable pc) {
                            if (transactionRequestType == RequestType.ADD) {
                                toReturn.set(LocalDataAccessLayer.pendAdd(localWorkspace, wp, lv, pc, changes,
                                        silent, delegateFailures, itemPropertyFilters));
                            } else if (transactionRequestType == RequestType.EDIT) {
                                toReturn.set(LocalDataAccessLayer.pendEdit(localWorkspace, wp, lv, pc, changes,
                                        silent, delegateFailures, itemPropertyFilters));
                            } else if (transactionRequestType == RequestType.DELETE) {
                                toReturn.set(LocalDataAccessLayer.pendDelete(localWorkspace, wp, lv, pc,
                                        changes, silent, delegateFailures, itemPropertyFilters));
                            } else if (transactionRequestType == RequestType.RENAME) {
                                final AtomicBoolean onlineOperationRequired = new AtomicBoolean(false);

                                toReturn.set(LocalDataAccessLayer.pendRename(localWorkspace, wp, lv, pc,
                                        changes, silent, delegateFailures, onlineOperationRequired,
                                        itemPropertyFilters));

                                if (onlineOperationRequired.get()) {
                                    toReturn.set(null);
                                    transaction.abort();
                                } else if (updateDisk) {
                                    // we don't want to file a conflict
                                    // while offline, so we check up front.
                                    for (final GetOperation getOp : toReturn.get()) {
                                        if (getOp.getTargetLocalItem() != null
                                                && !LocalPath.equals(getOp.getSourceLocalItem(),
                                                        getOp.getTargetLocalItem())
                                                && new File(getOp.getTargetLocalItem()).exists()) {
                                            throw new VersionControlException(MessageFormat.format(
                                                    //@formatter:off
                                                    Messages.getString(
                                                            "WebServiceLayerLocalWorkspaces.FileExistsFormat"), //$NON-NLS-1$
                                                    //@formatter:on
                                                    getOp.getTargetLocalItem()));
                                        }
                                    }
                                }
                            }

                            if (transactionRequestType == RequestType.PROPERTY) {
                                final AtomicBoolean onlineOperationRequired = new AtomicBoolean(false);

                                toReturn.set(LocalDataAccessLayer.pendPropertyChange(localWorkspace, wp, lv,
                                        pc, changes, silent, delegateFailures, onlineOperationRequired,
                                        itemPropertyFilters));

                                if (onlineOperationRequired.get()) {
                                    toReturn.set(null);
                                    transaction.abort();
                                }
                            }
                        }
                    });

                    if (toReturn.get() != null) {
                        // Offline operation successfully completed.
                        failures.set(delegateFailures.get());
                        return toReturn.get();
                    }
                } finally {
                    try {
                        transaction.close();
                    } catch (final IOException e) {
                        throw new VersionControlException(e);
                    }
                }
            } else if (requestType == RequestType.BRANCH || requestType == RequestType.UNDELETE
                    || requestType == RequestType.LOCK) {
                // Forward to server
            } else {
                // TODO: Remove this when all RequestTypes are supported here.
                throw new VersionControlException("Not currently implemented for local workspaces"); //$NON-NLS-1$
            }
        }
    }

    if (null != localWorkspace) {
        // if we only have requests for unlocking, move on if the reconcile
        // fails this is needed for unlock other
        final Workspace w = reconcileIfLocal(workspaceName, ownerName, false, false,
                unlockCount == changes.length, null);

        // Lock the workspace which will receive the pending changes
        final WorkspaceLock lock = lockIfLocal(w);

        try {
            final GetOperation[] toReturn;
            try {
                if (getServiceLevel().getValue() >= WebServiceLevel.TFS_2012_QU1.getValue()) {
                    final _Repository5Soap_PendChangesInLocalWorkspaceResponse response = getRepository5()
                            .pendChangesInLocalWorkspace(workspaceName, ownerName,
                                    (_ChangeRequest[]) WrapperUtils.unwrap(_ChangeRequest.class, changes),
                                    pendChangesOptions.toIntFlags(), supportedFeatures.toIntFlags(),
                                    itemPropertyFilters, itemAttributeFilters,
                                    VersionControlConstants.MAX_SERVER_PATH_SIZE);
                    toReturn = (GetOperation[]) WrapperUtils.wrap(GetOperation.class,
                            response.getPendChangesInLocalWorkspaceResult());
                    failures.set((Failure[]) WrapperUtils.wrap(Failure.class, response.getFailures()));
                    changePendedFlags.set(new ChangePendedFlags(response.getChangePendedFlags()));
                } else {
                    final _Repository4Soap_PendChangesInLocalWorkspaceResponse response = getRepository4()
                            .pendChangesInLocalWorkspace(workspaceName, ownerName,
                                    (_ChangeRequest[]) WrapperUtils.unwrap(_ChangeRequest.class, changes),
                                    pendChangesOptions.toIntFlags(), supportedFeatures.toIntFlags(),
                                    itemPropertyFilters, itemAttributeFilters);
                    toReturn = (GetOperation[]) WrapperUtils.wrap(GetOperation.class,
                            response.getPendChangesInLocalWorkspaceResult());
                    failures.set((Failure[]) WrapperUtils.wrap(Failure.class, response.getFailures()));
                    changePendedFlags.set(new ChangePendedFlags(response.getChangePendedFlags()));
                }
            } catch (final ProxyException e) {
                throw VersionControlExceptionMapper.map(e);
            }

            syncWorkingFoldersIfNecessary(w, changePendedFlags.get());
            syncPendingChangesIfLocal(w, toReturn, itemPropertyFilters);

            if (RequestType.ADD == changes[0].getRequestType()) {
                // The client does not process the getops returned from a
                // PendAdd call. Because the server has created local
                // version rows for us, we need to update the local version
                // table to contain these rows too.
                LocalDataAccessLayer.afterAdd(localWorkspace, toReturn);

                // When a pending add is created, the item on disk is not
                // touched; so we need to inform the scanner that the item
                // is invalidated so it is re-scanned. Rather than go
                // through the local paths on which adds were pended, we'll
                // invalidate the workspace. This is not a common code path.
                localWorkspace.getWorkspaceWatcher().markPathChanged(""); //$NON-NLS-1$
            }

            onlineOperation.set(true);
            return toReturn;
        } finally {
            if (lock != null) {
                lock.close();
            }
        }
    } else {
        return super.pendChanges(workspaceName, ownerName, changes, pendChangesOptions, supportedFeatures,
                failures, itemPropertyFilters, itemAttributeFilters, updateDisk, onlineOperation,
                changePendedFlags);
    }
}
From source file:org.apache.hadoop.yarn.applications.ivic.TestDistributedShell.java
public void testDSShell(boolean haveDomain) throws Exception {
    String[] args = { "--jar", APPMASTER_JAR, "--num_containers", "2", "--shell_command",
            Shell.WINDOWS ? "dir" : "ls", "--master_memory", "512", "--master_vcores", "2",
            "--container_memory", "128", "--container_vcores", "1" };
    if (haveDomain) {
        String[] domainArgs = { "--domain", "TEST_DOMAIN", "--view_acls", "reader_user reader_group",
                "--modify_acls", "writer_user writer_group", "--create" };
        List<String> argsList = new ArrayList<String>(Arrays.asList(args));
        argsList.addAll(Arrays.asList(domainArgs));
        args = argsList.toArray(new String[argsList.size()]);
    }

    LOG.info("Initializing DS Client");
    final Client client = new Client(new Configuration(yarnCluster.getConfig()));
    boolean initSuccess = client.init(args);
    Assert.assertTrue(initSuccess);
    LOG.info("Running DS Client");
    final AtomicBoolean result = new AtomicBoolean(false);
    Thread t = new Thread() {
        public void run() {
            try {
                result.set(client.run());
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    };
    t.start();

    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(new Configuration(yarnCluster.getConfig()));
    yarnClient.start();
    String hostName = NetUtils.getHostname();

    boolean verified = false;
    String errorMessage = "";
    while (!verified) {
        List<ApplicationReport> apps = yarnClient.getApplications();
        if (apps.size() == 0) {
            Thread.sleep(10);
            continue;
        }
        ApplicationReport appReport = apps.get(0);
        if (appReport.getHost().equals("N/A")) {
            Thread.sleep(10);
            continue;
        }
        errorMessage = "Expected host name to start with '" + hostName + "', was '" + appReport.getHost()
                + "'. Expected rpc port to be '-1', was '" + appReport.getRpcPort() + "'.";
        if (checkHostname(appReport.getHost()) && appReport.getRpcPort() == -1) {
            verified = true;
        }
        if (appReport.getYarnApplicationState() == YarnApplicationState.FINISHED) {
            break;
        }
    }
    Assert.assertTrue(errorMessage, verified);
    t.join();
    LOG.info("Client run completed. Result=" + result);
    Assert.assertTrue(result.get());

    TimelineDomain domain = null;
    if (haveDomain) {
        domain = yarnCluster.getApplicationHistoryServer().getTimelineStore().getDomain("TEST_DOMAIN");
        Assert.assertNotNull(domain);
        Assert.assertEquals("reader_user reader_group", domain.getReaders());
        Assert.assertEquals("writer_user writer_group", domain.getWriters());
    }
    TimelineEntities entitiesAttempts = yarnCluster.getApplicationHistoryServer().getTimelineStore()
            .getEntities(ApplicationMaster.DSEntity.DS_APP_ATTEMPT.toString(), null, null, null, null, null,
                    null, null, null);
    Assert.assertNotNull(entitiesAttempts);
    Assert.assertEquals(1, entitiesAttempts.getEntities().size());
    Assert.assertEquals(2, entitiesAttempts.getEntities().get(0).getEvents().size());
    Assert.assertEquals(entitiesAttempts.getEntities().get(0).getEntityType().toString(),
            ApplicationMaster.DSEntity.DS_APP_ATTEMPT.toString());
    if (haveDomain) {
        Assert.assertEquals(domain.getId(), entitiesAttempts.getEntities().get(0).getDomainId());
    } else {
        Assert.assertEquals("DEFAULT", entitiesAttempts.getEntities().get(0).getDomainId());
    }
    TimelineEntities entities = yarnCluster.getApplicationHistoryServer().getTimelineStore().getEntities(
            ApplicationMaster.DSEntity.DS_CONTAINER.toString(), null, null, null, null, null, null, null,
            null);
    Assert.assertNotNull(entities);
    Assert.assertEquals(2, entities.getEntities().size());
    Assert.assertEquals(entities.getEntities().get(0).getEntityType().toString(),
            ApplicationMaster.DSEntity.DS_CONTAINER.toString());
    if (haveDomain) {
        Assert.assertEquals(domain.getId(), entities.getEntities().get(0).getDomainId());
    } else {
        Assert.assertEquals("DEFAULT", entities.getEntities().get(0).getDomainId());
    }
}
From source file:org.apache.hadoop.yarn.applications.amonly.TestDistributedShell.java
public void testDSShell(boolean haveDomain) throws Exception {
    String[] args = { "--jar", APPMASTER_JAR, "--num_containers", "2", "--shell_command",
            Shell.WINDOWS ? "dir" : "ls", "--master_memory", "512", "--master_vcores", "2",
            "--container_memory", "128", "--container_vcores", "1" };
    if (haveDomain) {
        String[] domainArgs = { "--domain", "TEST_DOMAIN", "--view_acls", "reader_user reader_group",
                "--modify_acls", "writer_user writer_group", "--create" };
        List<String> argsList = new ArrayList<String>(Arrays.asList(args));
        argsList.addAll(Arrays.asList(domainArgs));
        args = argsList.toArray(new String[argsList.size()]);
    }

    LOG.info("Initializing DS Client");
    final Client client = new Client(new Configuration(yarnCluster.getConfig()));
    boolean initSuccess = client.init(args);
    Assert.assertTrue(initSuccess);
    LOG.info("Running DS Client");
    final AtomicBoolean result = new AtomicBoolean(false);
    Thread t = new Thread() {
        public void run() {
            try {
                result.set(client.run());
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    };
    t.start();

    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(new Configuration(yarnCluster.getConfig()));
    yarnClient.start();
    String hostName = NetUtils.getHostname();

    boolean verified = false;
    String errorMessage = "";
    while (!verified) {
        List<ApplicationReport> apps = yarnClient.getApplications();
        if (apps.size() == 0) {
            Thread.sleep(10);
            continue;
        }
        ApplicationReport appReport = apps.get(0);
        if (appReport.getHost().equals("N/A")) {
            Thread.sleep(10);
            continue;
        }
        errorMessage = "Expected host name to start with '" + hostName + "', was '" + appReport.getHost()
                + "'. Expected rpc port to be '-1', was '" + appReport.getRpcPort() + "'.";
        if (checkHostname(appReport.getHost()) && appReport.getRpcPort() == -1) {
            verified = true;
        }
        if (appReport.getYarnApplicationState() == YarnApplicationState.FINISHED) {
            break;
        }
    }
    Assert.assertTrue(errorMessage, verified);
    t.join();
    LOG.info("Client run completed. Result=" + result);
    Assert.assertTrue(result.get());

    TimelineDomain domain = null;
    if (haveDomain) {
        domain = yarnCluster.getApplicationHistoryServer().getTimelineStore().getDomain("TEST_DOMAIN");
        Assert.assertNotNull(domain);
        Assert.assertEquals("reader_user reader_group", domain.getReaders());
        Assert.assertEquals("writer_user writer_group", domain.getWriters());
    }
    TimelineEntities entitiesAttempts = yarnCluster.getApplicationHistoryServer().getTimelineStore()
            .getEntities(ApplicationMaster.DSEntity.DS_APP_ATTEMPT.toString(), null, null, null, null, null,
                    null, null, null, null);
    Assert.assertNotNull(entitiesAttempts);
    Assert.assertEquals(1, entitiesAttempts.getEntities().size());
    Assert.assertEquals(2, entitiesAttempts.getEntities().get(0).getEvents().size());
    Assert.assertEquals(entitiesAttempts.getEntities().get(0).getEntityType().toString(),
            ApplicationMaster.DSEntity.DS_APP_ATTEMPT.toString());
    if (haveDomain) {
        Assert.assertEquals(domain.getId(), entitiesAttempts.getEntities().get(0).getDomainId());
    } else {
        Assert.assertEquals("DEFAULT", entitiesAttempts.getEntities().get(0).getDomainId());
    }
    TimelineEntities entities = yarnCluster.getApplicationHistoryServer().getTimelineStore().getEntities(
            ApplicationMaster.DSEntity.DS_CONTAINER.toString(), null, null, null, null, null, null, null,
            null, null);
    Assert.assertNotNull(entities);
    Assert.assertEquals(2, entities.getEntities().size());
    Assert.assertEquals(entities.getEntities().get(0).getEntityType().toString(),
            ApplicationMaster.DSEntity.DS_CONTAINER.toString());
    if (haveDomain) {
        Assert.assertEquals(domain.getId(), entities.getEntities().get(0).getDomainId());
    } else {
        Assert.assertEquals("DEFAULT", entities.getEntities().get(0).getDomainId());
    }
}
From source file:ch.entwine.weblounge.preview.phantomjs.PhantomJsPagePreviewGenerator.java
/**
 * {@inheritDoc}
 *
 * @see ch.entwine.weblounge.common.content.PreviewGenerator#createPreview(ch.entwine.weblounge.common.content.Resource,
 *      ch.entwine.weblounge.common.site.Environment,
 *      ch.entwine.weblounge.common.language.Language,
 *      ch.entwine.weblounge.common.content.image.ImageStyle, String,
 *      java.io.InputStream, java.io.OutputStream)
 */
public void createPreview(Resource<?> resource, Environment environment, Language language, ImageStyle style,
        String format, InputStream is, OutputStream os) throws IOException {

    // We don't need the input stream
    IOUtils.closeQuietly(is);

    // Find a suitable image preview generator for scaling
    ImagePreviewGenerator imagePreviewGenerator = null;
    synchronized (previewGenerators) {
        for (ImagePreviewGenerator generator : previewGenerators) {
            if (generator.supports(format)) {
                imagePreviewGenerator = generator;
                break;
            }
        }
        if (imagePreviewGenerator == null) {
            logger.debug("Unable to generate page previews since no image renderer is available");
            return;
        }
    }

    // Find the relevant metadata to start the request
    ResourceURI uri = resource.getURI();
    long version = resource.getVersion();
    Site site = uri.getSite();

    // Create the url
    URL pageURL = new URL(UrlUtils.concat(site.getHostname(environment).toExternalForm(), PAGE_HANDLER_PREFIX,
            uri.getIdentifier()));
    if (version == Resource.WORK) {
        pageURL = new URL(
                UrlUtils.concat(pageURL.toExternalForm(), "work_" + language.getIdentifier() + ".html"));
    } else {
        pageURL = new URL(
                UrlUtils.concat(pageURL.toExternalForm(), "index_" + language.getIdentifier() + ".html"));
    }

    // Create a temporary file
    final File rendererdFile = File.createTempFile("phantomjs-", "." + format, phantomTmpDir);
    final URL finalPageURL = pageURL;
    final AtomicBoolean success = new AtomicBoolean();

    // Call PhantomJS to render the page
    try {
        final PhantomJsProcessExecutor phantomjs = new PhantomJsProcessExecutor(scriptFile.getAbsolutePath(),
                pageURL.toExternalForm(), rendererdFile.getAbsolutePath()) {
            @Override
            protected void onProcessFinished(int exitCode) throws IOException {
                super.onProcessFinished(exitCode);
                switch (exitCode) {
                case 0:
                    if (rendererdFile.length() > 0) {
                        success.set(true);
                        logger.debug("Page preview of {} created at {}", finalPageURL,
                                rendererdFile.getAbsolutePath());
                    } else {
                        logger.warn("Error creating page preview of {}", finalPageURL);
                        success.set(false);
                        FileUtils.deleteQuietly(rendererdFile);
                    }
                    break;
                default:
                    success.set(false);
                    logger.warn("Error creating page preview of {}", finalPageURL);
                    FileUtils.deleteQuietly(rendererdFile);
                }
            }
        };

        // Finally have PhantomJS create the preview
        logger.debug("Creating preview of {}", finalPageURL);
        phantomjs.execute();

    } catch (ProcessExcecutorException e) {
        logger.warn("Error creating page preview of {}: {}", pageURL, e.getMessage());
        throw new IOException(e);
    } finally {
        // If page preview rendering failed, there is no point in scaling the
        // images
        if (!success.get()) {
            logger.debug("Skipping scaling of failed preview rendering {}", pageURL);
            FileUtils.deleteQuietly(rendererdFile);
            return;
        }
    }

    FileInputStream imageIs = null;

    // Scale the image to the correct size
    try {
        imageIs = new FileInputStream(rendererdFile);
        imagePreviewGenerator.createPreview(resource, environment, language, style, PREVIEW_FORMAT, imageIs,
                os);
    } catch (IOException e) {
        logger.error("Error reading original page preview from " + rendererdFile, e);
        throw e;
    } catch (Throwable t) {
        logger.warn("Error scaling page preview at " + uri + ": " + t.getMessage(), t);
        throw new IOException(t);
    } finally {
        IOUtils.closeQuietly(imageIs);
        FileUtils.deleteQuietly(rendererdFile);
    }
}
From source file:org.apache.tez.dag.app.TestMockDAGAppMaster.java
@Test(timeout = 100000)
public void testConcurrencyLimit() throws Exception {
    // the test relies on local mode behavior of launching a new container per task.
    // so task concurrency == container concurrency
    TezConfiguration tezconf = new TezConfiguration(defaultConf);
    final int concurrencyLimit = 5;
    MockTezClient tezClient = new MockTezClient("testMockAM", tezconf, true, null, null, null, null, false,
            false, concurrencyLimit * 4, 1000);
    tezClient.start();

    MockDAGAppMaster mockApp = tezClient.getLocalClient().getMockApp();
    MockContainerLauncher mockLauncher = mockApp.getContainerLauncher();
    mockLauncher.startScheduling(false);

    final AtomicInteger concurrency = new AtomicInteger(0);
    final AtomicBoolean exceededConcurrency = new AtomicBoolean(false);
    mockApp.containerDelegate = new ContainerDelegate() {
        @Override
        public void stop(ContainerStopRequest event) {
            concurrency.decrementAndGet();
        }

        @Override
        public void launch(ContainerLaunchRequest event) {
            int maxConc = concurrency.incrementAndGet();
            if (maxConc > concurrencyLimit) {
                exceededConcurrency.set(true);
            }
            System.out.println("Launched: " + maxConc);
        }
    };
    DAG dag = DAG.create("testConcurrencyLimit");
    Vertex vA = Vertex.create("A", ProcessorDescriptor.create("Proc.class"), 20)
            .setConf(TezConfiguration.TEZ_AM_VERTEX_MAX_TASK_CONCURRENCY, String.valueOf(concurrencyLimit));
    dag.addVertex(vA);

    mockLauncher.startScheduling(true);
    DAGClient dagClient = tezClient.submitDAG(dag);
    dagClient.waitForCompletion();
    Assert.assertEquals(DAGStatus.State.SUCCEEDED, dagClient.getDAGStatus(null).getState());
    Assert.assertFalse(exceededConcurrency.get());
    tezClient.stop();
}