List of usage examples for java.util.concurrent.atomic AtomicBoolean get
public final boolean get()
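Before the project examples below, here is a minimal, self-contained sketch (ours, not drawn from any of the listed projects) of the most common get() pattern: one thread polls get() until another thread publishes a flag with set(). Since get() is a volatile read, the write is guaranteed to become visible across threads.

import java.util.concurrent.atomic.AtomicBoolean;

public class AtomicBooleanGetDemo {
    public static void main(String[] args) throws InterruptedException {
        final AtomicBoolean done = new AtomicBoolean(false);

        Thread worker = new Thread(new Runnable() {
            @Override
            public void run() {
                // Spin until the main thread sets the flag. Because get() is a
                // volatile read, the set(true) below is guaranteed to be seen.
                while (!done.get()) {
                    Thread.yield();
                }
                System.out.println("Flag observed; worker exiting.");
            }
        });
        worker.start();

        Thread.sleep(100); // simulate some work on the main thread
        done.set(true);    // publish the flag to the spinning worker
        worker.join();
    }
}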
From source file:com.screenslicer.core.scrape.Scrape.java
private static String getHelper(final RemoteWebDriver driver, final Node urlNode, final String url,
        final boolean p_cached, final String runGuid, final HtmlNode[] clicks) {
    final String urlHash = CommonUtil.isEmpty(url) ? null : Crypto.fastHash(url);
    final long time = System.currentTimeMillis();
    if (urlHash != null) {
        synchronized (fetchLocalCacheLock) {
            if (fetchLocalCache.containsKey(urlHash)) {
                if (time - fetchLocalCache.get(urlHash) < FETCH_LOCAL_CACHE_EXPIRES) {
                    try {
                        return FileUtils.readFileToString(new File("./fetch_local_cache/" + urlHash), "utf-8");
                    } catch (Throwable t) {
                        Log.exception(t);
                        fetchLocalCache.remove(urlHash);
                    }
                } else {
                    fetchLocalCache.remove(urlHash);
                }
            }
        }
    }
    if (!CommonUtil.isEmpty(url)) {
        final Object resultLock = new Object();
        final String initVal;
        final String[] result;
        synchronized (resultLock) {
            initVal = Random.next();
            result = new String[] { initVal };
        }
        // Flipped by the worker thread as its first action; the caller polls
        // started.get() below to be sure the fetch has actually begun before
        // starting the join() timeout.
        final AtomicBoolean started = new AtomicBoolean();
        Thread thread = new Thread(new Runnable() {
            @Override
            public void run() {
                started.set(true);
                boolean cached = p_cached;
                String newHandle = null;
                String origHandle = null;
                try {
                    origHandle = driver.getWindowHandle();
                    String content = null;
                    if (!cached) {
                        try {
                            Util.get(driver, url, urlNode, false);
                        } catch (Throwable t) {
                            if (urlNode != null) {
                                Util.newWindow(driver);
                            }
                            Util.get(driver, url, false);
                        }
                        if (urlNode != null) {
                            newHandle = driver.getWindowHandle();
                        }
                        Util.doClicks(driver, clicks, null);
                        content = driver.getPageSource();
                        if (CommonUtil.isEmpty(content)) {
                            cached = true;
                        }
                    }
                    if (cached) {
                        if (ScreenSlicerBatch.isCancelled(runGuid)) {
                            return;
                        }
                        Util.get(driver, toCacheUrl(url), false);
                        content = driver.getPageSource();
                    }
                    content = Util.clean(content, driver.getCurrentUrl()).outerHtml();
                    if (WebApp.DEBUG) {
                        try {
                            FileUtils.writeStringToFile(new File("./" + System.currentTimeMillis()), content);
                        } catch (IOException e) {
                        }
                    }
                    // TODO make iframes work
                    // if (!CommonUtil.isEmpty(content)) {
                    //     Document doc = Jsoup.parse(content);
                    //     Elements docFrames = doc.getElementsByTag("iframe");
                    //     List<WebElement> iframes = driver.findElementsByTagName("iframe");
                    //     int cur = 0;
                    //     for (WebElement iframe : iframes) {
                    //         try {
                    //             driver.switchTo().frame(iframe);
                    //             String frameSrc = driver.getPageSource();
                    //             if (!CommonUtil.isEmpty(frameSrc) && cur < docFrames.size()) {
                    //                 docFrames.get(cur).html(
                    //                         Util.outerHtml(Jsoup.parse(frameSrc).body().childNodes()));
                    //             }
                    //         } catch (Throwable t) {
                    //             Log.exception(t);
                    //         }
                    //         ++cur;
                    //     }
                    //     driver.switchTo().defaultContent();
                    //     content = doc.outerHtml();
                    // }
                    synchronized (resultLock) {
                        result[0] = content;
                    }
                } catch (Throwable t) {
                    Log.exception(t);
                } finally {
                    synchronized (resultLock) {
                        if (initVal.equals(result[0])) {
                            result[0] = null;
                        }
                    }
                    Util.driverSleepRandLong();
                    if (newHandle != null && origHandle != null) {
                        try {
                            Util.cleanUpNewWindows(driver, origHandle);
                        } catch (Throwable t) {
                            Log.exception(t);
                        }
                    }
                }
            }
        });
        thread.start();
        try {
            while (!started.get()) {
                try {
                    Thread.sleep(WAIT);
                } catch (Throwable t) {
                    Log.exception(t);
                }
            }
            thread.join(HANG_TIME);
            synchronized (resultLock) {
                if (initVal.equals(result[0])) {
                    try {
                        Log.exception(new Exception("Browser is hanging"));
                        forceQuit();
                        thread.interrupt();
                    } catch (Throwable t) {
                        Log.exception(t);
                    }
                    throw new ActionFailed();
                } else if (urlHash != null && !CommonUtil.isEmpty(result[0])
                        && result[0].length() > MIN_FETCH_CACHE_PAGE_LEN) {
                    synchronized (fetchLocalCacheLock) {
                        if (fetchLocalCache.size() > MAX_FETCH_LOCAL_CACHE) {
                            try {
                                FileUtils.deleteQuietly(new File("./fetch_local_cache"));
                                FileUtils.forceMkdir(new File("./fetch_local_cache"));
                            } catch (Throwable t) {
                                Log.exception(t);
                            }
                            fetchLocalCache.clear();
                        }
                        FileUtils.writeStringToFile(new File("./fetch_local_cache/" + urlHash), result[0],
                                "utf-8", false);
                        fetchLocalCache.put(urlHash, time);
                    }
                }
                return result[0];
            }
        } catch (Throwable t) {
            Log.exception(t);
        }
    }
    return null;
}
From source file:com.amazonaws.services.kinesis.clientlibrary.lib.worker.WorkerTest.java
/**
 * This test is testing the {@link Worker}'s shutdown behavior and by extension the behavior of
 * {@link ThreadPoolExecutor#shutdownNow()}. It depends on the thread pool sending an interrupt to the pool threads.
 * This behavior makes the test a bit racy, since we need to ensure a specific order of events.
 *
 * @throws Exception
 */
@Test
public final void testWorkerForcefulShutdown() throws Exception {
    final List<Shard> shardList = createShardListWithOneShard();
    final boolean callProcessRecordsForEmptyRecordList = true;
    final long failoverTimeMillis = 50L;
    final int numberOfRecordsPerShard = 10;

    final List<KinesisClientLease> initialLeases = new ArrayList<KinesisClientLease>();
    for (Shard shard : shardList) {
        KinesisClientLease lease = ShardSyncer.newKCLLease(shard);
        lease.setCheckpoint(ExtendedSequenceNumber.TRIM_HORIZON);
        initialLeases.add(lease);
    }

    final File file = KinesisLocalFileDataCreator.generateTempDataFile(shardList, numberOfRecordsPerShard,
            "normalShutdownUnitTest");
    final IKinesisProxy fileBasedProxy = new KinesisLocalFileProxy(file.getAbsolutePath());

    // Get executor service that will be owned by the worker, so we can get interrupts.
    ExecutorService executorService = getWorkerThreadPoolExecutor();

    // Make test case as efficient as possible.
    final CountDownLatch processRecordsLatch = new CountDownLatch(1);
    final AtomicBoolean recordProcessorInterrupted = new AtomicBoolean(false);
    when(v2RecordProcessorFactory.createProcessor()).thenReturn(v2RecordProcessor);
    final Semaphore actionBlocker = new Semaphore(1);
    final Semaphore shutdownBlocker = new Semaphore(1);

    actionBlocker.acquire();

    doAnswer(new Answer<Object>() {
        @Override
        public Object answer(InvocationOnMock invocation) throws Throwable {
            // Signal that record processor has started processing records.
            processRecordsLatch.countDown();

            // Block for some time now to test forceful shutdown. Also, check if record processor
            // was interrupted or not.
            final long startTimeMillis = System.currentTimeMillis();
            long elapsedTimeMillis = 0;

            LOG.info("Entering sleep @ " + startTimeMillis + " with elapsedMills: " + elapsedTimeMillis);
            shutdownBlocker.acquire();
            try {
                actionBlocker.acquire();
            } catch (InterruptedException e) {
                LOG.info("Sleep interrupted @ " + System.currentTimeMillis() + " elapsedMillis: "
                        + (System.currentTimeMillis() - startTimeMillis));
                recordProcessorInterrupted.getAndSet(true);
            }
            shutdownBlocker.release();
            elapsedTimeMillis = System.currentTimeMillis() - startTimeMillis;
            LOG.info("Sleep completed @ " + System.currentTimeMillis() + " elapsedMillis: "
                    + elapsedTimeMillis);
            return null;
        }
    }).when(v2RecordProcessor).processRecords(any(ProcessRecordsInput.class));

    WorkerThread workerThread = runWorker(shardList, initialLeases, callProcessRecordsForEmptyRecordList,
            failoverTimeMillis, numberOfRecordsPerShard, fileBasedProxy, v2RecordProcessorFactory,
            executorService, nullMetricsFactory);

    // Only sleep for time that is required.
    processRecordsLatch.await();

    // Make sure record processor is initialized and processing records.
    verify(v2RecordProcessorFactory, times(1)).createProcessor();
    verify(v2RecordProcessor, times(1)).initialize(any(InitializationInput.class));
    verify(v2RecordProcessor, atLeast(1)).processRecords(any(ProcessRecordsInput.class));
    verify(v2RecordProcessor, times(0)).shutdown(any(ShutdownInput.class));

    workerThread.getWorker().shutdown();
    workerThread.join();

    Assert.assertTrue(workerThread.getState() == State.TERMINATED);
    // Shutdown should not be called in this case because record processor is blocked.
    verify(v2RecordProcessor, times(0)).shutdown(any(ShutdownInput.class));

    //
    // Release the worker thread
    //
    actionBlocker.release();
    //
    // Give the worker thread time to execute its interrupted handler.
    //
    shutdownBlocker.tryAcquire(100, TimeUnit.MILLISECONDS);
    //
    // Now we can see if it was actually interrupted. It's possible it wasn't and this will fail.
    //
    assertThat(recordProcessorInterrupted.get(), equalTo(true));
}
From source file:com.microsoft.tfs.core.clients.versioncontrol.internal.localworkspace.LocalDataAccessLayer.java
private static boolean reconcileLocalWorkspaceHelper(final Workspace workspace,
        final WebServiceLayer webServiceLayer, final boolean unscannedReconcile,
        final boolean reconcileMissingFromDisk, final AtomicReference<Failure[]> failures,
        final AtomicBoolean pendingChangesUpdatedByServer) {
    Check.notNull(workspace, "workspace"); //$NON-NLS-1$

    pendingChangesUpdatedByServer.set(false);
    final List<PendingChange> convertedAdds = new ArrayList<PendingChange>();

    final boolean throwOnProjectRenamed;
    if (EnvironmentVariables.isDefined(EnvironmentVariables.DD_SUITES_PROJECT_RENAME_UNPATCHED_CLIENT)) {
        throwOnProjectRenamed = false;
    } else {
        throwOnProjectRenamed = true;
    }

    final AtomicReference<GUID> serverPendingChangeSignature = new AtomicReference<GUID>(GUID.EMPTY);

    // No optimization away of reconciles when sending up MissingFromDisk
    // rows, since the bit in the header (lvHeader.PendingReconcile) may be
    // false when we actually have work to do (there are rows in the table
    // marked MissingFromDisk).
    if ((unscannedReconcile || !workspace.getWorkspaceWatcher().isScanNecessary())
            && !reconcileMissingFromDisk) {
        // Pre-reconcile
        final AtomicBoolean hasPendingLocalVersionRows = new AtomicBoolean(true);
        LocalWorkspaceTransaction transaction = new LocalWorkspaceTransaction(workspace);
        try {
            transaction.execute(new LocalVersionHeaderTransaction() {
                @Override
                public void invoke(final WorkspaceVersionTableHeader lvh) {
                    hasPendingLocalVersionRows.set(lvh.getPendingReconcile());
                }
            });
        } finally {
            try {
                transaction.close();
            } catch (final IOException e) {
                throw new VersionControlException(e);
            }
        }

        final AtomicReference<GUID> clientPendingChangeSignature = new AtomicReference<GUID>(GUID.EMPTY);
        if (!hasPendingLocalVersionRows.get()) {
            transaction = new LocalWorkspaceTransaction(workspace);
            try {
                transaction.execute(new PendingChangesHeaderTransaction() {
                    @Override
                    public void invoke(final LocalPendingChangesTableHeader pch) {
                        clientPendingChangeSignature.set(pch.getClientSignature());
                    }
                });
            } finally {
                try {
                    transaction.close();
                } catch (final IOException e) {
                    throw new VersionControlException(e);
                }
            }

            final GUID lastServerPendingChangeGuid = workspace.getOfflineCacheData()
                    .getLastServerPendingChangeSignature();
            final Calendar lastReconcileTime = workspace.getOfflineCacheData().getLastReconcileTime();
            lastReconcileTime.add(Calendar.SECOND, 8);

            if (lastServerPendingChangeGuid != GUID.EMPTY
                    && clientPendingChangeSignature.get().equals(lastServerPendingChangeGuid)
                    && lastReconcileTime.after(Calendar.getInstance())) {
                // This reconcile was optimized away with no server call.
                failures.set(new Failure[0]);
                return false;
            }

            serverPendingChangeSignature.set(webServiceLayer
                    .queryPendingChangeSignature(workspace.getName(), workspace.getOwnerName()));

            if (serverPendingChangeSignature.get() != GUID.EMPTY
                    && clientPendingChangeSignature.get().equals(serverPendingChangeSignature.get())) {
                // This reconcile was optimized away.
                workspace.getOfflineCacheData()
                        .setLastServerPendingChangeSignature(serverPendingChangeSignature.get());
                workspace.getOfflineCacheData().setLastReconcileTime(Calendar.getInstance());

                failures.set(new Failure[0]);
                return false;
            }
        }
    }

    final AtomicBoolean toReturn = new AtomicBoolean(true);
    final LocalWorkspaceTransaction transaction = new LocalWorkspaceTransaction(workspace);
    try {
        final AtomicReference<Failure[]> delegateFailures = new AtomicReference<Failure[]>(new Failure[0]);
        final AtomicBoolean delegatePCUpdated = new AtomicBoolean(false);

        transaction.execute(new AllTablesTransaction() {
            @Override
            public void invoke(final LocalWorkspaceProperties wp, final WorkspaceVersionTable lv,
                    final LocalPendingChangesTable pc) {
                if (!unscannedReconcile) {
                    // The line below has been commented out because we
                    // decided not to force a full scan here, because it
                    // causes significant degradation in UI performance.
                    //
                    // workspace.getWorkspaceWatcher().markPathChanged(null);
                    //
                    // It was an attempt to fix the bug:
                    // Bug 6191: When using local workspaces, get latest
                    // does not get a file that has been deleted from disk.
                    //
                    // The customer has to explicitly invoke
                    // Pending Changes > Actions > Detect local changes
                    // in Team Explorer.
                    //
                    // Note that no customers reported this as an issue;
                    // it was detected only in our tests.
                    workspace.getWorkspaceWatcher().scan(wp, lv, pc);
                }

                // Pre-reconcile
                if (!lv.getPendingReconcile() && !reconcileMissingFromDisk
                        && GUID.EMPTY == serverPendingChangeSignature.get()) {
                    serverPendingChangeSignature.set(webServiceLayer
                            .queryPendingChangeSignature(workspace.getName(), workspace.getOwnerName()));

                    if (serverPendingChangeSignature.get() != GUID.EMPTY
                            && pc.getClientSignature().equals(serverPendingChangeSignature.get())) {
                        // This reconcile was optimized away.
                        delegateFailures.set(new Failure[0]);

                        workspace.getOfflineCacheData()
                                .setLastServerPendingChangeSignature(serverPendingChangeSignature.get());
                        workspace.getOfflineCacheData().setLastReconcileTime(Calendar.getInstance());

                        toReturn.set(true);
                        return;
                    }
                }

                // Acknowledgment of team project renames, if any have been completed
                if (wp.getNewProjectRevisionId() > 0) {
                    webServiceLayer.promotePendingWorkspaceMappings(workspace.getName(),
                            workspace.getOwnerName(), wp.getNewProjectRevisionId());
                    wp.setNewProjectRevisionId(0);
                }

                final LocalPendingChange[] pendingChanges = pc.queryByTargetServerItem(ServerPath.ROOT,
                        RecursionType.FULL, null);

                /*
                 * TEE-specific Code
                 *
                 * In order to support offline property changes, which
                 * cannot be reconciled with
                 * WebServiceLayer.reconcileLocalWorkspace (the property
                 * values can't be sent), we have to pull out the pended
                 * property changes and send them to the server before
                 * reconciling.
                 */
                final List<ChangeRequest> propertyRequests = new ArrayList<ChangeRequest>();

                for (final LocalPendingChange lpc : pendingChanges) {
                    if (lpc.getChangeType().contains(ChangeType.PROPERTY)) {
                        final PropertyValue[] pv = lpc.getPropertyValues();
                        final String serverItem = lpc.getTargetServerItem();

                        if (pv != null && pv.length > 0 && serverItem != null) {
                            final ChangeRequest request = new ChangeRequest(
                                    new ItemSpec(serverItem, RecursionType.NONE),
                                    new WorkspaceVersionSpec(workspace), RequestType.PROPERTY, ItemType.ANY,
                                    VersionControlConstants.ENCODING_UNCHANGED, LockLevel.UNCHANGED, 0, null,
                                    false);
                            request.setProperties(pv);
                            propertyRequests.add(request);
                        }
                    }
                }

                if (propertyRequests.size() > 0) {
                    ((WebServiceLayerLocalWorkspaces) webServiceLayer).pendChangesInLocalWorkspace(
                            workspace.getName(), workspace.getOwnerName(),
                            propertyRequests.toArray(new ChangeRequest[propertyRequests.size()]),
                            PendChangesOptions.NONE, SupportedFeatures.ALL, new AtomicReference<Failure[]>(),
                            null, null, new AtomicBoolean(), new AtomicReference<ChangePendedFlags>());
                    // TODO handle failures?
                }

                // Back to normal, non-TEE behavior
                final AtomicBoolean outClearLocalVersionTable = new AtomicBoolean();
                final ServerItemLocalVersionUpdate[] lvUpdates = lv.getUpdatesForReconcile(pendingChanges,
                        reconcileMissingFromDisk, outClearLocalVersionTable);

                ReconcileResult result = webServiceLayer.reconcileLocalWorkspace(workspace.getName(),
                        workspace.getOwnerName(), pc.getClientSignature(), pendingChanges, lvUpdates,
                        outClearLocalVersionTable.get(), throwOnProjectRenamed);

                // report any failures
                Failure[] reconcileFailures = result.getFailures();
                workspace.getClient().reportFailures(workspace, reconcileFailures);

                if (reconcileFailures.length > 0) {
                    throw new ReconcileFailedException(reconcileFailures);
                }

                GUID newSignature = new GUID(result.getNewSignature());
                PendingChange[] newPendingChanges = result.getNewPendingChanges();

                // If the local version rows for this local workspace have
                // been purged from the server, then the server will set
                // this flag on the result of the next reconcile.
                if (result.isReplayLocalVersionsRequired()) {
                    // Reconcile a second time. This time, set the
                    // clearLocalVersionTable flag. This way, we know
                    // we have cleared out any lingering local version rows
                    // for this workspace.
                    if (!outClearLocalVersionTable.get()) {
                        result = webServiceLayer.reconcileLocalWorkspace(workspace.getName(),
                                workspace.getOwnerName(), pc.getClientSignature(), pendingChanges, lvUpdates,
                                true /* clearLocalVersionTable */, throwOnProjectRenamed);

                        // Report any failures
                        reconcileFailures = result.getFailures();
                        workspace.getClient().reportFailures(workspace, reconcileFailures);

                        if (reconcileFailures.length > 0) {
                            throw new ReconcileFailedException(reconcileFailures);
                        }

                        // Grab the new signature and new pending changes
                        newSignature = new GUID(result.getNewSignature());
                        newPendingChanges = result.getNewPendingChanges();
                    }

                    // Now, go through the local version table and replay
                    // every row that we have.
                    final List<ServerItemLocalVersionUpdate> replayUpdates = new ArrayList<ServerItemLocalVersionUpdate>(
                            Math.min(lv.getLocalItemsCount(), 1000));

                    for (final WorkspaceLocalItem lvEntry : lv.queryByServerItem(ServerPath.ROOT,
                            RecursionType.FULL, null, true /* includeDeleted */)) {
                        final ServerItemLocalVersionUpdate replayUpdate = lvEntry
                                .getLocalVersionUpdate(reconcileMissingFromDisk, true /* force */);

                        if (replayUpdate != null) {
                            replayUpdates.add(replayUpdate);

                            // Batch these updates in groups of 1000 items.
                            if (replayUpdates.size() >= 1000) {
                                webServiceLayer.updateLocalVersion(workspace.getName(),
                                        workspace.getOwnerName(), replayUpdates.toArray(
                                                new ServerItemLocalVersionUpdate[replayUpdates.size()]));
                                replayUpdates.clear();
                            }
                        }
                    }

                    if (replayUpdates.size() > 0) {
                        webServiceLayer.updateLocalVersion(workspace.getName(), workspace.getOwnerName(),
                                replayUpdates.toArray(new ServerItemLocalVersionUpdate[replayUpdates.size()]));
                    }
                }

                if (result.isPendingChangesUpdated()) {
                    delegatePCUpdated.set(true);

                    final Map<String, ItemType> newPendingDeletes = new TreeMap<String, ItemType>(
                            String.CASE_INSENSITIVE_ORDER);

                    for (final PendingChange pendingChange : newPendingChanges) {
                        if (pendingChange.isAdd()) {
                            final LocalPendingChange oldPendingChange = pc
                                    .getByTargetServerItem(pendingChange.getServerItem());

                            if (null == oldPendingChange || !oldPendingChange.isAdd()) {
                                // Before calling ReconcileLocalWorkspace,
                                // we did not have a pending add at this
                                // target server item.
                                convertedAdds.add(pendingChange);
                            }
                        } else if (pendingChange.isDelete()) {
                            // If the server removed any of our presented
                            // pending deletes, we want to know about it so
                            // we can get rid of the local version rows that
                            // we have in the deleted state. The server will
                            // remove our pending deletes when the item has
                            // been destroyed on the server.
                            newPendingDeletes.put(
                                    pendingChange.getSourceServerItem() == null ? pendingChange.getServerItem()
                                            : pendingChange.getSourceServerItem(),
                                    pendingChange.getItemType());
                        }
                    }

                    for (final LocalPendingChange oldPendingChange : pc
                            .queryByCommittedServerItem(ServerPath.ROOT, RecursionType.FULL, null)) {
                        if (oldPendingChange.isDelete()
                                && !newPendingDeletes.containsKey(oldPendingChange.getCommittedServerItem())) {
                            // We presented this delete to the server for
                            // Reconcile, but the server removed it from the
                            // pending changes manifest. We need to get rid
                            // of the LV rows for
                            // oldPendingChange.CommittedServerItem since
                            // this item is now destroyed.
                            final List<ServerItemIsCommittedTuple> lvRowsToRemove = new ArrayList<ServerItemIsCommittedTuple>();

                            final RecursionType recursion = oldPendingChange.isRecursiveChange()
                                    ? RecursionType.FULL
                                    : RecursionType.NONE;

                            // Aggregate up the deleted local version
                            // entries at this committed server item
                            // (or below if it's a folder), and we'll remove
                            // them.
                            for (final WorkspaceLocalItem lvEntry : lv.queryByServerItem(
                                    oldPendingChange.getCommittedServerItem(), recursion, null,
                                    true /* includeDeleted */)) {
                                if (lvEntry.isDeleted()) {
                                    lvRowsToRemove.add(new ServerItemIsCommittedTuple(lvEntry.getServerItem(),
                                            lvEntry.isCommitted()));
                                }
                            }

                            for (final ServerItemIsCommittedTuple tuple : lvRowsToRemove) {
                                // We don't need to reconcile the removal of
                                // LV entries marked IsDeleted since they
                                // don't exist on the server anyway.
                                lv.removeByServerItem(tuple.getCommittedServerItem(), tuple.isCommitted(),
                                        false);
                            }
                        }
                    }

                    pc.replacePendingChanges(newPendingChanges);
                }

                // If all we're doing to LV is marking it reconciled, then
                // don't use TxF to commit both tables atomically, as this is slower.
                if (!lv.isDirty()) {
                    transaction.setAllowTxF(false);
                }

                if (lvUpdates.length > 0) {
                    lv.markAsReconciled(wp, reconcileMissingFromDisk);

                    // If we removed all missing-from-disk items from the
                    // local version table, then we need to remove
                    // the corresponding candidate delete rows for those
                    // items as well.
                    if (reconcileMissingFromDisk) {
                        List<String> candidatesToRemove = null;

                        for (final LocalPendingChange candidateChange : pc.queryCandidatesByTargetServerItem(
                                ServerPath.ROOT, RecursionType.FULL, null)) {
                            if (candidateChange.isDelete()) {
                                if (null == candidatesToRemove) {
                                    candidatesToRemove = new ArrayList<String>();
                                }
                                candidatesToRemove.add(candidateChange.getTargetServerItem());
                            }
                        }

                        if (null != candidatesToRemove) {
                            for (final String candidateDeleteTargetServerItem : candidatesToRemove) {
                                pc.removeCandidateByTargetServerItem(candidateDeleteTargetServerItem);
                            }
                            // Set the candidates changed to true so that it
                            // refreshes the UI
                            LocalWorkspaceTransaction.getCurrent()
                                    .setRaisePendingChangeCandidatesChanged(true);
                        }
                    }
                }

                newSignature = webServiceLayer.queryPendingChangeSignature(workspace.getName(),
                        workspace.getOwnerName());

                if (!newSignature.equals(pc.getClientSignature())) {
                    pc.setClientSignature(newSignature);
                    workspace.getOfflineCacheData().setLastServerPendingChangeSignature(newSignature);
                }

                workspace.getOfflineCacheData().setLastReconcileTime(Calendar.getInstance());
            }
        });

        failures.set(delegateFailures.get());
        pendingChangesUpdatedByServer.set(delegatePCUpdated.get());
    } finally {
        try {
            transaction.close();
        } catch (final IOException e) {
            throw new VersionControlException(e);
        }
    }

    if (convertedAdds.size() > 0) {
        final UpdateLocalVersionQueueOptions options = UpdateLocalVersionQueueOptions.UPDATE_BOTH;
        final UpdateLocalVersionQueue ulvq = new UpdateLocalVersionQueue(workspace, options);
        try {
            for (final PendingChange pc : convertedAdds) {
                // Every item in this list has a ChangeType of Add. As a
                // result they are uncommitted items with no committed hash
                // value, no committed length, and no baseline file GUID.
                ulvq.queueUpdate(new ClientLocalVersionUpdate(pc.getServerItem(), pc.getItemID(),
                        pc.getLocalItem(), 0 /* localVersion */, DotNETDate.MIN_CALENDAR, pc.getEncoding(),
                        null /* committedHashValue */, 0 /* committedLength */, null /* baselineFileGuid */,
                        null /* pendingChangeTargetServerItem */, null /* properties */));
            }
        } finally {
            ulvq.close();
        }
    }

    return toReturn.get();
}
From source file:com.atlassian.jira.bc.group.TestDefaultGroupService.java
@Test
public void testValidateAddUsersToGroupHappy() {
    final AtomicBoolean validateGroupNamesExistCalled = new AtomicBoolean(false);
    final AtomicBoolean isUserNullCalled = new AtomicBoolean(false);
    final AtomicBoolean isExternalUserManagementEnabledCalled = new AtomicBoolean(false);
    final AtomicBoolean getNonMemberGroupsCalled = new AtomicBoolean(false);
    final AtomicBoolean validateUserIsNotInSelectedGroupsCalled = new AtomicBoolean(false);

    final DefaultGroupService defaultGroupService = new DefaultGroupService(null, null, null, null, null, null,
            null, null, null, null, null, null, null, null) {
        @Override
        boolean validateGroupNamesExist(final Collection groupNames, final ErrorCollection errorCollection,
                final I18nHelper i18n) {
            validateGroupNamesExistCalled.set(true);
            return true;
        }

        @Override
        boolean isUserNull(final User user) {
            isUserNullCalled.set(true);
            return false;
        }

        @Override
        boolean isExternalUserManagementEnabled() {
            isExternalUserManagementEnabledCalled.set(true);
            return false;
        }

        @Override
        List getGroupNamesUserCanSee(final com.atlassian.crowd.embedded.api.User currentUser) {
            getNonMemberGroupsCalled.set(true);
            return EasyList.build("SomeOtherGroup", "SomeGroup");
        }

        @Override
        boolean validateUserIsNotInSelectedGroups(final JiraServiceContext jiraServiceContext,
                final Collection selectedGroupsNames, final User user) {
            validateUserIsNotInSelectedGroupsCalled.set(true);
            return true;
        }

        @Override
        User getUser(final String userName) {
            return null;
        }

        @Override
        boolean userHasAdminPermission(final User user) {
            return true;
        }

        @Override
        boolean groupsHaveGlobalUsePermissions(final Collection /* <String> */ groupNames) {
            return false;
        }
    };

    final SimpleErrorCollection errors = new SimpleErrorCollection();
    final JiraServiceContext jiraServiceContext = getContext(errors);

    assertTrue(defaultGroupService
            .validateAddUsersToGroup(jiraServiceContext, EasyList.build("SomeGroup"), EasyList.build("dude"))
            .isSuccess());

    assertTrue(validateGroupNamesExistCalled.get());
    assertTrue(isUserNullCalled.get());
    assertTrue(isExternalUserManagementEnabledCalled.get());
    assertTrue(getNonMemberGroupsCalled.get());
    assertTrue(validateUserIsNotInSelectedGroupsCalled.get());
}
From source file:com.atlassian.jira.bc.group.TestDefaultGroupService.java
@Test
public void testValidateAddUsersToGroupWillExceedLicenseLimit() {
    final AtomicBoolean validateGroupNamesExistCalled = new AtomicBoolean(false);
    final AtomicBoolean isUserNullCalled = new AtomicBoolean(false);
    final AtomicBoolean isExternalUserManagementEnabledCalled = new AtomicBoolean(false);
    final AtomicBoolean getNonMemberGroupsCalled = new AtomicBoolean(false);
    final AtomicBoolean validateUserIsNotInSelectedGroupsCalled = new AtomicBoolean(false);

    final Mock mockUserUtil = new Mock(UserUtil.class);
    mockUserUtil.expectAndReturn("canActivateNumberOfUsers", new Constraint[] { P.eq(1) }, Boolean.FALSE);

    final DefaultGroupService defaultGroupService = new DefaultGroupService(null, null, null, null, null, null,
            null, null, (UserUtil) mockUserUtil.proxy(), null, null, null, null, null) {
        @Override
        boolean validateGroupNamesExist(final Collection groupNames, final ErrorCollection errorCollection,
                final I18nHelper i18n) {
            validateGroupNamesExistCalled.set(true);
            return true;
        }

        @Override
        boolean isUserNull(final User user) {
            isUserNullCalled.set(true);
            return false;
        }

        @Override
        boolean isExternalUserManagementEnabled() {
            isExternalUserManagementEnabledCalled.set(true);
            return false;
        }

        @Override
        List getGroupNamesUserCanSee(final com.atlassian.crowd.embedded.api.User currentUser) {
            getNonMemberGroupsCalled.set(true);
            return EasyList.build("SomeOtherGroup", "SomeGroup");
        }

        @Override
        boolean validateUserIsNotInSelectedGroups(final JiraServiceContext jiraServiceContext,
                final Collection selectedGroupsNames, final User user) {
            validateUserIsNotInSelectedGroupsCalled.set(true);
            return true;
        }

        @Override
        User getUser(final String userName) {
            return null;
        }

        @Override
        boolean userHasAdminPermission(final User user) {
            return true;
        }

        @Override
        boolean groupsHaveGlobalUsePermissions(final Collection /* <String> */ groupNames) {
            return true;
        }
    };

    final SimpleErrorCollection errors = new SimpleErrorCollection();
    final JiraServiceContext jiraServiceContext = getContext(errors);

    assertFalse(defaultGroupService
            .validateAddUsersToGroup(jiraServiceContext, EasyList.build("SomeGroup"), EasyList.build("dude"))
            .isSuccess());

    assertTrue(validateGroupNamesExistCalled.get());
    assertTrue(isUserNullCalled.get());
    assertTrue(isExternalUserManagementEnabledCalled.get());
    assertTrue(getNonMemberGroupsCalled.get());
    assertTrue(validateUserIsNotInSelectedGroupsCalled.get());

    assertEquals(1, errors.getErrorMessages().size());
    assertEquals(
            "Adding the user to the groups you have selected will grant the 'JIRA Users' permission to the user"
                    + " in JIRA. This will exceed the number of users allowed to use JIRA under your license. Please"
                    + " reduce the number of users with the 'JIRA Users', 'JIRA Administrators' or 'JIRA System"
                    + " Administrators' global permissions or consider upgrading your license.",
            errors.getErrorMessages().iterator().next());
}
From source file:org.apache.hadoop.hive.metastore.txn.TestTxnHandler.java
@Test
@Ignore
public void deadlockDetected() throws Exception {
    LOG.debug("Starting deadlock test");
    Connection conn = txnHandler.getDbConn(Connection.TRANSACTION_SERIALIZABLE);
    Statement stmt = conn.createStatement();
    long now = txnHandler.getDbTime(conn);
    stmt.executeUpdate("insert into TXNS (txn_id, txn_state, txn_started, txn_last_heartbeat, "
            + "txn_user, txn_host) values (1, 'o', " + now + ", " + now + ", 'shagy', " + "'scooby.com')");
    stmt.executeUpdate("insert into HIVE_LOCKS (hl_lock_ext_id, hl_lock_int_id, hl_txnid, "
            + "hl_db, hl_table, hl_partition, hl_lock_state, hl_lock_type, hl_last_heartbeat, "
            + "hl_user, hl_host) values (1, 1, 1, 'mydb', 'mytable', 'mypartition', '"
            + txnHandler.LOCK_WAITING + "', '" + txnHandler.LOCK_EXCLUSIVE + "', " + now + ", 'fred', "
            + "'scooby.com')");
    conn.commit();
    txnHandler.closeDbConn(conn);

    final AtomicBoolean sawDeadlock = new AtomicBoolean();
    final Connection conn1 = txnHandler.getDbConn(Connection.TRANSACTION_SERIALIZABLE);
    final Connection conn2 = txnHandler.getDbConn(Connection.TRANSACTION_SERIALIZABLE);
    try {
        for (int i = 0; i < 5; i++) {
            Thread t1 = new Thread() {
                @Override
                public void run() {
                    try {
                        try {
                            updateTxns(conn1);
                            updateLocks(conn1);
                            Thread.sleep(1000);
                            conn1.commit();
                            LOG.debug("no exception, no deadlock");
                        } catch (SQLException e) {
                            try {
                                txnHandler.checkRetryable(conn1, e, "thread t1");
                                LOG.debug("Got an exception, but not a deadlock, SQLState is "
                                        + e.getSQLState() + " class of exception is "
                                        + e.getClass().getName() + " msg is <" + e.getMessage() + ">");
                            } catch (TxnHandler.RetryException de) {
                                LOG.debug("Forced a deadlock, SQLState is " + e.getSQLState() + " class of "
                                        + "exception is " + e.getClass().getName() + " msg is <"
                                        + e.getMessage() + ">");
                                sawDeadlock.set(true);
                            }
                        }
                        conn1.rollback();
                    } catch (Exception e) {
                        throw new RuntimeException(e);
                    }
                }
            };
            Thread t2 = new Thread() {
                @Override
                public void run() {
                    try {
                        try {
                            updateLocks(conn2);
                            updateTxns(conn2);
                            Thread.sleep(1000);
                            conn2.commit();
                            LOG.debug("no exception, no deadlock");
                        } catch (SQLException e) {
                            try {
                                txnHandler.checkRetryable(conn2, e, "thread t2");
                                LOG.debug("Got an exception, but not a deadlock, SQLState is "
                                        + e.getSQLState() + " class of exception is "
                                        + e.getClass().getName() + " msg is <" + e.getMessage() + ">");
                            } catch (TxnHandler.RetryException de) {
                                LOG.debug("Forced a deadlock, SQLState is " + e.getSQLState() + " class of "
                                        + "exception is " + e.getClass().getName() + " msg is <"
                                        + e.getMessage() + ">");
                                sawDeadlock.set(true);
                            }
                        }
                        conn2.rollback();
                    } catch (Exception e) {
                        throw new RuntimeException(e);
                    }
                }
            };
            t1.start();
            t2.start();
            t1.join();
            t2.join();
            if (sawDeadlock.get()) {
                break;
            }
        }
        assertTrue(sawDeadlock.get());
    } finally {
        conn1.rollback();
        txnHandler.closeDbConn(conn1);
        conn2.rollback();
        txnHandler.closeDbConn(conn2);
    }
}
From source file:io.realm.RealmTests.java
@Test
public void processLocalListenersAfterRefresh() throws InterruptedException {
    // Used to validate the result
    final AtomicBoolean listenerWasCalled = new AtomicBoolean(false);
    final AtomicBoolean typeListenerWasCalled = new AtomicBoolean(false);

    // Used by the background thread to wait for the main thread to do the write operation
    final CountDownLatch bgThreadLatch = new CountDownLatch(1);
    final CountDownLatch bgClosedLatch = new CountDownLatch(1);
    final CountDownLatch bgThreadReadyLatch = new CountDownLatch(1);

    Thread backgroundThread = new Thread() {
        @Override
        public void run() {
            // Prepare a Looper so a listener can be registered. We deliberately
            // don't start looping, to prevent the callback from being invoked via
            // the handler mechanism; the purpose of this test is to make sure
            // refresh() calls the listeners.
            Looper.prepare();
            Realm bgRealm = Realm.getInstance(realmConfig);
            RealmResults<Dog> dogs = bgRealm.where(Dog.class).findAll();
            try {
                bgRealm.addChangeListener(new RealmChangeListener() {
                    @Override
                    public void onChange() {
                        listenerWasCalled.set(true);
                    }
                });
                dogs.addChangeListener(new RealmChangeListener() {
                    @Override
                    public void onChange() {
                        typeListenerWasCalled.set(true);
                    }
                });
                bgThreadReadyLatch.countDown();
                bgThreadLatch.await(); // Wait for the main thread to do a write operation
                bgRealm.refresh(); // This should call the listeners
                assertTrue(listenerWasCalled.get());
                assertTrue(typeListenerWasCalled.get());
                bgRealm.close();
                bgRealm = null;
                // DON'T count down in the finally block! The test would fail silently!!!
                bgClosedLatch.countDown();
            } catch (InterruptedException e) {
                fail(e.getMessage());
            } finally {
                if (bgRealm != null) {
                    bgRealm.close();
                }
            }
        }
    };
    backgroundThread.start();

    // Wait until bgThread finishes adding listeners to the RealmResults. Otherwise the same TableView version
    // won't trigger the listener.
    bgThreadReadyLatch.await();
    realm.beginTransaction();
    realm.createObject(Dog.class);
    realm.commitTransaction();
    bgThreadLatch.countDown();
    bgClosedLatch.await();
}
From source file:org.apache.hadoop.hbase.regionserver.TestHRegion.java
/**
 * Test case to check increment function with memstore flushing
 * @throws Exception
 */
@Test
public void testParallelIncrementWithMemStoreFlush() throws Exception {
    byte[] family = Incrementer.family;
    this.region = initHRegion(tableName, method, CONF, family);
    final HRegion region = this.region;
    final AtomicBoolean incrementDone = new AtomicBoolean(false);
    Runnable flusher = new Runnable() {
        @Override
        public void run() {
            // Keep flushing the memstore until the incrementer threads are done.
            while (!incrementDone.get()) {
                try {
                    region.flushcache();
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        }
    };

    // after all increments finish, the row will have been incremented to 20 * 100 = 2000
    int threadNum = 20;
    int incCounter = 100;
    long expected = threadNum * incCounter;
    Thread[] incrementers = new Thread[threadNum];
    Thread flushThread = new Thread(flusher);
    for (int i = 0; i < threadNum; i++) {
        incrementers[i] = new Thread(new Incrementer(this.region, incCounter));
        incrementers[i].start();
    }
    flushThread.start();
    for (int i = 0; i < threadNum; i++) {
        incrementers[i].join();
    }
    incrementDone.set(true);
    flushThread.join();

    Get get = new Get(Incrementer.incRow);
    get.addColumn(Incrementer.family, Incrementer.qualifier);
    get.setMaxVersions(1);
    Result res = this.region.get(get);
    List<Cell> kvs = res.getColumnCells(Incrementer.family, Incrementer.qualifier);

    // we just got the latest version
    assertEquals(kvs.size(), 1);
    Cell kv = kvs.get(0);
    assertEquals(expected, Bytes.toLong(kv.getValueArray(), kv.getValueOffset()));
    this.region = null;
}
From source file:org.apache.hadoop.hbase.regionserver.TestHRegion.java
/**
 * Test case to check append function with memstore flushing
 * @throws Exception
 */
@Test
public void testParallelAppendWithMemStoreFlush() throws Exception {
    byte[] family = Appender.family;
    this.region = initHRegion(tableName, method, CONF, family);
    final HRegion region = this.region;
    final AtomicBoolean appendDone = new AtomicBoolean(false);
    Runnable flusher = new Runnable() {
        @Override
        public void run() {
            // Keep flushing the memstore until the appender threads are done.
            while (!appendDone.get()) {
                try {
                    region.flushcache();
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        }
    };

    // after all appends finish, the value will be Appender.CHAR repeated
    // threadNum * appendCounter times
    int threadNum = 20;
    int appendCounter = 100;
    byte[] expected = new byte[threadNum * appendCounter];
    for (int i = 0; i < threadNum * appendCounter; i++) {
        System.arraycopy(Appender.CHAR, 0, expected, i, 1);
    }
    Thread[] appenders = new Thread[threadNum];
    Thread flushThread = new Thread(flusher);
    for (int i = 0; i < threadNum; i++) {
        appenders[i] = new Thread(new Appender(this.region, appendCounter));
        appenders[i].start();
    }
    flushThread.start();
    for (int i = 0; i < threadNum; i++) {
        appenders[i].join();
    }
    appendDone.set(true);
    flushThread.join();

    Get get = new Get(Appender.appendRow);
    get.addColumn(Appender.family, Appender.qualifier);
    get.setMaxVersions(1);
    Result res = this.region.get(get);
    List<Cell> kvs = res.getColumnCells(Appender.family, Appender.qualifier);

    // we just got the latest version
    assertEquals(kvs.size(), 1);
    Cell kv = kvs.get(0);
    byte[] appendResult = new byte[kv.getValueLength()];
    System.arraycopy(kv.getValueArray(), kv.getValueOffset(), appendResult, 0, kv.getValueLength());
    assertArrayEquals(expected, appendResult);
    this.region = null;
}
From source file:org.apache.nifi.processors.standard.servlets.ListenHTTPServlet.java
@Override
protected void doPost(final HttpServletRequest request, final HttpServletResponse response)
        throws ServletException, IOException {
    final ProcessContext context = processContext;

    ProcessSessionFactory sessionFactory;
    do {
        sessionFactory = sessionFactoryHolder.get();
        if (sessionFactory == null) {
            try {
                Thread.sleep(10);
            } catch (final InterruptedException e) {
            }
        }
    } while (sessionFactory == null);

    final ProcessSession session = sessionFactory.createSession();
    FlowFile flowFile = null;
    String holdUuid = null;
    String foundSubject = null;
    try {
        final long n = filesReceived.getAndIncrement() % FILES_BEFORE_CHECKING_DESTINATION_SPACE;
        if (n == 0 || !spaceAvailable.get()) {
            if (context.getAvailableRelationships().isEmpty()) {
                spaceAvailable.set(false);
                if (logger.isDebugEnabled()) {
                    logger.debug("Received request from " + request.getRemoteHost()
                            + " but no space available; Indicating Service Unavailable");
                }
                response.sendError(HttpServletResponse.SC_SERVICE_UNAVAILABLE);
                return;
            } else {
                spaceAvailable.set(true);
            }
        }

        response.setHeader("Content-Type", MediaType.TEXT_PLAIN);

        final boolean contentGzipped = Boolean.parseBoolean(request.getHeader(GZIPPED_HEADER));
        final X509Certificate[] certs = (X509Certificate[]) request
                .getAttribute("javax.servlet.request.X509Certificate");
        foundSubject = DEFAULT_FOUND_SUBJECT;
        if (certs != null && certs.length > 0) {
            for (final X509Certificate cert : certs) {
                foundSubject = cert.getSubjectDN().getName();
                if (authorizedPattern.matcher(foundSubject).matches()) {
                    break;
                } else {
                    logger.warn("Rejecting transfer attempt from " + foundSubject
                            + " because the DN is not authorized, host=" + request.getRemoteHost());
                    response.sendError(HttpServletResponse.SC_FORBIDDEN, "not allowed based on dn");
                    return;
                }
            }
        }

        final String destinationVersion = request.getHeader(PROTOCOL_VERSION_HEADER);
        Integer protocolVersion = null;
        if (destinationVersion != null) {
            try {
                protocolVersion = Integer.valueOf(destinationVersion);
            } catch (final NumberFormatException e) {
                // Value was invalid. Treat as if the header were missing.
            }
        }

        final boolean destinationIsLegacyNiFi = (protocolVersion == null);
        final boolean createHold = Boolean.parseBoolean(request.getHeader(FLOWFILE_CONFIRMATION_HEADER));
        final String contentType = request.getContentType();

        final InputStream unthrottled = contentGzipped ? new GZIPInputStream(request.getInputStream())
                : request.getInputStream();
        final InputStream in = (streamThrottler == null) ? unthrottled
                : streamThrottler.newThrottledInputStream(unthrottled);

        if (logger.isDebugEnabled()) {
            logger.debug("Received request from " + request.getRemoteHost() + ", createHold=" + createHold
                    + ", content-type=" + contentType + ", gzip=" + contentGzipped);
        }

        // Written inside the OutputStreamCallback below and polled by the do/while
        // loop to keep unpacking multiple flowfiles from a single request.
        final AtomicBoolean hasMoreData = new AtomicBoolean(false);
        final FlowFileUnpackager unpackager;
        if (APPLICATION_FLOW_FILE_V3.equals(contentType)) {
            unpackager = new FlowFileUnpackagerV3();
        } else if (APPLICATION_FLOW_FILE_V2.equals(contentType)) {
            unpackager = new FlowFileUnpackagerV2();
        } else if (APPLICATION_FLOW_FILE_V1.equals(contentType)) {
            unpackager = new FlowFileUnpackagerV1();
        } else {
            unpackager = null;
        }

        final Set<FlowFile> flowFileSet = new HashSet<>();

        do {
            final long startNanos = System.nanoTime();
            final Map<String, String> attributes = new HashMap<>();
            flowFile = session.create();
            flowFile = session.write(flowFile, new OutputStreamCallback() {
                @Override
                public void process(final OutputStream rawOut) throws IOException {
                    try (final BufferedOutputStream bos = new BufferedOutputStream(rawOut, 65536)) {
                        if (unpackager == null) {
                            IOUtils.copy(in, bos);
                            hasMoreData.set(false);
                        } else {
                            attributes.putAll(unpackager.unpackageFlowFile(in, bos));
                            if (destinationIsLegacyNiFi) {
                                if (attributes.containsKey("nf.file.name")) {
                                    // for backward compatibility with old nifi...
                                    attributes.put(CoreAttributes.FILENAME.key(),
                                            attributes.remove("nf.file.name"));
                                }
                                if (attributes.containsKey("nf.file.path")) {
                                    attributes.put(CoreAttributes.PATH.key(),
                                            attributes.remove("nf.file.path"));
                                }
                            }
                            hasMoreData.set(unpackager.hasMoreData());
                        }
                    }
                }
            });

            final long transferNanos = System.nanoTime() - startNanos;
            final long transferMillis = TimeUnit.MILLISECONDS.convert(transferNanos, TimeUnit.NANOSECONDS);

            // put metadata on flowfile
            final String nameVal = request.getHeader(CoreAttributes.FILENAME.key());
            if (StringUtils.isNotBlank(nameVal)) {
                attributes.put(CoreAttributes.FILENAME.key(), nameVal);
            }

            // put arbitrary headers on flow file
            for (Enumeration<String> headerEnum = request.getHeaderNames(); headerEnum.hasMoreElements();) {
                String headerName = headerEnum.nextElement();
                if (headerPattern != null && headerPattern.matcher(headerName).matches()) {
                    String headerValue = request.getHeader(headerName);
                    attributes.put(headerName, headerValue);
                }
            }

            String sourceSystemFlowFileIdentifier = attributes.get(CoreAttributes.UUID.key());
            if (sourceSystemFlowFileIdentifier != null) {
                sourceSystemFlowFileIdentifier = "urn:nifi:" + sourceSystemFlowFileIdentifier;

                // If we received a UUID, we want to give the FlowFile a new UUID and register the sending system's
                // identifier as the SourceSystemFlowFileIdentifier field in the Provenance RECEIVE event
                attributes.put(CoreAttributes.UUID.key(), UUID.randomUUID().toString());
            }

            flowFile = session.putAllAttributes(flowFile, attributes);
            session.getProvenanceReporter().receive(flowFile, request.getRequestURL().toString(),
                    sourceSystemFlowFileIdentifier, "Remote DN=" + foundSubject, transferMillis);
            flowFile = session.putAttribute(flowFile, "restlistener.remote.source.host",
                    request.getRemoteHost());
            flowFile = session.putAttribute(flowFile, "restlistener.remote.user.dn", foundSubject);
            flowFileSet.add(flowFile);

            if (holdUuid == null) {
                holdUuid = flowFile.getAttribute(CoreAttributes.UUID.key());
            }
        } while (hasMoreData.get());

        if (createHold) {
            String uuid = (holdUuid == null) ? UUID.randomUUID().toString() : holdUuid;

            if (flowFileMap.containsKey(uuid)) {
                uuid = UUID.randomUUID().toString();
            }

            final FlowFileEntryTimeWrapper wrapper = new FlowFileEntryTimeWrapper(session, flowFileSet,
                    System.currentTimeMillis());
            FlowFileEntryTimeWrapper previousWrapper;
            do {
                previousWrapper = flowFileMap.putIfAbsent(uuid, wrapper);
                if (previousWrapper != null) {
                    uuid = UUID.randomUUID().toString();
                }
            } while (previousWrapper != null);

            response.setStatus(HttpServletResponse.SC_SEE_OTHER);
            final String ackUri = "/" + basePath + "/holds/" + uuid;
            response.addHeader(LOCATION_HEADER_NAME, ackUri);
            response.addHeader(LOCATION_URI_INTENT_NAME, LOCATION_URI_INTENT_VALUE);
            response.getOutputStream().write(ackUri.getBytes("UTF-8"));
            if (logger.isDebugEnabled()) {
                logger.debug(
                        "Ingested {} from Remote Host: [{}] Port [{}] SubjectDN [{}]; placed hold on these {} files with ID {}",
                        new Object[] { flowFileSet, request.getRemoteHost(), request.getRemotePort(),
                                foundSubject, flowFileSet.size(), uuid });
            }
        } else {
            response.setStatus(HttpServletResponse.SC_OK);
            logger.info(
                    "Received from Remote Host: [{}] Port [{}] SubjectDN [{}]; transferring to 'success' {}",
                    new Object[] { request.getRemoteHost(), request.getRemotePort(), foundSubject, flowFile });
            session.transfer(flowFileSet, ListenHTTP.RELATIONSHIP_SUCCESS);
            session.commit();
        }
    } catch (final Throwable t) {
        session.rollback();
        if (flowFile == null) {
            logger.error("Unable to receive file from Remote Host: [{}] SubjectDN [{}] due to {}",
                    new Object[] { request.getRemoteHost(), foundSubject, t });
        } else {
            logger.error("Unable to receive file {} from Remote Host: [{}] SubjectDN [{}] due to {}",
                    new Object[] { flowFile, request.getRemoteHost(), foundSubject, t });
        }
        response.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, t.toString());
    }
}