List of usage examples for java.util.concurrent.atomic.AtomicBoolean.set
public final void set(boolean newValue)
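set(boolean newValue) unconditionally stores newValue with volatile-write semantics, so the update is immediately visible to other threads reading the flag with get(). Before the project examples below, here is a minimal standalone sketch (not taken from any of those projects) of the basic set/get handshake between two threads:

import java.util.concurrent.atomic.AtomicBoolean;

public class AtomicBooleanSetExample {
    public static void main(String[] args) throws InterruptedException {
        final AtomicBoolean done = new AtomicBoolean(false);

        Thread worker = new Thread(() -> {
            // ... do some work ...
            done.set(true); // publish completion to any thread polling the flag
        });
        worker.start();

        // busy-wait is fine for a tiny demo; prefer join() or latches in real code
        while (!done.get()) {
            Thread.onSpinWait();
        }
        System.out.println("worker finished: " + done.get());
    }
}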
From source file: org.apache.tinkerpop.gremlin.groovy.engine.GremlinExecutorTest.java
@Test
public void shouldAllowConcurrentModificationOfGlobals() throws Exception {
    // this test simulates a scenario that likely shouldn't happen - where globals are modified by multiple
    // threads. globals are created in a synchronized fashion typically but it's possible that someone
    // could do something like this and this test validates that concurrency exceptions don't occur as a
    // result
    final ExecutorService service = Executors.newFixedThreadPool(8, testingThreadFactory);
    final Bindings globals = new SimpleBindings();
    globals.put("g", -1);
    final GremlinExecutor gremlinExecutor = GremlinExecutor.build().globalBindings(globals).create();
    final AtomicBoolean failed = new AtomicBoolean(false);
    final int max = 512;
    final List<Pair<Integer, List<Integer>>> futures = Collections.synchronizedList(new ArrayList<>(max));

    IntStream.range(0, max).forEach(i -> {
        final int yValue = i * 2;
        final Bindings b = new SimpleBindings();
        b.put("x", i);
        b.put("y", yValue);
        final int zValue = i * -1;
        final String script = "z=" + zValue + ";[x,y,z,g]";
        try {
            service.submit(() -> {
                try {
                    // modify the global in a separate thread
                    gremlinExecutor.getGlobalBindings().put("g", i);
                    gremlinExecutor.getGlobalBindings().put(Integer.toString(i), i);
                    gremlinExecutor.getGlobalBindings().keySet().stream()
                            .filter(s -> i % 2 == 0 && !s.equals("g")).findFirst().ifPresent(globals::remove);
                    final List<Integer> result = (List<Integer>) gremlinExecutor.eval(script, b).get();
                    futures.add(Pair.with(i, result));
                } catch (Exception ex) {
                    failed.set(true);
                }
            });
        } catch (Exception ex) {
            throw new RuntimeException(ex);
        }
    });

    service.shutdown();
    assertThat(service.awaitTermination(60000, TimeUnit.MILLISECONDS), is(true));

    // likely a concurrency exception if it occurs - and if it does then we've messed up because that's what this
    // test is partially designed to protect against.
    assertThat(failed.get(), is(false));

    assertEquals(max, futures.size());
    futures.forEach(t -> {
        assertEquals(t.getValue0(), t.getValue1().get(0));
        assertEquals(t.getValue0() * 2, t.getValue1().get(1).intValue());
        assertEquals(t.getValue0() * -1, t.getValue1().get(2).intValue());
        assertThat(t.getValue1().get(3).intValue(), greaterThan(-1));
    });
}
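The pattern above - worker tasks call failed.set(true) when they hit an exception, and the test thread asserts on the flag after awaitTermination - can be reduced to the following standalone sketch. It is illustrative only (the simulated failure is an assumption, not part of the TinkerPop code):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

public class FailureFlagExample {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        AtomicBoolean failed = new AtomicBoolean(false);

        for (int i = 0; i < 100; i++) {
            final int task = i;
            pool.submit(() -> {
                try {
                    if (task % 17 == 0) {               // stand-in for real work that may throw
                        throw new IllegalStateException("simulated failure");
                    }
                } catch (Exception ex) {
                    failed.set(true);                   // record the failure for the main thread
                }
            });
        }

        pool.shutdown();
        pool.awaitTermination(60, TimeUnit.SECONDS);
        System.out.println("any task failed: " + failed.get());
    }
}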
From source file: org.apache.bookkeeper.stream.storage.impl.sc.ZkStorageContainerManagerTest.java
@Test
public void testStartContainerOnFailures() throws Exception {
    scManager.close();

    long containerId = 11L;
    AtomicBoolean returnGoodContainer = new AtomicBoolean(false);

    CompletableFuture<StorageContainer> startFuture = new CompletableFuture<>();
    StorageContainer goodSc = createStorageContainer(containerId, startFuture, FutureUtils.Void());
    mockScFactory = (scId) -> {
        if (returnGoodContainer.get()) {
            return goodSc;
        } else {
            return createStorageContainer(scId, FutureUtils.exception(new Exception("Failed to start")),
                    FutureUtils.Void());
        }
    };
    scRegistry = spy(new StorageContainerRegistryImpl(mockScFactory));

    scManager = new ZkStorageContainerManager(myEndpoint,
            new StorageConfiguration(new CompositeConfiguration())
                    .setClusterControllerScheduleInterval(1, TimeUnit.SECONDS),
            clusterMetadataStore, scRegistry, NullStatsLogger.INSTANCE);

    // start the storage container manager
    scManager.start();

    // update assignment map
    ClusterAssignmentData cad = ClusterAssignmentData.newBuilder()
            .putServers(NetUtils.endpointToString(myEndpoint),
                    ServerAssignmentData.newBuilder().addContainers(containerId).build())
            .build();
    clusterMetadataStore.updateClusterAssignmentData(cad);

    // wait until container start is called and verify it is not started.
    verify(scRegistry, timeout(10000).atLeastOnce()).startStorageContainer(eq(containerId));
    assertEquals(0, scManager.getLiveContainers().size());

    // flip the flag to return a good container to simulate successful startup
    returnGoodContainer.set(true);
    FutureUtils.complete(startFuture, goodSc);

    // wait until container start is called again and the container is started
    MoreAsserts.assertUtil(ignored -> scManager.getLiveContainers().size() >= 1, () -> null);
    assertEquals(1, scManager.getLiveContainers().size());
    assertTrue(scManager.getLiveContainers().containsKey(containerId));
}
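Here returnGoodContainer.set(true) changes the behaviour of a factory lambda that was registered earlier: the lambda captures the AtomicBoolean reference (lambdas can only capture effectively final locals, so a plain boolean would not compile) and re-reads it on every call. A small standalone sketch of the same idea, not tied to BookKeeper:

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Supplier;

public class ToggleBehaviourExample {
    public static void main(String[] args) {
        AtomicBoolean healthy = new AtomicBoolean(false);

        // the supplier captures the AtomicBoolean and re-reads it on every call
        Supplier<String> factory = () -> healthy.get() ? "good-instance" : "failing-instance";

        System.out.println(factory.get()); // failing-instance
        healthy.set(true);                 // flip the flag; no need to re-register the supplier
        System.out.println(factory.get()); // good-instance
    }
}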
From source file: io.pravega.client.stream.impl.ControllerImplTest.java
@Test
public void testParallelGetCurrentSegments() throws Exception {
    final ExecutorService executorService = Executors.newFixedThreadPool(10);
    Semaphore createCount = new Semaphore(-19);
    AtomicBoolean success = new AtomicBoolean(true);
    for (int i = 0; i < 10; i++) {
        executorService.submit(() -> {
            for (int j = 0; j < 2; j++) {
                try {
                    CompletableFuture<StreamSegments> streamSegments;
                    streamSegments = controllerClient.getCurrentSegments("scope1", "streamparallel");
                    assertTrue(streamSegments.get().getSegments().size() == 2);
                    assertEquals(new Segment("scope1", "streamparallel", 0),
                            streamSegments.get().getSegmentForKey(0.2));
                    assertEquals(new Segment("scope1", "streamparallel", 1),
                            streamSegments.get().getSegmentForKey(0.6));
                    createCount.release();
                } catch (Exception e) {
                    log.error("Exception when getting segments: {}", e);

                    // Don't wait for other threads to complete.
                    success.set(false);
                    createCount.release(20);
                }
            }
        });
    }
    createCount.acquire();
    executorService.shutdownNow();
    assertTrue(success.get());
}
From source file: anhttpserver.ServerTest.java
@Test
public void multiThreadContextIsolationTest() throws Exception {
    final String res1 = "1111111111";
    final String res2 = "1";
    final String TEST_HEADER = "TEST-HEADER";
    final AtomicBoolean testPassed = new AtomicBoolean(true);

    class ThreadTestHttpHandlerAdapter extends ByteArrayHandlerAdapter {
        private String result;
        private int timeToSleep;

        ThreadTestHttpHandlerAdapter(String result, int timeToSleep) {
            this.result = result;
            this.timeToSleep = timeToSleep;
        }

        @Override
        public byte[] getResponseAsByteArray(HttpRequestContext httpRequestContext) throws IOException {
            setResponseHeader(TEST_HEADER, result, httpRequestContext);
            try {
                Thread.sleep(timeToSleep);
            } catch (InterruptedException e) {
                throw new RuntimeException(e);
            }
            return result.getBytes();
        }
    }

    class ThreadTester implements Runnable {
        private String url;
        private String result;
        private int repeatCount;

        ThreadTester(String url, String result, int repeatCount) {
            this.url = url;
            this.result = result;
            this.repeatCount = repeatCount;
        }

        public void run() {
            try {
                for (int i = 0; i < repeatCount; i++) {
                    if (!testPassed.get()) {
                        return;
                    }
                    URLConnection connection = getConnection(url);
                    String headerValue = connection.getHeaderField(TEST_HEADER);
                    if (!headerValue.equals(result)) {
                        testPassed.set(false);
                        return;
                    }
                    if (!getResult(connection).equals(result)) {
                        testPassed.set(false);
                        return;
                    }
                }
            } catch (Exception e) {
                testPassed.set(false);
            }
        }
    }

    server.addHandler("/thread1", new ThreadTestHttpHandlerAdapter(res1, 0));
    server.addHandler("/thread2", new ThreadTestHttpHandlerAdapter(res2, 1));

    int count = 50;
    Thread[] threads = new Thread[count];
    String url1 = "http://localhost:9999/thread1";
    String url2 = "http://localhost:9999/thread2";

    for (int i = 0; i < count; i++) {
        threads[i] = new Thread(new ThreadTester(i % 2 == 0 ? url1 : url2, i % 2 == 0 ? res1 : res2, 20));
        threads[i].start();
    }

    for (int i = 0; i < count; i++) {
        threads[i].join();
    }

    assertTrue(testPassed.get());
}
From source file: com.heliosdecompiler.bootstrapper.Bootstrapper.java
private static HeliosData loadHelios() throws IOException {
    System.out.println("Finding Helios implementation");

    HeliosData data = new HeliosData();

    boolean needsToDownload = !IMPL_FILE.exists();
    if (!needsToDownload) {
        try (JarFile jarFile = new JarFile(IMPL_FILE)) {
            ZipEntry entry = jarFile.getEntry("META-INF/MANIFEST.MF");
            if (entry == null) {
                needsToDownload = true;
            } else {
                Manifest manifest = new Manifest(jarFile.getInputStream(entry));
                String ver = manifest.getMainAttributes().getValue("Implementation-Version");
                try {
                    data.buildNumber = Integer.parseInt(ver);
                    data.version = manifest.getMainAttributes().getValue("Version");
                    data.mainClass = manifest.getMainAttributes().getValue("Main-Class");
                } catch (NumberFormatException e) {
                    needsToDownload = true;
                }
            }
        } catch (IOException e) {
            needsToDownload = true;
        }
    }

    if (needsToDownload) {
        URL latestJar = new URL(LATEST_JAR);
        System.out.println("Downloading latest Helios implementation");

        FileOutputStream out = new FileOutputStream(IMPL_FILE);
        HttpURLConnection connection = (HttpURLConnection) latestJar.openConnection();
        if (connection.getResponseCode() == 200) {
            int contentLength = connection.getContentLength();
            if (contentLength > 0) {
                InputStream stream = connection.getInputStream();
                byte[] buffer = new byte[1024];
                int amnt;
                AtomicInteger total = new AtomicInteger();
                AtomicBoolean stop = new AtomicBoolean(false);

                Thread progressBar = new Thread() {
                    public void run() {
                        JPanel panel = new JPanel();
                        panel.setBorder(BorderFactory.createEmptyBorder(20, 20, 20, 20));

                        JLabel label = new JLabel();
                        label.setText("Downloading latest Helios build");
                        panel.add(label);

                        GridLayout layout = new GridLayout();
                        layout.setColumns(1);
                        layout.setRows(3);
                        panel.setLayout(layout);

                        JProgressBar pbar = new JProgressBar();
                        pbar.setMinimum(0);
                        pbar.setMaximum(100);
                        panel.add(pbar);

                        JTextArea textArea = new JTextArea(1, 3);
                        textArea.setOpaque(false);
                        textArea.setEditable(false);
                        textArea.setText("Downloaded 00.00MB/00.00MB");
                        panel.add(textArea);

                        JFrame frame = new JFrame();
                        frame.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE);
                        frame.setContentPane(panel);
                        frame.pack();
                        frame.setLocationRelativeTo(null);
                        frame.setVisible(true);

                        while (!stop.get()) {
                            SwingUtilities.invokeLater(
                                    () -> pbar.setValue((int) (100.0 * total.get() / contentLength)));
                            textArea.setText("Downloaded " + bytesToMeg(total.get()) + "MB/"
                                    + bytesToMeg(contentLength) + "MB");
                            try {
                                Thread.sleep(100);
                            } catch (InterruptedException ignored) {
                            }
                        }
                        frame.dispose();
                    }
                };
                progressBar.start();

                while ((amnt = stream.read(buffer)) != -1) {
                    out.write(buffer, 0, amnt);
                    total.addAndGet(amnt);
                }
                stop.set(true);
                return loadHelios();
            } else {
                throw new IOException("Content-Length set to " + connection.getContentLength());
            }
        } else if (connection.getResponseCode() == 404) {
            // Most likely bootstrapper is out of date
            throw new RuntimeException("Bootstrapper out of date!");
        } else {
            throw new IOException(connection.getResponseCode() + ": " + connection.getResponseMessage());
        }
    }

    return data;
}
From source file: com.sixt.service.framework.kafka.messaging.KafkaFailoverIntegrationTest.java
@Test
public void manualKafkaTest() throws InterruptedException {
    ServiceProperties serviceProperties = fillServiceProperties();

    // Topics are created with 3 partitions - see docker-compose-kafkafailover-integrationtest.yml
    Topic ping = new Topic("ping");
    Topic pong = new Topic("pong");

    AtomicInteger sentMessages = new AtomicInteger(0);
    AtomicInteger sendFailures = new AtomicInteger(0);
    AtomicInteger recievedMessages = new AtomicInteger(0);

    Producer producer = new ProducerFactory(serviceProperties).createProducer();
    final AtomicBoolean produceMessages = new AtomicBoolean(true);

    // Produce messages until test tells producer to stop.
    ExecutorService producerExecutor = Executors.newSingleThreadExecutor();
    producerExecutor.submit(new Runnable() {
        @Override
        public void run() {
            OrangeContext context = new OrangeContext();
            Sleeper sleeper = new Sleeper();

            while (produceMessages.get()) {
                try {
                    String key = RandomStringUtils.randomAscii(5);
                    SayHelloToCmd payload = SayHelloToCmd.newBuilder().setName(key).build();

                    Message request = Messages.requestFor(ping, pong, key, payload, context);
                    producer.send(request);
                    sentMessages.incrementAndGet();

                    sleeper.sleepNoException(1000);
                } catch (Throwable t) {
                    sendFailures.incrementAndGet();
                    logger.error("Caught exception in producer loop", t);
                }
            }
        }
    });

    Consumer consumer = consumerFactoryWithHandler(serviceProperties, SayHelloToCmd.class,
            new MessageHandler<SayHelloToCmd>() {
                @Override
                public void onMessage(Message<SayHelloToCmd> message, OrangeContext context) {
                    recievedMessages.incrementAndGet();
                }
            }).consumerForTopic(ping, new DiscardFailedMessages());

    // Wait to allow manual fiddling with Kafka. Sync with global test timeout above.
    Thread.sleep(2 * 60 * 1000);

    produceMessages.set(false);
    producer.shutdown();

    Thread.sleep(10_000);
    consumer.shutdown();

    logger.info("sentMessages: " + sentMessages.get());
    logger.info("sendFailures: " + sendFailures.get());
    logger.info("recievedMessages: " + recievedMessages.get());
}
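The produceMessages flag above is the classic cooperative stop switch: the background loop polls get() on each iteration and the test thread ends it with set(false). Reduced to a standalone sketch (the printed messages and timings are illustrative, not from the Sixt framework):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

public class StopFlagExample {
    public static void main(String[] args) throws InterruptedException {
        final AtomicBoolean running = new AtomicBoolean(true);
        ExecutorService executor = Executors.newSingleThreadExecutor();

        executor.submit(() -> {
            while (running.get()) {           // re-checked on every iteration
                System.out.println("producing...");
                try {
                    Thread.sleep(200);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    return;
                }
            }
            System.out.println("producer stopped");
        });

        Thread.sleep(1000);
        running.set(false);                   // cooperative shutdown signal
        executor.shutdown();
        executor.awaitTermination(5, TimeUnit.SECONDS);
    }
}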
From source file: com.microsoft.tfs.core.clients.versioncontrol.internal.WebServiceLayerLocalWorkspaces.java
@Override
public GetOperation[] undoPendingChanges(final String workspaceName, final String ownerName,
        final ItemSpec[] items, final AtomicReference<Failure[]> failures, final String[] itemAttributeFilters,
        final String[] itemPropertyFilters, final AtomicBoolean onlineOperation, final boolean deleteAdds,
        final AtomicReference<ChangePendedFlags> changePendedFlags) {
    onlineOperation.set(true);

    // set this to none for local workspaces, if the call reaches the server
    // the flag will get overwritten
    changePendedFlags.set(ChangePendedFlags.NONE);

    final Workspace localWorkspace = getLocalWorkspace(workspaceName, ownerName);
    if (localWorkspace != null) {
        final AtomicReference<GetOperation[]> toReturn = new AtomicReference<GetOperation[]>();

        final LocalWorkspaceTransaction transaction = new LocalWorkspaceTransaction(localWorkspace);
        try {
            final AtomicReference<Failure[]> delegateFailures = new AtomicReference<Failure[]>(new Failure[0]);
            final AtomicBoolean onlineOperationRequired = new AtomicBoolean(false);

            transaction.execute(new AllTablesTransaction() {
                @Override
                public void invoke(final LocalWorkspaceProperties wp, final WorkspaceVersionTable lv,
                        final LocalPendingChangesTable pc) {
                    toReturn.set(LocalDataAccessLayer.undoPendingChanges(localWorkspace, wp, lv, pc, items,
                            delegateFailures, onlineOperationRequired, itemPropertyFilters));

                    if (onlineOperationRequired.get()) {
                        transaction.abort();
                        toReturn.set(null);
                    }

                    /*
                     * we should check whether we are going to cause an existing file
                     * conflict; if we are, abort the transaction, since we don't want
                     * to try and contact the server in an offline undo.
                     */
                    if (toReturn.get() != null) {
                        Map<String, GetOperation> localItemDictionary = null;

                        for (final GetOperation op : toReturn.get()) {
                            if (op.getItemType() == ItemType.FILE && op.getTargetLocalItem() != null
                                    && op.getTargetLocalItem().length() > 0
                                    && LocalPath.equals(op.getTargetLocalItem(), op.getCurrentLocalItem()) == false) {
                                final WorkspaceLocalItem item = lv.getByLocalItem(op.getTargetLocalItem());

                                if ((item == null || item.isDeleted())
                                        && new File(op.getTargetLocalItem()).exists()) {
                                    if (localItemDictionary == null) {
                                        localItemDictionary = new HashMap<String, GetOperation>();

                                        /*
                                         * we go through our list and keep track of adds we
                                         * are removing - this is for the shelve / move case.
                                         */
                                        for (final GetOperation getOp : toReturn.get()) {
                                            if (getOp.getTargetLocalItem() != null
                                                    && getOp.getTargetLocalItem().length() > 0
                                                    && getOp.getItemType() == ItemType.FILE) {
                                                final GetOperation currentValue = localItemDictionary
                                                        .get(getOp.getTargetLocalItem());
                                                if (currentValue != null) {
                                                    // don't overwrite an add
                                                    if (currentValue.getChangeType().contains(ChangeType.ADD)) {
                                                        localItemDictionary.put(getOp.getTargetLocalItem(), getOp);
                                                    }
                                                } else {
                                                    localItemDictionary.put(getOp.getTargetLocalItem(), getOp);
                                                }
                                            }
                                        }
                                    }

                                    final GetOperation existingItem =
                                            localItemDictionary.get(op.getTargetLocalItem());

                                    if (existingItem != null
                                            && existingItem.getChangeType().contains(ChangeType.ADD)) {
                                        // if we are going to be removing this anyway don't worry
                                        if (deleteAdds) {
                                            continue;
                                        }
                                    }

                                    if (existingItem == null
                                            || !tryMoveAddLocation(existingItem, localItemDictionary)) {
                                        throw new VersionControlException(MessageFormat.format(
                                                Messages.getString(
                                                        "WebServiceLayerLocalWorkspaces.UndoItemExistsLocallyFormat"), //$NON-NLS-1$
                                                (op.getCurrentLocalItem() != null
                                                        && op.getCurrentLocalItem().length() > 0)
                                                                ? op.getCurrentLocalItem()
                                                                : op.getTargetLocalItem(),
                                                op.getTargetLocalItem()));
                                    }
                                }
                            }
                        }
                    }
                }
            });

            if (null != toReturn.get()) {
                onlineOperation.set(false);
                failures.set(delegateFailures.get());
                return toReturn.get();
            }
        } finally {
            try {
                transaction.close();
            } catch (final IOException e) {
                throw new VersionControlException(e);
            }
        }

        final Workspace w = reconcileIfLocal(workspaceName, ownerName);

        // Lock the workspace which will receive the pending changes
        final WorkspaceLock lock = lockIfLocal(w);

        try {
            try {
                if (getServiceLevel().getValue() >= WebServiceLevel.TFS_2012_QU1.getValue()) {
                    final _Repository5Soap_UndoPendingChangesInLocalWorkspaceResponse response = getRepository5()
                            .undoPendingChangesInLocalWorkspace(workspaceName, ownerName,
                                    (_ItemSpec[]) WrapperUtils.unwrap(_ItemSpec.class, items),
                                    itemPropertyFilters, itemAttributeFilters,
                                    VersionControlConstants.MAX_SERVER_PATH_SIZE);

                    failures.set((Failure[]) WrapperUtils.wrap(Failure.class, response.getFailures()));
                    changePendedFlags.set(new ChangePendedFlags(response.getChangePendedFlags()));
                    toReturn.set((GetOperation[]) WrapperUtils.wrap(GetOperation.class,
                            response.getUndoPendingChangesInLocalWorkspaceResult()));
                } else {
                    final _Repository4Soap_UndoPendingChangesInLocalWorkspaceResponse response = getRepository4()
                            .undoPendingChangesInLocalWorkspace(workspaceName, ownerName,
                                    (_ItemSpec[]) WrapperUtils.unwrap(_ItemSpec.class, items),
                                    itemPropertyFilters, itemAttributeFilters);

                    failures.set((Failure[]) WrapperUtils.wrap(Failure.class, response.getFailures()));
                    changePendedFlags.set(new ChangePendedFlags(response.getChangePendedFlags()));
                    toReturn.set((GetOperation[]) WrapperUtils.wrap(GetOperation.class,
                            response.getUndoPendingChangesInLocalWorkspaceResult()));
                }
            } catch (final ProxyException e) {
                VersionControlExceptionMapper.map(e);
            }

            syncWorkingFoldersIfNecessary(w, changePendedFlags.get());
            syncPendingChangesIfLocal(w, toReturn.get(), itemPropertyFilters);

            // When a pending add is undone, the item on disk is not
            // touched; so we need to inform the scanner that the item is
            // invalidated so it is re-scanned. We'll invalidate the scanner
            // if we detect that we went to the server to undo a pending
            // add.
            if (null != toReturn.get()) {
                for (final GetOperation op : toReturn.get()) {
                    if (op.getChangeType().contains(ChangeType.ADD)) {
                        localWorkspace.getWorkspaceWatcher().markPathChanged(""); //$NON-NLS-1$
                        break;
                    }
                }
            }

            return toReturn.get();
        } finally {
            if (lock != null) {
                lock.close();
            }
        }
    } else {
        return super.undoPendingChanges(workspaceName, ownerName, items, failures, itemAttributeFilters,
                itemPropertyFilters, onlineOperation, deleteAdds, changePendedFlags);
    }
}
From source file: de.acosix.alfresco.mtsupport.repo.sync.TenantAwareChainingUserRegistrySynchronizer.java
protected void synchronize(final boolean forceUpdate, final boolean isFullSync, final boolean splitTxns) {
    final String currentDomain = TenantUtil.getCurrentDomain();
    LOGGER.debug("Running {} sync with deletions {}allowed in tenant {}", forceUpdate ? "full" : "differential",
            this.allowDeletions ? "" : "not ",
            TenantService.DEFAULT_DOMAIN.equals(currentDomain) ? "-default-" : currentDomain);

    final QName lockQName = this.getLockQNameForCurrentTenant();
    String lockToken;
    try {
        // splitTxns = true: likely startup-triggered so give up immediately if lock not available
        // (startup of a cluster node already performs synchronisation)
        // splitTxns = false: likely login-triggered so wait for the lock and retry
        lockToken = this.jobLockService.getLock(lockQName, LOCK_TTL, splitTxns ? 0 : LOCK_TTL,
                splitTxns ? 1 : 10);
    } catch (final LockAcquisitionException laex) {
        LOGGER.warn(
                "User registry synchronization already running in another thread / on another cluster node. Synchronize aborted");
        lockToken = null;
    }

    if (lockToken != null) {
        final AtomicBoolean synchRunning = new AtomicBoolean(true);
        final AtomicBoolean lockReleased = new AtomicBoolean(false);
        try {
            // the original class used a complex setup with an asynch refresher thread - legacy code
            // that was never adapted when JobLockRefreshCallback was introduced in 3.4
            this.jobLockService.refreshLock(lockToken, lockQName, LOCK_TTL, new JobLockRefreshCallback() {

                /**
                 * {@inheritDoc}
                 */
                @Override
                public void lockReleased() {
                    lockReleased.set(true);
                }

                @Override
                public boolean isActive() {
                    return synchRunning.get();
                }
            });

            final Map<String, UserRegistry> plugins = this.getPluginsToSync();
            final Set<String> visitedIds = new TreeSet<>();

            this.notifySyncStart(plugins.keySet());

            for (final Entry<String, UserRegistry> pluginEntry : plugins.entrySet()) {
                final String id = pluginEntry.getKey();
                final UserRegistry plugin = pluginEntry.getValue();

                if (LOGGER.isDebugEnabled() && this.mbeanServer != null) {
                    this.logPluginConfig(id);
                }

                LOGGER.info("Synchronizing users and groups with user registry {} in tenant {}", id,
                        TenantService.DEFAULT_DOMAIN.equals(currentDomain) ? "-default-" : currentDomain);
                if (isFullSync) {
                    LOGGER.info(
                            "Full synchronisation with user registry {} in tenant {} - deletions enabled: {} (if true, some users and groups previously created by synchronization with this user registry may be removed, otherwise users / groups removed from this registry will be logged only and remain in the repository while users previously found in a different registry will be moved in the repository rather than recreated)",
                            id, TenantService.DEFAULT_DOMAIN.equals(currentDomain) ? "-default-" : currentDomain,
                            this.allowDeletions);
                }

                final boolean requiresNew = splitTxns
                        || AlfrescoTransactionSupport.getTransactionReadState() == TxnReadState.TXN_READ_ONLY;

                this.syncWithPlugin(id, plugin, forceUpdate, isFullSync, requiresNew, visitedIds,
                        plugins.keySet());

                this.applicationEventPublisher.publishEvent(new SynchronizeDirectoryEndEvent(this, id));
            }

            this.notifySyncEnd();
        } catch (final RuntimeException re) {
            this.notifySyncEnd(re);
            LOGGER.error("Synchronization aborted due to error", re);
            throw re;
        } finally {
            synchRunning.set(false);
            this.jobLockService.releaseLock(lockToken, lockQName);
        }
    }
}
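In this example set drives a pair of lifecycle flags: the callback reports the sync as active while synchRunning is true, lockReleased records an external event, and synchRunning.set(false) in the finally block guarantees the callback goes inactive even if the sync throws. A generic standalone sketch of the same two-flag pattern (the RefreshCallback interface below is a hypothetical stand-in, not the Alfresco API):

import java.util.concurrent.atomic.AtomicBoolean;

public class LifecycleFlagsExample {

    // hypothetical stand-in for a callback like JobLockRefreshCallback
    interface RefreshCallback {
        boolean isActive();
        void released();
    }

    public static void main(String[] args) {
        final AtomicBoolean running = new AtomicBoolean(true);
        final AtomicBoolean released = new AtomicBoolean(false);

        RefreshCallback callback = new RefreshCallback() {
            @Override
            public boolean isActive() {
                return running.get();       // the refresher keeps going while this is true
            }

            @Override
            public void released() {
                released.set(true);         // record that the resource was taken away
            }
        };

        try {
            System.out.println("active: " + callback.isActive()); // true while the work runs
        } finally {
            running.set(false);             // always mark inactive, even if the work above throws
        }
        System.out.println("active: " + callback.isActive() + ", released: " + released.get());
    }
}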
From source file: com.microsoft.tfs.core.clients.versioncontrol.internal.WebServiceLayerLocalWorkspaces.java
@Override
public GetOperation[] pendChanges(final String workspaceName, final String ownerName,
        final ChangeRequest[] changes, final PendChangesOptions pendChangesOptions,
        final SupportedFeatures supportedFeatures, final AtomicReference<Failure[]> failures,
        final String[] itemPropertyFilters, final String[] itemAttributeFilters, final boolean updateDisk,
        final AtomicBoolean onlineOperation, final AtomicReference<ChangePendedFlags> changePendedFlags) {
    onlineOperation.set(false);

    // set this to none for local workspaces, if the call reaches the server
    // the flag will get overwritten
    changePendedFlags.set(ChangePendedFlags.NONE);

    int unlockCount = 0;

    final Workspace localWorkspace = getLocalWorkspace(workspaceName, ownerName);
    if (localWorkspace != null) {
        boolean attributeChange = false;
        boolean nonExecuteSymlinkBitPropertyChange = false;

        if (null != itemAttributeFilters && itemAttributeFilters.length > 0) {
            attributeChange = true;
        }

        // If the property filters are only for the executable bit, we can
        // handle that locally, otherwise we must go to the server.
        if (null != itemPropertyFilters && itemPropertyFilters.length > 0) {
            for (final String filter : itemPropertyFilters) {
                /*
                 * Not using wildcard matching here: just because a wildcard
                 * _does_ match the executable key _doesn't_ mean it
                 * wouldn't match others on the server. So only consider a
                 * direct match against the executable key to keep
                 * processing locally.
                 */
                if (PropertyValue.comparePropertyNames(PropertyConstants.EXECUTABLE_KEY, filter) != 0
                        && PropertyValue.comparePropertyNames(PropertyConstants.SYMBOLIC_KEY, filter) != 0) {
                    nonExecuteSymlinkBitPropertyChange = true;
                    break;
                }
            }
        }

        RequestType requestType = RequestType.NONE;
        boolean requestingLock = false;

        for (final ChangeRequest changeRequest : changes) {
            if (RequestType.NONE == requestType) {
                requestType = changeRequest.getRequestType();
            } else if (requestType != changeRequest.getRequestType()) {
                // TODO: Move string from server assembly
                throw new VersionControlException("Not all changes had the same request type"); //$NON-NLS-1$
            }

            // If the caller is requesting a lock, then the call is a server
            // call, unless the user is performing an add and the LockLevel
            // is None.
            // Is it possible to have different lock levels on different
            // ChangeRequest objects?
            if (changeRequest.getLockLevel() != LockLevel.UNCHANGED
                    && !(changeRequest.getLockLevel() == LockLevel.NONE
                            && changeRequest.getRequestType() == RequestType.ADD)) {
                requestingLock = true;
            }

            if (changeRequest.getLockLevel() == LockLevel.NONE
                    && changeRequest.getRequestType().equals(RequestType.LOCK)) {
                unlockCount++;
            }
        }

        final boolean silent = pendChangesOptions.contains(PendChangesOptions.SILENT);

        if (!requestingLock && !attributeChange && !nonExecuteSymlinkBitPropertyChange) {
            if (requestType == RequestType.ADD || requestType == RequestType.EDIT
                    || requestType == RequestType.DELETE || requestType == RequestType.RENAME
                    || requestType == RequestType.PROPERTY) {
                final LocalWorkspaceTransaction transaction = new LocalWorkspaceTransaction(localWorkspace);
                try {
                    final AtomicReference<Failure[]> delegateFailures = new AtomicReference<Failure[]>();
                    final AtomicReference<GetOperation[]> toReturn = new AtomicReference<GetOperation[]>();

                    final RequestType transactionRequestType = requestType;

                    transaction.execute(new AllTablesTransaction() {
                        @Override
                        public void invoke(final LocalWorkspaceProperties wp, final WorkspaceVersionTable lv,
                                final LocalPendingChangesTable pc) {
                            if (transactionRequestType == RequestType.ADD) {
                                toReturn.set(LocalDataAccessLayer.pendAdd(localWorkspace, wp, lv, pc, changes,
                                        silent, delegateFailures, itemPropertyFilters));
                            } else if (transactionRequestType == RequestType.EDIT) {
                                toReturn.set(LocalDataAccessLayer.pendEdit(localWorkspace, wp, lv, pc, changes,
                                        silent, delegateFailures, itemPropertyFilters));
                            } else if (transactionRequestType == RequestType.DELETE) {
                                toReturn.set(LocalDataAccessLayer.pendDelete(localWorkspace, wp, lv, pc,
                                        changes, silent, delegateFailures, itemPropertyFilters));
                            } else if (transactionRequestType == RequestType.RENAME) {
                                final AtomicBoolean onlineOperationRequired = new AtomicBoolean(false);

                                toReturn.set(LocalDataAccessLayer.pendRename(localWorkspace, wp, lv, pc,
                                        changes, silent, delegateFailures, onlineOperationRequired,
                                        itemPropertyFilters));

                                if (onlineOperationRequired.get()) {
                                    toReturn.set(null);
                                    transaction.abort();
                                } else if (updateDisk) {
                                    // we don't want to file a conflict
                                    // while offline, so we check up front.
                                    for (final GetOperation getOp : toReturn.get()) {
                                        if (getOp.getTargetLocalItem() != null
                                                && !LocalPath.equals(getOp.getSourceLocalItem(),
                                                        getOp.getTargetLocalItem())
                                                && new File(getOp.getTargetLocalItem()).exists()) {
                                            throw new VersionControlException(MessageFormat.format(
                                                    Messages.getString(
                                                            "WebServiceLayerLocalWorkspaces.FileExistsFormat"), //$NON-NLS-1$
                                                    getOp.getTargetLocalItem()));
                                        }
                                    }
                                }
                            }

                            if (transactionRequestType == RequestType.PROPERTY) {
                                final AtomicBoolean onlineOperationRequired = new AtomicBoolean(false);

                                toReturn.set(LocalDataAccessLayer.pendPropertyChange(localWorkspace, wp, lv,
                                        pc, changes, silent, delegateFailures, onlineOperationRequired,
                                        itemPropertyFilters));

                                if (onlineOperationRequired.get()) {
                                    toReturn.set(null);
                                    transaction.abort();
                                }
                            }
                        }
                    });

                    if (toReturn.get() != null) {
                        // Offline operation successfully completed.
                        failures.set(delegateFailures.get());
                        return toReturn.get();
                    }
                } finally {
                    try {
                        transaction.close();
                    } catch (final IOException e) {
                        throw new VersionControlException(e);
                    }
                }
            } else if (requestType == RequestType.BRANCH || requestType == RequestType.UNDELETE
                    || requestType == RequestType.LOCK) {
                // Forward to server
            } else {
                // TODO: Remove this when all RequestTypes are supported here.
                throw new VersionControlException("Not currently implemented for local workspaces"); //$NON-NLS-1$
            }
        }
    }

    if (null != localWorkspace) {
        // if we only have requests for unlocking, move on if the reconcile fails;
        // this is needed for unlock other
        final Workspace w = reconcileIfLocal(workspaceName, ownerName, false, false,
                unlockCount == changes.length, null);

        // Lock the workspace which will receive the pending changes
        final WorkspaceLock lock = lockIfLocal(w);

        try {
            final GetOperation[] toReturn;
            try {
                if (getServiceLevel().getValue() >= WebServiceLevel.TFS_2012_QU1.getValue()) {
                    final _Repository5Soap_PendChangesInLocalWorkspaceResponse response = getRepository5()
                            .pendChangesInLocalWorkspace(workspaceName, ownerName,
                                    (_ChangeRequest[]) WrapperUtils.unwrap(_ChangeRequest.class, changes),
                                    pendChangesOptions.toIntFlags(), supportedFeatures.toIntFlags(),
                                    itemPropertyFilters, itemAttributeFilters,
                                    VersionControlConstants.MAX_SERVER_PATH_SIZE);
                    toReturn = (GetOperation[]) WrapperUtils.wrap(GetOperation.class,
                            response.getPendChangesInLocalWorkspaceResult());
                    failures.set((Failure[]) WrapperUtils.wrap(Failure.class, response.getFailures()));
                    changePendedFlags.set(new ChangePendedFlags(response.getChangePendedFlags()));
                } else {
                    final _Repository4Soap_PendChangesInLocalWorkspaceResponse response = getRepository4()
                            .pendChangesInLocalWorkspace(workspaceName, ownerName,
                                    (_ChangeRequest[]) WrapperUtils.unwrap(_ChangeRequest.class, changes),
                                    pendChangesOptions.toIntFlags(), supportedFeatures.toIntFlags(),
                                    itemPropertyFilters, itemAttributeFilters);
                    toReturn = (GetOperation[]) WrapperUtils.wrap(GetOperation.class,
                            response.getPendChangesInLocalWorkspaceResult());
                    failures.set((Failure[]) WrapperUtils.wrap(Failure.class, response.getFailures()));
                    changePendedFlags.set(new ChangePendedFlags(response.getChangePendedFlags()));
                }
            } catch (final ProxyException e) {
                throw VersionControlExceptionMapper.map(e);
            }

            syncWorkingFoldersIfNecessary(w, changePendedFlags.get());
            syncPendingChangesIfLocal(w, toReturn, itemPropertyFilters);

            if (RequestType.ADD == changes[0].getRequestType()) {
                // The client does not process the getops returned from a
                // PendAdd call. Because the server has created local
                // version rows for us, we need to update the local version
                // table to contain these rows too.
                LocalDataAccessLayer.afterAdd(localWorkspace, toReturn);

                // When a pending add is created, the item on disk is not
                // touched; so we need to inform the scanner that the item
                // is invalidated so it is re-scanned. Rather than go
                // through the local paths on which adds were pended, we'll
                // invalidate the workspace. This is not a common code path.
                localWorkspace.getWorkspaceWatcher().markPathChanged(""); //$NON-NLS-1$
            }

            onlineOperation.set(true);
            return toReturn;
        } finally {
            if (lock != null) {
                lock.close();
            }
        }
    } else {
        return super.pendChanges(workspaceName, ownerName, changes, pendChangesOptions, supportedFeatures,
                failures, itemPropertyFilters, itemAttributeFilters, updateDisk, onlineOperation,
                changePendedFlags);
    }
}
From source file: com.spectralogic.ds3client.integration.GetJobManagement_Test.java
@Test
public void testThatFifoIsNotProcessed() throws IOException, InterruptedException {
    Assume.assumeFalse(Platform.isWindows());

    final String tempPathPrefix = null;
    final Path tempDirectory = Files.createTempDirectory(Paths.get("."), tempPathPrefix);
    final String BEOWULF_FILE_NAME = "beowulf.txt";
    final AtomicBoolean caughtException = new AtomicBoolean(false);

    try {
        Runtime.getRuntime().exec("mkfifo " + Paths.get(tempDirectory.toString(), BEOWULF_FILE_NAME)).waitFor();

        final List<Ds3Object> ds3Objects = Arrays.asList(new Ds3Object(BEOWULF_FILE_NAME));

        final Ds3ClientHelpers.Job readJob = HELPERS.startReadJob(BUCKET_NAME, ds3Objects);
        readJob.transfer(new FileObjectPutter(tempDirectory));
    } catch (final UnrecoverableIOException e) {
        assertTrue(e.getMessage().contains(BEOWULF_FILE_NAME));
        caughtException.set(true);
    } finally {
        FileUtils.deleteDirectory(tempDirectory.toFile());
    }

    assertTrue(caughtException.get());
}