Example usage for java.util.concurrent.atomic AtomicReference set

Introduction

On this page you can find example usages of java.util.concurrent.atomic AtomicReference.set.

Prototype

public final void set(V newValue) 

Document

Sets the value to newValue, with memory effects as specified by VarHandle#setVolatile.
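
For orientation, here is a minimal, self-contained sketch (not taken from any of the projects below; the class name SetExample is made up for illustration) showing a plain set followed by get:

import java.util.concurrent.atomic.AtomicReference;

public class SetExample {
    public static void main(String[] args) {
        // Start with a default value.
        final AtomicReference<String> config = new AtomicReference<>("default");

        // set() unconditionally replaces the stored value with volatile write
        // semantics, so other threads calling config.get() will see "updated".
        config.set("updated");

        System.out.println(config.get()); // prints "updated"
    }
}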

Usage

From source file:com.microsoft.tfs.core.clients.versioncontrol.internal.localworkspace.LocalDataAccessLayer.java

/**
 * Retrieves the authoritative copy of the working folders for a local
 * workspace.
 *
 * @param workspace
 *        The local workspace whose working folders should be fetched
 * @return The working folders for the local workspace
 */
public static WorkingFolder[] queryWorkingFolders(final Workspace workspace) {
    final AtomicReference<WorkingFolder[]> toReturn = new AtomicReference<WorkingFolder[]>(null);

    final LocalWorkspaceTransaction transaction = new LocalWorkspaceTransaction(workspace);
    try {
        transaction.execute(new WorkspacePropertiesTransaction() {
            @Override
            public void invoke(final LocalWorkspaceProperties wp) {
                // Make a deep copy to return to the caller.
                toReturn.set(WorkingFolder.clone(wp.getWorkingFolders()));
            }
        });
    } finally {
        try {
            transaction.close();
        } catch (final IOException e) {
            throw new VersionControlException(e);
        }
    }

    return toReturn.get();
}
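
The pattern in this example - an effectively final AtomicReference written from inside an anonymous callback and read after the transaction returns - recurs throughout the listings below, because Java does not let an anonymous class or lambda assign to a local variable of the enclosing method. A stripped-down sketch of the same idiom (a hypothetical Runnable-based example, not TFS code):

import java.util.concurrent.atomic.AtomicReference;

public class CallbackResult {
    public static void main(String[] args) {
        final AtomicReference<String> result = new AtomicReference<>(null);

        Runnable callback = new Runnable() {
            @Override
            public void run() {
                // The callback cannot assign to a plain local variable of the
                // enclosing method, but it can call set() on the shared holder.
                result.set("value produced inside the callback");
            }
        };

        callback.run();
        System.out.println(result.get());
    }
}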

From source file:com.microsoft.tfs.core.clients.versioncontrol.internal.localworkspace.LocalDataAccessLayer.java

public static void processConversionBaselineRequests(final Workspace workspace,
        final Iterable<BaselineRequest> requests) throws CoreCancelException {
    Check.notNull(workspace, "workspace"); //$NON-NLS-1$
    Check.notNull(requests, "requests"); //$NON-NLS-1$

    final AtomicReference<BaselineFolderCollection> baselineFolderCollection = new AtomicReference<BaselineFolderCollection>();

    LocalWorkspaceTransaction transaction = new LocalWorkspaceTransaction(workspace);
    try {
        transaction.execute(new WorkspacePropertiesTransaction() {
            @Override
            public void invoke(final LocalWorkspaceProperties wp) {
                baselineFolderCollection.set(new BaselineFolderCollection(workspace, wp.getBaselineFolders()));
            }
        });
    } finally {
        try {
            transaction.close();
        } catch (final IOException e) {
            throw new VersionControlException(e);
        }
    }

    final AtomicReference<Iterable<BaselineRequest>> failedLocal = new AtomicReference<Iterable<BaselineRequest>>();

    baselineFolderCollection.get().processBaselineRequests(workspace, requests, true /* throwIfCanceled */,
            failedLocal);

    boolean hasAnyFailed = false;
    if (failedLocal.get() != null) {
        for (final BaselineRequest r : failedLocal.get()) {
            if (r != null) {
                hasAnyFailed = true;
                break;
            }
        }
    }

    if (hasAnyFailed) {
        /*
         * The set of BaselineRequests which had a populated LocalItem,
         * indicating that the content on the local disk is the committed
         * content. However, we hashed the content while gzipping it, and
         * found that the hash value did not match. (The length matched, or
         * we would not have put a local item on the BaselineRequest.) As a
         * result, we fell back to the download URL to fetch this content.
         *
         * We need to go back through this list and mark the corresponding
         * local version entries with a LastModifiedTime of -1 so that when
         * the scanner runs, these items are hashed again and discovered as
         * pending edits.
         */

        transaction = new LocalWorkspaceTransaction(workspace);
        try {
            transaction.execute(new LocalVersionTransaction() {

                @Override
                public void invoke(final WorkspaceVersionTable lv) {
                    for (final BaselineRequest request : failedLocal.get()) {
                        final WorkspaceLocalItem lvEntry = lv.getByLocalItem(request.getSourceLocalItem());

                        if (null != lvEntry) {
                            lv.removeByLocalItem(request.getSourceLocalItem(), false);
                            lvEntry.setLastModifiedTime(-1);
                            lv.add(lvEntry);
                        }
                    }
                }
            });
        } finally {
            try {
                transaction.close();
            } catch (final IOException e) {
                throw new VersionControlException(e);
            }
        }
    }
}

From source file:org.apache.nifi.hbase.GetHBase.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    final String tableName = context.getProperty(TABLE_NAME).getValue();
    final String initialTimeRange = context.getProperty(INITIAL_TIMERANGE).getValue();
    final String filterExpression = context.getProperty(FILTER_EXPRESSION).getValue();
    final HBaseClientService hBaseClientService = context.getProperty(HBASE_CLIENT_SERVICE)
            .asControllerService(HBaseClientService.class);

    // if the table was changed then remove any previous state
    if (previousTable != null && !tableName.equals(previousTable)) {
        try {
            context.getStateManager().clear(Scope.CLUSTER);
        } catch (final IOException ioe) {
            getLogger().warn("Failed to clear Cluster State", ioe);
        }
        previousTable = tableName;
    }

    try {
        final Charset charset = Charset.forName(context.getProperty(CHARSET).getValue());
        final RowSerializer serializer = new JsonRowSerializer(charset);

        this.lastResult = getState(context.getStateManager());
        final long defaultMinTime = (initialTimeRange.equals(NONE.getValue()) ? 0L
                : System.currentTimeMillis());
        final long minTime = (lastResult == null ? defaultMinTime : lastResult.getTimestamp());

        final Map<String, Set<String>> cellsMatchingTimestamp = new HashMap<>();

        final AtomicReference<Long> rowsPulledHolder = new AtomicReference<>(0L);
        final AtomicReference<Long> latestTimestampHolder = new AtomicReference<>(minTime);

        hBaseClientService.scan(tableName, columns, filterExpression, minTime, new ResultHandler() {
            @Override
            public void handle(final byte[] rowKey, final ResultCell[] resultCells) {

                final String rowKeyString = new String(rowKey, StandardCharsets.UTF_8);

                // check if latest cell timestamp is equal to our cutoff.
                // if any of the cells have a timestamp later than our cutoff, then we
                // want the row. But if the cell with the latest timestamp is equal to
                // our cutoff, then we want to check if that's one of the cells that
                // we have already seen.
                long latestCellTimestamp = 0L;
                for (final ResultCell cell : resultCells) {
                    if (cell.getTimestamp() > latestCellTimestamp) {
                        latestCellTimestamp = cell.getTimestamp();
                    }
                }

                // we've already seen this.
                if (latestCellTimestamp < minTime) {
                    getLogger().debug(
                            "latest cell timestamp for row {} is {}, which is earlier than the minimum time of {}",
                            new Object[] { rowKeyString, latestCellTimestamp, minTime });
                    return;
                }

                if (latestCellTimestamp == minTime) {
                    // latest cell timestamp is equal to our minimum time. Check if all cells that have
                    // that timestamp are in our list of previously seen cells.
                    boolean allSeen = true;
                    for (final ResultCell cell : resultCells) {
                        if (cell.getTimestamp() == latestCellTimestamp) {
                            if (lastResult == null || !lastResult.contains(cell)) {
                                allSeen = false;
                                break;
                            }
                        }
                    }

                    if (allSeen) {
                        // we have already seen all of the cells for this row. We do not want to
                        // include this cell in our output.
                        getLogger().debug("all cells for row {} have already been seen",
                                new Object[] { rowKeyString });
                        return;
                    }
                }

                // If the latest timestamp of the cell is later than the latest timestamp we have already seen,
                // we want to keep track of the cells that match this timestamp so that the next time we scan,
                // we can ignore these cells.
                if (latestCellTimestamp >= latestTimestampHolder.get()) {
                    // new timestamp, so clear all of the 'matching cells'
                    if (latestCellTimestamp > latestTimestampHolder.get()) {
                        latestTimestampHolder.set(latestCellTimestamp);
                        cellsMatchingTimestamp.clear();
                    }

                    for (final ResultCell cell : resultCells) {
                        final long ts = cell.getTimestamp();
                        if (ts == latestCellTimestamp) {
                            final byte[] rowValue = Arrays.copyOfRange(cell.getRowArray(), cell.getRowOffset(),
                                    cell.getRowLength() + cell.getRowOffset());
                            final byte[] cellValue = Arrays.copyOfRange(cell.getValueArray(),
                                    cell.getValueOffset(), cell.getValueLength() + cell.getValueOffset());

                            final String rowHash = new String(rowValue, StandardCharsets.UTF_8);
                            Set<String> cellHashes = cellsMatchingTimestamp.get(rowHash);
                            if (cellHashes == null) {
                                cellHashes = new HashSet<>();
                                cellsMatchingTimestamp.put(rowHash, cellHashes);
                            }
                            cellHashes.add(new String(cellValue, StandardCharsets.UTF_8));
                        }
                    }
                }

                // write the row to a new FlowFile.
                FlowFile flowFile = session.create();
                flowFile = session.write(flowFile, new OutputStreamCallback() {
                    @Override
                    public void process(final OutputStream out) throws IOException {
                        serializer.serialize(rowKey, resultCells, out);
                    }
                });

                final Map<String, String> attributes = new HashMap<>();
                attributes.put("hbase.table", tableName);
                attributes.put("mime.type", "application/json");
                flowFile = session.putAllAttributes(flowFile, attributes);

                session.getProvenanceReporter().receive(flowFile, "hbase://" + tableName + "/" + rowKeyString);
                session.transfer(flowFile, REL_SUCCESS);
                getLogger().debug("Received {} from HBase with row key {}",
                        new Object[] { flowFile, rowKeyString });

                // we could potentially have a huge number of rows. If we get to 500, go ahead and commit the
                // session so that we can avoid buffering tons of FlowFiles without ever sending any out.
                final long rowsPulled = rowsPulledHolder.get() + 1;
                rowsPulledHolder.set(rowsPulled);

                if (rowsPulled % getBatchSize() == 0) {
                    session.commit();
                }
            }
        });

        final ScanResult scanResults = new ScanResult(latestTimestampHolder.get(), cellsMatchingTimestamp);

        // Commit session before we replace the lastResult; if session commit fails, we want
        // to pull these records again.
        session.commit();
        if (lastResult == null || scanResults.getTimestamp() > lastResult.getTimestamp()) {
            lastResult = scanResults;
        } else if (scanResults.getTimestamp() == lastResult.getTimestamp()) {
            final Map<String, Set<String>> combinedResults = new HashMap<>(scanResults.getMatchingCells());

            // copy the results of result.getMatchingCells() to combinedResults.
            // do a deep copy because the Set may be modified below.
            for (final Map.Entry<String, Set<String>> entry : scanResults.getMatchingCells().entrySet()) {
                combinedResults.put(entry.getKey(), new HashSet<>(entry.getValue()));
            }

            // combine the results from 'lastResult'
            for (final Map.Entry<String, Set<String>> entry : lastResult.getMatchingCells().entrySet()) {
                final Set<String> existing = combinedResults.get(entry.getKey());
                if (existing == null) {
                    combinedResults.put(entry.getKey(), new HashSet<>(entry.getValue()));
                } else {
                    existing.addAll(entry.getValue());
                }
            }
            final ScanResult scanResult = new ScanResult(scanResults.getTimestamp(), combinedResults);
            lastResult = scanResult;
        }

        // save state using the framework's state manager
        storeState(lastResult, context.getStateManager());
    } catch (final IOException e) {
        getLogger().error("Failed to receive data from HBase due to {}", e);
        session.rollback();
    } finally {
        // if we failed, we want to yield so that we don't hammer hbase. If we succeed, then we have
        // pulled all of the records, so we want to wait a bit before hitting hbase again anyway.
        context.yield();
    }
}
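
Note that rowsPulledHolder and latestTimestampHolder are AtomicReference<Long> instances used purely as mutable slots for the callback; only one thread updates them, so no compare-and-swap is needed. An equivalent sketch using AtomicLong, which avoids boxing each value, could look like the following (an alternative illustration, not the NiFi code; batchSize stands in for getBatchSize()):

import java.util.concurrent.atomic.AtomicLong;

public class CounterHolders {
    public static void main(String[] args) {
        final AtomicLong rowsPulled = new AtomicLong(0L);
        final AtomicLong latestTimestamp = new AtomicLong(0L);
        final long batchSize = 500L;

        // Simulate handling a few rows, as the ResultHandler above does.
        long[] cellTimestamps = { 10L, 25L, 20L };
        for (long ts : cellTimestamps) {
            // Track the newest timestamp seen so far.
            if (ts > latestTimestamp.get()) {
                latestTimestamp.set(ts);
            }
            // Commit periodically once batchSize rows have been pulled.
            if (rowsPulled.incrementAndGet() % batchSize == 0) {
                // session.commit();
            }
        }

        System.out.println("rows=" + rowsPulled.get() + ", latest=" + latestTimestamp.get());
    }
}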

From source file:com.microsoft.tfs.core.clients.versioncontrol.internal.WebServiceLayerLocalWorkspaces.java

/**
 * Given a local workspace, returns all pending changes for that workspace
 * by calling QueryPendingSets on the server. Also returns the current
 * pending change signature from the server.
 */
@Override
public PendingChange[] queryServerPendingChanges(final Workspace localWorkspace,
        final AtomicReference<GUID> outServerPendingChangeSignature) {
    Check.isTrue(WorkspaceLocation.LOCAL == localWorkspace.getLocation(),
            "WorkspaceLocation.LOCAL == localWorkspace.getLocation()"); //$NON-NLS-1$

    Failure[] failures;
    PendingSet[] pendingSets;

    final ItemSpec[] itemSpecs = new ItemSpec[] { new ItemSpec(ServerPath.ROOT, RecursionType.FULL) };

    try {
        if (getServiceLevel().getValue() >= WebServiceLevel.TFS_2012_QU1.getValue()) {
            final _Repository5Soap_QueryPendingSetsWithLocalWorkspacesResponse response = getRepository5()
                    .queryPendingSetsWithLocalWorkspaces(null, null, localWorkspace.getName(),
                            localWorkspace.getOwnerName(),
                            (_ItemSpec[]) WrapperUtils.unwrap(_ItemSpec.class, itemSpecs),
                            false /* generateDownloadUrls */, null,
                            VersionControlConstants.MAX_SERVER_PATH_SIZE);

            pendingSets = (PendingSet[]) WrapperUtils.wrap(PendingSet.class,
                    response.getQueryPendingSetsWithLocalWorkspacesResult());

            failures = (Failure[]) WrapperUtils.wrap(Failure.class, response.getFailures());
        } else {
            final _Repository4Soap_QueryPendingSetsWithLocalWorkspacesResponse response = getRepository4()
                    .queryPendingSetsWithLocalWorkspaces(null, null, localWorkspace.getName(),
                            localWorkspace.getOwnerName(),
                            (_ItemSpec[]) WrapperUtils.unwrap(_ItemSpec.class, itemSpecs),
                            false /* generateDownloadUrls */, null);

            pendingSets = (PendingSet[]) WrapperUtils.wrap(PendingSet.class,
                    response.getQueryPendingSetsWithLocalWorkspacesResult());

            failures = (Failure[]) WrapperUtils.wrap(Failure.class, response.getFailures());
        }
    } catch (final ProxyException e) {
        throw VersionControlExceptionMapper.map(e);
    }

    getVersionControlClient().reportFailures(localWorkspace, failures);

    if (pendingSets.length == 0) {
        outServerPendingChangeSignature
                .set(queryPendingChangeSignature(localWorkspace.getName(), localWorkspace.getOwnerName()));
        return new PendingChange[0];
    } else {
        outServerPendingChangeSignature.set(pendingSets[0].getPendingChangeSignature());

        // If the server does not have the change where
        // PendingChangeSignature comes down with PendingSet objects, then
        // we'll have to go fetch it ourselves. This change was made to the
        // server in the Dev11 CTP3 iteration.
        if (outServerPendingChangeSignature.get().equals(GUID.EMPTY)) {
            outServerPendingChangeSignature
                    .set(queryPendingChangeSignature(localWorkspace.getName(), localWorkspace.getOwnerName()));
        }

        return pendingSets[0].getPendingChanges();
    }
}

From source file:org.commonjava.indy.pkg.maven.content.MavenMetadataGenerator.java

/**
 *
 * @param group
 * @param members Concrete stores in group
 * @param path
 * @param eventMetadata
 * @return
 * @throws IndyWorkflowException
 */
@Override
@Measure
public Transfer generateGroupFileContent(final Group group, final List<ArtifactStore> members,
        final String path, final EventMetadata eventMetadata) throws IndyWorkflowException {
    String toMergePath = path;
    if (!path.endsWith(MavenMetadataMerger.METADATA_NAME)) {
        toMergePath = normalize(normalize(parentPath(toMergePath)), MavenMetadataMerger.METADATA_NAME);
    }

    Transfer target = fileManager.getTransfer(group, toMergePath);
    if (exists(target)) {
        // The transfer already exists, which means there is no metadata change, so return it directly.
        logger.trace("Metadata file exists for group {} of path {}, no need to regenerate.", group.getKey(),
                path);
        eventMetadata.set(GROUP_METADATA_EXISTS, true);
        return target;
    }

    AtomicReference<IndyWorkflowException> wfEx = new AtomicReference<>();
    String mergePath = toMergePath;
    boolean mergingDone = mergerLocks.ifUnlocked(computeKey(group, toMergePath), p -> {
        try {
            logger.debug("Start metadata generation for the metadata file for this path {} in group {}", path,
                    group);
            List<StoreKey> contributing = new ArrayList<>();
            final Metadata md = generateGroupMetadata(group, members, contributing, path);
            if (md != null) {
                final Versioning versioning = md.getVersioning();
                logger.trace(
                        "Regenerated Metadata for group {} of path {}: latest version: {}, versioning versions:{}",
                        group.getKey(), mergePath, versioning != null ? versioning.getLatest() : null,
                        versioning != null ? versioning.getVersions() : null);
                final ByteArrayOutputStream baos = new ByteArrayOutputStream();
                try {
                    logger.trace("Metadata file lost for group {} of path {}, will regenerate.", group.getKey(),
                            path);
                    new MetadataXpp3Writer().write(baos, md);

                    final byte[] merged = baos.toByteArray();
                    if (merged != null) {
                        OutputStream fos = null;
                        try {
                            fos = target.openOutputStream(TransferOperation.GENERATE, true, eventMetadata);
                            fos.write(merged);
                        } catch (final IOException e) {
                            throw new IndyWorkflowException(
                                    "Failed to write merged metadata to: {}.\nError: {}", e, target,
                                    e.getMessage());
                        } finally {
                            closeQuietly(fos);
                        }

                        String mergeInfo = writeGroupMergeInfo(md, group, contributing, mergePath);
                        eventMetadata.set(GROUP_METADATA_GENERATED, true);
                        MetadataInfo info = new MetadataInfo(md);
                        info.setMetadataMergeInfo(mergeInfo);

                        putToMetadataCache(group.getKey(), mergePath, info);
                    }
                } catch (final IOException e) {
                    logger.error(String.format("Cannot write consolidated metadata: %s to: %s. Reason: %s",
                            path, group.getKey(), e.getMessage()), e);
                }
            }
        } catch (IndyWorkflowException e) {
            wfEx.set(e);
            return false;
        }

        return true;
    }, (p, mergerLock) -> {
        logger.info(
                "The metadata generation is still in process by another thread for the metadata file for this path {} in group {}, so block current thread to wait for result",
                path, group);

        return mergerLocks.waitForLock(THREAD_WAITING_TIME_SECONDS, mergerLock);
    });

    IndyWorkflowException ex = wfEx.get();
    if (ex != null) {
        throw ex;
    }

    if (exists(target)) {
        // if this is a checksum file, we need to return the original path.
        Transfer original = fileManager.getTransfer(group, path);
        if (exists(original)) {
            return original;
        }
    }

    if (mergingDone) {
        logger.error(
                "Merging finished but got some error, which is caused the merging file not created correctly. See merging related error log for details. Merging group: {}, path: {}",
                group, path);
    } else {
        logger.error(
                "Merging not finished but thread waiting timeout, caused current thread will get a null merging result. Try to enlarge the waiting timeout. Merging group: {}, path: {}",
                group, path);
    }

    return null;
}

From source file:com.netflix.curator.framework.recipes.locks.TestInterProcessSemaphoreCluster.java

@Test
public void testKilledServerWithEnsembleProvider() throws Exception {
    final int CLIENT_QTY = 10;
    final Timing timing = new Timing();
    final String PATH = "/foo/bar/lock";

    ExecutorService executorService = Executors.newFixedThreadPool(CLIENT_QTY);
    ExecutorCompletionService<Void> completionService = new ExecutorCompletionService<Void>(executorService);
    TestingCluster cluster = new TestingCluster(3);
    try {
        cluster.start();

        final AtomicReference<String> connectionString = new AtomicReference<String>(
                cluster.getConnectString());
        final EnsembleProvider provider = new EnsembleProvider() {
            @Override
            public void start() throws Exception {
            }

            @Override
            public String getConnectionString() {
                return connectionString.get();
            }

            @Override
            public void close() throws IOException {
            }
        };

        final Semaphore acquiredSemaphore = new Semaphore(0);
        final AtomicInteger acquireCount = new AtomicInteger(0);
        final CountDownLatch suspendedLatch = new CountDownLatch(CLIENT_QTY);
        for (int i = 0; i < CLIENT_QTY; ++i) {
            completionService.submit(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    CuratorFramework client = CuratorFrameworkFactory.builder().ensembleProvider(provider)
                            .sessionTimeoutMs(timing.session()).connectionTimeoutMs(timing.connection())
                            .retryPolicy(new ExponentialBackoffRetry(100, 3)).build();
                    try {
                        final Semaphore suspendedSemaphore = new Semaphore(0);
                        client.getConnectionStateListenable().addListener(new ConnectionStateListener() {
                            @Override
                            public void stateChanged(CuratorFramework client, ConnectionState newState) {
                                if ((newState == ConnectionState.SUSPENDED)
                                        || (newState == ConnectionState.LOST)) {
                                    suspendedLatch.countDown();
                                    suspendedSemaphore.release();
                                }
                            }
                        });

                        client.start();

                        InterProcessSemaphoreV2 semaphore = new InterProcessSemaphoreV2(client, PATH, 1);

                        while (!Thread.currentThread().isInterrupted()) {
                            Lease lease = null;
                            try {
                                lease = semaphore.acquire();
                                acquiredSemaphore.release();
                                acquireCount.incrementAndGet();
                                suspendedSemaphore.acquire();
                            } catch (Exception e) {
                                // just retry
                            } finally {
                                if (lease != null) {
                                    acquireCount.decrementAndGet();
                                    IOUtils.closeQuietly(lease);
                                }
                            }
                        }
                    } finally {
                        IOUtils.closeQuietly(client);
                    }
                    return null;
                }
            });
        }

        Assert.assertTrue(timing.acquireSemaphore(acquiredSemaphore));
        Assert.assertEquals(1, acquireCount.get());

        cluster.close();
        timing.awaitLatch(suspendedLatch);
        timing.forWaiting().sleepABit();
        Assert.assertEquals(0, acquireCount.get());

        cluster = new TestingCluster(3);
        cluster.start();

        connectionString.set(cluster.getConnectString());
        timing.forWaiting().sleepABit();

        Assert.assertTrue(timing.acquireSemaphore(acquiredSemaphore));
        timing.forWaiting().sleepABit();
        Assert.assertEquals(1, acquireCount.get());
    } finally {
        executorService.shutdown();
        executorService.awaitTermination(10, TimeUnit.SECONDS);
        executorService.shutdownNow();
        IOUtils.closeQuietly(cluster);
    }
}
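
In this test, connectionString.set(...) is what lets the already-running EnsembleProvider pick up the new cluster address: set() has volatile write semantics, so the clients' next call to getConnectionString() observes the replacement without any extra locking. A minimal sketch of that publish/read handoff (hypothetical names, not Curator code):

import java.util.concurrent.atomic.AtomicReference;

public class ConnectionStringSwap {
    public static void main(String[] args) throws InterruptedException {
        final AtomicReference<String> connectionString =
                new AtomicReference<>("host-a:2181");

        // Reader thread repeatedly consults the current value, much like the
        // EnsembleProvider above does on each (re)connection attempt.
        Thread reader = new Thread(() -> {
            for (int i = 0; i < 5; i++) {
                System.out.println("connecting to " + connectionString.get());
                try {
                    Thread.sleep(100);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    return;
                }
            }
        });
        reader.start();

        Thread.sleep(250);
        // set() publishes the new address; the reader sees it on its next get().
        connectionString.set("host-b:2181");

        reader.join();
    }
}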

From source file:org.apache.hadoop.hbase.client.HBaseAdmin.java

/**
 * @param tableNameOrRegionName Name of a table or name of a region.
 * @param ct A {@link CatalogTracker} instance (caller of this method usually has one).
 * @return a pair of HRegionInfo and ServerName if <code>tableNameOrRegionName</code> is
 *  a verified region name (we call {@link MetaReader#getRegion(CatalogTracker, byte[])}),
 *  else null.
 * Throws an exception if <code>tableNameOrRegionName</code> is null.
 * @throws IOException
 */
Pair<HRegionInfo, ServerName> getRegion(final byte[] tableNameOrRegionName, final CatalogTracker ct)
        throws IOException {
    if (tableNameOrRegionName == null) {
        throw new IllegalArgumentException("Pass a table name or region name");
    }
    Pair<HRegionInfo, ServerName> pair = MetaReader.getRegion(ct, tableNameOrRegionName);
    if (pair == null) {
        final AtomicReference<Pair<HRegionInfo, ServerName>> result = new AtomicReference<Pair<HRegionInfo, ServerName>>(
                null);
        final String encodedName = Bytes.toString(tableNameOrRegionName);
        MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
            @Override
            public boolean processRow(Result data) throws IOException {
                HRegionInfo info = HRegionInfo.getHRegionInfo(data);
                if (info == null) {
                    LOG.warn("No serialized HRegionInfo in " + data);
                    return true;
                }
                if (!encodedName.equals(info.getEncodedName()))
                    return true;
                ServerName sn = HRegionInfo.getServerName(data);
                result.set(new Pair<HRegionInfo, ServerName>(info, sn));
                return false; // found the region, stop
            }
        };

        MetaScanner.metaScan(conf, connection, visitor, null);
        pair = result.get();
    }
    return pair;
}

From source file:org.commonjava.maven.galley.cache.infinispan.FastLocalCacheProvider.java

/**
 * For file writing, wraps two output streams for the caller - one for the local cache file, another for the NFS
 * file - so the caller can write to both streams at the same time. <br />
 * For the local part, because it uses {@link org.commonjava.maven.galley.cache.partyline.PartyLineCacheProvider} as
 * the i/o provider, concurrent reads and writes on the same resource are supported. For details, please see
 * {@link org.commonjava.maven.galley.cache.partyline.PartyLineCacheProvider}.
 *
 * @param resource - the resource to be written
 * @return - the output stream for further writing
 * @throws IOException
 */
@Override
public OutputStream openOutputStream(ConcreteResource resource) throws IOException {
    final DualOutputStreamsWrapper dualOutUpper;
    final String nodeIp = getCurrentNodeIp();
    final String pathKey = getKeyForResource(resource);
    final File nfsFile = getNFSDetachedFile(resource);

    final AtomicReference<IOException> taskException = new AtomicReference<>();
    final TransferLockTask<DualOutputStreamsWrapper> streamTransferLockTask = r -> {
        DualOutputStreamsWrapper dualOut = null;
        try {
            lockByISPN(nfsOwnerCache, resource, LockLevel.write);

            nfsOwnerCache.put(pathKey, nodeIp);

            logger.trace("Start to get output stream from local cache through partyline to do join stream");
            final OutputStream localOut = plCacheProvider.openOutputStream(resource);
            logger.trace("The output stream from local cache through partyline is got successfully");
            if (!nfsFile.exists() && !nfsFile.isDirectory()) {
                try {
                    if (!nfsFile.getParentFile().exists()) {
                        nfsFile.getParentFile().mkdirs();
                    }
                    nfsFile.createNewFile();
                } catch (IOException e) {
                    logger.error("[galley] New nfs file created not properly.", e);
                    throw e;
                }
            }
            final OutputStream nfsOutputStream = new FileOutputStream(nfsFile);
            logger.trace("The output stream from NFS is got successfully");
            // will wrap the cache manager in stream wrapper, and let it do tx commit in stream close to make sure
            // the two streams writing's consistency.
            dualOut = new DualOutputStreamsWrapper(localOut, nfsOutputStream, nfsOwnerCache, pathKey, resource);

            if (nfsOwnerCache.getLockOwner(pathKey) != null) {
                logger.trace("[openOutputStream]ISPN locker for key {} with resource {} is {}", pathKey,
                        resource, nfsOwnerCache.getLockOwner(pathKey));
            }

            ThreadContext streamHolder = ThreadContext.getContext(true);
            Set<WeakReference<OutputStream>> streams = (Set<WeakReference<OutputStream>>) streamHolder
                    .get(FAST_LOCAL_STREAMS);

            if (streams == null) {
                streams = new HashSet<>(10);
            }

            streams.add(new WeakReference<>(dualOut));
            streamHolder.put(FAST_LOCAL_STREAMS, streams);
        } catch (NotSupportedException | SystemException | InterruptedException e) {
            logger.error("[galley] Transaction error for nfs cache during file writing.", e);
            throw new IllegalStateException(
                    String.format("[galley] Output stream for resource %s open failed.", resource.toString()),
                    e);
        } catch (IOException e) {
            taskException.set(e);
        }
        logger.trace("The dual output stream wrapped and returned successfully");
        return dualOut;
    };

    dualOutUpper = tryLockAnd(resource, DEFAULT_WAIT_FOR_TRANSFER_LOCK_SECONDS, TimeUnit.SECONDS,
            streamTransferLockTask);
    if (taskException.get() != null) {
        throw taskException.get();
    }
    return dualOutUpper;
}
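
A recurring detail here is taskException: a checked exception cannot escape the TransferLockTask lambda directly, so the code stores the IOException in an AtomicReference and rethrows it after the lock helper returns. A reduced sketch of that idiom (with a hypothetical runInLock helper, not the Galley API):

import java.io.IOException;
import java.util.concurrent.atomic.AtomicReference;

public class CaptureException {
    // Hypothetical helper that runs a task while holding some lock.
    static void runInLock(Runnable task) {
        task.run();
    }

    public static void main(String[] args) throws IOException {
        final AtomicReference<IOException> taskException = new AtomicReference<>();

        runInLock(() -> {
            try {
                throw new IOException("simulated write failure");
            } catch (IOException e) {
                // A Runnable cannot throw a checked exception, so park it here.
                taskException.set(e);
            }
        });

        // Back on the calling side: rethrow the captured exception, as
        // openOutputStream above does with its taskException holder.
        if (taskException.get() != null) {
            throw taskException.get();
        }
    }
}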

From source file:com.microsoft.tfs.core.clients.versioncontrol.internal.localworkspace.LocalDataAccessLayer.java

/**
 * @param workspace
 * @param wp
 * @param lv
 * @param pc
 * @param itemSpecs
 * @param failures
 * @param onlineOperationRequired
 * @param itemPropertyFilters
 * @return
 */
public static GetOperation[] undoPendingChanges(final Workspace workspace, final LocalWorkspaceProperties wp,
        final WorkspaceVersionTable lv, final LocalPendingChangesTable pc, final ItemSpec[] itemSpecs,
        final AtomicReference<Failure[]> failures, final AtomicBoolean onlineOperationRequired,
        final String[] itemPropertyFilters) {
    Check.notNull(workspace, "workspace"); //$NON-NLS-1$
    Check.notNullOrEmpty(itemSpecs, "itemSpecs"); //$NON-NLS-1$

    final List<Failure> failuresList = new ArrayList<Failure>();

    final GetOperation[] toReturn = undoPendingChanges(workspace, wp, lv, pc,
            queryPendingChanges(workspace, wp, lv, pc, itemSpecs, failuresList, false, itemPropertyFilters),
            ChangeType.ALL, failures, onlineOperationRequired);

    for (final Failure failure : failures.get()) {
        failuresList.add(failure);
    }

    failures.set(failuresList.toArray(new Failure[failuresList.size()]));
    return toReturn;
}