Example usage for the java.util.concurrent ConcurrentSkipListMap constructor

Introduction

On this page you can find example usage of the java.util.concurrent ConcurrentSkipListMap constructor. The prototype below shows the SortedMap overload; the examples also exercise the Comparator and Map overloads.

Prototype

public ConcurrentSkipListMap(SortedMap<K, ? extends V> m) 

Document

Constructs a new map containing the same mappings and using the same ordering as the specified sorted map.
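As a quick illustration of the prototype above, here is a minimal, self-contained sketch (not taken from any of the source files below; class and variable names are made up) that copies a TreeMap, which is a SortedMap, into a ConcurrentSkipListMap and keeps its ordering:

import java.util.Comparator;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentSkipListMap;

public class CopyConstructorExample {
    public static void main(String[] args) {
        // A sorted map that orders its keys in descending order.
        TreeMap<String, Integer> source = new TreeMap<>(Comparator.reverseOrder());
        source.put("a", 1);
        source.put("b", 2);

        // The copy contains the same mappings and uses the same (descending) ordering.
        ConcurrentSkipListMap<String, Integer> copy = new ConcurrentSkipListMap<>(source);
        System.out.println(copy.firstKey()); // prints "b"
    }
}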

Usage

From source file:Main.java

public static <K, V> ConcurrentSkipListMap<K, V> newConcurrentSkipListMap(final Comparator<? super K> c) {
    return new ConcurrentSkipListMap<K, V>(c);
}

From source file:Main.java

public static <K, V> ConcurrentSkipListMap<K, V> newConcurrentSkipListMap(
        final Map<? extends K, ? extends V> m) {
    return new ConcurrentSkipListMap<K, V>(m);
}

From source file:Main.java

public static <K, V> ConcurrentSkipListMap<K, V> newConcurrentSkipListMap(final SortedMap<K, ? extends V> m) {
    return new ConcurrentSkipListMap<K, V>(m);
}
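If these three helpers were on the classpath, a hypothetical caller (the keys and values here are made up for illustration) might use the Comparator overload like this:

import java.util.Comparator;
import java.util.concurrent.ConcurrentSkipListMap;

public class FactoryUsageExample {
    public static void main(String[] args) {
        // Order keys by their length using the Comparator overload of the helper.
        Comparator<String> byLen = Comparator.comparingInt(String::length);
        ConcurrentSkipListMap<String, Integer> byLength = Main.newConcurrentSkipListMap(byLen);
        byLength.put("apple", 5);
        byLength.put("fig", 3);
        System.out.println(byLength.firstKey()); // prints "fig" (shortest key first)
    }
}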

From source file:org.apache.tephra.hbase.txprune.PruneUpperBoundWriter.java

@SuppressWarnings("WeakerAccess")
public PruneUpperBoundWriter(TableName tableName, DataJanitorState dataJanitorState, long pruneFlushInterval) {
    this.tableName = tableName;
    this.dataJanitorState = dataJanitorState;
    this.pruneFlushInterval = pruneFlushInterval;
    this.pruneEntries = new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR);
    this.emptyRegions = new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR);
}

From source file:org.cinchapi.concourse.util.ConcurrentSkipListMultiset.java

/**
 * Construct a new instance.
 *
 * @param comparator the comparator used to order elements in the backing map
 */
private ConcurrentSkipListMultiset(Comparator<? super T> comparator) {
    this.backing = new ConcurrentSkipListMap<T, SkipListEntry>(comparator);
}

From source file:com.amazonaws.services.dynamodbv2.replication.impl.ShardSubscriberImpl.java

/**
 * Constructs a Subscriber that creates Checkpoints based on the provided factory.
 *
 * @param tableName
 *            The table name
 * @param multiRegionCheckpointFactory
 *            The factory for producing {@link MultiRegionCheckpoint}s
 * @param replicationWorker
 *            The {@link RegionReplicationWorker} that manages this subscriber
 * @param timeBetweenSweeps
 *            The time in milliseconds between scans for successfully replicated updates
 * @param checkpointBackoffTime
 *            The backoff time in milliseconds before retrying a checkpoint
 */
public ShardSubscriberImpl(final String tableName,
        final MultiRegionCheckpointFactory multiRegionCheckpointFactory,
        final RegionReplicationWorker replicationWorker, final long timeBetweenSweeps,
        final long checkpointBackoffTime) {
    this.tableName = tableName;
    this.multiRegionCheckpointFactory = multiRegionCheckpointFactory;
    this.replicationWorker = replicationWorker;
    this.timeBetweenSweeps = timeBetweenSweeps;
    this.checkpointBackoffTime = checkpointBackoffTime;
    checkpointer = null;

    checkpoints = new ConcurrentSkipListMap<String, MultiRegionCheckpoint>(new SequenceNumberComparator());
    sweeper = null;
    cloudWatchClient = replicationWorker.getReplicationConfiguration()
            .getCloudWatchClient(replicationWorker.getRegionName(), tableName);
    userWriteCount = new AtomicLong(0);
}

From source file:org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.java

OpenFileCtx(HdfsDataOutputStream fos, Nfs3FileAttributes latestAttr, String dumpFilePath, DFSClient client,
        IdUserGroup iug) {
    this.fos = fos;
    this.latestAttr = latestAttr;
    // We use ReverseComparatorOnMin as the comparator of the map. In this
    // way, we first dump the data with the larger offsets, while retrieving
    // the last element to write back to HDFS.
    pendingWrites = new ConcurrentSkipListMap<OffsetRange, WriteCtx>(OffsetRange.ReverseComparatorOnMin);

    pendingCommits = new ConcurrentSkipListMap<Long, CommitCtx>();

    updateLastAccessTime();
    activeState = true;
    asyncStatus = false;
    asyncWriteBackStartOffset = 0;
    dumpOut = null;
    raf = null;
    nonSequentialWriteInMemory = new AtomicLong(0);

    this.dumpFilePath = dumpFilePath;
    enabledDump = dumpFilePath != null;
    nextOffset = new AtomicLong();
    nextOffset.set(latestAttr.getSize());
    try {
        assert (nextOffset.get() == this.fos.getPos());
    } catch (IOException e) {
        // getPos() is only consulted by the assertion above; failures are ignored here
    }
    dumpThread = null;
    this.client = client;
    this.iug = iug;
}
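The ReverseComparatorOnMin comment above relies on a general property of ConcurrentSkipListMap: both ends of the ordering are cheap to reach, so a reversed comparator lets the largest key act as the "first" entry. A minimal, hypothetical sketch of that idea, using a plain Long offset key instead of Hadoop's OffsetRange:

import java.util.Comparator;
import java.util.concurrent.ConcurrentSkipListMap;

public class ReverseOrderSketch {
    public static void main(String[] args) {
        // With a reversed comparator, firstKey() is the LARGEST offset
        // (useful for dumping big-offset writes first) and lastKey() is
        // the SMALLEST offset (useful for sequential write-back).
        ConcurrentSkipListMap<Long, String> pendingWrites =
                new ConcurrentSkipListMap<>(Comparator.<Long>reverseOrder());
        pendingWrites.put(0L, "write at offset 0");
        pendingWrites.put(4096L, "write at offset 4096");

        System.out.println(pendingWrites.firstKey()); // 4096
        System.out.println(pendingWrites.lastKey());  // 0
    }
}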

From source file:org.apache.hadoop.hbase.regionserver.wal.SequenceIdAccounting.java

ConcurrentMap<byte[], Long> getOrCreateLowestSequenceIds(byte[] encodedRegionName) {
    // Intentionally, this access is done outside of this.regionSequenceIdLock. Done per append.
    ConcurrentMap<byte[], Long> m = this.lowestUnflushedSequenceIds.get(encodedRegionName);
    if (m != null)
        return m;
    m = new ConcurrentSkipListMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
    // Another thread may have added it ahead of us.
    ConcurrentMap<byte[], Long> alreadyPut = this.lowestUnflushedSequenceIds.putIfAbsent(encodedRegionName, m);
    return alreadyPut == null ? m : alreadyPut;
}

From source file:com.buaa.cfs.nfs3.OpenFileCtx.java

OpenFileCtx(DataOutputStream fos, Nfs3FileAttributes latestAttr, String dumpFilePath, DFSClient client,
        IdMappingServiceProvider iug, boolean aixCompatMode, NfsConfiguration config) {
    this.fos = fos;
    this.latestAttr = latestAttr;
    this.aixCompatMode = aixCompatMode;
    // We use ReverseComparatorOnMin as the comparator of the map. In this
    // way, we first dump the data with the larger offsets, while retrieving
    // the last element to write back to HDFS.
    pendingWrites = new ConcurrentSkipListMap<OffsetRange, WriteCtx>(OffsetRange.ReverseComparatorOnMin);

    pendingCommits = new ConcurrentSkipListMap<Long, CommitCtx>();

    updateLastAccessTime();
    activeState = true;
    asyncStatus = false;
    asyncWriteBackStartOffset = 0;
    dumpOut = null;
    raf = null;
    nonSequentialWriteInMemory = new AtomicLong(0);

    this.dumpFilePath = dumpFilePath;
    enabledDump = dumpFilePath != null;
    nextOffset = new AtomicLong();
    nextOffset.set(latestAttr.getSize());
    //        try {
    //            assert (nextOffset.get() == this.fos.getPos());
    //        } catch (IOException e) {
    //        }
    dumpThread = null;
    this.client = client;
    this.iug = iug;
    this.uploadLargeFile = config.getBoolean(NfsConfigKeys.LARGE_FILE_UPLOAD,
            NfsConfigKeys.LARGE_FILE_UPLOAD_DEFAULT);
}

From source file:org.apache.hadoop.hbase.client.MetaCache.java

/**
 * @param tableName the table whose cached locations are wanted
 * @return Map of cached locations for the passed <code>tableName</code>
 */
private ConcurrentSkipListMap<byte[], RegionLocations> getTableLocations(final TableName tableName) {
    // find the map of cached locations for this table
    ConcurrentSkipListMap<byte[], RegionLocations> result;
    result = this.cachedRegionLocations.get(tableName);
    // if tableLocations for this table isn't built yet, make one
    if (result == null) {
        result = new ConcurrentSkipListMap<byte[], RegionLocations>(Bytes.BYTES_COMPARATOR);
        ConcurrentSkipListMap<byte[], RegionLocations> old = this.cachedRegionLocations.putIfAbsent(tableName,
                result);
        if (old != null) {
            return old;
        }
    }
    return result;
}
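Both this method and SequenceIdAccounting#getOrCreateLowestSequenceIds above use the same lock-free get-or-create idiom: an optimistic get(), then putIfAbsent(), then keeping whichever instance actually won the race. A generic, hypothetical distillation of that idiom (the map types and key names are made up):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ConcurrentSkipListMap;

public class GetOrCreateIdiom {
    private final ConcurrentMap<String, ConcurrentSkipListMap<Long, String>> byTable =
            new ConcurrentHashMap<>();

    ConcurrentSkipListMap<Long, String> getTableMap(String table) {
        ConcurrentSkipListMap<Long, String> existing = byTable.get(table);
        if (existing != null) {
            return existing;
        }
        ConcurrentSkipListMap<Long, String> created = new ConcurrentSkipListMap<>();
        // Another thread may have inserted a map between get() and putIfAbsent();
        // if so, use its instance so all callers share the same map.
        ConcurrentSkipListMap<Long, String> raced = byTable.putIfAbsent(table, created);
        return raced == null ? created : raced;
    }
}

On Java 8 and later the same behavior can also be expressed as byTable.computeIfAbsent(table, t -> new ConcurrentSkipListMap<>()), at the cost of constructing the new map while the corresponding hash bin is locked.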