Example usage for java.util.concurrent ConcurrentSkipListMap ConcurrentSkipListMap

Introduction

On this page you can find example usages of the java.util.concurrent ConcurrentSkipListMap() no-argument constructor.

Prototype

public ConcurrentSkipListMap() 

Document

Constructs a new, empty map, sorted according to the natural (Comparable) ordering of its keys.
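
A minimal sketch of the constructor on its own (class name and values here are illustrative, not from the examples below). Because the no-argument form relies on natural ordering, keys must implement Comparable, and iteration always proceeds in ascending key order:

import java.util.concurrent.ConcurrentSkipListMap;

public class SkipListDemo {
    public static void main(String[] args) {
        // no-arg constructor: sorts by the keys' natural (Comparable) ordering
        ConcurrentSkipListMap<Integer, String> map = new ConcurrentSkipListMap<Integer, String>();
        map.put(3, "three");
        map.put(1, "one");
        map.put(2, "two");
        System.out.println(map.firstKey()); // 1
        System.out.println(map);            // {1=one, 2=two, 3=three}
    }
}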

Usage

From source file:middleware.NewServerSocket.java

NewServerSocket(SharedData s) {
    sharedData = s;
    try {
        serverSocketChannel = ServerSocketChannel.open();
        serverSocketChannel.socket().bind(new InetSocketAddress(s.getMiddlePortNum()));
        serverSocketChannel.configureBlocking(false);

    } catch (IOException e) {
        System.out.println("Error: cannot bind to port " + s.getMiddlePortNum());
        e.printStackTrace();
    }

    try {
        adminServerSocketChannel = ServerSocketChannel.open();
        adminServerSocketChannel.socket().bind(new InetSocketAddress(s.getAdminPortNum()));
        adminServerSocketChannel.configureBlocking(false);
    } catch (IOException e) {
        System.out.println("Error: cannot bind to port " + s.getAdminPortNum());
        e.printStackTrace();
    }

    try {
        selector = Selector.open();
        serverSocketChannel.register(selector, SelectionKey.OP_ACCEPT);
        adminServerSocketChannel.register(selector, SelectionKey.OP_ACCEPT);
    } catch (IOException e) {
        e.printStackTrace();
    }

    keyIterator = null;

    dir = new File(sharedData.getFilePathName() + File.separator + "Transactions");
    if (!dir.exists()) {
        dir.mkdirs();
    } else {
        for (File f : dir.listFiles()) {
            if (!f.delete()) {
                // TODO
            }
        }
    }

    numWorkers = sharedData.getNumWorkers();
    workers = new NewWorker[numWorkers];
    for (int i = 0; i < numWorkers; ++i) {
        Selector tmpS = null;
        try {
            tmpS = Selector.open();
        } catch (IOException e) {
            e.printStackTrace();
        }

        workers[i] = new NewWorker(sharedData, tmpS);
        workers[i].start();
    }

    data = new byte[sharedData.getMaxSize()];
    buffer = ByteBuffer.wrap(data);
    endingMonitoring = false;
    sendingFiles = false;
    monitoring = false;
    dstatDeployed = false;
    failDeployDstat = false;
    configSetenv = false;

    mysql_user = null;
    mysql_pass = null;
    mysql_host = null;
    mysql_port = null;

    sharedData.allTransactionData = new ArrayList<TransactionData>();
    sharedData.allTransactions = new ConcurrentSkipListMap<Integer, byte[]>();
    // sharedData.allStatementsInfo = new ConcurrentLinkedQueue<byte[]>();
    sharedData.allQueries = new ConcurrentSkipListMap<Long, QueryData>();

    userInfo = Encrypt.getUsrMap(sharedData.getUserInfoFilePath());

    fileBuffer = new byte[1024];

    curUser = null;

    userList = new ArrayList<MiddleSocketChannel>();

    dstat = null;

    ntpdate = null;

    stopRemoteDstat = null;

    incrementalLogQueue = new ArrayBlockingQueue<IncrementalLog>(64 * 1024);
}
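
Note the two skip-list maps created near the end: allTransactions is keyed by Integer transaction id and allQueries by Long (presumably a timestamp or query id), so consumers that traverse either map see entries in ascending key order without any extra sorting.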

From source file:org.apache.hadoop.hbase.coprocessor.client.TimeseriesAggregationClient.java

/**
 * Gives the maximum value of a column for a given column family over the given range. If the
 * qualifier is null, the max over all values for the given family is returned.
 * @param table
 * @param ci
 * @param scan
 * @return max values as ConcurrentSkipListMap<Long, R> (results come back as protos from the
 *         regions and are passed out merged into a ConcurrentSkipListMap)
 * @throws Throwable The caller is expected to handle exceptions, which are thrown and
 *           propagated to it.
 */
public <R, S, P extends Message, Q extends Message, T extends Message> ConcurrentSkipListMap<Long, R> max(
        final Table table, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan) throws Throwable {
    final TimeseriesAggregateRequest requestArg = validateArgAndGetPB(scan, ci, false, intervalSeconds,
            timestampSecondsMin, timestampSecondsMax, keyFilterPattern);
    class MaxCallBack implements Batch.Callback<TimeseriesAggregateResponse> {
        ConcurrentSkipListMap<Long, R> max = new ConcurrentSkipListMap<Long, R>();

        ConcurrentSkipListMap<Long, R> getMax() {
            return max;
        }

        @Override
        public synchronized void update(byte[] region, byte[] row, TimeseriesAggregateResponse result) {
            List<TimeseriesAggregateResponseMapEntry> results = result.getEntryList();
            for (TimeseriesAggregateResponseMapEntry entry : results) {
                R candidate;
                if (entry.getValue().getFirstPartCount() > 0) {
                    ByteString b = entry.getValue().getFirstPart(0);
                    Q q = null;
                    try {
                        q = ProtobufUtil.getParsedGenericInstance(ci.getClass(), 3, b);
                    } catch (IOException e) {
                        // parse failure: log and skip this entry (q stays null)
                        e.printStackTrace();
                    }
                    if (null != q) {
                        candidate = ci.getCellValueFromProto(q);
                        if (max.containsKey(entry.getKey())) {
                            R current = max.get(entry.getKey());
                            max.put(entry.getKey(),
                                    (current == null
                                            || (candidate != null && ci.compare(current, candidate) < 0))
                                                    ? candidate
                                                    : current);
                        } else {
                            max.put(entry.getKey(), ci.getCellValueFromProto(q));
                        }
                    }
                }
            }
        }
    }

    MaxCallBack aMaxCallBack = new MaxCallBack();
    table.coprocessorService(TimeseriesAggregateService.class, scan.getStartRow(), scan.getStopRow(),
            new Batch.Call<TimeseriesAggregateService, TimeseriesAggregateResponse>() {
                @Override
                public TimeseriesAggregateResponse call(TimeseriesAggregateService instance)
                        throws IOException {
                    ServerRpcController controller = new ServerRpcController();
                    BlockingRpcCallback<TimeseriesAggregateResponse> rpcCallback = new BlockingRpcCallback<TimeseriesAggregateResponse>();
                    instance.getMax(controller, requestArg, rpcCallback);
                    TimeseriesAggregateResponse response = rpcCallback.get();
                    if (controller.failedOnException()) {
                        throw controller.getFailedOn();
                    }
                    return response;
                }
            }, aMaxCallBack);
    return aMaxCallBack.getMax();
}
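
A note on the callback pattern above: update() is synchronized because the containsKey/get/put sequence is a compound read-modify-write; ConcurrentSkipListMap makes each individual call thread-safe, not the sequence (and it rejects null keys and values outright). On Java 8+ the same merge could be written atomically without the lock, assuming candidate is non-null (a sketch, not the client's actual code):

max.merge(entry.getKey(), candidate,
        (current, cand) -> ci.compare(current, cand) < 0 ? cand : current);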

From source file:com.palantir.atlasdb.transaction.impl.SerializableTransaction.java

private ConcurrentNavigableMap<Cell, byte[]> getReadsForTable(String table) {
    ConcurrentNavigableMap<Cell, byte[]> reads = readsByTable.get(table);
    if (reads == null) {
        ConcurrentNavigableMap<Cell, byte[]> newMap = new ConcurrentSkipListMap<Cell, byte[]>();
        readsByTable.putIfAbsent(table, newMap);
        reads = readsByTable.get(table);
    }
    return reads;
}
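
The null-check / putIfAbsent / re-read sequence above is the classic pre-Java-8 idiom for lazily installing a shared map: whichever thread wins the putIfAbsent race, every thread ends up reading the single winning instance. Assuming readsByTable is a ConcurrentMap, Java 8's computeIfAbsent collapses the method to one expression (a sketch with the same semantics):

private ConcurrentNavigableMap<Cell, byte[]> getReadsForTable(String table) {
    return readsByTable.computeIfAbsent(table, t -> new ConcurrentSkipListMap<Cell, byte[]>());
}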

From source file:org.apache.distributedlog.admin.DistributedLogAdmin.java

private static Map<String, StreamCandidate> checkStreams(final Namespace namespace,
        final Collection<String> streams, final OrderedScheduler scheduler, final int concurrency)
        throws IOException {
    final LinkedBlockingQueue<String> streamQueue = new LinkedBlockingQueue<String>();
    streamQueue.addAll(streams);
    final Map<String, StreamCandidate> candidateMap = new ConcurrentSkipListMap<String, StreamCandidate>();
    final AtomicInteger numPendingStreams = new AtomicInteger(streams.size());
    final CountDownLatch doneLatch = new CountDownLatch(1);
    Runnable checkRunnable = new Runnable() {
        @Override
        public void run() {
            while (!streamQueue.isEmpty()) {
                String stream;
                try {
                    stream = streamQueue.take();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    break;
                }
                StreamCandidate candidate;
                try {
                    LOG.info("Checking stream {}.", stream);
                    candidate = checkStream(namespace, stream, scheduler);
                    LOG.info("Checked stream {} - {}.", stream, candidate);
                } catch (Throwable e) {
                    LOG.error("Error on checking stream {} : ", stream, e);
                    doneLatch.countDown();
                    break;
                }
                if (null != candidate) {
                    candidateMap.put(stream, candidate);
                }
                if (numPendingStreams.decrementAndGet() == 0) {
                    doneLatch.countDown();
                }
            }
        }
    };
    Thread[] threads = new Thread[concurrency];
    for (int i = 0; i < concurrency; i++) {
        threads[i] = new Thread(checkRunnable, "check-thread-" + i);
        threads[i].start();
    }
    try {
        doneLatch.await();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
    if (numPendingStreams.get() != 0) {
        throw new IOException(numPendingStreams.get() + " streams left unchecked");
    }
    for (int i = 0; i < concurrency; i++) {
        threads[i].interrupt();
        try {
            threads[i].join();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }
    return candidateMap;
}

From source file:com.twitter.distributedlog.admin.DistributedLogAdmin.java

private static Map<String, StreamCandidate> checkStreams(
        final com.twitter.distributedlog.DistributedLogManagerFactory factory, final Collection<String> streams,
        final ExecutorService executorService, final BookKeeperClient bkc, final String digestpw,
        final int concurrency) throws IOException {
    final LinkedBlockingQueue<String> streamQueue = new LinkedBlockingQueue<String>();
    streamQueue.addAll(streams);
    final Map<String, StreamCandidate> candidateMap = new ConcurrentSkipListMap<String, StreamCandidate>();
    final AtomicInteger numPendingStreams = new AtomicInteger(streams.size());
    final CountDownLatch doneLatch = new CountDownLatch(1);
    Runnable checkRunnable = new Runnable() {
        @Override
        public void run() {
            while (!streamQueue.isEmpty()) {
                String stream;
                try {
                    stream = streamQueue.take();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    break;
                }
                StreamCandidate candidate;
                try {
                    LOG.info("Checking stream {}.", stream);
                    candidate = checkStream(factory, stream, executorService, bkc, digestpw);
                    LOG.info("Checked stream {} - {}.", stream, candidate);
                } catch (IOException e) {
                    LOG.error("Error on checking stream {} : ", stream, e);
                    doneLatch.countDown();
                    break;
                }
                if (null != candidate) {
                    candidateMap.put(stream, candidate);
                }
                if (numPendingStreams.decrementAndGet() == 0) {
                    doneLatch.countDown();
                }
            }
        }
    };
    Thread[] threads = new Thread[concurrency];
    for (int i = 0; i < concurrency; i++) {
        threads[i] = new Thread(checkRunnable, "check-thread-" + i);
        threads[i].start();
    }
    try {
        doneLatch.await();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
    if (numPendingStreams.get() != 0) {
        throw new IOException(numPendingStreams.get() + " streams left unchecked");
    }
    for (int i = 0; i < concurrency; i++) {
        threads[i].interrupt();
        try {
            threads[i].join();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }
    return candidateMap;
}

From source file:org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.java

private synchronized void initScheduler(Configuration conf) {
    validateConf(conf);
    //Use ConcurrentSkipListMap because applications need to be ordered
    this.applications = new ConcurrentSkipListMap<ApplicationId, SchedulerApplication<FiCaSchedulerApp>>();
    this.minimumAllocation = Resources
            .createResource(conf.getInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
                    YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB));
    initMaximumResourceCapability(Resources.createResource(
            conf.getInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
                    YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB),
            conf.getInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES,
                    YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES)));
    this.usePortForNodeName = conf.getBoolean(YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME,
            YarnConfiguration.DEFAULT_RM_SCHEDULER_USE_PORT_FOR_NODE_NAME);
    this.metrics = QueueMetrics.forQueue(DEFAULT_QUEUE_NAME, null, false, conf);
    this.activeUsersManager = new ActiveUsersManager(metrics);
}
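
The comment in initScheduler states the reason for the choice: unlike a hash map, a ConcurrentSkipListMap iterates in sorted key order, so walking this.applications visits applications in ApplicationId order. A tiny illustration of the property being relied on (names are illustrative):

ConcurrentSkipListMap<Integer, String> apps = new ConcurrentSkipListMap<Integer, String>();
apps.put(42, "app-42");
apps.put(7, "app-7");
for (Integer id : apps.keySet()) {
    System.out.println(id); // prints 7, then 42: iteration follows key order
}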

From source file:org.apache.hadoop.hbase.client.coprocessor.TimeseriesAggregationClient.java

/**
 * Gives the minimum value of a column for a given column family over the given range. If the
 * qualifier is null, the min over all values for the given family is returned.
 * @param table
 * @param ci
 * @param scan
 * @return min values as ConcurrentSkipListMap<Long, R> (results come back as protos from the
 *         regions and are passed out merged into a ConcurrentSkipListMap)
 * @throws Throwable The caller is expected to handle exceptions, which are thrown and
 *           propagated to it.
 */
public <R, S, P extends Message, Q extends Message, T extends Message> ConcurrentSkipListMap<Long, R> min(
        final HTable table, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan) throws Throwable {
    final TimeseriesAggregateRequest requestArg = validateArgAndGetPB(scan, ci, false, intervalSeconds,
            timestampSecondsMin, timestampSecondsMax, keyFilterPattern);
    class MinCallBack implements Batch.Callback<TimeseriesAggregateResponse> {
        ConcurrentSkipListMap<Long, R> min = new ConcurrentSkipListMap<Long, R>();

        ConcurrentSkipListMap<Long, R> getMin() {
            return min;
        }

        @Override
        public synchronized void update(byte[] region, byte[] row, TimeseriesAggregateResponse result) {
            List<TimeseriesAggregateResponseMapEntry> results = result.getEntryList();
            for (TimeseriesAggregateResponseMapEntry entry : results) {
                R candidate;
                if (entry.getValue().getFirstPartCount() > 0) {
                    ByteString b = entry.getValue().getFirstPart(0);
                    Q q = null;
                    try {
                        q = ProtobufUtil.getParsedGenericInstance(ci.getClass(), 3, b);
                    } catch (IOException e) {
                        // parse failure: log and skip this entry (q stays null)
                        e.printStackTrace();
                    }
                    if (null != q) {
                        candidate = ci.getCellValueFromProto(q);
                        if (min.containsKey(entry.getKey())) {
                            R current = min.get(entry.getKey());
                            // keep the smaller value; when current is null, take candidate
                            min.put(entry.getKey(),
                                    (current == null
                                            || (candidate != null && ci.compare(candidate, current) < 0))
                                                    ? candidate
                                                    : current);
                        } else {
                            min.put(entry.getKey(), ci.getCellValueFromProto(q));
                        }
                    }
                }
            }
        }
    }

    MinCallBack aMinCallBack = new MinCallBack();
    table.coprocessorService(TimeseriesAggregateService.class, scan.getStartRow(), scan.getStopRow(),
            new Batch.Call<TimeseriesAggregateService, TimeseriesAggregateResponse>() {
                @Override
                public TimeseriesAggregateResponse call(TimeseriesAggregateService instance)
                        throws IOException {
                    ServerRpcController controller = new ServerRpcController();
                    BlockingRpcCallback<TimeseriesAggregateResponse> rpcCallback = new BlockingRpcCallback<TimeseriesAggregateResponse>();
                    instance.getMin(controller, requestArg, rpcCallback);
                    TimeseriesAggregateResponse response = rpcCallback.get();
                    if (controller.failedOnException()) {
                        throw controller.getFailedOn();
                    }
                    return response;
                }
            }, aMinCallBack);
    return aMinCallBack.getMin();
}

From source file:org.archive.crawler.frontier.BdbFrontier.java

@Override
protected void initOtherQueues() throws DatabaseException {
    boolean recycle = (recoveryCheckpoint != null);

    // tiny risk of OutOfMemoryError: if giant number of snoozed
    // queues all wake-to-ready at once
    readyClassQueues = new LinkedBlockingQueue<String>();

    inactiveQueuesByPrecedence = new ConcurrentSkipListMap<Integer, Queue<String>>();

    retiredQueues = bdb.getStoredQueue("retiredQueues", String.class, recycle);

    // primary snoozed queues
    snoozedClassQueues = new DelayQueue<DelayedWorkQueue>();
    // just in case: overflow for extreme situations
    snoozedOverflow = bdb.getStoredMap("snoozedOverflow", Long.class, DelayedWorkQueue.class, true, false);

    this.futureUris = bdb.getStoredMap("futureUris", Long.class, CrawlURI.class, true,
            recoveryCheckpoint != null);

    // initialize master map in which other queues live
    this.pendingUris = createMultipleWorkQueues();
}
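
Keying inactiveQueuesByPrecedence by Integer in a skip-list map lets the frontier reach the best (lowest) precedence band directly through the NavigableMap API, along these lines (an illustrative fragment, not BdbFrontier code):

Map.Entry<Integer, Queue<String>> best = inactiveQueuesByPrecedence.firstEntry();
if (best != null) {
    String queueKey = best.getValue().poll(); // serves the lowest-precedence band first
}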

From source file:org.apache.hadoop.hbase.coprocessor.client.TimeseriesAggregationClient.java

/**
 * Gives the minimum value of a column for a given column family over the given range. If the
 * qualifier is null, the min over all values for the given family is returned.
 * @param table
 * @param ci
 * @param scan
 * @return min values as ConcurrentSkipListMap<Long, R> (results come back as protos from the
 *         regions and are passed out merged into a ConcurrentSkipListMap)
 * @throws Throwable The caller is expected to handle exceptions, which are thrown and
 *           propagated to it.
 */
public <R, S, P extends Message, Q extends Message, T extends Message> ConcurrentSkipListMap<Long, R> min(
        final Table table, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan) throws Throwable {
    final TimeseriesAggregateRequest requestArg = validateArgAndGetPB(scan, ci, false, intervalSeconds,
            timestampSecondsMin, timestampSecondsMax, keyFilterPattern);
    class MinCallBack implements Batch.Callback<TimeseriesAggregateResponse> {
        ConcurrentSkipListMap<Long, R> min = new ConcurrentSkipListMap<Long, R>();

        ConcurrentSkipListMap<Long, R> getMin() {
            return min;
        }

        @Override
        public synchronized void update(byte[] region, byte[] row, TimeseriesAggregateResponse result) {
            List<TimeseriesAggregateResponseMapEntry> results = result.getEntryList();
            for (TimeseriesAggregateResponseMapEntry entry : results) {
                R candidate;
                if (entry.getValue().getFirstPartCount() > 0) {
                    ByteString b = entry.getValue().getFirstPart(0);
                    Q q = null;
                    try {
                        q = ProtobufUtil.getParsedGenericInstance(ci.getClass(), 3, b);
                    } catch (IOException e) {
                        // parse failure: log and skip this entry (q stays null)
                        e.printStackTrace();
                    }
                    if (null != q) {
                        candidate = ci.getCellValueFromProto(q);
                        if (min.containsKey(entry.getKey())) {
                            R current = min.get(entry.getKey());
                            // keep the smaller value; when current is null, take candidate
                            min.put(entry.getKey(),
                                    (current == null
                                            || (candidate != null && ci.compare(candidate, current) < 0))
                                                    ? candidate
                                                    : current);
                        } else {
                            min.put(entry.getKey(), ci.getCellValueFromProto(q));
                        }
                    }
                }
            }
        }
    }

    MinCallBack aMinCallBack = new MinCallBack();
    table.coprocessorService(TimeseriesAggregateService.class, scan.getStartRow(), scan.getStopRow(),
            new Batch.Call<TimeseriesAggregateService, TimeseriesAggregateResponse>() {
                @Override
                public TimeseriesAggregateResponse call(TimeseriesAggregateService instance)
                        throws IOException {
                    ServerRpcController controller = new ServerRpcController();
                    BlockingRpcCallback<TimeseriesAggregateResponse> rpcCallback = new BlockingRpcCallback<TimeseriesAggregateResponse>();
                    instance.getMin(controller, requestArg, rpcCallback);
                    TimeseriesAggregateResponse response = rpcCallback.get();
                    if (controller.failedOnException()) {
                        throw controller.getFailedOn();
                    }
                    return response;
                }
            }, aMinCallBack);
    return aMinCallBack.getMin();
}

From source file:org.apache.hadoop.hbase.client.coprocessor.TimeseriesAggregationClient.java

/**
 * Gives the sum of a column for a given column family over the given range. If the qualifier
 * is null, the sum of all values for the given family is returned.
 * @param table
 * @param ci
 * @param scan
 * @return sums as ConcurrentSkipListMap<Long, S> (results come back as protos from the
 *         regions and are passed out merged into a ConcurrentSkipListMap)
 * @throws Throwable The caller is expected to handle exceptions, which are thrown and
 *           propagated to it.
 */
public <R, S, P extends Message, Q extends Message, T extends Message> ConcurrentSkipListMap<Long, S> sum(
        final HTable table, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan) throws Throwable {
    final TimeseriesAggregateRequest requestArg = validateArgAndGetPB(scan, ci, false, intervalSeconds,
            timestampSecondsMin, timestampSecondsMax, keyFilterPattern);
    class SumCallBack implements Batch.Callback<TimeseriesAggregateResponse> {
        ConcurrentSkipListMap<Long, S> sum = new ConcurrentSkipListMap<Long, S>();

        ConcurrentSkipListMap<Long, S> getSum() {
            return sum;
        }

        @Override
        public synchronized void update(byte[] region, byte[] row, TimeseriesAggregateResponse result) {
            List<TimeseriesAggregateResponseMapEntry> results = result.getEntryList();
            for (TimeseriesAggregateResponseMapEntry entry : results) {
                S candidate;
                if (entry.getValue().getFirstPartCount() == 0) {
                    // nothing to sum for this interval: ConcurrentSkipListMap rejects
                    // null values, so leave the key absent instead of put(key, null)
                } else {
                    ByteString b = entry.getValue().getFirstPart(0);
                    T t = null;
                    try {
                        t = ProtobufUtil.getParsedGenericInstance(ci.getClass(), 4, b);
                    } catch (IOException e) {
                        // parse failure: log and skip this entry (t stays null)
                        e.printStackTrace();
                    }
                    if (null != t) {
                        candidate = ci.getPromotedValueFromProto(t);
                        if (sum.containsKey(entry.getKey())) {
                            S current = sum.get(entry.getKey());
                            sum.put(entry.getKey(), (ci.add(current, candidate)));
                        } else {
                            sum.put(entry.getKey(), candidate);
                        }
                    }
                }
            }
        }
    }

    SumCallBack aSumCallBack = new SumCallBack();
    table.coprocessorService(TimeseriesAggregateService.class, scan.getStartRow(), scan.getStopRow(),
            new Batch.Call<TimeseriesAggregateService, TimeseriesAggregateResponse>() {
                @Override
                public TimeseriesAggregateResponse call(TimeseriesAggregateService instance)
                        throws IOException {
                    ServerRpcController controller = new ServerRpcController();
                    BlockingRpcCallback<TimeseriesAggregateResponse> rpcCallback = new BlockingRpcCallback<TimeseriesAggregateResponse>();
                    instance.getSum(controller, requestArg, rpcCallback);
                    TimeseriesAggregateResponse response = rpcCallback.get();
                    if (controller.failedOnException()) {
                        throw controller.getFailedOn();
                    }
                    return response;
                }
            }, aSumCallBack);
    return aSumCallBack.getMax();
}