Example usage for java.util.concurrent ConcurrentSkipListMap ConcurrentSkipListMap

Introduction

This page collects example usages of the java.util.concurrent.ConcurrentSkipListMap no-argument constructor, ConcurrentSkipListMap().

Prototype

public ConcurrentSkipListMap() 

Document

Constructs a new, empty map, sorted according to the natural ordering of the keys (as defined by their Comparable implementation).
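
Before the examples from real projects, here is a minimal, self-contained sketch (plain JDK, nothing project-specific) of what this constructor gives you: keys iterate in their natural Comparable order rather than insertion order, and the map is safe for concurrent use.

import java.util.concurrent.ConcurrentSkipListMap;

public class NaturalOrderingDemo {
    public static void main(String[] args) {
        // No-arg constructor: keys are sorted by their natural (Comparable) ordering
        ConcurrentSkipListMap<String, Integer> map = new ConcurrentSkipListMap<>();
        map.put("banana", 2);
        map.put("apple", 1);
        map.put("cherry", 3);

        // Iteration follows key order, not insertion order
        System.out.println(map.firstKey()); // apple
        System.out.println(map);            // {apple=1, banana=2, cherry=3}
    }
}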

Usage

From source file:org.apache.hadoop.hbase.coprocessor.client.TimeseriesAggregationClient.java

/**
 * Computes the sum of a column's values for a given column family over the given range. If the
 * qualifier is null, the sum of all values for the given family is returned.
 * @param table
 * @param ci
 * @param scan
 * @return per-interval sums as a ConcurrentSkipListMap<Long, S> (values come back as protos
 *         from each region and are passed out as a ConcurrentSkipListMap)
 * @throws Throwable exceptions from the regions are propagated to the caller, who is expected
 *           to handle them
 */
public <R, S, P extends Message, Q extends Message, T extends Message> ConcurrentSkipListMap<Long, S> sum(
        final Table table, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan) throws Throwable {
    final TimeseriesAggregateRequest requestArg = validateArgAndGetPB(scan, ci, false, intervalSeconds,
            timestampSecondsMin, timestampSecondsMax, keyFilterPattern);
    class SumCallBack implements Batch.Callback<TimeseriesAggregateResponse> {
        ConcurrentSkipListMap<Long, S> sum = new ConcurrentSkipListMap<Long, S>();

        ConcurrentSkipListMap<Long, S> getSum() {
            return sum;
        }

        @Override
        public synchronized void update(byte[] region, byte[] row, TimeseriesAggregateResponse result) {
            List<TimeseriesAggregateResponseMapEntry> results = result.getEntryList();
            for (TimeseriesAggregateResponseMapEntry entry : results) {
                if (entry.getValue().getFirstPartCount() == 0) {
                    if (!sum.containsKey(entry.getKey())) {
                        sum.put(entry.getKey(), null);
                    }
                } else {
                    ByteString b = entry.getValue().getFirstPart(0);
                    T t = null;
                    try {
                        t = ProtobufUtil.getParsedGenericInstance(ci.getClass(), 4, b);
                    } catch (IOException e) {
                        e.printStackTrace();
                    }
                    if (t != null) {
                        // only promote the value once the proto has parsed successfully
                        S candidate = ci.getPromotedValueFromProto(t);
                        if (sum.containsKey(entry.getKey())) {
                            S current = sum.get(entry.getKey());
                            sum.put(entry.getKey(), ci.add(current, candidate));
                        } else {
                            sum.put(entry.getKey(), candidate);
                        }
                    }
                }
            }
        }
    }

    SumCallBack aSumCallBack = new SumCallBack();
    table.coprocessorService(TimeseriesAggregateService.class, scan.getStartRow(), scan.getStopRow(),
            new Batch.Call<TimeseriesAggregateService, TimeseriesAggregateResponse>() {
                @Override
                public TimeseriesAggregateResponse call(TimeseriesAggregateService instance)
                        throws IOException {
                    ServerRpcController controller = new ServerRpcController();
                    BlockingRpcCallback<TimeseriesAggregateResponse> rpcCallback = new BlockingRpcCallback<TimeseriesAggregateResponse>();
                    instance.getSum(controller, requestArg, rpcCallback);
                    TimeseriesAggregateResponse response = rpcCallback.get();
                    if (controller.failedOnException()) {
                        throw controller.getFailedOn();
                    }
                    return response;
                }
            }, aSumCallBack);
    return aSumCallBack.getSum();
}
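
The SumCallBack above serializes its updates with synchronized and a containsKey/put pair. On Java 8 and later, the same accumulate-per-key step can be written atomically with ConcurrentMap.merge; a minimal, HBase-free sketch of that idea follows (Long::sum stands in for ci.add, and the region data is invented):

import java.util.concurrent.ConcurrentSkipListMap;

public class MergeSketch {
    public static void main(String[] args) {
        ConcurrentSkipListMap<Long, Long> sums = new ConcurrentSkipListMap<>();

        // Each "region" reports partial sums keyed by interval timestamp
        long[][] regionA = { { 1000L, 5L }, { 2000L, 7L } };
        long[][] regionB = { { 1000L, 3L }, { 3000L, 2L } };

        for (long[][] region : new long[][][] { regionA, regionB }) {
            for (long[] entry : region) {
                // merge() combines partial results atomically,
                // playing the role of ci.add() in the callback above
                sums.merge(entry[0], entry[1], Long::sum);
            }
        }
        System.out.println(sums); // {1000=8, 2000=7, 3000=2}
    }
}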

From source file:org.apache.distributedlog.auditor.DLAuditor.java

private Map<String, Long> calculateStreamSpaceUsage(final URI uri, final Namespace namespace)
        throws IOException {
    Iterator<String> streams = namespace.getLogs();
    final LinkedBlockingQueue<String> streamQueue = new LinkedBlockingQueue<String>();
    while (streams.hasNext()) {
        streamQueue.add(streams.next());
    }

    final Map<String, Long> streamSpaceUsageMap = new ConcurrentSkipListMap<String, Long>();
    final AtomicInteger numStreamsCollected = new AtomicInteger(0);

    executeAction(streamQueue, 10, new Action<String>() {
        @Override
        public void execute(String stream) throws IOException {
            streamSpaceUsageMap.put(stream, calculateStreamSpaceUsage(namespace, stream));
            if (numStreamsCollected.incrementAndGet() % 1000 == 0) {
                logger.info("Calculated {} streams from uri {}.", numStreamsCollected.get(), uri);
            }
        }
    });

    return streamSpaceUsageMap;
}
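
Stripped to its essentials, the pattern above is: worker threads write into a shared ConcurrentSkipListMap so the finished report already iterates in key order. A small sketch under invented names (the stream names, pool size, and fake usage values are illustrative, not part of DLAuditor):

import java.util.NavigableMap;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ConcurrentAccumulatorSketch {
    public static void main(String[] args) throws InterruptedException {
        // Shared, thread-safe, sorted accumulator
        NavigableMap<String, Long> usage = new ConcurrentSkipListMap<>();
        ExecutorService pool = Executors.newFixedThreadPool(10);

        for (int i = 0; i < 100; i++) {
            final String stream = String.format("stream-%03d", i);
            pool.submit(() -> usage.put(stream, (long) stream.length()));
        }
        pool.shutdown();
        pool.awaitTermination(1, TimeUnit.MINUTES);

        // Entries come back sorted by stream name, no extra sort step needed
        System.out.println(usage.firstKey()); // stream-000
    }
}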

From source file:org.solbase.lucenehbase.IndexWriter.java

@SuppressWarnings("unchecked")
public ParsedDoc parseDoc(Document doc, Analyzer analyzer, String indexName, int docNumber,
        List<String> sortFieldNames) throws CorruptIndexException, IOException {
    // given doc, what are all of terms we indexed
    List<Term> allIndexedTerms = new ArrayList<Term>();
    Map<String, byte[]> fieldCache = new HashMap<String, byte[]>(1024);

    // need to hold onto TermDocMetaData, so it can return this array
    List<TermDocMetadata> metadatas = new ArrayList<TermDocMetadata>();

    byte[] docId = Bytes.toBytes(docNumber);
    int position = 0;

    for (Fieldable field : (List<Fieldable>) doc.getFields()) {
        // Indexed field
        if (field.isIndexed() && field.isTokenized()) {

            TokenStream tokens = field.tokenStreamValue();

            if (tokens == null) {
                tokens = analyzer.tokenStream(field.name(), new StringReader(field.stringValue()));
            }

            // collect term information per field
            Map<Term, Map<ByteBuffer, List<Number>>> allTermInformation = new ConcurrentSkipListMap<Term, Map<ByteBuffer, List<Number>>>();

            int lastOffset = 0;
            if (position > 0) {
                position += analyzer.getPositionIncrementGap(field.name());
            }

            tokens.reset(); // reset the TokenStream to the first token

            // offsets
            OffsetAttribute offsetAttribute = null;
            if (field.isStoreOffsetWithTermVector())
                offsetAttribute = (OffsetAttribute) tokens.addAttribute(OffsetAttribute.class);

            // positions
            PositionIncrementAttribute posIncrAttribute = null;
            if (field.isStorePositionWithTermVector())
                posIncrAttribute = (PositionIncrementAttribute) tokens
                        .addAttribute(PositionIncrementAttribute.class);

            TermAttribute termAttribute = (TermAttribute) tokens.addAttribute(TermAttribute.class);

            // store field norms per term per document rather than per field:
            // more to write, but less to read on the query side
            int tokensInField = 0;

            while (tokens.incrementToken()) {
                tokensInField++;
                Term term = new Term(field.name(), termAttribute.term());

                allIndexedTerms.add(term);

                // fetch all collected information for this term
                Map<ByteBuffer, List<Number>> termInfo = allTermInformation.get(term);

                if (termInfo == null) {
                    termInfo = new ConcurrentSkipListMap<ByteBuffer, List<Number>>();
                    allTermInformation.put(term, termInfo);
                }

                // term frequency
                List<Number> termFrequency = termInfo.get(TermDocMetadata.termFrequencyKeyBytes);
                if (termFrequency == null) {
                    termFrequency = new ArrayList<Number>();
                    termFrequency.add(0);
                    termInfo.put(TermDocMetadata.termFrequencyKeyBytes, termFrequency);
                }
                // increment
                termFrequency.set(0, termFrequency.get(0).intValue() + 1);

                // position vector
                if (field.isStorePositionWithTermVector()) {
                    position += (posIncrAttribute.getPositionIncrement() - 1);

                    List<Number> positionVector = termInfo.get(TermDocMetadata.positionVectorKeyBytes);

                    if (positionVector == null) {
                        positionVector = new ArrayList<Number>();
                        termInfo.put(TermDocMetadata.positionVectorKeyBytes, positionVector);
                    }

                    positionVector.add(++position);
                }

                // term offsets
                if (field.isStoreOffsetWithTermVector()) {

                    List<Number> offsetVector = termInfo.get(TermDocMetadata.offsetVectorKeyBytes);
                    if (offsetVector == null) {
                        offsetVector = new ArrayList<Number>();
                        termInfo.put(TermDocMetadata.offsetVectorKeyBytes, offsetVector);
                    }

                    offsetVector.add(lastOffset + offsetAttribute.startOffset());
                    offsetVector.add(lastOffset + offsetAttribute.endOffset());

                }

                List<Number> sortValues = new ArrayList<Number>();
                // init sortValues
                for (int i = 0; i < Scorer.numSort; i++) {
                    sortValues.add(-1);
                }

                int order = 0;

                // extract sort field value and store it in term doc metadata obj
                for (String fieldName : sortFieldNames) {
                    Fieldable fieldable = doc.getFieldable(fieldName);

                    if (fieldable instanceof EmbeddedSortField) {
                        EmbeddedSortField sortField = (EmbeddedSortField) fieldable;

                        int value = -1;
                        if (sortField.stringValue() != null) {
                            value = Integer.parseInt(sortField.stringValue());
                        }
                        int sortSlot = sortField.getSortSlot();

                        sortValues.set(sortSlot - 1, value);
                    } else {
                        // TODO: this logic is used for real time indexing.
                        // hacky. depending on order of sort field names in array
                        int value = -1;
                        if (fieldable.stringValue() != null) {
                            value = Integer.parseInt(fieldable.stringValue());
                        }
                        sortValues.set(order++, value);
                    }
                }
                termInfo.put(TermDocMetadata.sortFieldKeyBytes, sortValues);
            }

            List<Number> bnorm = null;
            if (!field.getOmitNorms()) {
                bnorm = new ArrayList<Number>();
                float norm = doc.getBoost();
                norm *= field.getBoost();
                norm *= similarity.lengthNorm(field.name(), tokensInField);
                bnorm.add(Similarity.encodeNorm(norm));
            }

            for (Map.Entry<Term, Map<ByteBuffer, List<Number>>> term : allTermInformation.entrySet()) {
                Term tempTerm = term.getKey();

                byte[] fieldTermKeyBytes = SolbaseUtil.generateTermKey(tempTerm);

                // Mix in the norm for this field alongside each term
                // more writes but faster on read side.
                if (!field.getOmitNorms()) {
                    term.getValue().put(TermDocMetadata.normsKeyBytes, bnorm);
                }

                TermDocMetadata data = new TermDocMetadata(docNumber, term.getValue(), fieldTermKeyBytes,
                        tempTerm);
                metadatas.add(data);
            }
        }

        // Untokenized fields go in without a termPosition
        if (field.isIndexed() && !field.isTokenized()) {
            Term term = new Term(field.name(), field.stringValue());
            allIndexedTerms.add(term);

            byte[] fieldTermKeyBytes = SolbaseUtil.generateTermKey(term);

            Map<ByteBuffer, List<Number>> termMap = new ConcurrentSkipListMap<ByteBuffer, List<Number>>();
            termMap.put(TermDocMetadata.termFrequencyKeyBytes, Arrays.asList(new Number[] {}));
            termMap.put(TermDocMetadata.positionVectorKeyBytes, Arrays.asList(new Number[] {}));

            TermDocMetadata data = new TermDocMetadata(docNumber, termMap, fieldTermKeyBytes, term);
            metadatas.add(data);
        }

        // Stores each field as a column under this doc key
        if (field.isStored()) {

            byte[] _value = field.isBinary() ? field.getBinaryValue() : Bytes.toBytes(field.stringValue());

            // the last byte flags whether the value is binary
            byte[] value = new byte[_value.length + 1];
            System.arraycopy(_value, 0, value, 0, _value.length);

            value[value.length - 1] = (byte) (field.isBinary() ? Byte.MAX_VALUE : Byte.MIN_VALUE);

            // logic to handle multiple fields w/ same name
            byte[] currentValue = fieldCache.get(field.name());
            if (currentValue == null) {
                fieldCache.put(field.name(), value);
            } else {

                // append new data
                byte[] newValue = new byte[currentValue.length + SolbaseUtil.delimiter.length + value.length
                        - 1];
                System.arraycopy(currentValue, 0, newValue, 0, currentValue.length - 1);
                System.arraycopy(SolbaseUtil.delimiter, 0, newValue, currentValue.length - 1,
                        SolbaseUtil.delimiter.length);
                System.arraycopy(value, 0, newValue, currentValue.length + SolbaseUtil.delimiter.length - 1,
                        value.length);

                fieldCache.put(field.name(), newValue);
            }
        }
    }

    Put documentPut = new Put(SolbaseUtil.randomize(docNumber));

    // Store each field as a column under this docId
    for (Map.Entry<String, byte[]> field : fieldCache.entrySet()) {
        documentPut.add(Bytes.toBytes("field"), Bytes.toBytes(field.getKey()), field.getValue());
    }

    // in case of real time update, we need to add back docId field
    if (!documentPut.has(Bytes.toBytes("field"), Bytes.toBytes("docId"))) {

        byte[] docIdStr = Bytes.toBytes(Integer.toString(docNumber));
        // the last byte flags whether the value is binary
        byte[] value = new byte[docIdStr.length + 1];
        System.arraycopy(docIdStr, 0, value, 0, docIdStr.length);

        value[value.length - 1] = (byte) (Byte.MIN_VALUE);
        documentPut.add(Bytes.toBytes("field"), Bytes.toBytes("docId"), value);
    }

    // Finally, store metadata so we can delete this document later
    documentPut.add(Bytes.toBytes("allTerms"), Bytes.toBytes("allTerms"),
            SolbaseUtil.toBytes(allIndexedTerms).array());

    ParsedDoc parsedDoc = new ParsedDoc(metadatas, doc, documentPut, fieldCache.entrySet(), allIndexedTerms);
    return parsedDoc;

}
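
One detail worth noting in parseDoc: the per-term maps are keyed by ByteBuffer, which works with the no-argument constructor only because ByteBuffer implements Comparable, ordering buffers lexicographically over their remaining bytes. A minimal illustration:

import java.nio.ByteBuffer;
import java.util.concurrent.ConcurrentSkipListMap;

public class ByteBufferKeySketch {
    public static void main(String[] args) {
        // ByteBuffer is Comparable, so it is a valid key for the no-arg constructor
        ConcurrentSkipListMap<ByteBuffer, String> m = new ConcurrentSkipListMap<>();
        m.put(ByteBuffer.wrap(new byte[] { 2 }), "two");
        m.put(ByteBuffer.wrap(new byte[] { 1 }), "one");

        // Keys iterate in byte order
        System.out.println(m.firstEntry().getValue()); // one
    }
}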

From source file:com.twitter.distributedlog.auditor.DLAuditor.java

private Map<String, Long> calculateStreamSpaceUsage(final URI uri,
        final com.twitter.distributedlog.DistributedLogManagerFactory factory) throws IOException {
    Collection<String> streams = factory.enumerateAllLogsInNamespace();
    final LinkedBlockingQueue<String> streamQueue = new LinkedBlockingQueue<String>();
    streamQueue.addAll(streams);

    final Map<String, Long> streamSpaceUsageMap = new ConcurrentSkipListMap<String, Long>();
    final AtomicInteger numStreamsCollected = new AtomicInteger(0);

    executeAction(streamQueue, 10, new Action<String>() {
        @Override
        public void execute(String stream) throws IOException {
            streamSpaceUsageMap.put(stream, calculateStreamSpaceUsage(factory, stream));
            if (numStreamsCollected.incrementAndGet() % 1000 == 0) {
                logger.info("Calculated {} streams from uri {}.", numStreamsCollected.get(), uri);
            }
        }
    });

    return streamSpaceUsageMap;
}

From source file:org.ambraproject.service.user.UserServiceImpl.java

@Override
@SuppressWarnings("unchecked")
public List<UserAlert> getAvailableAlerts() {
    List<UserAlert> alerts = new ArrayList<UserAlert>();

    final SortedMap<Integer, Pair> categoryNames = new ConcurrentSkipListMap<Integer, Pair>();

    HierarchicalConfiguration hc = (HierarchicalConfiguration) configuration;
    List<HierarchicalConfiguration> categories = hc.configurationsAt(ALERTS_CATEGORIES_CATEGORY);

    for (HierarchicalConfiguration c : categories) {
        String key = c.getString("[@key]");
        int order = c.getInt("[@displayOrder]", categoryNames.size());
        String value = c.getString("");

        categoryNames.put(order, new Pair<String, String>(key, value));
    }

    final String[] weeklyCategories = hc.getStringArray(ALERTS_WEEKLY);
    final String[] monthlyCategories = hc.getStringArray(ALERTS_MONTHLY);
    final String[] subjectFilters = hc.getStringArray(SUBJECT_FILTER);

    final Set<Map.Entry<Integer, Pair>> categoryNamesSet = categoryNames.entrySet();

    for (final Map.Entry<Integer, Pair> category : categoryNamesSet) {
        final String key = (String) category.getValue().getFirst();
        boolean weeklyCategoryKey = false;
        boolean monthlyCategoryKey = false;
        boolean subjectFilter = false;

        if (ArrayUtils.contains(weeklyCategories, key)) {
            weeklyCategoryKey = true;
        }
        if (ArrayUtils.contains(monthlyCategories, key)) {
            monthlyCategoryKey = true;
        }
        if (ArrayUtils.contains(subjectFilters, key)) {
            subjectFilter = true;
        }

        alerts.add(
                new UserAlert((String) category.getValue().getFirst(), (String) category.getValue().getSecond(),
                        weeklyCategoryKey, monthlyCategoryKey, subjectFilter));
    }
    return alerts;
}
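
The trick here is to key the map by displayOrder, so iterating entrySet() already yields the alert categories in display order with no explicit sort. A stripped-down sketch (category names and orders are invented):

import java.util.Map;
import java.util.SortedMap;
import java.util.concurrent.ConcurrentSkipListMap;

public class DisplayOrderSketch {
    public static void main(String[] args) {
        SortedMap<Integer, String> byOrder = new ConcurrentSkipListMap<>();
        byOrder.put(2, "Genetics");
        byOrder.put(0, "Biology");
        byOrder.put(1, "Medicine");

        // Iteration follows displayOrder, regardless of insertion order
        for (Map.Entry<Integer, String> e : byOrder.entrySet()) {
            System.out.println(e.getKey() + " -> " + e.getValue());
        }
    }
}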

From source file:org.apache.hadoop.hbase.client.coprocessor.TimeseriesAggregationClient.java

/**
 * It computes the average while fetching sum and row count from all the corresponding regions.
 * The approach is to combine the region-level sums and row counts into global totals, then
 * compute the average from those.
 * @param table
 * @param ci
 * @param scan
 * @return per-interval (sum, row count) pairs from which the average is computed
 * @throws Throwable
 */
private <R, S, P extends Message, Q extends Message, T extends Message> ConcurrentSkipListMap<Long, Pair<S, Long>> getAvgArgs(
        final HTable table, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan) throws Throwable {
    final TimeseriesAggregateRequest requestArg = validateArgAndGetPB(scan, ci, false, intervalSeconds,
            timestampSecondsMin, timestampSecondsMax, keyFilterPattern);
    class AvgCallBack implements Batch.Callback<TimeseriesAggregateResponse> {
        ConcurrentSkipListMap<Long, Pair<S, Long>> averages = new ConcurrentSkipListMap<Long, Pair<S, Long>>();

        public synchronized ConcurrentSkipListMap<Long, Pair<S, Long>> getAvgArgs() {
            return averages;
        }

        @Override
        public synchronized void update(byte[] region, byte[] row, TimeseriesAggregateResponse result) {
            List<TimeseriesAggregateResponseMapEntry> results = result.getEntryList();
            for (TimeseriesAggregateResponseMapEntry entry : results) {

                if (entry.getValue().getFirstPartCount() == 0) {
                    if (!averages.containsKey(entry.getKey())) {
                        averages.put(entry.getKey(), new Pair<S, Long>(null, 0L));
                    }
                } else {

                    ByteString b = entry.getValue().getFirstPart(0);
                    T t = null;
                    try {
                        t = ProtobufUtil.getParsedGenericInstance(ci.getClass(), 4, b);
                    } catch (IOException e) {
                        e.printStackTrace();
                    }
                    S s = ci.getPromotedValueFromProto(t);

                    ByteBuffer bb = ByteBuffer.allocate(8)
                            .put(getBytesFromResponse(entry.getValue().getSecondPart()));
                    bb.rewind();

                    if (averages.containsKey(entry.getKey())) {
                        S sum = averages.get(entry.getKey()).getFirst();
                        Long rowCount = averages.get(entry.getKey()).getSecond();
                        averages.put(entry.getKey(),
                                new Pair<S, Long>(ci.add(sum, s), rowCount + bb.getLong()));
                    } else {
                        averages.put(entry.getKey(), new Pair<S, Long>(s, bb.getLong()));
                    }
                }
            }
        }
    }
    AvgCallBack avgCallBack = new AvgCallBack();
    table.coprocessorService(TimeseriesAggregateService.class, scan.getStartRow(), scan.getStopRow(),
            new Batch.Call<TimeseriesAggregateService, TimeseriesAggregateResponse>() {
                @Override
                public TimeseriesAggregateResponse call(TimeseriesAggregateService instance)
                        throws IOException {
                    ServerRpcController controller = new ServerRpcController();
                    BlockingRpcCallback<TimeseriesAggregateResponse> rpcCallback = new BlockingRpcCallback<TimeseriesAggregateResponse>();
                    instance.getAvg(controller, requestArg, rpcCallback);
                    TimeseriesAggregateResponse response = rpcCallback.get();
                    if (controller.failedOnException()) {
                        throw controller.getFailedOn();
                    }
                    return response;
                }
            }, avgCallBack);
    return avgCallBack.getAvgArgs();
}

From source file:org.apache.hadoop.hbase.coprocessor.client.TimeseriesAggregationClient.java

/**
 * It computes the average while fetching sum and row count from all the corresponding regions.
 * The approach is to combine the region-level sums and row counts into global totals, then
 * compute the average from those.
 * @param table
 * @param ci
 * @param scan
 * @return per-interval (sum, row count) pairs from which the average is computed
 * @throws Throwable
 */
private <R, S, P extends Message, Q extends Message, T extends Message> ConcurrentSkipListMap<Long, Pair<S, Long>> getAvgArgs(
        final Table table, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan) throws Throwable {
    final TimeseriesAggregateRequest requestArg = validateArgAndGetPB(scan, ci, false, intervalSeconds,
            timestampSecondsMin, timestampSecondsMax, keyFilterPattern);
    class AvgCallBack implements Batch.Callback<TimeseriesAggregateResponse> {
        ConcurrentSkipListMap<Long, Pair<S, Long>> averages = new ConcurrentSkipListMap<Long, Pair<S, Long>>();

        public synchronized ConcurrentSkipListMap<Long, Pair<S, Long>> getAvgArgs() {
            return averages;
        }

        @Override
        public synchronized void update(byte[] region, byte[] row, TimeseriesAggregateResponse result) {
            List<TimeseriesAggregateResponseMapEntry> results = result.getEntryList();
            for (TimeseriesAggregateResponseMapEntry entry : results) {

                if (entry.getValue().getFirstPartCount() == 0) {
                    if (!averages.containsKey(entry.getKey())) {
                        averages.put(entry.getKey(), new Pair<S, Long>(null, 0L));
                    }
                } else {

                    ByteString b = entry.getValue().getFirstPart(0);
                    T t = null;
                    try {
                        t = ProtobufUtil.getParsedGenericInstance(ci.getClass(), 4, b);
                    } catch (IOException e) {
                        e.printStackTrace();
                    }
                    S s = ci.getPromotedValueFromProto(t);

                    ByteBuffer bb = ByteBuffer.allocate(8)
                            .put(getBytesFromResponse(entry.getValue().getSecondPart()));
                    bb.rewind();

                    if (averages.containsKey(entry.getKey())) {
                        S sum = averages.get(entry.getKey()).getFirst();
                        Long rowCount = averages.get(entry.getKey()).getSecond();
                        averages.put(entry.getKey(),
                                new Pair<S, Long>(ci.add(sum, s), rowCount + bb.getLong()));
                    } else {
                        averages.put(entry.getKey(), new Pair<S, Long>(s, bb.getLong()));
                    }
                }
            }
        }
    }
    AvgCallBack avgCallBack = new AvgCallBack();
    table.coprocessorService(TimeseriesAggregateService.class, scan.getStartRow(), scan.getStopRow(),
            new Batch.Call<TimeseriesAggregateService, TimeseriesAggregateResponse>() {
                @Override
                public TimeseriesAggregateResponse call(TimeseriesAggregateService instance)
                        throws IOException {
                    ServerRpcController controller = new ServerRpcController();
                    BlockingRpcCallback<TimeseriesAggregateResponse> rpcCallback = new BlockingRpcCallback<TimeseriesAggregateResponse>();
                    instance.getAvg(controller, requestArg, rpcCallback);
                    TimeseriesAggregateResponse response = rpcCallback.get();
                    if (controller.failedOnException()) {
                        throw controller.getFailedOn();
                    }
                    return response;
                }
            }, avgCallBack);
    return avgCallBack.getAvgArgs();
}

From source file:com.bytelightning.opensource.pokerface.PokerFace.java

/**
 * Configures all the needed components, but does not actually start the server.
 * @param config   Contains all information needed to fully wire up the http, https, and httpclient components of this reverse proxy.
 * @throws Exception   Yeah, a lot can go wrong here, but at least it will be caught immediately :-)
 */
public void config(HierarchicalConfiguration config) throws Exception {
    List<HierarchicalConfiguration> lconf;
    HttpAsyncRequester executor = null;
    BasicNIOConnPool connPool = null;
    ObjectPool<ByteBuffer> byteBufferPool = null;
    LinkedHashMap<String, TargetDescriptor> mappings = null;
    ConcurrentMap<String, HttpHost> hosts = null;

    handlerRegistry = new UriHttpAsyncRequestHandlerMapper();

    // Initialize the keystore (if one was specified)
    KeyStore keystore = null;
    char[] keypass = null;
    String keystoreUri = config.getString("keystore");
    if ((keystoreUri != null) && (keystoreUri.trim().length() > 0)) {
        Path keystorePath = Utils.MakePath(keystoreUri);
        if (!Files.exists(keystorePath))
            throw new ConfigurationException("Keystore does not exist.");
        if (Files.isDirectory(keystorePath))
            throw new ConfigurationException("Keystore is not a file");
        String storepass = config.getString("storepass");
        if ((storepass != null) && "null".equals(storepass))
            storepass = null;
        keystore = KeyStore.getInstance(KeyStore.getDefaultType());
        try (InputStream keyStoreStream = Files.newInputStream(keystorePath)) {
            keystore.load(keyStoreStream, storepass == null ? null : storepass.trim().toCharArray());
        } catch (IOException ex) {
            Logger.error("Unable to load https server keystore from " + keystoreUri);
            return;
        }
        keypass = config.getString("keypass").trim().toCharArray();
    }

    // Wire up the listening reactor
    lconf = config.configurationsAt("server");
    if ((lconf == null) || (lconf.size() != 1))
        throw new ConfigurationException("One (and only one) server configuration element is allowed.");
    else {
        Builder builder = IOReactorConfig.custom();
        builder.setIoThreadCount(ComputeReactorProcessors(config.getDouble("server[@cpu]", 0.667)));
        builder.setSoTimeout(config.getInt("server[@soTimeout]", 0));
        builder.setSoLinger(config.getInt("server[@soLinger]", -1));
        builder.setSoReuseAddress(true);
        builder.setTcpNoDelay(false);
        builder.setSelectInterval(100);

        IOReactorConfig rconfig = builder.build();
        Logger.info("Configuring server with options: " + rconfig.toString());
        listeningReactor = new DefaultListeningIOReactor(rconfig);

        lconf = config.configurationsAt("server.listen");
        InetSocketAddress addr;
        boolean hasNonWildcardSecure = false;
        LinkedHashMap<SocketAddress, SSLContext> addrSSLContext = new LinkedHashMap<SocketAddress, SSLContext>();
        if ((lconf == null) || (lconf.size() == 0)) {
            addr = new InetSocketAddress("127.0.0.1", 8080);
            ListenerEndpoint ep = listeningReactor.listen(addr);
            Logger.warn("Configured " + ep.getAddress());
        } else {
            TrustManager[] trustManagers = null;
            KeyManagerFactory kmf = null;
            // Create all the specified listeners.
            for (HierarchicalConfiguration hc : lconf) {
                String addrStr = hc.getString("[@address]");
                if ((addrStr == null) || (addrStr.length() == 0))
                    addrStr = "0.0.0.0";
                String alias = hc.getString("[@alias]");
                int port = hc.getInt("[@port]", alias != null ? 443 : 80);
                addr = new InetSocketAddress(addrStr, port);
                ListenerEndpoint ep = listeningReactor.listen(addr);
                String protocol = hc.containsKey("[@protocol]") ? hc.getString("[@protocol]") : null;
                Boolean secure = hc.containsKey("[@secure]") ? hc.getBoolean("[@secure]") : null;
                if ((alias != null) && (secure == null))
                    secure = true;
                if ((protocol != null) && (secure == null))
                    secure = true;
                if ((secure != null) && secure) {
                    if (protocol == null)
                        protocol = "TLS";
                    if (keystore == null)
                        throw new ConfigurationException(
                                "An https listening socket was requested, but no keystore was specified.");
                    if (kmf == null) {
                        kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
                        kmf.init(keystore, keypass);
                    }
                    // Are we going to trust all clients or just specific ones?
                    if (hc.getBoolean("[@trustAny]", true))
                        trustManagers = new TrustManager[] { new X509TrustAllManager() };
                    else {
                        TrustManagerFactory instance = TrustManagerFactory
                                .getInstance(TrustManagerFactory.getDefaultAlgorithm());
                        instance.init(keystore);
                        trustManagers = instance.getTrustManagers();
                    }
                    KeyManager[] keyManagers = kmf.getKeyManagers();
                    if (alias != null)
                        for (int i = 0; i < keyManagers.length; i++) {
                            if (keyManagers[i] instanceof X509ExtendedKeyManager)
                                keyManagers[i] = new PokerFaceKeyManager(alias,
                                        (X509ExtendedKeyManager) keyManagers[i]);
                        }
                    SSLContext sslCtx = SSLContext.getInstance(protocol);
                    sslCtx.init(keyManagers, trustManagers, new SecureRandom());
                    if (addr.getAddress().isAnyLocalAddress()) {
                        // This little optimization helps us respond faster for every connection as we don't have to extrapolate a local connection address to wild card.
                        for (Enumeration<NetworkInterface> en = NetworkInterface.getNetworkInterfaces(); en
                                .hasMoreElements();) {
                            NetworkInterface intf = en.nextElement();
                            for (Enumeration<InetAddress> enumIpAddr = intf.getInetAddresses(); enumIpAddr
                                    .hasMoreElements();) {
                                addr = new InetSocketAddress(enumIpAddr.nextElement(), port);
                                addrSSLContext.put(addr, sslCtx);
                            }
                        }
                    } else {
                        addrSSLContext.put(addr, sslCtx);
                        hasNonWildcardSecure = true;
                    }
                }
                Logger.warn("Configured " + (alias == null ? "" : (protocol + " on")) + ep.getAddress());
            }
        }
        // We will need an HTTP protocol processor for the incoming connections
        String serverAgent = config.getString("server.serverAgent", "PokerFace/" + Utils.Version);
        HttpProcessor inhttpproc = new ImmutableHttpProcessor(
                new HttpResponseInterceptor[] { new ResponseDateInterceptor(), new ResponseServer(serverAgent),
                        new ResponseContent(), new ResponseConnControl() });
        HttpAsyncService serviceHandler = new HttpAsyncService(inhttpproc, new DefaultConnectionReuseStrategy(),
                null, handlerRegistry, null) {
            public void exception(final NHttpServerConnection conn, final Exception cause) {
                Logger.warn(cause.getMessage());
                super.exception(conn, cause);
            }
        };
        if (addrSSLContext.size() > 0) {
            final SSLContext defaultCtx = addrSSLContext.values().iterator().next();
            final Map<SocketAddress, SSLContext> sslMap;
            if ((!hasNonWildcardSecure) || (addrSSLContext.size() == 1))
                sslMap = null;
            else
                sslMap = addrSSLContext;
            listeningDispatcher = new DefaultHttpServerIODispatch(serviceHandler,
                    new SSLNHttpServerConnectionFactory(defaultCtx, null, ConnectionConfig.DEFAULT) {
                        protected SSLIOSession createSSLIOSession(IOSession iosession, SSLContext sslcontext,
                                SSLSetupHandler sslHandler) {
                            SSLIOSession retVal;
                            SSLContext sktCtx = sslcontext;
                            if (sslMap != null) {
                                SocketAddress la = iosession.getLocalAddress();
                                if (la != null) {
                                    sktCtx = sslMap.get(la);
                                    if (sktCtx == null)
                                        sktCtx = sslcontext;
                                }
                                retVal = new SSLIOSession(iosession, SSLMode.SERVER, sktCtx, sslHandler);
                            } else
                                retVal = super.createSSLIOSession(iosession, sktCtx, sslHandler);
                            if (sktCtx != null)
                                retVal.setAttribute("com.bytelightning.opensource.pokerface.secure", true);
                            return retVal;
                        }
                    });
        } else
            listeningDispatcher = new DefaultHttpServerIODispatch(serviceHandler, ConnectionConfig.DEFAULT);
    }

    // Configure the httpclient reactor that will be used to do reverse proxying to the specified targets.
    lconf = config.configurationsAt("targets");
    if ((lconf != null) && (lconf.size() > 0)) {
        HierarchicalConfiguration conf = lconf.get(0);
        Builder builder = IOReactorConfig.custom();
        builder.setIoThreadCount(ComputeReactorProcessors(config.getDouble("targets[@cpu]", 0.667)));
        builder.setSoTimeout(conf.getInt("targets[@soTimeout]", 0));
        builder.setSoLinger(config.getInt("targets[@soLinger]", -1));
        builder.setConnectTimeout(conf.getInt("targets[@connectTimeout]", 0));
        builder.setSoReuseAddress(true);
        builder.setTcpNoDelay(false);
        connectingReactor = new DefaultConnectingIOReactor(builder.build());

        final int bufferSize = conf.getInt("targets[@bufferSize]", 1024) * 1024;
        byteBufferPool = new SoftReferenceObjectPool<ByteBuffer>(new BasePooledObjectFactory<ByteBuffer>() {
            @Override
            public ByteBuffer create() throws Exception {
                return ByteBuffer.allocateDirect(bufferSize);
            }

            @Override
            public PooledObject<ByteBuffer> wrap(ByteBuffer buffer) {
                return new DefaultPooledObject<ByteBuffer>(buffer);
            }
        });

        KeyManager[] keyManagers = null;
        TrustManager[] trustManagers = null;

        if (keystore != null) {
            KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
            kmf.init(keystore, keypass);
            keyManagers = kmf.getKeyManagers();
        }
        // Decide whether the httpclients will trust any remote target, or only specific ones.
        if (conf.getBoolean("targets[@trustAny]", false))
            trustManagers = new TrustManager[] { new X509TrustAllManager() };
        else if (keystore != null) {
            TrustManagerFactory instance = TrustManagerFactory
                    .getInstance(TrustManagerFactory.getDefaultAlgorithm());
            instance.init(keystore);
            trustManagers = instance.getTrustManagers();
        }
        SSLContext clientSSLContext = SSLContext.getInstance(conf.getString("targets[@protocol]", "TLS"));
        clientSSLContext.init(keyManagers, trustManagers, new SecureRandom());

        // Set up an SSL-capable connection pool for the httpclients.
        connPool = new BasicNIOConnPool(connectingReactor,
                new BasicNIOConnFactory(clientSSLContext, null, ConnectionConfig.DEFAULT),
                conf.getInt("targets[@connectTimeout]", 0));
        connPool.setMaxTotal(conf.getInt("targets[@connMaxTotal]", 1023));
        connPool.setDefaultMaxPerRoute(conf.getInt("targets[@connMaxPerRoute]", 1023));

        // Set up HTTP protocol processor for outgoing connections
        String userAgent = conf.getString("targets.userAgent", "PokerFace/" + Utils.Version);
        HttpProcessor outhttpproc = new ImmutableHttpProcessor(new HttpRequestInterceptor[] {
                new RequestContent(), new RequestTargetHost(), new RequestConnControl(),
                new RequestUserAgent(userAgent), new RequestExpectContinue(true) });
        executor = new HttpAsyncRequester(outhttpproc, new DefaultConnectionReuseStrategy());

        // Now set up all the configured targets.
        mappings = new LinkedHashMap<String, TargetDescriptor>();
        hosts = new ConcurrentHashMap<String, HttpHost>();
        String[] scheme = { null };
        String[] host = { null };
        int[] port = { 0 };
        String[] path = { null };
        int[] stripPrefixCount = { 0 };
        for (HierarchicalConfiguration targetConfig : conf.configurationsAt("target")) {
            String match = targetConfig.getString("[@pattern]");
            if ((match == null) || (match.trim().length() < 1)) {
                Logger.error("Unable to configure target;  Invalid url match pattern");
                continue;
            }
            String key = RequestForTargetConsumer.UriToTargetKey(targetConfig.getString("[@url]"), scheme, host,
                    port, path, stripPrefixCount);
            if (key == null) {
                Logger.error("Unable to configure target");
                continue;
            }
            HttpHost targetHost = hosts.get(key);
            if (targetHost == null) {
                targetHost = new HttpHost(host[0], port[0], scheme[0]);
                hosts.put(key, targetHost);
            }
            TargetDescriptor desc = new TargetDescriptor(targetHost, path[0], stripPrefixCount[0]);
            mappings.put(match, desc);
        }
        connectionDispatcher = new DefaultHttpClientIODispatch(new HttpAsyncRequestExecutor(),
                ConnectionConfig.DEFAULT);
    }
    // Allocate the script map, which will be populated by its own executor thread.
    if (config.containsKey("scripts.rootDirectory")) {
        Path tmp = Utils.MakePath(config.getProperty("scripts.rootDirectory"));
        if (!Files.exists(tmp))
            throw new FileNotFoundException("Scripts directory does not exist.");
        if (!Files.isDirectory(tmp))
            throw new FileNotFoundException("'scripts' path is not a directory.");
        scripts = new ConcurrentSkipListMap<String, ScriptObjectMirror>();
        boolean watch = config.getBoolean("scripts.dynamicWatch", false);
        List<Path> jsLibs;
        Object prop = config.getProperty("scripts.library");
        if (prop != null) {
            jsLibs = new ArrayList<Path>();
            if (prop instanceof Collection<?>) {
                @SuppressWarnings("unchecked")
                Collection<Object> oprop = (Collection<Object>) prop;
                for (Object obj : oprop)
                    jsLibs.add(Utils.MakePath(obj));
            } else {
                jsLibs.add(Utils.MakePath(prop));
            }
        } else
            jsLibs = null;

        lconf = config.configurationsAt("scripts.scriptConfig");
        if (lconf != null) {
            if (lconf.size() > 1)
                throw new ConfigurationException("Only one scriptConfig element is allowed.");
            if (lconf.size() == 0)
                lconf = null;
        }

        HierarchicalConfiguration scriptConfig;
        if (lconf == null)
            scriptConfig = new HierarchicalConfiguration();
        else
            scriptConfig = lconf.get(0);
        scriptConfig.setProperty("pokerface.scripts.rootDirectory", tmp.toString());

        configureScripts(jsLibs, scriptConfig, tmp, watch);
        if (watch)
            ScriptDirectoryWatcher = new DirectoryWatchService();
    }

    // Configure the static file directory (if any)
    Path staticFilesPath = null;
    if (config.containsKey("files.rootDirectory")) {
        Path tmp = Utils.MakePath(config.getProperty("files.rootDirectory"));
        if (!Files.exists(tmp))
            throw new FileNotFoundException("Files directory does not exist.");
        if (!Files.isDirectory(tmp))
            throw new FileNotFoundException("'files' path is not a directory.");
        staticFilesPath = tmp;
        List<HierarchicalConfiguration> mimeEntries = config.configurationsAt("files.mime-entry");
        if (mimeEntries != null) {
            for (HierarchicalConfiguration entry : mimeEntries) {
                entry.setDelimiterParsingDisabled(true);
                String type = entry.getString("[@type]", "").trim();
                if (type.length() == 0)
                    throw new ConfigurationException("Invalid mime type entry");
                String extensions = entry.getString("[@extensions]", "").trim();
                if (extensions.length() == 0)
                    throw new ConfigurationException("Invalid mime extensions for: " + type);
                ScriptHelperImpl.AddMimeEntry(type, extensions);
            }
        }
    }

    handlerRegistry.register("/*",
            new RequestHandler(executor, connPool, byteBufferPool, staticFilesPath, mappings,
                    scripts != null ? Collections.unmodifiableNavigableMap(scripts) : null,
                    config.getBoolean("scripts.allowScriptsToSpecifyDynamicHosts", false) ? hosts : null));
}
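
Note the hand-off at the end of config(): the request handler receives Collections.unmodifiableNavigableMap(scripts), a read-only view that still reflects every update the script watcher thread later makes to the underlying ConcurrentSkipListMap. A small sketch of that behavior (keys and values are invented):

import java.util.Collections;
import java.util.NavigableMap;
import java.util.concurrent.ConcurrentSkipListMap;

public class LiveViewSketch {
    public static void main(String[] args) {
        ConcurrentSkipListMap<String, String> scripts = new ConcurrentSkipListMap<>();
        NavigableMap<String, String> readOnly = Collections.unmodifiableNavigableMap(scripts);

        scripts.put("/api/echo", "echo.js");       // writer side keeps mutating
        System.out.println(readOnly.firstEntry()); // reader sees it: /api/echo=echo.js

        // readOnly.put("/x", "y"); // would throw UnsupportedOperationException
    }
}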

From source file:net.sourceforge.subsonic.service.SonosService.java

@Override
public ReorderContainerResult reorderContainer(String id, String from, int to, String updateId) {
    if (id.startsWith(ID_PLAYLIST_PREFIX)) {
        int playlistId = Integer.parseInt(id.replace(ID_PLAYLIST_PREFIX, ""));
        Playlist playlist = playlistService.getPlaylist(playlistId);
        if (playlist != null && playlist.getUsername().equals(getUsername())) {

            SortedMap<Integer, MediaFile> indexToSong = new ConcurrentSkipListMap<Integer, MediaFile>();
            List<MediaFile> songs = playlistService.getFilesInPlaylist(playlistId);
            for (int i = 0; i < songs.size(); i++) {
                indexToSong.put(i, songs.get(i));
            }

            List<MediaFile> movedSongs = new ArrayList<MediaFile>();
            for (Integer i : parsePlaylistIndices(from)) {
                movedSongs.add(indexToSong.remove(i));
            }

            List<MediaFile> updatedSongs = new ArrayList<MediaFile>();
            updatedSongs.addAll(indexToSong.headMap(to).values());
            updatedSongs.addAll(movedSongs);
            updatedSongs.addAll(indexToSong.tailMap(to).values());

            playlistService.setFilesInPlaylist(playlistId, updatedSongs);
        }
    }
    return new ReorderContainerResult();
}
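
The reorder logic leans on the NavigableMap view methods: remove the moved entries, then stitch headMap(to), the moved entries, and tailMap(to) back together. A self-contained sketch with invented sample data:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ConcurrentSkipListMap;

public class ReorderSketch {
    public static void main(String[] args) {
        ConcurrentSkipListMap<Integer, String> indexToSong = new ConcurrentSkipListMap<>();
        List<String> songs = Arrays.asList("a", "b", "c", "d", "e");
        for (int i = 0; i < songs.size(); i++) {
            indexToSong.put(i, songs.get(i));
        }

        int to = 1;            // insertion point
        int[] from = { 3, 4 }; // indices being moved
        List<String> moved = new ArrayList<>();
        for (int i : from) {
            moved.add(indexToSong.remove(i));
        }

        // headMap(to) = entries before the insertion point, tailMap(to) = the rest
        List<String> reordered = new ArrayList<>(indexToSong.headMap(to).values());
        reordered.addAll(moved);
        reordered.addAll(indexToSong.tailMap(to).values());

        System.out.println(reordered); // [a, d, e, b, c]
    }
}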

From source file:org.apache.hadoop.hbase.client.coprocessor.TimeseriesAggregationClient.java

/**
 * This is the client side interface/handle for calling the average method for a given cf-cq
 * combination. One more call in the stack was necessary because the return type should be a
 * decimal value, irrespective of what the ColumnInterpreter says. So this method collects the
 * necessary parameters to compute the average and returns the double value.
 * @param table
 * @param ci
 * @param scan
 * @return per-interval averages as a ConcurrentSkipListMap<Long, Double>
 * @throws Throwable
 */
public <R, S, P extends Message, Q extends Message, T extends Message> ConcurrentSkipListMap<Long, Double> avg(
        final HTable table, final ColumnInterpreter<R, S, P, Q, T> ci, Scan scan) throws Throwable {
    ConcurrentSkipListMap<Long, Pair<S, Long>> p = getAvgArgs(table, ci, scan);
    ConcurrentSkipListMap<Long, Double> avg = new ConcurrentSkipListMap<Long, Double>();
    for (Map.Entry<Long, Pair<S, Long>> entry : p.entrySet()) {
        avg.put(entry.getKey(), ci.divideForAvg(entry.getValue().getFirst(), entry.getValue().getSecond()));
    }
    return avg;
}