Example usage for org.apache.cassandra.dht Range Range

List of usage examples for org.apache.cassandra.dht Range Range

Introduction

On this page you can find an example usage for org.apache.cassandra.dht Range Range.

Prototype

public Range(T left, T right) 

Source Link

Usage

From source file:com.spotify.hdfs2cass.cassandra.thrift.ExternalSSTableLoaderClient.java

License:Apache License

/**
 * Initializes this client for the given keyspace by contacting the configured
 * Cassandra hosts over Thrift.
 *
 * <p>Resolves every entry of the comma-separated {@code hostlist}, then tries the
 * hosts one by one: the first host that successfully provides the ring
 * description, keyspace definitions and partitioner wins. A failure on one host
 * no longer aborts the whole initialization (the original threw on the first
 * failing host, defeating the purpose of iterating the host set); instead the
 * next host is tried, and an exception is thrown only when every host has failed.
 *
 * @param keyspace the keyspace to set and describe
 * @throws RuntimeException       if a configured host name cannot be resolved
 * @throws CrunchRuntimeException if no host could provide the ring/schema information
 */
public void init(String keyspace) {
    Set<InetAddress> hosts = Sets.newHashSet();
    String[] nodes = hostlist.split(",");
    for (String node : nodes) {
        try {
            hosts.add(InetAddress.getByName(node));
        } catch (UnknownHostException e) {
            throw new RuntimeException(e);
        }
    }

    Exception lastFailure = null;
    for (InetAddress host : hosts) {
        try {
            Cassandra.Client client = createThriftClient(host.getHostAddress(), rpcPort);

            // Log in; credentials are optional.
            client.set_keyspace(keyspace);
            if (username != null) {
                Map<String, String> creds = Maps.newHashMap();
                creds.put(IAuthenticator.USERNAME_KEY, username);
                creds.put(IAuthenticator.PASSWORD_KEY, password);
                AuthenticationRequest authRequest = new AuthenticationRequest(creds);
                client.login(authRequest);
            }

            List<TokenRange> tokenRanges = client.describe_ring(keyspace);
            List<KsDef> ksDefs = client.describe_keyspaces();

            setPartitioner(client.describe_partitioner());
            Token.TokenFactory tkFactory = getPartitioner().getTokenFactory();

            // Register every token range against each endpoint that owns it.
            for (TokenRange tr : tokenRanges) {
                Range<Token> range = new Range<>(tkFactory.fromString(tr.start_token),
                        tkFactory.fromString(tr.end_token));
                for (String ep : tr.endpoints) {
                    addRangeForEndpoint(range, InetAddress.getByName(ep));
                }
            }

            // Cache the column-family metadata of every keyspace.
            for (KsDef ksDef : ksDefs) {
                Map<String, CFMetaData> cfs = new HashMap<>(ksDef.cf_defs.size());
                for (CfDef cfDef : ksDef.cf_defs) {
                    cfs.put(cfDef.name, CFMetaData.fromThrift(cfDef));
                }
                knownCfs.put(ksDef.name, cfs);
            }
            return; // one host answered; initialization complete
        } catch (Exception e) {
            lastFailure = e; // remember the failure and try the next host
        }
    }
    // Preserve original behavior for an empty host set (silent return); only
    // throw when at least one host was actually tried and all of them failed.
    if (lastFailure != null) {
        throw new CrunchRuntimeException("Could not retrieve endpoint ranges: ", lastFailure);
    }
}

From source file:com.tuplejump.stargate.cassandra.SearchSupport.java

License:Apache License

/**
 * Runs a Lucene-backed search for the given filter and returns the matching rows.
 *
 * <p>The filter's key range is translated into a {@code Range<Token>} so the
 * searcher callback can report whether the query targets a single token or the
 * full ring (the degenerate single-token range anchored at the partitioner's
 * minimum token).
 *
 * @param filter the Cassandra extended filter driving the scan
 * @param search the parsed search, or {@code null} for an empty result
 * @return the rows produced by the search function
 */
protected List<Row> getRows(final ExtendedFilter filter, final Search search) {
    final SearchSupport self = this;
    AbstractBounds<RowPosition> bounds = filter.dataRange.keyRange();
    final Range<Token> tokenRange = new Range<>(bounds.left.getToken(), bounds.right.getToken());
    final boolean singleToken = tokenRange.left.equals(tokenRange.right);
    final boolean fullRange = singleToken && baseCfs.partitioner.getMinimumToken().equals(tokenRange.left);

    SearcherCallback<List<Row>> callback = new SearcherCallback<List<Row>>() {
        @Override
        public List<Row> doWithSearcher(org.apache.lucene.search.IndexSearcher searcher) throws Exception {
            Utils.SimpleTimer totalTimer = Utils.getStartedTimer(logger);
            List<Row> rows;
            if (search == null) {
                rows = new ArrayList<>();
            } else {
                Utils.SimpleTimer luceneTimer = Utils.getStartedTimer(SearchSupport.logger);
                Function function = search.function(options);
                Query query = search.query(options);
                int resultsLimit = searcher.getIndexReader().maxDoc();
                if (function.shouldLimit()) {
                    // maxDoc() is 0 on an empty index; Lucene needs a positive limit.
                    if (resultsLimit == 0) {
                        resultsLimit = 1;
                    }
                    resultsLimit = Math.min(filter.currentLimit() + 1, resultsLimit);
                }
                function.init(options);
                IndexEntryCollector collector = new IndexEntryCollector(function, search, options,
                        resultsLimit);
                searcher.search(query, collector);
                luceneTimer.endLogTime("TopDocs search for [" + collector.getTotalHits() + "] results ");
                if (SearchSupport.logger.isDebugEnabled()) {
                    SearchSupport.logger.debug(String.format("Search results [%s]", collector.getTotalHits()));
                }
                // Scores are irrelevant for aggregate functions; otherwise honour the search flag.
                boolean showScore = !(function instanceof AggregateFunction) && search.isShowScore();
                RowScanner scanner = new RowScanner(self, baseCfs, filter, collector, showScore);
                Utils.SimpleTimer aggTimer = Utils.getStartedTimer(SearchSupport.logger);
                rows = function.process(scanner, customColumnFactory, baseCfs, currentIndex);
                aggTimer.endLogTime("Aggregation [" + collector.getTotalHits() + "] results");
            }
            totalTimer.endLogTime("Search with results [" + rows.size() + "] ");
            return rows;

        }

        @Override
        public Range<Token> filterRange() {
            return tokenRange;
        }

        @Override
        public boolean isSingleToken() {
            return singleToken;
        }

        @Override
        public boolean isFullRange() {
            return fullRange;
        }
    };

    return currentIndex.search(callback);
}

From source file:org.elassandra.TokenRangesBisetCacheTests.java

License:Apache License

/**
 * Exercises the token_ranges_bitset_cache index setting: loads N rows, forces a
 * single large segment, then issues token-range-restricted searches and checks
 * that two complementary half-ring ranges partition the full-ring hit count.
 */
@Test
public void tokenBitsetTest() throws Exception {
    // Single-DC keyspace with RF=1 so every row lives on this node.
    process(ConsistencyLevel.ONE,
            "CREATE KEYSPACE IF NOT EXISTS test WITH replication={ 'class':'NetworkTopologyStrategy', 'DC1':'1' }");
    process(ConsistencyLevel.ONE, "CREATE TABLE IF NOT EXISTS test.t1 ( a int,b bigint, primary key (a) )");

    // Auto-discover the CQL schema into the ES mapping and enable both the
    // token-range bitset cache and the Lucene query cache.
    XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("t1")
            .field("discover", ".*").endObject().endObject();
    createIndex("test", Settings.builder().put("index.token_ranges_bitset_cache", true)
            .put("index.queries.cache.enabled", true).build(), "t1", mapping);
    ensureGreen("test");

    // N is a test-class constant — presumably > 10000 so the merged segment
    // passes the size check below; TODO confirm against the class definition.
    for (int j = 0; j < N; j++)
        process(ConsistencyLevel.ONE, "insert into test.t1 (a,b) VALUES (?,?)", j,
                ESSingleNodeTestCase.randomLong());

    // ensure we have at least one segment > 10k docs.
    client().admin().indices().prepareForceMerge("test").setMaxNumSegments(1).setFlush(true).get();
    boolean hasOneBigSegment = false;
    for (IndexShardSegments iss : client().admin().indices().prepareSegments("test").get().getIndices()
            .get("test")) {
        for (ShardSegments ss : iss.getShards()) {
            for (Segment seg : ss.getSegments()) {
                if (seg.getNumDocs() > 10000)
                    hasOneBigSegment = true;
            }
        }
    }
    assertThat(hasOneBigSegment, equalTo(true));

    // force caching after 20 requests.
    // Repeat the near-full-ring query enough times for the bitset cache to kick in.
    long nbHits = 0;
    for (int i = 0; i < 30; i++)
        nbHits = client().prepareSearch().setIndices("test").setTypes("t1")
                .setQuery(QueryBuilders.rangeQuery("b").from(0).to(Long.MAX_VALUE))
                .setTokenRanges(Collections.singleton(
                        new Range<Token>(new LongToken(Long.MIN_VALUE + 1), new LongToken(Long.MAX_VALUE - 1))))
                .get().getHits().getTotalHits();

    // Upper half of the token ring: (0, MAX-1].
    long upper = client().prepareSearch().setIndices("test").setTypes("t1")
            .setQuery(QueryBuilders.rangeQuery("b").from(0).to(Long.MAX_VALUE))
            .setTokenRanges(Collections
                    .singleton(new Range<Token>(new LongToken(0), new LongToken(Long.MAX_VALUE - 1))))
            .get().getHits().getTotalHits();
    assertThat(upper, lessThan(nbHits));

    // Lower half of the token ring: (MIN+1, 0].
    long lower = client().prepareSearch().setIndices("test").setTypes("t1")
            .setQuery(QueryBuilders.rangeQuery("b").from(0).to(Long.MAX_VALUE))
            .setTokenRanges(Collections
                    .singleton(new Range<Token>(new LongToken(Long.MIN_VALUE + 1), new LongToken(0))))
            .get().getHits().getTotalHits();
    assertThat(lower, lessThan(nbHits));

    // The two halves are disjoint and together cover the full range.
    assertThat(lower + upper, equalTo(nbHits));
    assertThat(client().prepareSearch().setIndices("test").setTypes("t1")
            .setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits(), equalTo(N));
}

From source file:org.elasticsearch.action.percolate.PercolateRequest.java

License:Apache License

/**
 * Deserializes this percolate request from the stream, reading fields in the
 * exact order the matching writeTo() emitted them.
 *
 * @param in the stream to read from
 * @throws IOException on stream failure
 */
@Override
public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    startTime = in.readVLong();
    documentType = in.readString();
    routing = in.readOptionalString();
    preference = in.readOptionalString();
    source = in.readBytesReference();
    docSource = in.readBytesReference();
    if (in.readBoolean()) {
        getRequest = new GetRequest(null);
        getRequest.readFrom(in);
    }
    onlyCount = in.readBoolean();

    // Optional trailing token ranges, sent as a flat array of
    // [left0, right0, left1, right1, ...] pairs.
    if (in.available() > 0 && in.readBoolean()) {
        Object[] flat = (Object[]) in.readGenericValue();
        this.tokenRanges = new ArrayList<Range<Token>>(flat.length / 2);
        int idx = 0;
        while (idx < flat.length) {
            Token left = (Token) flat[idx++];
            Token right = (Token) flat[idx++];
            this.tokenRanges.add(new Range<Token>(left, right));
        }
    }
}

From source file:org.elasticsearch.action.percolate.PercolateShardRequest.java

License:Apache License

/**
 * Deserializes this shard-level percolate request, mirroring the field order of
 * the matching writeTo().
 *
 * @param in the stream to read from
 * @throws IOException on stream failure
 */
@Override
public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    documentType = in.readString();
    source = in.readBytesReference();
    docSource = in.readBytesReference();
    onlyCount = in.readBoolean();
    numberOfShards = in.readVInt();
    startTime = in.readLong(); // plain long, not vlong: this value can be negative

    // Token ranges travel as a flat array of [left0, right0, left1, right1, ...].
    Object[] flat = (Object[]) in.readGenericValue();
    this.tokenRanges = new ArrayList<Range<Token>>(flat.length / 2);
    int idx = 0;
    while (idx < flat.length) {
        Token left = (Token) flat[idx++];
        Token right = (Token) flat[idx++];
        this.tokenRanges.add(new Range<Token>(left, right));
    }
}

From source file:org.elasticsearch.action.search.SearchRequest.java

License:Apache License

/**
 * Deserializes this search request from the stream, reading fields in the exact
 * order the matching writeTo() emitted them.
 *
 * @param in the stream to read from
 * @throws IOException on stream failure
 */
@Override
public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    searchType = SearchType.fromId(in.readByte());

    int indexCount = in.readVInt();
    indices = new String[indexCount];
    for (int pos = 0; pos < indexCount; pos++) {
        indices[pos] = in.readString();
    }

    routing = in.readOptionalString();
    preference = in.readOptionalString();

    if (in.readBoolean()) {
        scroll = readScroll(in);
    }

    source = in.readBytesReference();
    extraSource = in.readBytesReference();

    types = in.readStringArray();
    indicesOptions = IndicesOptions.readIndicesOptions(in);

    templateSource = in.readBytesReference();
    if (in.readBoolean()) {
        template = Template.readTemplate(in);
    }
    requestCache = in.readOptionalBoolean();

    // Optional trailing token ranges, sent as a flat array of
    // [left0, right0, left1, right1, ...] pairs.
    if (in.available() > 0 && in.readBoolean()) {
        Object[] flat = (Object[]) in.readGenericValue();
        this.tokenRanges = new ArrayList<Range<Token>>(flat.length / 2);
        int idx = 0;
        while (idx < flat.length) {
            Token left = (Token) flat[idx++];
            Token right = (Token) flat[idx++];
            this.tokenRanges.add(new Range<Token>(left, right));
        }
    }
}

From source file:org.elasticsearch.rest.RestRequest.java

License:Apache License

/**
 * Parses the request parameter {@code key} as a collection of Cassandra token
 * ranges.
 *
 * <p>The value is split on any of the delimiter characters {@code { [ ( , ) ] }},
 * and the resulting tokens are consumed pairwise as (left, right). An odd number
 * of tokens makes {@code nextToken()} throw {@code NoSuchElementException},
 * signalling a malformed parameter.
 *
 * @param key the request parameter name
 * @return the parsed ranges, or {@code null} when the parameter is absent
 *         (callers distinguish "absent" from "empty")
 */
public Collection<Range<Token>> paramsAsTokenRanges(String key) {
    String value = param(key);
    if (value == null) {
        return null; // parameter not supplied
    }
    Collection<Range<Token>> tokenRanges = new ArrayList<Range<Token>>();
    Token.TokenFactory tokenFactory = StorageService.instance.getPartitioner().getTokenFactory();
    StringTokenizer stk = new StringTokenizer(value, "{[(,)]}");
    while (stk.hasMoreTokens()) {
        Token leftToken = tokenFactory.fromString(stk.nextToken());
        Token rightToken = tokenFactory.fromString(stk.nextToken());
        // Parameterized Range<Token> instead of the original raw Range,
        // which produced an unchecked-conversion warning.
        tokenRanges.add(new Range<Token>(leftToken, rightToken));
    }
    return tokenRanges;
}

From source file:org.elasticsearch.search.internal.ShardSearchLocalRequest.java

License:Apache License

@SuppressWarnings("unchecked")
protected void innerReadFrom(StreamInput in) throws IOException {
    index = in.readString();//from   w ww .  j a v a 2s. co  m
    shardId = in.readVInt();
    searchType = SearchType.fromId(in.readByte());
    numberOfShards = in.readVInt();
    if (in.readBoolean()) {
        scroll = readScroll(in);
    }

    source = in.readBytesReference();
    extraSource = in.readBytesReference();

    types = in.readStringArray();
    filteringAliases = in.readStringArray();
    nowInMillis = in.readVLong();

    templateSource = in.readBytesReference();
    if (in.readBoolean()) {
        template = Template.readTemplate(in);
    }
    requestCache = in.readOptionalBoolean();

    // read tokenRanges
    Object[] tokens = (Object[]) in.readGenericValue();
    this.tokenRanges = new ArrayList<Range<Token>>(tokens.length / 2);
    for (int i = 0; i < tokens.length;) {
        Range<Token> range = new Range<Token>((Token) tokens[i++], (Token) tokens[i++]);
        this.tokenRanges.add(range);
    }
}