Example usage for org.apache.lucene.util English intToEnglish

List of usage examples for org.apache.lucene.util English intToEnglish

Introduction

On this page you can find example usages of org.apache.lucene.util.English#intToEnglish.

Prototype

public static String intToEnglish(int i) 

Source Link

Usage

From source file:com.rocana.lucene.codec.v1.TestBlockPostingsFormat3.java

License:Apache License

/**
 * Indexes MAXDOC documents across eight fields covering every index-options
 * combination (docs-only through offsets, with fixed/variable payloads),
 * verifies the index, force-merges to a single segment, and verifies again.
 */
public void test() throws Exception {
    Directory dir = newDirectory();
    Analyzer analyzer = new Analyzer(Analyzer.PER_FIELD_REUSE_STRATEGY) {
        @Override
        protected TokenStreamComponents createComponents(String fieldName) {
            Tokenizer tokenizer = new MockTokenizer();
            // The field name encodes which payload filter (if any) to apply.
            if (fieldName.contains("payloadsFixed")) {
                return new TokenStreamComponents(tokenizer,
                        new MockFixedLengthPayloadFilter(new Random(0), tokenizer, 1));
            }
            if (fieldName.contains("payloadsVariable")) {
                return new TokenStreamComponents(tokenizer,
                        new MockVariableLengthPayloadFilter(new Random(0), tokenizer));
            }
            return new TokenStreamComponents(tokenizer);
        }
    };
    IndexWriterConfig iwc = newIndexWriterConfig(analyzer);
    iwc.setCodec(TestUtil.alwaysPostingsFormat(new RocanaLucene50PostingsFormat()));
    // TODO we could actually add more fields implemented with different PFs
    // or, just put this test into the usual rotation?
    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
    Document doc = new Document();

    // Docs-only field type; term vectors enabled so checkIndex can cross-check.
    FieldType docsOnlyType = new FieldType(TextField.TYPE_NOT_STORED);
    docsOnlyType.setStoreTermVectors(true);
    docsOnlyType.setIndexOptions(IndexOptions.DOCS);

    // Docs+freqs field type, likewise with term vectors for the cross-check.
    FieldType docsAndFreqsType = new FieldType(TextField.TYPE_NOT_STORED);
    docsAndFreqsType.setStoreTermVectors(true);
    docsAndFreqsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS);

    // Full positions/payloads with every term-vector option enabled.
    FieldType positionsType = new FieldType(TextField.TYPE_NOT_STORED);
    positionsType.setStoreTermVectors(true);
    positionsType.setStoreTermVectorPositions(true);
    positionsType.setStoreTermVectorOffsets(true);
    positionsType.setStoreTermVectorPayloads(true);

    // Same as positionsType but additionally indexes offsets.
    FieldType offsetsType = new FieldType(positionsType);
    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);

    Field[] fields = {
            new Field("field1docs", "", docsOnlyType),
            new Field("field2freqs", "", docsAndFreqsType),
            new Field("field3positions", "", positionsType),
            new Field("field4offsets", "", offsetsType),
            new Field("field5payloadsFixed", "", positionsType),
            new Field("field6payloadsVariable", "", positionsType),
            new Field("field7payloadsFixedOffsets", "", offsetsType),
            new Field("field8payloadsVariableOffsets", "", offsetsType)
    };
    for (Field field : fields) {
        doc.add(field);
    }

    // Every field of a document carries the same text: the doc id, a very
    // common token, the id spelled out in English, and a random tail.
    for (int i = 0; i < MAXDOC; i++) {
        String stringValue = Integer.toString(i) + " verycommon " + English.intToEnglish(i).replace('-', ' ')
                + " " + TestUtil.randomSimpleString(random());
        for (Field field : fields) {
            field.setStringValue(stringValue);
        }
        iw.addDocument(doc);
    }
    iw.close();
    verify(dir);
    TestUtil.checkIndex(dir); // for some extra coverage, checkIndex before we forceMerge

    // Reopen in APPEND mode, collapse to one segment, and verify again.
    iwc = newIndexWriterConfig(analyzer);
    iwc.setCodec(TestUtil.alwaysPostingsFormat(new RocanaLucene50PostingsFormat()));
    iwc.setOpenMode(OpenMode.APPEND);
    IndexWriter iw2 = new IndexWriter(dir, iwc);
    iw2.forceMerge(1);
    iw2.close();
    verify(dir);
    dir.close();
}

From source file:com.sindicetech.siren.search.spans.TestNodeSpansBasics.java

License:Open Source License

/**
 * Builds {@code n} mock SIREn documents; document {@code i} holds one token
 * per word of {@code i} spelled out in English, attached to node (1, j).
 *
 * @param n number of documents to generate
 * @return the generated documents, indexed by the integer they spell out
 */
private MockSirenDocument[] generateDocuments(int n) {
    MockSirenDocument[] docs = new MockSirenDocument[n];
    for (int i = 0; i < n; i++) {
        // Split on runs of non-word characters ("[\\W]+" rather than "[\\W]"),
        // matching the sibling generateDocuments(start, end): intToEnglish can
        // emit consecutive separators (e.g. ", "), and splitting on a single
        // \W would then produce empty tokens.
        String values[] = English.intToEnglish(i).trim().split("[\\W]+");
        MockSirenToken[] tokens = new MockSirenToken[values.length];
        for (int j = 0; j < values.length; j++) {
            tokens[j] = token(values[j], node(1, j));
        }
        docs[i] = doc(tokens);
    }
    return docs;
}

From source file:com.sindicetech.siren.search.spans.TestTermSpansBasics.java

License:Open Source License

/**
 * Generates one mock SIREn document per integer in the inclusive range
 * [start, end]; each document contains the English words of its integer,
 * with every token attached to node(1).
 *
 * @param start first integer (inclusive)
 * @param end   last integer (inclusive); must be >= start
 * @return documents ordered from start to end
 */
private MockSirenDocument[] generateDocuments(int start, int end) {
    assertTrue(end >= start);
    final int count = end - start + 1;
    final MockSirenDocument[] documents = new MockSirenDocument[count];
    for (int value = start; value <= end; value++) {
        final String[] words = English.intToEnglish(value).trim().split("[\\W]+");
        final MockSirenToken[] tokens = new MockSirenToken[words.length];
        for (int w = 0; w < words.length; w++) {
            tokens[w] = token(words[w], node(1));
        }
        documents[value - start] = doc(tokens);
    }
    return documents;
}

From source file:org.apache.solr.BasicFunctionalityTest.java

License:Apache License

/**
 * Deep-paging regression test: runs a "gold" query that fetches the top 50
 * hits at once, then pages through the same results using
 * CommonParams.PAGEDOC / PAGESCORE, asserting that the single doc returned
 * for each page matches the gold hit at that rank.
 */
@Ignore("See SOLR-1726")
@Test
public void testDeepPaging() throws Exception {
    // Index 1000 docs whose foo_t field is the doc id spelled out in English.
    for (int i = 0; i < 1000; i++) {
        assertU(adoc("id", String.valueOf(i), "foo_t", English.intToEnglish(i)));
    }
    assertU(commit());
    SolrQueryRequest goldReq = null;
    try {
        // Gold standard: top 50 hits for "one" fetched in a single request.
        goldReq = req("q", "foo_t:one", "rows", "50", "fl", "docid, score");
        SolrQueryResponse gold = h.queryAndResponse("standard", goldReq);
        ResultContext response = (ResultContext) gold.getValues().get("response");
        assertQ("page: " + 0 + " failed",
                req("q", "foo_t:one", "rows", "10", CommonParams.QT, "standard", "fl", "[docid], score"),
                "*[count(//doc)=10]");
        // Pull the doc id and score of the last hit on the first page
        // (rank 9) straight out of the gold DocList.
        DocIterator iterator = response.docs.subset(9, 1).iterator();
        int lastDoc = iterator.nextDoc();
        float lastScore = iterator.score();
        for (int i = 1; i < 5; i++) {
            // Page through some results: the expected hit for page i is the
            // single gold hit at rank i*10.
            DocList subset = response.docs.subset(i * 10, 1);
            iterator = subset.iterator();
            int compareDoc = iterator.nextDoc();
            float compareScore = iterator.score();
            assertQ("page: " + i + " failed",
                    req("q", "foo_t:one", CommonParams.QT, "standard", "fl", "[docid], score", "start",
                            String.valueOf(i * 10), "rows", "1", //only get one doc, and then compare it to gold
                            CommonParams.PAGEDOC, String.valueOf(lastDoc), CommonParams.PAGESCORE,
                            String.valueOf(lastScore)),
                    "*[count(//doc)=1]", "//float[@name='score'][.='" + compareScore + "']",
                    "//int[@name='[docid]'][.='" + compareDoc + "']");
            // The boundary doc/score of this page seeds the next page request.
            lastScore = compareScore;
            lastDoc = compareDoc;

        }
    } finally {
        if (goldReq != null) {
            goldReq.close();
        }
    }
}

From source file:org.apache.solr.search.TestQueryWrapperFilter.java

License:Apache License

/**
 * Indexes 1000 single-field documents (each holding a distinct English
 * number) and checks that a QueryWrapperFilter around a TermQuery for each
 * number matches exactly one document.
 */
public void testThousandDocuments() throws Exception {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    for (int docId = 0; docId < 1000; docId++) {
        Document document = new Document();
        document.add(newStringField("field", English.intToEnglish(docId), Field.Store.NO));
        writer.addDocument(document);
    }

    IndexReader reader = writer.getReader();
    writer.close();

    IndexSearcher searcher = newSearcher(reader);

    // Every distinct English number must hit exactly one document.
    for (int docId = 0; docId < 1000; docId++) {
        Term term = new Term("field", English.intToEnglish(docId));
        QueryWrapperFilter filter = new QueryWrapperFilter(new TermQuery(term));
        TopDocs hits = searcher.search(filter, 10);
        assertEquals(1, hits.totalHits);
    }

    reader.close();
    dir.close();
}

From source file:org.elasticsearch.action.bench.BenchmarkIntegrationTest.java

License:Apache License

/**
 * Creates between 1 and 5 indices, each populated with 1-100 randomly
 * generated documents, then flushes and refreshes the cluster.
 *
 * @return the names of the indices that were created
 */
private String[] randomData() throws Exception {

    final int indexCount = scaledRandomIntBetween(1, 5);
    final String[] indexNames = new String[indexCount];

    for (int index = 0; index < indexCount; index++) {
        indexNames[index] = INDEX_PREFIX + index;
        final int docCount = scaledRandomIntBetween(1, 100);
        final IndexRequestBuilder[] requests = new IndexRequestBuilder[docCount];

        // Each doc gets random primitive fields plus its id in English.
        for (int docId = 0; docId < docCount; docId++) {
            requests[docId] = client().prepareIndex(indexNames[index], INDEX_TYPE).setSource(
                    BenchmarkTestUtil.TestIndexField.INT_FIELD.toString(), randomInt(),
                    BenchmarkTestUtil.TestIndexField.FLOAT_FIELD.toString(), randomFloat(),
                    BenchmarkTestUtil.TestIndexField.BOOLEAN_FIELD.toString(), randomBoolean(),
                    BenchmarkTestUtil.TestIndexField.STRING_FIELD.toString(), English.intToEnglish(docId));
        }

        indexRandom(true, requests);
    }

    flushAndRefresh();
    return indexNames;
}

From source file:org.elasticsearch.bwcompat.BasicBackwardsCompatibilityIT.java

License:Apache License

/**
 * Basic test using Index &amp; Realtime Get with external versioning. This test
 * ensures routing works correctly across versions.
 */
public void testExternalVersion() throws Exception {
    createIndex("test");
    final boolean routing = randomBoolean();
    int numDocs = randomIntBetween(10, 20);
    for (int i = 0; i < numDocs; i++) {
        String id = Integer.toString(i);
        String routingKey = routing ? randomRealisticUnicodeOfLength(10) : null;
        final long version = randomIntBetween(0, Integer.MAX_VALUE);
        client().prepareIndex("test", "type1", id).setRouting(routingKey).setVersion(version)
                .setVersionType(VersionType.EXTERNAL).setSource("field1", English.intToEnglish(i)).get();
        GetResponse get = client().prepareGet("test", "type1", id).setRouting(routingKey).setVersion(version)
                .get();
        assertThat("Document with ID " + id + " should exist but doesn't", get.isExists(), is(true));
        assertThat(get.getVersion(), equalTo(version));
        // External versioning only accepts strictly increasing versions, so the
        // delta must be at least 1: a zero delta would make the re-index below
        // fail with a version conflict.
        final long nextVersion = version + randomIntBetween(1, Integer.MAX_VALUE);
        client().prepareIndex("test", "type1", id).setRouting(routingKey).setVersion(nextVersion)
                .setVersionType(VersionType.EXTERNAL).setSource("field1", English.intToEnglish(i)).get();
        get = client().prepareGet("test", "type1", id).setRouting(routingKey).setVersion(nextVersion).get();
        assertThat("Document with ID " + id + " should exist but doesn't", get.isExists(), is(true));
        assertThat(get.getVersion(), equalTo(nextVersion));
    }
}

From source file:org.elasticsearch.bwcompat.BasicBackwardsCompatibilityIT.java

License:Apache License

/**
 * Basic test using Index &amp; Realtime Get with internal versioning. This test
 * ensures routing works correctly across versions.
 */
public void testInternalVersion() throws Exception {
    createIndex("test");
    final boolean useRouting = randomBoolean();
    final int numDocs = randomIntBetween(10, 20);
    for (int i = 0; i < numDocs; i++) {
        final String routingKey = useRouting ? randomRealisticUnicodeOfLength(10) : null;
        final String id = Integer.toString(i);
        // First index must report "created" and leave the doc at version 1.
        assertThat(id, client().prepareIndex("test", "type1", id).setRouting(routingKey)
                .setSource("field1", English.intToEnglish(i)).get().isCreated(), is(true));
        GetResponse get = client().prepareGet("test", "type1", id).setRouting(routingKey).setVersion(1).get();
        assertThat("Document with ID " + id + " should exist but doesn't", get.isExists(), is(true));
        assertThat(get.getVersion(), equalTo(1L));
        // Re-indexing the same id bumps the internal version to 2.
        client().prepareIndex("test", "type1", id).setRouting(routingKey)
                .setSource("field1", English.intToEnglish(i)).execute().actionGet();
        get = client().prepareGet("test", "type1", id).setRouting(routingKey).setVersion(2).get();
        assertThat("Document with ID " + id + " should exist but doesn't", get.isExists(), is(true));
        assertThat(get.getVersion(), equalTo(2L));
    }

    assertVersionCreated(compatibilityVersion(), "test");
}

From source file:org.elasticsearch.bwcompat.BasicBackwardsCompatibilityIT.java

License:Apache License

/**
 * Very basic bw compat test with a mixed version cluster: random indexing
 * followed by lookup of each document by ID via a term query.
 */
public void testIndexAndSearch() throws Exception {
    createIndex("test");
    final int numDocs = randomIntBetween(10, 20);
    final List<IndexRequestBuilder> requests = new ArrayList<>();
    for (int i = 0; i < numDocs; i++) {
        final String id = Integer.toString(i);
        requests.add(client().prepareIndex("test", "type1", id).setSource("field1", English.intToEnglish(i),
                "the_id", id));
    }
    indexRandom(true, requests);
    // Every doc must be retrievable through a term query on its stored id.
    for (int i = 0; i < numDocs; i++) {
        final String id = Integer.toString(i);
        assertHitCount(client().prepareSearch().setQuery(QueryBuilders.termQuery("the_id", id)).get(), 1);
    }
    assertVersionCreated(compatibilityVersion(), "test");
}

From source file:org.elasticsearch.bwcompat.BasicBackwardsCompatibilityIT.java

License:Apache License

/**
 * Indexes docs while all shards sit on the old-version nodes, then relocates
 * the index (either to new nodes only, or across all nodes) and repeatedly
 * searches to assert that no documents were lost during relocation.
 */
public void testRecoverFromPreviousVersion() throws ExecutionException, InterruptedException {
    // Make sure there is at least one new-version data node to relocate to.
    if (backwardsCluster().numNewDataNodes() == 0) {
        backwardsCluster().startNewNode();
    }
    // Exclude new nodes so all shards start out on the old (backwards) nodes.
    assertAcked(prepareCreate("test").setSettings(Settings.builder()
            .put("index.routing.allocation.exclude._name", backwardsCluster().newNodePattern())
            .put(indexSettings())));
    ensureYellow();
    assertAllShardsOnNodes("test", backwardsCluster().backwardsNodePattern());
    int numDocs = randomIntBetween(100, 150);
    ArrayList<String> ids = new ArrayList<>();
    logger.info(" --> indexing [{}] docs", numDocs);
    IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
    for (int i = 0; i < numDocs; i++) {
        String id = randomRealisticUnicodeOfLength(10) + String.valueOf(i);
        ids.add(id);
        docs[i] = client().prepareIndex("test", "type1", id).setSource("field1", English.intToEnglish(i));
    }
    indexRandom(true, docs);
    CountResponse countResponse = client().prepareCount().get();
    assertHitCount(countResponse, numDocs);

    // Randomly either force the index onto the new nodes only, or allow it
    // to spread across all nodes.
    if (randomBoolean()) {
        logger.info(" --> moving index to new nodes");
        backwardsCluster().allowOnlyNewNodes("test");
    } else {
        logger.info(" --> allow index to on all nodes");
        backwardsCluster().allowOnAllNodes("test");
    }

    // sometimes index while relocating

    /*
    There is a rarely occurring bug which causes documents to be lost as follows:
            
    We fail to send index requests to a replica when primary and replica relocate shortly after each other and a replica request is in
    flight for a long time.
    One shard with primary and one replica.
    We index a document while the primary is relocating. This actually works ok on the old primary. The request to the new replica is
    sent as a replication request.
    We send two replication requests (to relocation target of primary and replica) but they are in flight for a while (threadpool full,
    network slow etc).
    In the meanwhile:
    - relocation of primary is done
    - replica is relocated after primary relocation is done
    The two replication requests (relocation target of primary and replica) reach their respective targets *after* the relocation of
    the primary is done and the relocation of the replica is done.
    This means:
    - the node that had the replica will not index the document because it does not have the replica anymore
    - the node that has the new primary will index the document but not replicate it because it comes in as a replica request
    - the new replica will not get the document via recovery (translog etc) because recovery is done already
            
    --> replica does not get the document at all
            
    This should be fixed by https://github.com/elastic/elasticsearch/pull/15900 but the fix will not be backported so we should not
    test this scenario anymore either on 2.x branches. Therefore the indexing-while-relocating step is commented out.
            
            
     if (randomBoolean()) {
    logger.info(" --> indexing [{}] more docs", numDocs);
    for (int i = 0; i < numDocs; i++) {
        String id = randomRealisticUnicodeOfLength(10) + String.valueOf(numDocs + i);
        ids.add(id);
        docs[i] = client().prepareIndex("test", "type1", id).setSource("field1", English.intToEnglish(numDocs + i));
    }
    indexRandom(true, docs);
    numDocs *= 2;
    }*/

    // NOTE(review): this message has no "{}" placeholder, so the numDocs
    // argument is ignored by the logger — looks like a leftover; confirm.
    logger.info(" --> waiting for relocation to complete", numDocs);
    ensureYellow("test");// move all shards to the new node (it waits on relocation)
    // Search several times to catch intermittently missing documents.
    final int numIters = randomIntBetween(10, 20);
    for (int i = 0; i < numIters; i++) {
        SearchResponse afterRelocation = client().prepareSearch().setSize(ids.size()).get();
        assertNoFailures(afterRelocation);
        assertSearchHits(afterRelocation, ids.toArray(new String[ids.size()]));
    }
    assertVersionCreated(compatibilityVersion(), "test");
}