Example usage for org.apache.lucene.store RAMDirectory close

List of usage examples for org.apache.lucene.store RAMDirectory close

Introduction

On this page you can find usage examples for org.apache.lucene.store RAMDirectory close.

Prototype

@Override
public void close() 

Document

Closes the store to future operations, releasing associated memory.
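
As a quick orientation before the project examples below, here is a minimal sketch of creating a RAMDirectory, writing to it, and closing it. It assumes the Lucene 4.x-era API used throughout this page; the file name "example.bin" is illustrative:

import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.RAMDirectory;

// Minimal sketch: write a single file into the in-memory directory, then
// free the memory it holds via close(). The directory rejects further
// operations once closed.
RAMDirectory dir = new RAMDirectory();
try {
    IndexOutput out = dir.createOutput("example.bin", IOContext.DEFAULT);
    out.writeInt(42); // any payload; writeInt comes from DataOutput
    out.close();
} finally {
    dir.close(); // releases the buffers backing the in-memory files
}

Note that in later Lucene releases (8.x onward) RAMDirectory is deprecated in favor of ByteBuffersDirectory, so new code should prefer that class.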

Usage

From source file: org.elasticsearch.search.suggest.CompletionPostingsFormatTest.java

License: Apache License

@Test
public void testCompletionPostingsFormat() throws IOException {
    AnalyzingCompletionLookupProvider provider = new AnalyzingCompletionLookupProvider(true, false, true, true);
    RAMDirectory dir = new RAMDirectory();
    IndexOutput output = dir.createOutput("foo.txt", IOContext.DEFAULT);
    FieldsConsumer consumer = provider.consumer(output);
    FieldInfo fieldInfo = new FieldInfo("foo", true, 1, false, true, true,
            IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, DocValuesType.SORTED, DocValuesType.BINARY,
            new HashMap<String, String>());
    TermsConsumer addField = consumer.addField(fieldInfo);

    PostingsConsumer postingsConsumer = addField.startTerm(new BytesRef("foofightersgenerator"));
    postingsConsumer.startDoc(0, 1);
    postingsConsumer.addPosition(256 - 2,
            provider.buildPayload(new BytesRef("Generator - Foo Fighters"), 9, new BytesRef("id:10")), 0, 1);
    postingsConsumer.finishDoc();
    addField.finishTerm(new BytesRef("foofightersgenerator"), new TermStats(1, 1));
    addField.startTerm(new BytesRef("generator"));
    postingsConsumer.startDoc(0, 1);
    postingsConsumer.addPosition(256 - 1,
            provider.buildPayload(new BytesRef("Generator - Foo Fighters"), 9, new BytesRef("id:10")), 0, 1);
    postingsConsumer.finishDoc();
    addField.finishTerm(new BytesRef("generator"), new TermStats(1, 1));
    addField.finish(1, 1, 1);
    consumer.close();
    output.close();

    IndexInput input = dir.openInput("foo.txt", IOContext.DEFAULT);
    LookupFactory load = provider.load(input);
    PostingsFormatProvider format = new PreBuiltPostingsFormatProvider(new ElasticSearch090PostingsFormat());
    NamedAnalyzer analyzer = new NamedAnalyzer("foo", new StandardAnalyzer(TEST_VERSION_CURRENT));
    Lookup lookup = load.getLookup(new CompletionFieldMapper(new Names("foo"), analyzer, analyzer, format, null,
            true, true, true, Integer.MAX_VALUE), new CompletionSuggestionContext(null));
    List<LookupResult> result = lookup.lookup("ge", false, 10);
    assertThat(result.get(0).key.toString(), equalTo("Generator - Foo Fighters"));
    assertThat(result.get(0).payload.utf8ToString(), equalTo("id:10"));
    dir.close();
}

From source file: org.elasticsearch.search.suggest.CompletionPostingsFormatTest.java

License: Apache License

@Test
public void testNoDocs() throws IOException {
    AnalyzingCompletionLookupProvider provider = new AnalyzingCompletionLookupProvider(true, false, true, true);
    RAMDirectory dir = new RAMDirectory();
    IndexOutput output = dir.createOutput("foo.txt", IOContext.DEFAULT);
    FieldsConsumer consumer = provider.consumer(output);
    FieldInfo fieldInfo = new FieldInfo("foo", true, 1, false, true, true,
            IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, DocValuesType.SORTED, DocValuesType.BINARY,
            new HashMap<String, String>());
    TermsConsumer addField = consumer.addField(fieldInfo);
    addField.finish(0, 0, 0);
    consumer.close();
    output.close();

    IndexInput input = dir.openInput("foo.txt", IOContext.DEFAULT);
    LookupFactory load = provider.load(input);
    PostingsFormatProvider format = new PreBuiltPostingsFormatProvider(new ElasticSearch090PostingsFormat());
    NamedAnalyzer analyzer = new NamedAnalyzer("foo", new StandardAnalyzer(TEST_VERSION_CURRENT));
    assertNull(load.getLookup(new CompletionFieldMapper(new Names("foo"), analyzer, analyzer, format, null,
            true, true, true, Integer.MAX_VALUE), new CompletionSuggestionContext(null)));
    dir.close();
}

From source file: org.elasticsearch.test.integration.search.suggest.CompletionPostingsFormatTest.java

License: Apache License

@Test
public void testCompletionPostingsFormat() throws IOException {
    AnalyzingCompletionLookupProvider provider = new AnalyzingCompletionLookupProvider(true, false, true, true);
    RAMDirectory dir = new RAMDirectory();
    IndexOutput output = dir.createOutput("foo.txt", IOContext.DEFAULT);
    FieldsConsumer consumer = provider.consumer(output);
    FieldInfo fieldInfo = new FieldInfo("foo", true, 1, false, true, true,
            IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, DocValuesType.SORTED, DocValuesType.BINARY,
            new HashMap<String, String>());
    TermsConsumer addField = consumer.addField(fieldInfo);

    PostingsConsumer postingsConsumer = addField.startTerm(new BytesRef("foofightersgenerator"));
    postingsConsumer.startDoc(0, 1);
    postingsConsumer.addPosition(256 - 2,
            provider.buildPayload(new BytesRef("Generator - Foo Fighters"), 9, new BytesRef("id:10")), 0, 1);
    postingsConsumer.finishDoc();
    addField.finishTerm(new BytesRef("foofightersgenerator"), new TermStats(1, 1));
    addField.startTerm(new BytesRef("generator"));
    postingsConsumer.startDoc(0, 1);
    postingsConsumer.addPosition(256 - 1,
            provider.buildPayload(new BytesRef("Generator - Foo Fighters"), 9, new BytesRef("id:10")), 0, 1);
    postingsConsumer.finishDoc();
    addField.finishTerm(new BytesRef("generator"), new TermStats(1, 1));
    addField.finish(1, 1, 1);
    consumer.close();
    output.close();

    IndexInput input = dir.openInput("foo.txt", IOContext.DEFAULT);
    LookupFactory load = provider.load(input);
    PostingsFormatProvider format = new PreBuiltPostingsFormatProvider(new ElasticSearch090PostingsFormat());
    NamedAnalyzer analyzer = new NamedAnalyzer("foo", new StandardAnalyzer(TEST_VERSION_CURRENT));
    Lookup lookup = load.getLookup(
            new CompletionFieldMapper(new Names("foo"), analyzer, analyzer, format, null, true, true, true),
            false);
    List<LookupResult> result = lookup.lookup("ge", false, 10);
    assertThat(result.get(0).key.toString(), equalTo("Generator - Foo Fighters"));
    assertThat(result.get(0).payload.utf8ToString(), equalTo("id:10"));
    dir.close();
}

From source file: org.elasticsearch.test.integration.search.suggest.CompletionPostingsFormatTest.java

License: Apache License

public Lookup buildAnalyzingLookup(final CompletionFieldMapper mapper, String[] terms, String[] surfaces,
        long[] weights) throws IOException {
    RAMDirectory dir = new RAMDirectory();
    FilterCodec filterCodec = new FilterCodec("filtered", Codec.getDefault()) {
        public PostingsFormat postingsFormat() {
            return mapper.postingsFormatProvider().get();
        }
    };
    IndexWriterConfig indexWriterConfig = new IndexWriterConfig(TEST_VERSION_CURRENT, mapper.indexAnalyzer());

    indexWriterConfig.setCodec(filterCodec);
    IndexWriter writer = new IndexWriter(dir, indexWriterConfig);
    for (int i = 0; i < weights.length; i++) {
        Document doc = new Document();
        BytesRef payload = mapper.buildPayload(new BytesRef(surfaces[i]), weights[i],
                new BytesRef(Long.toString(weights[i])));
        doc.add(mapper.getCompletionField(terms[i], payload));
        if (randomBoolean()) {
            writer.commit();
        }
        writer.addDocument(doc);
    }
    writer.commit();
    writer.forceMerge(1);
    writer.commit();
    DirectoryReader reader = DirectoryReader.open(writer, true);
    assertThat(reader.leaves().size(), equalTo(1));
    assertThat(reader.leaves().get(0).reader().numDocs(), equalTo(weights.length));
    AtomicReaderContext atomicReaderContext = reader.leaves().get(0);
    Terms luceneTerms = atomicReaderContext.reader().terms(mapper.name());
    Lookup lookup = ((Completion090PostingsFormat.CompletionTerms) luceneTerms).getLookup(mapper, false);
    reader.close();
    writer.close();
    dir.close();
    return lookup;
}
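
Note the shutdown order in this helper: the DirectoryReader and IndexWriter are closed before the RAMDirectory itself, since the directory must remain open while a reader or writer still uses it; the final dir.close() then releases the in-memory index.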

From source file: org.genemania.mediator.lucene.exporter.Generic2LuceneExporter.java

License: Open Source License

public void export() throws Exception {
    final ExportProfile profile = createExportProfile(basePath, profileName);
    Analyzer analyzer = createAnalyzer();

    try {
        final Map<String, Long> namingSourceIds = new HashMap<String, Long>();

        File indexFile = new File(makeIndexPath("base"));
        FSDirectory directory = FSDirectory.open(indexFile);
        final IndexWriter indexWriter = new IndexWriter(directory, analyzer, true, MaxFieldLength.UNLIMITED);
        processFile(genericDbPath, "GENE_NAMING_SOURCES.txt", new FileHandler() {
            @Override
            public boolean process(String line) throws IOException {
                String[] parts = line.split("\t", -1);
                exportNamingSource(indexWriter, parts);
                namingSourceIds.put(parts[1], Long.parseLong(parts[0]));
                return true;
            }
        });

        processFile(genericDbPath, "TAGS.txt", new FileHandler() {
            @Override
            public boolean process(String line) throws IOException {
                String[] parts = line.split("\t", -1);
                exportTag(indexWriter, parts);
                return true;
            }
        });

        processFile(genericDbPath, "ONTOLOGIES.txt", new FileHandler() {
            @Override
            public boolean process(String line) throws IOException {
                String[] parts = line.split("\t", -1);
                exportOntologies(indexWriter, parts);
                return true;
            }
        });

        processFile(genericDbPath, "ONTOLOGY_CATEGORIES.txt", new FileHandler() {
            @Override
            public boolean process(String line) throws IOException {
                String[] parts = line.split("\t", -1);
                exportOntologyCategories(indexWriter, parts);
                return true;
            }
        });

        exportStatistics(indexWriter);
        indexWriter.close();

        String[] organisms = config.getSection("Organisms").getEntry("organisms").split("\\s*,\\s*");
        for (final String organismId : organisms) {
            Section organismSection = config.getSection(organismId);
            final String shortName = organismSection.getEntry("short_name");
            System.out.println(shortName);

            RAMDirectory ramDirectory = new RAMDirectory();
            final IndexWriter writer = new IndexWriter(ramDirectory, analyzer, true, MaxFieldLength.UNLIMITED);
            final Organism organism = new Organism();
            processFile(genericDbPath, "ORGANISMS.txt", new FileHandler() {
                @Override
                public boolean process(String line) throws IOException {
                    String[] parts = line.split("\t", -1);
                    if (parts[1].equals(shortName)) {
                        exportOrganism(writer, parts);
                        populateOrganism(organism, parts);
                        return false;
                    }
                    return true;
                }
            });

            final Long entrezNamingSourceId = namingSourceIds.get("Entrez Gene ID");
            final Map<Long, String> externalIds = new HashMap<Long, String>();
            final Map<Long, Long> externalNamingSourceIds = new HashMap<Long, Long>();

            final Set<Long> nodes = new HashSet<Long>();
            processFile(genericDbPath, "GENES.txt", new FileHandler() {
                @Override
                public boolean process(String line) throws IOException {
                    String[] parts = line.split("\t", -1);
                    long organismId = Long.parseLong(parts[5]);
                    if (organismId == organism.getId()) {
                        exportGene(writer, parts);
                        long nodeId = Long.parseLong(parts[4]);
                        nodes.add(nodeId);

                        long namingSourceId = Long.parseLong(parts[3]);
                        if (namingSourceId == entrezNamingSourceId) {
                            externalIds.put(nodeId, parts[1]);
                            externalNamingSourceIds.put(nodeId, namingSourceId);
                        }
                    }
                    return true;
                }
            });

            final Map<Long, Long> geneDataToNodeIds = new HashMap<Long, Long>();
            processFile(genericDbPath, "NODES.txt", new FileHandler() {
                @Override
                public boolean process(String line) throws IOException {
                    String[] parts = line.split("\t", -1);
                    long nodeId = Long.parseLong(parts[0]);
                    if (nodes.contains(nodeId)) {
                        exportNode(writer, parts, String.valueOf(organism.getId()));
                        geneDataToNodeIds.put(Long.parseLong(parts[2]), nodeId);
                    }
                    return true;
                }
            });

            processFile(genericDbPath, "GENE_DATA.txt", new FileHandler() {
                @Override
                public boolean process(String line) throws IOException {
                    String[] parts = line.split("\t", -1);
                    long geneDataId = Long.parseLong(parts[0]);
                    Long nodeId = geneDataToNodeIds.get(geneDataId);
                    if (nodeId != null) {
                        String externalId = externalIds.get(nodeId);
                        long namingSourceId = -1;
                        if (externalId != null) {
                            namingSourceId = externalNamingSourceIds.get(nodeId);
                        }
                        exportGeneData(writer, parts, externalId, namingSourceId);
                    }
                    return true;
                }
            });

            final Set<Long> groups = new HashSet<Long>();
            processFile(genericDbPath, "NETWORK_GROUPS.txt", new FileHandler() {
                @Override
                public boolean process(String line) throws IOException {
                    String[] parts = line.split("\t", -1);
                    long organismId = Long.parseLong(parts[4]);
                    if (organismId == organism.getId()) {
                        exportGroup(writer, parts);
                        groups.add(Long.parseLong(parts[0]));
                    }
                    return true;
                }
            });

            final Set<Long> metadata = new HashSet<Long>();
            final Set<Long> networks = new HashSet<Long>();
            processFile(genericDbPath, "NETWORKS.txt", new FileHandler() {
                @Override
                public boolean process(String line) throws IOException {
                    String[] parts = line.split("\t", -1);
                    long groupId = Long.parseLong(parts[5]);
                    long networkId = Long.parseLong(parts[0]);
                    if (groups.contains(groupId) && profile.includesNetwork(parts)) {
                        exportNetwork(writer, parts);
                        long metadataId = Long.parseLong(parts[2]);
                        metadata.add(metadataId);

                        networks.add(networkId);
                    }
                    return true;
                }
            });

            processFile(genericDbPath, "NETWORK_METADATA.txt", new FileHandler() {
                @Override
                public boolean process(String line) throws IOException {
                    String[] parts = line.split("\t", -1);
                    long metadataId = Long.parseLong(parts[0]);
                    if (metadata.contains(metadataId)) {
                        exportNetworkMetadata(writer, parts);
                    }
                    return true;
                }
            });

            processFile(genericDbPath, "NETWORK_TAG_ASSOC.txt", new FileHandler() {
                @Override
                public boolean process(String line) throws IOException {
                    String[] parts = line.split("\t", -1);
                    long networkId = Long.parseLong(parts[1]);
                    if (networks.contains(networkId)) {
                        exportNetworkTagAssoc(writer, parts);
                    }
                    return true;
                }
            });

            final Set<Long> attribute_groups = new HashSet<Long>();
            processFile(genericDbPath, "ATTRIBUTE_GROUPS.txt", new FileHandler() {
                @Override
                public boolean process(String line) throws IOException {
                    String[] parts = line.split("\t", -1);
                    long organismId = Long.parseLong(parts[1]);
                    if (organismId == organism.getId()) {
                        exportAttributeGroup(writer, parts);
                        long group_id = Long.parseLong(parts[0]);
                        attribute_groups.add(group_id);
                    }
                    return true;
                }
            });

            final Set<Long> attributes = new HashSet<Long>();
            processFile(genericDbPath, "ATTRIBUTES.txt", new FileHandler() {
                @Override
                public boolean process(String line) throws IOException {
                    String[] parts = line.split("\t", -1);
                    long group_id = Long.parseLong(parts[1]);
                    if (attribute_groups.contains(group_id)) {
                        exportAttribute(writer, parts);
                        long attribute_id = Long.parseLong(parts[0]);
                        attributes.add(attribute_id);
                    }
                    return true;
                }
            });

            writer.close();

            String gmOrganismId = organismSection.getEntry("gm_organism_id");
            File organismFile = new File(makeIndexPath(String.format("%s", gmOrganismId)));
            FSDirectory fileDirectory = FSDirectory.open(organismFile);
            IndexWriter organismWriter = new IndexWriter(fileDirectory, analyzer, true,
                    MaxFieldLength.UNLIMITED);
            IndexReader reader = IndexReader.open(ramDirectory);
            organismWriter.addIndexes(new IndexReader[] { reader });
            organismWriter.close();
            fileDirectory.close();
            ramDirectory.close();

            Properties properties = new Properties();
            properties.put("short_name", shortName);
            properties.put("common_name", organismSection.getEntry("common_name"));
            properties.put("organism_id", gmOrganismId);

            String propertyPath = String.format("%s%smetadata.xml", gmOrganismId, File.separator);
            FileOutputStream out = new FileOutputStream(makeIndexPath(propertyPath));
            try {
                properties.storeToXML(out, null, "UTF-8");
            } finally {
                out.close();
            }
        }
    } finally {
        close();
    }
}
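
In this exporter the RAMDirectory serves as an in-memory staging area: each organism's index is built in RAM, copied into the on-disk index with organismWriter.addIndexes(...), and then closed so its memory is released before the next organism is processed.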

From source file: org.rssowl.core.internal.ApplicationServiceImpl.java

License: Open Source License

private boolean runNewsFilters(final List<INews> news, String feedLink, final IProgressMonitor monitor)
        throws Exception {

    /* Load Enabled Filters that are scoped to given Feed */
    Set<ISearchFilter> enabledFilters = loadEnabledFilters(feedLink);

    /* Nothing to do */
    if (enabledFilters.isEmpty())
        return false;

    /* Return early on cancellation */
    if (monitor.isCanceled() || Owl.isShuttingDown())
        return false;

    /* Need to index News and perform Searches */
    RAMDirectory directory = null;
    final IndexSearcher[] searcher = new IndexSearcher[1];
    if (needToIndex(enabledFilters)) {
        boolean indexDescription = needToIndexDescription(enabledFilters);
        directory = new RAMDirectory();
        directory.setLockFactory(NoLockFactory.getNoLockFactory());

        /* Index News */
        try {
            IndexWriter indexWriter = new IndexWriter(directory, Indexer.createAnalyzer());
            for (int i = 0; i < news.size(); i++) {

                /* Return early on cancellation */
                if (monitor.isCanceled() || Owl.isShuttingDown())
                    return false;

                NewsDocument document = new NewsDocument(news.get(i));
                document.addFields(indexDescription);
                document.getDocument().getField(SearchDocument.ENTITY_ID_TEXT).setValue(String.valueOf(i));
                indexWriter.addDocument(document.getDocument());
            }
            indexWriter.close();

            searcher[0] = new IndexSearcher(directory);
        } catch (Exception e) {
            directory.close();
            throw e;
        }
    }

    /* Remember the news already filtered */
    List<INews> filteredNews = new ArrayList<INews>(news.size());
    boolean filterMatchedAll = false;

    /* Iterate over Filters */
    for (ISearchFilter filter : enabledFilters) {

        /* No Search Required */
        if (filter.getSearch() == null) {
            filterMatchedAll = true;

            List<INews> remainingNews = new ArrayList<INews>(news);
            remainingNews.removeAll(filteredNews);
            if (!remainingNews.isEmpty())
                applyFilter(filter, remainingNews);

            /* Done - we only support 1 filter per News */
            break;
        }

        /* Search Required */
        else if (directory != null && searcher[0] != null) {

            /* Return early if cancelled and nothing filtered yet */
            if ((monitor.isCanceled() || Owl.isShuttingDown()) && filteredNews.isEmpty())
                return false;

            try {
                final List<INews> matchingNews = new ArrayList<INews>();

                /* Perform Query */
                Query query = ModelSearchQueries.createQuery(filter.getSearch());
                searcher[0].search(query, new HitCollector() {
                    @Override
                    public void collect(int doc, float score) {
                        try {
                            Document document = searcher[0].doc(doc);
                            int index = Integer.valueOf(document.get(SearchDocument.ENTITY_ID_TEXT));
                            matchingNews.add(news.get(index));
                        } catch (CorruptIndexException e) {
                            Activator.getDefault().logError(e.getMessage(), e);
                        } catch (IOException e) {
                            Activator.getDefault().logError(e.getMessage(), e);
                        }
                    }
                });

                /* Apply Filter */
                matchingNews.removeAll(filteredNews);
                if (!matchingNews.isEmpty()) {
                    applyFilter(filter, matchingNews);
                    filteredNews.addAll(matchingNews);
                }
            } catch (IOException e) {
                directory.close();
                throw e;
            }
        }
    }

    /* Free RAMDirectory if it was built */
    if (directory != null)
        directory.close();

    return filterMatchedAll || !filteredNews.isEmpty();
}
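
Note the defensive use of close() above: when indexing or searching throws, the directory is closed before the exception is rethrown, and the final null check frees it once all filters have run.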

From source file: org.sindice.siren.qparser.tabular.parser.TabularQueryParserTestHelper.java

License: Open Source License

public static float getScore(final Map<String, String> ntriples, final Map<String, Float> boosts,
        final String query, final boolean scattered)
        throws CorruptIndexException, LockObtainFailedException, IOException, ParseException {
    RAMDirectory ramDir = null;

    try {
        ramDir = new RAMDirectory();
        TabularQueryParserTestHelper.index(ramDir, ntriples);
        return TabularQueryParserTestHelper.getScore(ramDir, query, boosts, scattered);
    } finally {
        if (ramDir != null)
            ramDir.close();
    }
}
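
Since Directory implements java.io.Closeable (from Lucene 3.x onward), the null-checked finally block used in these helpers can be written more compactly with try-with-resources on Java 7+. A sketch of the same getScore body under that assumption:

try (RAMDirectory ramDir = new RAMDirectory()) {
    // close() is invoked automatically when the block exits,
    // even if index() or getScore() throws.
    TabularQueryParserTestHelper.index(ramDir, ntriples);
    return TabularQueryParserTestHelper.getScore(ramDir, query, boosts, scattered);
}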

From source file: org.sindice.siren.qparser.tabular.parser.TabularQueryParserTestHelper.java

License: Open Source License

public static boolean match(final Map<String, String> ntriples, final Map<String, Float> boosts,
        final String query, final boolean scattered)
        throws CorruptIndexException, LockObtainFailedException, IOException, ParseException {
    RAMDirectory ramDir = null;

    try {
        ramDir = new RAMDirectory();
        TabularQueryParserTestHelper.index(ramDir, ntriples);
        return TabularQueryParserTestHelper.match(ramDir, query, boosts, scattered);
    } finally {
        if (ramDir != null)
            ramDir.close();
    }
}

From source file: org.sindice.siren.qparser.tabular.parser.TabularQueryParserTestHelper.java

License: Open Source License

public static boolean match(final String ntriple, final String query)
        throws CorruptIndexException, LockObtainFailedException, IOException, ParseException {
    RAMDirectory ramDir = null;

    try {
        ramDir = new RAMDirectory();
        TabularQueryParserTestHelper.index(ramDir, ntriple);
        return TabularQueryParserTestHelper.match(ramDir, query, _defaultField);
    } finally {
        if (ramDir != null)
            ramDir.close();
    }
}

From source file: org.sindice.siren.qparser.tabular.parser.TabularQueryParserTestHelper.java

License: Open Source License

public static boolean matchImplicit(final String ntriple, final String query)
        throws CorruptIndexException, LockObtainFailedException, IOException, ParseException {
    RAMDirectory ramDir = null;

    try {
        ramDir = new RAMDirectory();
        TabularQueryParserTestHelper.indexImplicit(ramDir, ntriple);
        return TabularQueryParserTestHelper.match(ramDir, query, _implicitField);
    } finally {
        if (ramDir != null)
            ramDir.close();
    }
}