List of usage examples for org.apache.lucene.analysis.util FilesystemResourceLoader FilesystemResourceLoader
FilesystemResourceLoader
From source file: jp.co.atware.solr.analizers.cjk.MultistageMappingCharFilterTest.java
License: Apache License
@Before public void setup() throws Exception { Map<String, String> args = new HashMap<String, String>(); args.put("mapping", "src/test/resources/multistage-test/first.txt;src/test/resources/multistage-test/second.txt"); charFilterFactory = new MultistageMappingCharFilterFactory(args); charFilterFactory.inform(new FilesystemResourceLoader()); tokenizerFactory = new CJKTokenizerFactory(new HashMap<String, String>()); }
From source file: jp.sf.fess.solr.plugin.suggest.util.SolrConfigUtil.java
License: Apache License
public static List<SuggestFieldInfo> getSuggestFieldInfoList(final SuggestUpdateConfig config) { final List<SuggestFieldInfo> list = new ArrayList<SuggestFieldInfo>(); for (final SuggestUpdateConfig.FieldConfig fieldConfig : config.getFieldConfigList()) { try {//from ww w. j ava2 s .com final List<String> fieldNameList = Arrays.asList(fieldConfig.getTargetFields()); final SuggestUpdateConfig.TokenizerConfig tokenizerConfig = fieldConfig.getTokenizerConfig(); //create tokenizerFactory TokenizerFactory tokenizerFactory = null; if (tokenizerConfig != null) { final Class<?> cls = Class.forName(tokenizerConfig.getClassName()); final Constructor<?> constructor = cls.getConstructor(Map.class); tokenizerFactory = (TokenizerFactory) constructor.newInstance(tokenizerConfig.getArgs()); try { final Class[] params = new Class[] { ResourceLoader.class }; final Method inform = cls.getDeclaredMethod("inform", params); final Object[] args = new Object[] { new FilesystemResourceLoader() }; inform.invoke(tokenizerFactory, args); } catch (final NoSuchMethodException e) { //ignore } catch (final Exception e) { logger.warn("Failed to execute inform of tokenizer.", e); } } //create converter final SuggestIntegrateConverter suggestIntegrateConverter = new SuggestIntegrateConverter(); for (final SuggestUpdateConfig.ConverterConfig converterConfig : fieldConfig .getConverterConfigList()) { final SuggestReadingConverter suggestReadingConverter = SuggestUtil .createConverter(converterConfig.getClassName(), converterConfig.getProperties()); suggestIntegrateConverter.addConverter(suggestReadingConverter); } if (tokenizerFactory != null) { suggestIntegrateConverter.setTokenizerFactory(tokenizerFactory); } suggestIntegrateConverter.start(); //create normalizer final SuggestIntegrateNormalizer suggestIntegrateNormalizer = new SuggestIntegrateNormalizer(); for (final SuggestUpdateConfig.NormalizerConfig normalizerConfig : fieldConfig .getNormalizerConfigList()) { final SuggestNormalizer 
suggestNormalizer = SuggestUtil .createNormalizer(normalizerConfig.getClassName(), normalizerConfig.getProperties()); suggestIntegrateNormalizer.addNormalizer(suggestNormalizer); } suggestIntegrateNormalizer.start(); final SuggestFieldInfo suggestFieldInfo = new SuggestFieldInfo(fieldNameList, tokenizerFactory, suggestIntegrateConverter, suggestIntegrateNormalizer); list.add(suggestFieldInfo); } catch (final Exception e) { throw new FessSuggestException( "Failed to create Tokenizer." + fieldConfig.getTokenizerConfig().getClassName(), e); } } return list; }
From source file: org.apache.solr.search.PreAnalyzedQParserPlugin.java
License: Apache License
public void init(NamedList args) { // TODO Auto-generated method stub try {/*ww w. j a v a 2 s .c o m*/ // Initialize lemmatizer lemmatizer = LemmatizerFactory.createLemmatizer(); // Initialize OpenNLP model and tokenizer modelsPath = args.get("modelsPath").toString(); tokenModelIn = new FileInputStream( PreAnalyzedQParserPlugin.modelsPath + lang + File.separator + lang + "-token.bin"); tokenModel = new TokenizerModel(tokenModelIn); tokenizer = new TokenizerME(tokenModel); tokenizerSimple = SimpleTokenizer.INSTANCE; tokenizerWS = WhitespaceTokenizer.INSTANCE; // Get Synonyms file path synonymsPath = args.get("synonymsPath").toString(); // Initialize Synonyms Filter factory Map<String, String> argsSyn = new HashMap<String, String>(); argsSyn.put("synonyms", synonymsPath); argsSyn.put("luceneMatchVersion", Version.LUCENE_46.toString()); synFactory = new SynonymFilterFactory(argsSyn); synFactory.inform(new FilesystemResourceLoader()); // Get Mapping Char file path mappingPath = args.get("mappingsPath").toString(); lemmaLogPath = args.get("lemmaLogPath").toString(); // Initialize Mapping Char Filter factory Map<String, String> argsCharFactory = new HashMap<String, String>(); argsCharFactory.put("mapping", mappingPath); argsCharFactory.put("luceneMatchVersion", Version.LUCENE_46.toString()); mapCharFactory = new MappingCharFilterFactory(argsCharFactory); mapCharFactory.inform(new FilesystemResourceLoader()); } catch (LemmatizerException e) { // TODO Auto-generated catch block e.printStackTrace(); } catch (FileNotFoundException e) { // TODO Auto-generated catch block e.printStackTrace(); } catch (InvalidFormatException e) { // TODO Auto-generated catch block e.printStackTrace(); } catch (IOException e) { // TODO Auto-generated catch block e.printStackTrace(); } }