The Analyzer (tokenizer) in Solr and how to customize it

public class T {

            //A full-text search engine analyzes the documents to be indexed with some algorithm that extracts Tokens from them; such an algorithm is called a Tokenizer.
            //The Tokens are then processed further, e.g. lower-cased; these processing algorithms are called Token Filters.
            //The processed result is called a Term, and the number of such Terms a document contains is its Frequency (term frequency).
            //The engine builds an Inverted Index from Terms to the original documents, so the source documents can be looked up quickly by Term.
            //Before the text reaches the Tokenizer it may need some preprocessing, e.g. stripping embedded HTML markup; these algorithms are called Character Filters.
            //The analysis algorithm as a whole is called an Analyzer.
            org.apache.lucene.analysis.Analyzer analyzer;
            //The actual tokenization step is carried out by the Tokenizer class, while Filters handle preprocessing and post-tokenization processing; any number of them can be chained
            org.apache.lucene.analysis.AnalyzerWrapper analyzerWrapper;
            //org.apache.lucene.analysis.TokenStreamComponents tokenStreamComponents;

            org.apache.lucene.analysis.Token token;
            //The basic unit of indexing; represents each term that gets written into the index
            org.apache.lucene.analysis.TokenStream tokenStream;
            //An iterator over Tokens, with some convenience methods
            //The token stream: the Tokens produced by analyzing an object are held in memory as a stream
            //In other words, Tokens must always be obtained from a TokenStream; the object being analyzed can be document text or query text.
            org.apache.lucene.analysis.Tokenizer tokenizer;
            //Extends TokenStream; a TokenStream whose input is a Reader (so it is essentially still a TokenStream). It consumes the character stream and performs the actual word segmentation; one of the core pieces of a custom analyzer
            org.apache.lucene.analysis.TokenFilter tokenFilter; //Filters a TokenStream; its input is another TokenStream
            org.apache.lucene.analysis.CharFilter charFilter; //A component for character-level preprocessing
            //   org.apache.lucene.analysis.TokenFactory tokenFactory;
}
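
Putting the classes above to work: a minimal sketch that runs the stock StandardAnalyzer over a string and walks the resulting TokenStream. The class name AnalyzerDemo and the Version.LUCENE_47 constant are our own choices here; any Lucene 4.x release behaves the same way.

class AnalyzerDemo {

            public static void main(String[] args) throws java.io.IOException {
                        org.apache.lucene.analysis.Analyzer analyzer =
                                    new org.apache.lucene.analysis.standard.StandardAnalyzer(
                                                org.apache.lucene.util.Version.LUCENE_47); //match this constant to your jars
                        org.apache.lucene.analysis.TokenStream stream =
                                    analyzer.tokenStream("content", new java.io.StringReader("Hello Solr, Hello Lucene"));
                        //each Term is read back through an attribute attached to the stream
                        org.apache.lucene.analysis.tokenattributes.CharTermAttribute term =
                                    stream.addAttribute(org.apache.lucene.analysis.tokenattributes.CharTermAttribute.class);
                        stream.reset(); //mandatory before the first incrementToken()
                        while (stream.incrementToken()) { //advance to the next Token
                                    System.out.println(term.toString()); //prints: hello / solr / hello / lucene
                        }
                        stream.end();
                        stream.close();
            }
}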

class M extends org.apache.lucene.analysis.Analyzer {

            @Override
            protected TokenStreamComponents createComponents(String fieldName, java.io.Reader reader) {

                        //Tokenizer implementations live in org.apache.lucene.analysis.core.*Tokenizer;
                        //whitespace tokenization is used here purely as an example (Lucene 4.x constructors)
                        org.apache.lucene.analysis.Tokenizer tokenizer =
                                    new org.apache.lucene.analysis.core.WhitespaceTokenizer(
                                                org.apache.lucene.util.Version.LUCENE_47, reader);
                        //TokenFilter implementations live in org.apache.lucene.analysis.core.*Filter;
                        //here the tokens are simply lower-cased
                        org.apache.lucene.analysis.TokenStream tokenStream =
                                    new org.apache.lucene.analysis.core.LowerCaseFilter(
                                                org.apache.lucene.util.Version.LUCENE_47, tokenizer);
                        return new TokenStreamComponents(tokenizer, tokenStream);
            }

            //TokenStreamComponents
            //public TokenStreamComponents(Tokenizer source, TokenStream result) { }
            //public TokenStreamComponents(Tokenizer source) { }
            //
            //Tokenizer
            //protected Tokenizer(Reader input) { }
            //protected Tokenizer(AttributeSource.AttributeFactory factory, Reader input) { }
            //Implementations: org.apache.lucene.analysis.core.*Tokenizer
            //
            //TokenStream
            //protected TokenStream() { }
            //protected TokenStream(AttributeSource input) { }
            //protected TokenStream(AttributeSource.AttributeFactory factory) { }
            //Direct subclasses: org.apache.lucene.analysis.NumericTokenStream, .TokenFilter, .Tokenizer
}
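
A TokenFilter can be customized the same way. Below is a minimal sketch of a hypothetical filter (named MyUpperCaseFilter here, to avoid clashing with the UpperCaseFilter Lucene itself ships) that upper-cases every term; it illustrates that a filter's input is simply another TokenStream.

class MyUpperCaseFilter extends org.apache.lucene.analysis.TokenFilter {

            private final org.apache.lucene.analysis.tokenattributes.CharTermAttribute term =
                        addAttribute(org.apache.lucene.analysis.tokenattributes.CharTermAttribute.class);

            protected MyUpperCaseFilter(org.apache.lucene.analysis.TokenStream input) {
                        super(input); //the wrapped stream is stored in the protected field "input"
            }

            @Override
            public boolean incrementToken() throws java.io.IOException {
                        if (!input.incrementToken()) {
                                    return false; //upstream stream is exhausted
                        }
                        char[] buffer = term.buffer(); //mutate the term text in place
                        for (int i = 0; i < term.length(); i++) {
                                    buffer[i] = Character.toUpperCase(buffer[i]);
                        }
                        return true;
            }
}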
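
Driving the custom analyzer is then no different from driving a built-in one; a minimal usage sketch (the class name MDemo is ours):

class MDemo {

            public static void main(String[] args) throws java.io.IOException {
                        M analyzer = new M();
                        org.apache.lucene.analysis.TokenStream stream =
                                    analyzer.tokenStream("content", new java.io.StringReader("Hello WORLD foo"));
                        org.apache.lucene.analysis.tokenattributes.CharTermAttribute term =
                                    stream.addAttribute(org.apache.lucene.analysis.tokenattributes.CharTermAttribute.class);
                        stream.reset();
                        while (stream.incrementToken()) {
                                    System.out.println(term.toString()); //prints: hello / world / foo
                        }
                        stream.end();
                        stream.close();
            }
}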





Copyright notice: this is an original article by king_sky_wjb, licensed under the CC 4.0 BY-SA agreement; please include a link to the original and this notice when republishing.