- 浏览: 135105 次
- 性别:
- 来自: 福建省莆田市
文章分类
最新评论
-
houruiming:
tks for your info which helps m ...
setcontent和setcontentobject用的是同一片内存 -
turingfellow:
in.tftpd -l -s /home/tmp -u ro ...
commands -
turingfellow:
LINUX下的网络设置 ifconfig ,routeLINU ...
commands -
turingfellow:
安装 linux loopbackyum install um ...
commands
using System;
using Lucene.Net.Analysis;
using Lucene.Net.Analysis.Standard;
using SF.Snowball.Ext;
using System.Collections.Generic;
using System.Collections;
using OpenNLP.Tools.PosTagger;
namespace Lucene.Net.Analysis.Snowball
{
/// <summary>
/// Entity describing a single token: its raw text, its stem, its
/// part-of-speech tag, and its character offsets within the source document.
/// </summary>
public class myEwordEntity
{
public string txtWord;//raw token text
public string stemroot;//stemmed root of the token
public string posWord;//part-of-speech tag assigned by the tagger
public int token_begin;//start offset of the token in the document
public int token_end;//end offset of the token in the document
/// <summary>Creates an entity with empty strings and zero offsets.</summary>
public myEwordEntity()
{
txtWord = string.Empty;
stemroot = string.Empty;
posWord = string.Empty;
token_begin = 0;
token_end = 0;
}
}
/// <summary>
/// Porter stemmer: reduces an English word to its stem (e.g. "meetings" -> "meet").
/// Usage: add() the characters of a single lower-case word, call stem(), then read
/// the result via stemerToString() or getResultBuffer()/getResultLength().
/// Follows M.F. Porter, "An algorithm for suffix stripping" (1980).
/// </summary>
public class Stemmer
{
private char[] b;
private int i, /* offset into b */
i_end, /* offset to end of stemmed word */
j, k; /* j: end of stem after a suffix match (set by ends()); k: offset of the current last character */
private static int INC = 50;
/* unit of size whereby b is increased */
public Stemmer()
{
b = new char[INC];
i = 0;
i_end = 0;
}
/**
* Add a character to the word being stemmed. When you are finished
* adding characters, you can call stem(void) to stem the word.
*/
public void add(char ch)
{
if (i == b.Length)
{
char[] new_b = new char[i + INC];
for (int c = 0; c < i; c++)
new_b[c] = b[c];
b = new_b;
}
b[i++] = ch;
}
/** Adds wLen characters to the word being stemmed contained in a portion
* of a char[] array. This is like repeated calls of add(char ch), but
* faster.
*/
public void add(char[] w, int wLen)
{
if (i + wLen >= b.Length)
{
char[] new_b = new char[i + wLen + INC];
for (int c = 0; c < i; c++)
new_b[c] = b[c];
b = new_b;
}
for (int c = 0; c < wLen; c++)
b[i++] = w[c];
}
/**
* After a word has been stemmed, it can be retrieved by stemerToString(),
* or a reference to the internal buffer can be retrieved by getResultBuffer
* and getResultLength (which is generally more efficient.)
*/
public string stemerToString()
{
return new String(b, 0, i_end);
}
/**
* Returns the length of the word resulting from the stemming process.
*/
public int getResultLength()
{
return i_end;
}
/**
* Returns a reference to a character buffer containing the results of
* the stemming process. You also need to consult getResultLength()
* to determine the length of the result.
*/
public char[] getResultBuffer()
{
return b;
}
/* cons(i) is true <=> b[i] is a consonant. 'y' counts as a consonant
when it starts the word or follows a vowel. */
private bool cons(int i)
{
switch (b[i])
{
case 'a':
case 'e':
case 'i':
case 'o':
case 'u': return false;
case 'y': return (i == 0) ? true : !cons(i - 1);
default: return true;
}
}
/* m() measures the number of consonant sequences between 0 and j. if c is
a consonant sequence and v a vowel sequence, and <..> indicates arbitrary
presence,
<c><v> gives 0
<c>vc<v> gives 1
<c>vcvc<v> gives 2
<c>vcvcvc<v> gives 3
....
*/
private int m()
{
int n = 0;
int i = 0;
/* skip the optional leading consonant sequence */
while (true)
{
if (i > j) return n;
if (!cons(i)) break; i++;
}
i++;
/* count each vc pair up to position j */
while (true)
{
while (true)
{
if (i > j) return n;
if (cons(i)) break;
i++;
}
i++;
n++;
while (true)
{
if (i > j) return n;
if (!cons(i)) break;
i++;
}
i++;
}
}
/* vowelinstem() is true <=> 0,...j contains a vowel */
private bool vowelinstem()
{
int i;
for (i = 0; i <= j; i++)
if (!cons(i))
return true;
return false;
}
/* doublec(j) is true <=> j,(j-1) contain a double consonant. */
private bool doublec(int j)
{
if (j < 1)
return false;
if (b[j] != b[j - 1])
return false;
return cons(j);
}
/* cvc(i) is true <=> i-2,i-1,i has the form consonant - vowel - consonant
and also if the second c is not w,x or y. this is used when trying to
restore an e at the end of a short word. e.g.
cav(e), lov(e), hop(e), crim(e), but
snow, box, tray.
*/
private bool cvc(int i)
{
if (i < 2 || !cons(i) || cons(i - 1) || !cons(i - 2))
return false;
int ch = b[i];
if (ch == 'w' || ch == 'x' || ch == 'y')
return false;
return true;
}
/* ends(s) is true <=> the word b[0..k] ends with the string s;
when it does, j is set to point just before the suffix. */
private bool ends(String s)
{
int l = s.Length;
int o = k - l + 1;
if (o < 0)
return false;
char[] sc = s.ToCharArray();
for (int i = 0; i < l; i++)
if (b[o + i] != sc[i])
return false;
j = k - l;
return true;
}
/* setto(s) sets (j+1),...k to the characters in the string s, readjusting
k. */
private void setto(String s)
{
int l = s.Length;
int o = j + 1;
char[] sc = s.ToCharArray();
for (int i = 0; i < l; i++)
b[o + i] = sc[i];
k = j + l;
}
/* r(s) is used further down: replace the matched suffix with s,
but only when the remaining stem has at least one vc pair. */
private void r(String s)
{
if (m() > 0)
setto(s);
}
/* step1() gets rid of plurals and -ed or -ing. e.g.
caresses -> caress
ponies -> poni
ties -> ti
caress -> caress
cats -> cat
feed -> feed
agreed -> agree
disabled -> disable
matting -> mat
mating -> mate
meeting -> meet
milling -> mill
messing -> mess
meetings -> meet
*/
private void step1()
{
if (b[k] == 's')
{
if (ends("sses"))
k -= 2;
else if (ends("ies"))
setto("i");
else if (b[k - 1] != 's')
k--;
}
if (ends("eed"))
{
if (m() > 0)
k--;
}
else if ((ends("ed") || ends("ing")) && vowelinstem())
{
k = j;
if (ends("at"))
setto("ate");
else if (ends("bl"))
setto("ble");
else if (ends("iz"))
setto("ize");
else if (doublec(k))
{
/* undouble the final consonant, except for l, s, z */
k--;
int ch = b[k];
if (ch == 'l' || ch == 's' || ch == 'z')
k++;
}
else if (m() == 1 && cvc(k)) setto("e");
}
}
/* step2() turns terminal y to i when there is another vowel in the stem. */
private void step2()
{
if (ends("y") && vowelinstem())
b[k] = 'i';
}
/* step3() maps double suffices to single ones. so -ization ( = -ize plus
-ation) maps to -ize etc. note that the string before the suffix must give
m() > 0. */
private void step3()
{
if (k == 0)
return;
/* For Bug 1 */
switch (b[k - 1])
{
case 'a':
if (ends("ational")) { r("ate"); break; }
if (ends("tional")) { r("tion"); break; }
break;
case 'c':
if (ends("enci")) { r("ence"); break; }
if (ends("anci")) { r("ance"); break; }
break;
case 'e':
if (ends("izer")) { r("ize"); break; }
break;
case 'l':
if (ends("bli")) { r("ble"); break; }
if (ends("alli")) { r("al"); break; }
if (ends("entli")) { r("ent"); break; }
if (ends("eli")) { r("e"); break; }
if (ends("ousli")) { r("ous"); break; }
break;
case 'o':
if (ends("ization")) { r("ize"); break; }
if (ends("ation")) { r("ate"); break; }
if (ends("ator")) { r("ate"); break; }
break;
case 's':
if (ends("alism")) { r("al"); break; }
if (ends("iveness")) { r("ive"); break; }
if (ends("fulness")) { r("ful"); break; }
if (ends("ousness")) { r("ous"); break; }
break;
case 't':
if (ends("aliti")) { r("al"); break; }
if (ends("iviti")) { r("ive"); break; }
if (ends("biliti")) { r("ble"); break; }
break;
case 'g':
if (ends("logi")) { r("log"); break; }
break;
default:
break;
}
}
/* step4() deals with -ic-, -full, -ness etc. similar strategy to step3. */
private void step4()
{
switch (b[k])
{
case 'e':
if (ends("icate")) { r("ic"); break; }
if (ends("ative")) { r(""); break; }
if (ends("alize")) { r("al"); break; }
break;
case 'i':
if (ends("iciti")) { r("ic"); break; }
break;
case 'l':
if (ends("ical")) { r("ic"); break; }
if (ends("ful")) { r(""); break; }
break;
case 's':
if (ends("ness")) { r(""); break; }
break;
}
}
/* step5() takes off -ant, -ence etc., in context <c>vcvc<v>.
Each case either breaks (suffix matched; it is stripped below when m() > 1)
or returns (no suffix matched). */
private void step5()
{
if (k == 0)
return;
/* for Bug 1 */
switch (b[k - 1])
{
case 'a':
if (ends("al")) break; return;
case 'c':
if (ends("ance")) break;
if (ends("ence")) break; return;
case 'e':
if (ends("er")) break; return;
case 'i':
if (ends("ic")) break; return;
case 'l':
if (ends("able")) break;
if (ends("ible")) break; return;
case 'n':
if (ends("ant")) break;
if (ends("ement")) break;
if (ends("ment")) break;
/* element etc. not stripped before the m */
if (ends("ent")) break; return;
case 'o':
if (ends("ion") && j >= 0 && (b[j] == 's' || b[j] == 't')) break;
/* j >= 0 fixes Bug 2 */
if (ends("ou")) break; return;
/* takes care of -ous */
case 's':
if (ends("ism")) break; return;
case 't':
if (ends("ate")) break;
if (ends("iti")) break; return;
case 'u':
if (ends("ous")) break; return;
case 'v':
if (ends("ive")) break; return;
case 'z':
if (ends("ize")) break; return;
default:
return;
}
if (m() > 1)
k = j;
}
/* step6() removes a final -e if m() > 1. */
private void step6()
{
j = k;
if (b[k] == 'e')
{
int a = m();
if (a > 1 || a == 1 && !cvc(k - 1))
k--;
}
if (b[k] == 'l' && doublec(k) && m() > 1)
k--;
}
/** Stem the word placed into the Stemmer buffer through calls to add().
* Afterwards, retrieve the result with
* getResultLength()/getResultBuffer() or stemerToString().
* Words of length <= 2 are left unchanged. Resets the buffer offset
* so the instance can be reused for the next word.
*/
public void stem()
{
k = i - 1;
if (k > 1)
{
step1();
step2();
step3();
step4();
step5();
step6();
}
i_end = k + 1;
i = 0;
}
}
/// <summary>Filters {@link StandardTokenizer} with {@link StandardFilter}, {@link
/// LowerCaseFilter}, {@link StopFilter} and {@link SnowballFilter}.
///
/// Available stemmers are listed in {@link SF.Snowball.Ext}. The name of a
/// stemmer is the part of the class name before "Stemmer", e.g., the stemmer in
/// {@link EnglishStemmer} is named "English".
/// </summary>
public class SnowballAnalyzer : Analyzer
{
private System.String name; //snowball stemmer name (kept for the disabled SnowballFilter path)
private System.Collections.Hashtable stopSet; //stop-word set; null when built without stop words
private string mModelPath; //directory holding the POS-tagger model files
//Cached POS tagger. Loading the maximum-entropy model from disk is expensive,
//so it is created once on first use instead of on every call to
//TokenStreamToEntityList.
private EnglishMaximumEntropyPosTagger mTagger;
/// <summary>Builds the named analyzer with no stop words. </summary>
public SnowballAnalyzer(System.String name)
{
//Locate the POS-tagger model directory; the Models folder is expected to
//be deployed next to the executing assembly.
mModelPath = System.IO.Path.GetDirectoryName(
System.Reflection.Assembly.GetExecutingAssembly().GetName().CodeBase);
mModelPath = new System.Uri(mModelPath).LocalPath + @"\Models\";
this.name = name;
}
/// <summary>Builds the named analyzer with the given stop words. </summary>
public SnowballAnalyzer(System.String name, System.String[] stopWords)
: this(name)
{
stopSet = StopFilter.MakeStopSet(stopWords);
}
/// <summary>Constructs a {@link StandardTokenizer} filtered by a {@link
/// StandardFilter}, a {@link LowerCaseFilter} and a {@link StopFilter}.
/// Snowball stemming is applied separately (see TokenStreamToEntityList /
/// AfterStemed), so no SnowballFilter is appended here.
/// </summary>
public override TokenStream TokenStream(System.String fieldName, System.IO.TextReader reader)
{
TokenStream result = new StandardTokenizer(reader);
result = new StandardFilter(result);
result = new LowerCaseFilter(result);
if (stopSet != null)
result = new StopFilter(result, stopSet);
return result;
}
/// <summary>
/// Tokenizes the reader and returns one entity per token carrying the token
/// text, its offsets, its Porter stem and its part-of-speech tag. Replaces
/// the plain TokenStream for callers that need per-token metadata.
/// </summary>
/// <param name="fieldName">field name forwarded to TokenStream</param>
/// <param name="reader">source text</param>
/// <returns>entities in document order (empty list for empty input)</returns>
public List<myEwordEntity> TokenStreamToEntityList(System.String fieldName, System.IO.TextReader reader)
{
TokenStream result = TokenStream(fieldName, reader);
List<myEwordEntity> wordEnList = new List<myEwordEntity>();
while (true)
{
Token token = result.Next();
if (token == null)
break;
//Allocate the entity only for real tokens (the original also allocated
//one on the terminating null iteration).
myEwordEntity entity = new myEwordEntity();
entity.token_begin = token.StartOffset();
entity.token_end = token.EndOffset();
entity.txtWord = token.TermText();
entity.stemroot = AfterStemed(entity.txtWord);
wordEnList.Add(entity);
}
//POS-tag the whole token sequence in a single call; skip for empty input.
if (wordEnList.Count > 0)
{
ArrayList myposlist = new ArrayList();
foreach (myEwordEntity entity in wordEnList)
{
myposlist.Add(entity.txtWord);
}
//mModelPath already ends with '\', so no extra separator is inserted here
//(the original produced a doubled backslash before "Parser").
if (mTagger == null)
mTagger = new EnglishMaximumEntropyPosTagger(mModelPath + "EnglishPOS.nbin", mModelPath + @"Parser\tagdict");
myposlist = mTagger.Tag(myposlist);
for (int i = 0; i < myposlist.Count; i++)
{
wordEnList[i].posWord = myposlist[i].ToString();
}
}
return wordEnList;
}
/// <summary>
/// Lower-cases the input word and reduces it to its Porter stem.
/// </summary>
/// <param name="input">a single word</param>
/// <returns>the stemmed, lower-cased word</returns>
public string AfterStemed(string input)
{
Stemmer s = new Stemmer();
input = input.ToLower();
char[] inputchar = input.ToCharArray();
s.add(inputchar, inputchar.Length);
s.stem();
string u = s.stemerToString();
return u;
}
}
}
using System;
using System.Collections;
using System.Collections.Generic;
using Lucene.Net.Analysis;
using Lucene.Net.Analysis.Standard;
using OpenNLP.Tools.PosTagger;
using SF.Snowball.Ext;
namespace Lucene.Net.Analysis.Snowball
{
/// <summary>
/// Entity describing a single token: its raw text, its stem, its
/// part-of-speech tag, and its character offsets within the source document.
/// </summary>
public class myEwordEntity
{
public string txtWord;//raw token text
public string stemroot;//stemmed root of the token
public string posWord;//part-of-speech tag assigned by the tagger
public int token_begin;//start offset of the token in the document
public int token_end;//end offset of the token in the document
/// <summary>Creates an entity with empty strings and zero offsets.</summary>
public myEwordEntity()
{
txtWord = string.Empty;
stemroot = string.Empty;
posWord = string.Empty;
token_begin = 0;
token_end = 0;
}
}
/// <summary>
/// Porter stemmer: reduces an English word to its stem (e.g. "meetings" -> "meet").
/// Usage: add() the characters of a single lower-case word, call stem(), then read
/// the result via stemerToString() or getResultBuffer()/getResultLength().
/// Follows M.F. Porter, "An algorithm for suffix stripping" (1980).
/// </summary>
public class Stemmer
{
private char[] b;
private int i, /* offset into b */
i_end, /* offset to end of stemmed word */
j, k; /* j: end of stem after a suffix match (set by ends()); k: offset of the current last character */
private static int INC = 50;
/* unit of size whereby b is increased */
public Stemmer()
{
b = new char[INC];
i = 0;
i_end = 0;
}
/**
* Add a character to the word being stemmed. When you are finished
* adding characters, you can call stem(void) to stem the word.
*/
public void add(char ch)
{
if (i == b.Length)
{
char[] new_b = new char[i + INC];
for (int c = 0; c < i; c++)
new_b[c] = b[c];
b = new_b;
}
b[i++] = ch;
}
/** Adds wLen characters to the word being stemmed contained in a portion
* of a char[] array. This is like repeated calls of add(char ch), but
* faster.
*/
public void add(char[] w, int wLen)
{
if (i + wLen >= b.Length)
{
char[] new_b = new char[i + wLen + INC];
for (int c = 0; c < i; c++)
new_b[c] = b[c];
b = new_b;
}
for (int c = 0; c < wLen; c++)
b[i++] = w[c];
}
/**
* After a word has been stemmed, it can be retrieved by stemerToString(),
* or a reference to the internal buffer can be retrieved by getResultBuffer
* and getResultLength (which is generally more efficient.)
*/
public string stemerToString()
{
return new String(b, 0, i_end);
}
/**
* Returns the length of the word resulting from the stemming process.
*/
public int getResultLength()
{
return i_end;
}
/**
* Returns a reference to a character buffer containing the results of
* the stemming process. You also need to consult getResultLength()
* to determine the length of the result.
*/
public char[] getResultBuffer()
{
return b;
}
/* cons(i) is true <=> b[i] is a consonant. 'y' counts as a consonant
when it starts the word or follows a vowel. */
private bool cons(int i)
{
switch (b[i])
{
case 'a':
case 'e':
case 'i':
case 'o':
case 'u': return false;
case 'y': return (i == 0) ? true : !cons(i - 1);
default: return true;
}
}
/* m() measures the number of consonant sequences between 0 and j. if c is
a consonant sequence and v a vowel sequence, and <..> indicates arbitrary
presence,
<c><v> gives 0
<c>vc<v> gives 1
<c>vcvc<v> gives 2
<c>vcvcvc<v> gives 3
....
*/
private int m()
{
int n = 0;
int i = 0;
/* skip the optional leading consonant sequence */
while (true)
{
if (i > j) return n;
if (!cons(i)) break; i++;
}
i++;
/* count each vc pair up to position j */
while (true)
{
while (true)
{
if (i > j) return n;
if (cons(i)) break;
i++;
}
i++;
n++;
while (true)
{
if (i > j) return n;
if (!cons(i)) break;
i++;
}
i++;
}
}
/* vowelinstem() is true <=> 0,...j contains a vowel */
private bool vowelinstem()
{
int i;
for (i = 0; i <= j; i++)
if (!cons(i))
return true;
return false;
}
/* doublec(j) is true <=> j,(j-1) contain a double consonant. */
private bool doublec(int j)
{
if (j < 1)
return false;
if (b[j] != b[j - 1])
return false;
return cons(j);
}
/* cvc(i) is true <=> i-2,i-1,i has the form consonant - vowel - consonant
and also if the second c is not w,x or y. this is used when trying to
restore an e at the end of a short word. e.g.
cav(e), lov(e), hop(e), crim(e), but
snow, box, tray.
*/
private bool cvc(int i)
{
if (i < 2 || !cons(i) || cons(i - 1) || !cons(i - 2))
return false;
int ch = b[i];
if (ch == 'w' || ch == 'x' || ch == 'y')
return false;
return true;
}
/* ends(s) is true <=> the word b[0..k] ends with the string s;
when it does, j is set to point just before the suffix. */
private bool ends(String s)
{
int l = s.Length;
int o = k - l + 1;
if (o < 0)
return false;
char[] sc = s.ToCharArray();
for (int i = 0; i < l; i++)
if (b[o + i] != sc[i])
return false;
j = k - l;
return true;
}
/* setto(s) sets (j+1),...k to the characters in the string s, readjusting
k. */
private void setto(String s)
{
int l = s.Length;
int o = j + 1;
char[] sc = s.ToCharArray();
for (int i = 0; i < l; i++)
b[o + i] = sc[i];
k = j + l;
}
/* r(s) is used further down: replace the matched suffix with s,
but only when the remaining stem has at least one vc pair. */
private void r(String s)
{
if (m() > 0)
setto(s);
}
/* step1() gets rid of plurals and -ed or -ing. e.g.
caresses -> caress
ponies -> poni
ties -> ti
caress -> caress
cats -> cat
feed -> feed
agreed -> agree
disabled -> disable
matting -> mat
mating -> mate
meeting -> meet
milling -> mill
messing -> mess
meetings -> meet
*/
private void step1()
{
if (b[k] == 's')
{
if (ends("sses"))
k -= 2;
else if (ends("ies"))
setto("i");
else if (b[k - 1] != 's')
k--;
}
if (ends("eed"))
{
if (m() > 0)
k--;
}
else if ((ends("ed") || ends("ing")) && vowelinstem())
{
k = j;
if (ends("at"))
setto("ate");
else if (ends("bl"))
setto("ble");
else if (ends("iz"))
setto("ize");
else if (doublec(k))
{
/* undouble the final consonant, except for l, s, z */
k--;
int ch = b[k];
if (ch == 'l' || ch == 's' || ch == 'z')
k++;
}
else if (m() == 1 && cvc(k)) setto("e");
}
}
/* step2() turns terminal y to i when there is another vowel in the stem. */
private void step2()
{
if (ends("y") && vowelinstem())
b[k] = 'i';
}
/* step3() maps double suffices to single ones. so -ization ( = -ize plus
-ation) maps to -ize etc. note that the string before the suffix must give
m() > 0. */
private void step3()
{
if (k == 0)
return;
/* For Bug 1 */
switch (b[k - 1])
{
case 'a':
if (ends("ational")) { r("ate"); break; }
if (ends("tional")) { r("tion"); break; }
break;
case 'c':
if (ends("enci")) { r("ence"); break; }
if (ends("anci")) { r("ance"); break; }
break;
case 'e':
if (ends("izer")) { r("ize"); break; }
break;
case 'l':
if (ends("bli")) { r("ble"); break; }
if (ends("alli")) { r("al"); break; }
if (ends("entli")) { r("ent"); break; }
if (ends("eli")) { r("e"); break; }
if (ends("ousli")) { r("ous"); break; }
break;
case 'o':
if (ends("ization")) { r("ize"); break; }
if (ends("ation")) { r("ate"); break; }
if (ends("ator")) { r("ate"); break; }
break;
case 's':
if (ends("alism")) { r("al"); break; }
if (ends("iveness")) { r("ive"); break; }
if (ends("fulness")) { r("ful"); break; }
if (ends("ousness")) { r("ous"); break; }
break;
case 't':
if (ends("aliti")) { r("al"); break; }
if (ends("iviti")) { r("ive"); break; }
if (ends("biliti")) { r("ble"); break; }
break;
case 'g':
if (ends("logi")) { r("log"); break; }
break;
default:
break;
}
}
/* step4() deals with -ic-, -full, -ness etc. similar strategy to step3. */
private void step4()
{
switch (b[k])
{
case 'e':
if (ends("icate")) { r("ic"); break; }
if (ends("ative")) { r(""); break; }
if (ends("alize")) { r("al"); break; }
break;
case 'i':
if (ends("iciti")) { r("ic"); break; }
break;
case 'l':
if (ends("ical")) { r("ic"); break; }
if (ends("ful")) { r(""); break; }
break;
case 's':
if (ends("ness")) { r(""); break; }
break;
}
}
/* step5() takes off -ant, -ence etc., in context <c>vcvc<v>.
Each case either breaks (suffix matched; it is stripped below when m() > 1)
or returns (no suffix matched). */
private void step5()
{
if (k == 0)
return;
/* for Bug 1 */
switch (b[k - 1])
{
case 'a':
if (ends("al")) break; return;
case 'c':
if (ends("ance")) break;
if (ends("ence")) break; return;
case 'e':
if (ends("er")) break; return;
case 'i':
if (ends("ic")) break; return;
case 'l':
if (ends("able")) break;
if (ends("ible")) break; return;
case 'n':
if (ends("ant")) break;
if (ends("ement")) break;
if (ends("ment")) break;
/* element etc. not stripped before the m */
if (ends("ent")) break; return;
case 'o':
if (ends("ion") && j >= 0 && (b[j] == 's' || b[j] == 't')) break;
/* j >= 0 fixes Bug 2 */
if (ends("ou")) break; return;
/* takes care of -ous */
case 's':
if (ends("ism")) break; return;
case 't':
if (ends("ate")) break;
if (ends("iti")) break; return;
case 'u':
if (ends("ous")) break; return;
case 'v':
if (ends("ive")) break; return;
case 'z':
if (ends("ize")) break; return;
default:
return;
}
if (m() > 1)
k = j;
}
/* step6() removes a final -e if m() > 1. */
private void step6()
{
j = k;
if (b[k] == 'e')
{
int a = m();
if (a > 1 || a == 1 && !cvc(k - 1))
k--;
}
if (b[k] == 'l' && doublec(k) && m() > 1)
k--;
}
/** Stem the word placed into the Stemmer buffer through calls to add().
* Afterwards, retrieve the result with
* getResultLength()/getResultBuffer() or stemerToString().
* Words of length <= 2 are left unchanged. Resets the buffer offset
* so the instance can be reused for the next word.
*/
public void stem()
{
k = i - 1;
if (k > 1)
{
step1();
step2();
step3();
step4();
step5();
step6();
}
i_end = k + 1;
i = 0;
}
}
/// <summary>Filters {@link StandardTokenizer} with {@link StandardFilter}, {@link
/// LowerCaseFilter}, {@link StopFilter} and {@link SnowballFilter}.
///
/// Available stemmers are listed in {@link SF.Snowball.Ext}. The name of a
/// stemmer is the part of the class name before "Stemmer", e.g., the stemmer in
/// {@link EnglishStemmer} is named "English".
/// </summary>
public class SnowballAnalyzer : Analyzer
{
private System.String name; //snowball stemmer name (kept for the disabled SnowballFilter path)
private System.Collections.Hashtable stopSet; //stop-word set; null when built without stop words
private string mModelPath; //directory holding the POS-tagger model files
//Cached POS tagger. Loading the maximum-entropy model from disk is expensive,
//so it is created once on first use instead of on every call to
//TokenStreamToEntityList.
private EnglishMaximumEntropyPosTagger mTagger;
/// <summary>Builds the named analyzer with no stop words. </summary>
public SnowballAnalyzer(System.String name)
{
//Locate the POS-tagger model directory; the Models folder is expected to
//be deployed next to the executing assembly.
mModelPath = System.IO.Path.GetDirectoryName(
System.Reflection.Assembly.GetExecutingAssembly().GetName().CodeBase);
mModelPath = new System.Uri(mModelPath).LocalPath + @"\Models\";
this.name = name;
}
/// <summary>Builds the named analyzer with the given stop words. </summary>
public SnowballAnalyzer(System.String name, System.String[] stopWords)
: this(name)
{
stopSet = StopFilter.MakeStopSet(stopWords);
}
/// <summary>Constructs a {@link StandardTokenizer} filtered by a {@link
/// StandardFilter}, a {@link LowerCaseFilter} and a {@link StopFilter}.
/// Snowball stemming is applied separately (see TokenStreamToEntityList /
/// AfterStemed), so no SnowballFilter is appended here.
/// </summary>
public override TokenStream TokenStream(System.String fieldName, System.IO.TextReader reader)
{
TokenStream result = new StandardTokenizer(reader);
result = new StandardFilter(result);
result = new LowerCaseFilter(result);
if (stopSet != null)
result = new StopFilter(result, stopSet);
return result;
}
/// <summary>
/// Tokenizes the reader and returns one entity per token carrying the token
/// text, its offsets, its Porter stem and its part-of-speech tag. Replaces
/// the plain TokenStream for callers that need per-token metadata.
/// </summary>
/// <param name="fieldName">field name forwarded to TokenStream</param>
/// <param name="reader">source text</param>
/// <returns>entities in document order (empty list for empty input)</returns>
public List<myEwordEntity> TokenStreamToEntityList(System.String fieldName, System.IO.TextReader reader)
{
TokenStream result = TokenStream(fieldName, reader);
List<myEwordEntity> wordEnList = new List<myEwordEntity>();
while (true)
{
Token token = result.Next();
if (token == null)
break;
//Allocate the entity only for real tokens (the original also allocated
//one on the terminating null iteration).
myEwordEntity entity = new myEwordEntity();
entity.token_begin = token.StartOffset();
entity.token_end = token.EndOffset();
entity.txtWord = token.TermText();
entity.stemroot = AfterStemed(entity.txtWord);
wordEnList.Add(entity);
}
//POS-tag the whole token sequence in a single call; skip for empty input.
if (wordEnList.Count > 0)
{
ArrayList myposlist = new ArrayList();
foreach (myEwordEntity entity in wordEnList)
{
myposlist.Add(entity.txtWord);
}
//mModelPath already ends with '\', so no extra separator is inserted here
//(the original produced a doubled backslash before "Parser").
if (mTagger == null)
mTagger = new EnglishMaximumEntropyPosTagger(mModelPath + "EnglishPOS.nbin", mModelPath + @"Parser\tagdict");
myposlist = mTagger.Tag(myposlist);
for (int i = 0; i < myposlist.Count; i++)
{
wordEnList[i].posWord = myposlist[i].ToString();
}
}
return wordEnList;
}
/// <summary>
/// Lower-cases the input word and reduces it to its Porter stem.
/// </summary>
/// <param name="input">a single word</param>
/// <returns>the stemmed, lower-cased word</returns>
public string AfterStemed(string input)
{
Stemmer s = new Stemmer();
input = input.ToLower();
char[] inputchar = input.ToCharArray();
s.add(inputchar, inputchar.Length);
s.stem();
string u = s.stemerToString();
return u;
}
}
}
发表评论
-
protocols
2011-04-03 19:22 924<!-- The protocols capabilit ... -
dfcap
2011-04-03 19:15 875<!-- The df capability has a ... -
booktrading /seller
2011-03-29 23:19 927<html><head><tit ... -
booktrading / manager
2011-03-29 23:18 1087<html><head><tit ... -
booktrading / common
2011-03-29 23:17 986<html><head><tit ... -
booktrading / buyer
2011-03-29 23:13 844<!-- <H3>The buyer age ... -
tomcat的context说明书
2011-03-20 17:39 803http://tomcat.apache.org/tomcat ... -
msyql的select语法
2010-09-13 22:52 107513.2.7. SELECT语法 13.2.7.1. ... -
zotero与word集成
2010-09-11 08:50 1766Manually Installing the Zotero ... -
university 2/n
2010-08-24 07:54 897Chapter 1.Introduction of regis ... -
university 1/n
2010-08-24 07:53 939chapter? Introduction ?.?The st ... -
Sun Java Bugs that affect lucene
2010-08-23 08:59 735Sometimes Lucene runs amok of b ... -
penn tree bank 6/6
2010-08-20 07:09 91811 This use of 12 Contact the - ... -
penn tree bank 5/n
2010-08-19 07:40 921always errs on the side of caut ... -
penn tree bank 4/n
2010-08-19 07:39 8174. Bracketing 4.1 Basic Methodo ... -
penn tree bank 3/n
2010-08-15 23:31 8182.3.1 Automated Stage. During t ... -
penn tree bank 2/n
2010-08-15 23:30 1504Mitchell P Marcus et al. Buildi ... -
capabilities 3/3
2010-08-11 22:58 77401<capability xmlns="ht ... -
capabilities 2/3
2010-08-11 22:57 737Fig.3.Element creation cases:a) ... -
capabilities 1/3
2010-08-11 22:56 947Extending the Capability Concep ...
相关推荐
3. 分词器升级:新的分词器如IK(智能中文分词器)和 Snowball 分词器,为不同语言和场景提供了更好的文本分析能力。 二、稳定性与可靠性 1. 数据恢复:5.2版改进了数据恢复机制,当节点重新加入集群或发生故障后,...
PHP分词工具有很多,如PHP_Pinyin(用于中文分词及拼音转换)、Sphinx(一个高性能的全文搜索引擎)、Snowball(一种词干提取算法)等。这些工具通常基于词典或者统计模型,如TF-IDF、BM25等,通过比较词汇的频率和...
1. **Snowball Porter Stemmer插件**:应用Snowball算法进行词干提取,减少词汇形态变化对搜索结果的影响。 2. **ICU分词器插件**:提供更高级的Unicode语言处理,包括复杂的脚本拆分和语言特定的词形还原。 3. **...
3. **Snowball**:虽然主要是用于英文词干提取,但通过结合第三方中文词库,也可以实现简单的中文分词。 4. **PHPCWS**:基于PSCWS的PHP5.3版本以上版本的分词库,增加了对多字词的支持。 5. **THULAC**:由北京...
3. Snowball Analyzer:基于Lucene的分词器,支持多国语言,但对中文支持有限,需配合其他中文分词库使用。 4. PKUAnnotator:北京大学开发的中文分词系统,具有较高的准确率和召回率。 五、选择与优化 不同的分词...
5. SnowballFilter:基于Snowball算法进行词干提取。 根据实际需求,开发者可以自定义分词器和过滤器,或者组合使用现有的组件,构建自己的分析链。 三、Solr分词项目的实施 在实际项目中,Solr的分词流程通常...
词典的大小和质量直接影响分词效果,Snowball算法用于生成词典,能够快速扩展到多种语言。 3. **HMM(隐马尔可夫模型)算法**:jieba分词运用了HMM,这是一种统计学方法,通过观察序列来推断隐藏状态。HMM可以学习...
在NLP研究和开发中,有一些知名的开源分词和词性标注工具,例如哈工大的ICTCLAS、北京大学的PKU Snowball、 Stanford NLP工具包以及NLTK(自然语言工具包)等。这些工具不仅提供了基本的分词和词性标注功能,还可能...
本文将详细介绍一款由个人开发者基于CSWS(Chinese Snowball Word Segmentation,中文雪球分词算法)开发的分词软件及词频分析工具,帮助读者理解和掌握相关知识。 首先,我们来了解一下“分词”。分词是自然语言...
3. Snowball.js:一个基于词典的轻量级分词库,适用于简单的分词需求。 4. THULAC.js:清华大学开发的词性标注和分词工具的JavaScript版本,适合学术和专业文本的处理。 三、JS分词的实际应用 1. 搜索引擎优化:...
默认的分词器是`standard`,但可以根据需求定制或选择其他预定义的分词器,如`simple`, `whitespace`, `keyword`, `stop`, `snowball`等。 1. **自定义分词器**:通过定义自己的分词规则,可以实现特定语言或业务...
这个过程通常需要词干提取算法,如Snowball Stemmer或Lancaster Stemmer,但中文处理中,由于词形变化较少,词干提取可能相对简单,更多的是通过规则来实现。 **去停用词** 是指移除文本中无实际含义或者对主题理解...
Java中,我们可以使用Lancaster Stemmer、Porter Stemmer或者Snowball Stemmer等库来实现。其中,Porter Stemmer是最广泛使用的算法,它通过一系列规则将单词转换为其“词根”形式。例如,“running”会被转换为...
分析器是处理文本的关键组件,它负责将输入的文本分割成可索引的词项,通常涉及分词、去除停用词、词形还原等步骤。5.5.0版本的分析器支持多种语言,包括英语、中文、法语等,并提供了一些预定义的分析器,如...
《深入理解libstemmer_java:Linux环境下的分词利器》 在信息技术的海洋中,文本处理扮演着至关重要的角色,而分词作为其中的基础环节,是理解和分析文本内容的关键步骤。libstemmer_java是一个专为Java开发的高效...
- **分词库**:如 Snowball 或 IK 分词库,提供对中文词汇的切分,以满足搜索需求。 **4. Lucene 应用步骤** 1. **创建索引**:创建一个 `Directory` 对象,如 `FSDirectory`,表示索引存储的位置。然后使用 `...
- **SnowballC**:基于Snowball词干算法,适用于多种语言的词干提取。 - **quanteda**:一个全面的文本分析框架,支持复杂的数据类型和分析方法。 - **wordnet**:提供了一个英语词汇数据库的接口,用于词义和...
若没有现成的库,需要自己实现这些规则,否则可以寻找第三方库如Snowball C++库。 5. **输出结果**:将提取后的词干打印到屏幕。可以使用`std::cout`进行输出。 在实现过程中,需要注意以下几点: - 处理边界情况,...
3. 词干提取与词形还原:使用如NLTK库的Porter Stemmer或Snowball Stemmer来减少词汇的不同形态,转化为基本形式。 4. TF-IDF表示:将文本转换为数值向量,TF-IDF(Term Frequency-Inverse Document Frequency)是...
我们将主要关注以下几点:数据读取、文本清洗、分词、词干提取与词形还原、去除停用词、TF-IDF向量化以及词袋模型。 首先,Python提供了多个库来处理文本数据,如`nltk`(Natural Language Toolkit)和`spaCy`。在`...