最近在研究lucene,主要做ftp搜索和网页的站内搜索。
ftp搜索比较好做,主流的FTP有两种:一种是IIS的,一种是Serv-U的。
针对这两种FTP分别进行分析就可以得到FTP资源的文件名、路径、大小和日期,
然后对其进行索引就可以了,比较简单。
网页检索可不像ftp那样了,我试着用lucene自带的htmlparser,解析纯英文的网页
没有问题,可解析中文的网页时有时会遇到编码问题。郁闷。。。
SourceForge搜到了一个开源的HTMLParser。网址是http://htmlparser.sourceforge.net
目前的版本为1.6。
测试代码为:
import java.io.*;
import org.htmlparser.filters.*;
import org.htmlparser.*;
import org.htmlparser.nodes.*;
import org.htmlparser.tags.*;
import org.htmlparser.util.*;
import org.htmlparser.visitors.*;
/**
 * Demo of the SourceForge HTMLParser 1.6 library (http://htmlparser.sourceforge.net).
 * Reads a locally mirrored GB2312-encoded HTML page and shows four ways of
 * extracting content: all text nodes, text + links, full page (title + body),
 * and text + title.
 */
public class HTMLParserTest
{
    /**
     * Entry point: loads the sample page into a String and runs the four
     * extraction demos against it.
     *
     * @param args unused
     * @throws Exception on any I/O or parse failure (demo code; no recovery)
     */
    public static void main(String args[]) throws Exception
    {
        // Windows path to a locally mirrored page. NOTE(review): the original
        // source had the backslashes garbled to doubled forward slashes
        // ("D://Webdup//..."); normalized here to plain forward slashes,
        // which the Windows JVM accepts.
        String path = "D:/Webdup/MyWebsites/biti/download/latest/cisco.biti.edu.cn/index.html";
        // StringBuilder: single-threaded accumulation, no need for
        // StringBuffer's synchronization.
        StringBuilder sbStr = new StringBuilder();
        // Decode the bytes as GB2312 explicitly so they match the charset the
        // parser is told about below. The original used FileReader, which
        // decodes with the platform default charset and corrupts the Chinese
        // text on non-GB2312 systems.
        BufferedReader reader = new BufferedReader(
                new InputStreamReader(new FileInputStream(path), "GB2312"));
        try
        {
            String temp;
            while ((temp = reader.readLine()) != null)
            {
                sbStr.append(temp);
                // Restore the line terminator that readLine() strips.
                // (The original appended the literal text "/r/n" — the
                // escapes had been garbled.)
                sbStr.append("\r\n");
            }
        }
        finally
        {
            // Always release the file handle, even if reading fails.
            reader.close();
        }
        String result = sbStr.toString();
        readAll(result);
        readTextAndLink(result);
        readByHtml(result);
        readTextAndTitle(result);
    }

    /**
     * Treats the content as a standard HTML page: prints the title, then the
     * body text, using the {@code HtmlPage} visitor.
     *
     * @param content raw HTML source
     * @throws Exception on parse failure
     */
    public static void readByHtml(String content) throws Exception
    {
        Parser myParser = Parser.createParser(content, "GB2312");
        HtmlPage visitor = new HtmlPage(myParser);
        myParser.visitAllNodesWith(visitor);
        String textInPage = visitor.getTitle();
        System.out.println(textInPage);
        NodeList nodelist = visitor.getBody();
        System.out.print(nodelist.asString().trim());
    }

    /**
     * Prints every text node and the page title, filtered via an
     * {@code OrFilter} of {@code TextNode} and {@code TitleTag}.
     *
     * @param result raw HTML source
     * @throws Exception on parse failure
     */
    public static void readTextAndTitle(String result) throws Exception
    {
        Parser parser = Parser.createParser(result, "GB2312");
        NodeFilter textFilter = new NodeClassFilter(TextNode.class);
        NodeFilter titleFilter = new NodeClassFilter(TitleTag.class);
        OrFilter lastFilter = new OrFilter();
        lastFilter.setPredicates(new NodeFilter[] { textFilter, titleFilter });
        NodeList nodelist = parser.parse(lastFilter);
        Node[] nodes = nodelist.toNodeArray();
        String line = "";
        for (int i = 0; i < nodes.length; i++)
        {
            Node node = nodes[i];
            if (node instanceof TextNode)
            {
                TextNode textnode = (TextNode) node;
                line = textnode.getText();
            }
            else if (node instanceof TitleTag)
            {
                TitleTag titlenode = (TitleTag) node;
                line = titlenode.getTitle();
            }
            // Skip whitespace-only lines.
            if (isTrimEmpty(line))
                continue;
            System.out.println(line);
        }
    }

    /**
     * Prints every text node and every link URL, filtered via an
     * {@code OrFilter} of {@code TextNode} and {@code LinkTag}.
     *
     * @param result raw HTML source
     * @throws Exception on parse failure
     */
    public static void readTextAndLink(String result) throws Exception
    {
        Parser parser = Parser.createParser(result, "GB2312");
        NodeFilter textFilter = new NodeClassFilter(TextNode.class);
        NodeFilter linkFilter = new NodeClassFilter(LinkTag.class);
        OrFilter lastFilter = new OrFilter();
        lastFilter.setPredicates(new NodeFilter[] { textFilter, linkFilter });
        NodeList nodelist = parser.parse(lastFilter);
        Node[] nodes = nodelist.toNodeArray();
        String line = "";
        for (int i = 0; i < nodes.length; i++)
        {
            Node node = nodes[i];
            if (node instanceof TextNode)
            {
                TextNode textnode = (TextNode) node;
                line = textnode.getText();
            }
            else if (node instanceof LinkTag)
            {
                LinkTag link = (LinkTag) node;
                line = link.getLink();
            }
            // Skip whitespace-only lines.
            if (isTrimEmpty(line))
                continue;
            System.out.println(line);
        }
    }

    /**
     * Prints the plain text of every text node in the document, skipping
     * whitespace-only nodes.
     *
     * @param result raw HTML source
     * @throws Exception on parse failure
     */
    public static void readAll(String result) throws Exception
    {
        Parser parser = Parser.createParser(result, "GB2312");
        // Extract every text (content) node in one pass.
        Node[] nodes = parser.extractAllNodesThatAre(TextNode.class);
        for (int i = 0; i < nodes.length; i++)
        {
            TextNode textnode = (TextNode) nodes[i];
            String line = textnode.toPlainTextString().trim();
            if (line.equals(""))
                continue;
            System.out.println(line);
        }
    }

    /**
     * Returns whether the string is empty after trimming leading and
     * trailing whitespace (or is {@code null}).
     *
     * @param astr string to test, may be {@code null}
     * @return {@code true} if {@code null}, empty, or whitespace-only
     */
    public static boolean isTrimEmpty(String astr)
    {
        // isBlank already covers the null/empty cases, so the trimmed
        // check only runs on a non-null string.
        return isBlank(astr) || isBlank(astr.trim());
    }

    /**
     * Returns whether the string is blank: {@code null} or zero length.
     *
     * @param astr string to test, may be {@code null}
     * @return {@code true} if {@code null} or empty
     */
    public static boolean isBlank(String astr)
    {
        return (null == astr) || (astr.length() == 0);
    }
}