
Text Processing Study Notes 2

Posted: 2010-01-29    Last modified: 2010-01-29
Environment:
python-2.5.2

Python shines at text processing:
the code reads naturally, is quick to write, and runs acceptably fast.
Below is a little tool for grabbing forum threads. Which forum to grab?
Tianya: traffic is decent, and even its "view only the original poster" feature costs money.

Python code (CrawlerTianYa.py):
# coding:gbk

import os
import sys
import urllib
import sgmllib

# Characters stripped from topic titles so they are safe to use as directory names.
escape_str = """!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~\t\r\n"""

# Parses one thread page: pulls out the topic title, the original poster's
# name, and the links to the thread's other pages.
class TitleCrawler(sgmllib.SGMLParser):
    def __init__(self, url_str):
        sgmllib.SGMLParser.__init__(self)
        self.url_prefix = url_str.rsplit("/", 1)[0]  # pages of the same thread share this prefix
        self.url_dict = {}   # page url -> page number
        self.url_list = []   # (page url, page number) in the order found
        self.url_pre = None  # candidate page link, confirmed when its anchor text is a number
    def reset(self):
        sgmllib.SGMLParser.reset(self)
        self.writer_name = ""
        self.writer_reach = False
        self.topic_name = ""
        self.topic_reach = False
        self.vdata = []
        self.vdata_temp = ""  # previous text chunk; initialized so it exists before the first handle_data
    def start_a(self, attrs):
        for k, v in attrs:
            if k != "href":
                continue
            # the first link into the author's article list marks the poster's name
            if not self.writer_name and "Listwriter.asp?vwriter=" in v:
                self.writer_reach = True
            # a link into ArticlesList marks the start of the topic title
            if "ArticlesList.asp?stritem=" in v:
                self.topic_reach = True
                self.vdata.append(self.vdata_temp)
            # a link sharing the thread's URL prefix may be a pagination link
            if self.url_prefix in v and not self.url_dict.has_key(v):
                self.url_pre = v
    def start_br(self, attrs):
        if self.topic_reach:
            self.topic_reach = False
            self.topic_name = escapeTxt("".join(self.vdata))
            self.vdata = []
    def handle_data(self, text):
        self.vdata_temp = text
        if self.writer_reach:
            self.writer_reach = False
            self.writer_name = text
        if self.topic_reach:
            self.vdata.append(text)
        if self.url_pre:
            # a candidate link counts as a page only if its anchor text is a number
            text = text.strip()
            if text.isdigit():
                self.url_dict[self.url_pre] = text
                self.url_list.append((self.url_pre, text))
            self.url_pre = None

# Collects the body text of posts written by one user ("ALL" keeps every post).
class UserCrawler(sgmllib.SGMLParser):
    def __init__(self, writer):
        sgmllib.SGMLParser.__init__(self)
        self.writer_name = writer
        self.is_all = (self.writer_name=="ALL")
    def reset(self):
        sgmllib.SGMLParser.reset(self)
        self.writer_mayreach = False
        self.writer_reach = False
        self.topic_data = []
        self.vdata = []
        self.vdata_temp = ""  # previous text chunk; initialized so it exists before the first handle_data
    def start_a(self, attrs):
        for k, v in attrs:
            if k != "href":
                continue
            if "Listwriter.asp?vid=" in v or "Listwriter.asp?vwriter=" in v:
                self.writer_mayreach = True
    def start_table(self, attrs):
        self.writer_mayreach = False
        if self.writer_reach:
            self.writer_reach = False
            self.topic_data.extend(self.vdata)
            self.vdata = []
    def start_span(self, attrs):
        self.writer_mayreach = False
        if self.writer_reach:
            self.writer_reach = False
            self.topic_data.extend(self.vdata)
            self.vdata = []
    def start_br(self, attrs):
        if self.writer_reach:
            self.vdata.append("\n")
    def handle_data(self, text):
        if self.writer_mayreach:
            self.writer_mayreach = False
            if self.is_all or self.writer_name in text:
                self.writer_reach = True
                self.vdata.append(self.vdata_temp)
        if self.writer_reach:
            self.vdata.append(text)
        self.vdata_temp = text

# Fetch url_str, save the requested user's posts into dir_str as <page>.txt,
# then follow the thread's pagination links recursively.
def recursive(url_str, url_tag, dir_str, topic_name, writer_name, url_dict, url_list):
    # stop if the page budget is already used up:
    page_count = int(url_dict["page_count"])
    if page_count < 1:
        return
    url_dict[url_str] = url_tag
    url_list.append((url_str, url_tag))
    data_t = readUrl(url_str)
    tc_t = TitleCrawler(url_str)
    tc_t.feed(data_t)
    tc_t.close()
    if not topic_name:
        topic_name = tc_t.topic_name
        url_dict["topic_name"] = topic_name
    if not writer_name:
        writer_name = tc_t.writer_name
        url_dict["writer_name"] = writer_name
        if url_dict.has_key("user_name") and url_dict["user_name"]:
            writer_name = url_dict["user_name"]
            url_dict["writer_name"] = writer_name
    if not dir_str:
        dir_str = "tianya" + os.sep + topic_name
        url_dict["dir_str"] = dir_str
        if not os.path.exists(dir_str):
            print dir_str
            os.makedirs(dir_str)
    uc_t = UserCrawler(writer_name)
    uc_t.feed(data_t)
    uc_t.close()
    o_txt = "".join(uc_t.topic_data)
    o_txt = stripTxt(o_txt)
    file_name = dir_str + os.sep + url_tag + ".txt"
    print file_name
    file_cur = file(file_name, "w")
    file_cur.write(o_txt)
    file_cur.close()
    # spend one page of the budget; stop recursing once it runs out:
    page_count = int(url_dict["page_count"])
    page_count = page_count - 1
    url_dict["page_count"] = str(page_count)
    if page_count < 1:
        return
    # follow each page link not visited yet
    for k, v in tc_t.url_list:
        if not url_dict.has_key(k):
            recursive(k, v, dir_str, topic_name, writer_name, url_dict, url_list)

def createUrlLog(dir_str, writer_name, url_list):
    # write 0.log: the poster's name plus every page fetched, so -a can resume later
    result = ["writer_name = %s"%writer_name]
    for kv in url_list:
        result.append("%s = %s"%kv)
    file_name = dir_str + os.sep + "0.log"
    print file_name
    file_log = file(file_name, "w")
    file_log.write("\n".join(result))
    file_log.close()

def readUrl(url_str):
    print url_str
    socket_t = urllib.urlopen(url_str)
    data_t = socket_t.read()
    socket_t.close()
    return data_t

def escapeTxt(text_str):
    result = []
    for i in text_str:
        if i not in escape_str:
            result.append(i)
    return "".join(result)

def stripTxt(text_str):
    # normalize whitespace: convert full-width spaces and trim every line
    text_arr = text_str.splitlines()
    result = []
    for i in text_arr:
        line = i.replace("\xa1\xa1", " ")  # "\xa1\xa1" is the full-width space in GBK
        result.append(line.strip())
    return "\n".join(result)

def usage():
    print "tianya -h"
    print "tianya -c http://www.tianya.cn/publicforum/content/free/1/1491738.shtml"
    print "tianya -a tianya/"
    print "tianya -n 10 -c http://www.tianya.cn/publicforum/content/free/1/1491738.shtml"
    print "tianya -n 10 -a tianya/『天涯杂谈』医行天下"
    print "  Options include:"
    print "    -h [help]   - 打印帮助"
    print "    -c [create] - 从帖子首页开始抓取"
    print "    -a [append] - 更新已经抓取过的帖子"
    print "    -n [number] - 抓取的最大页面数,防止中途断掉,默认32页"
    print "    -u [user]   - 抓取哪个用户的帖子,ALL代表全部,默认为楼主"
    if not os.path.exists("tianya"):
        return
    tianya_dir = os.listdir("tianya")
    if not tianya_dir:
        return
    print "  当前可更新:"
    for i in tianya_dir:
        print "tianya -a tianya/" + i

if __name__ == "__main__":
    if "-h" in sys.argv:
        usage()
    elif "-c" in sys.argv:
        page_count = "8"
        if "-n" in sys.argv:
            page_count = sys.argv[sys.argv.index("-n")+1]
        user_name = ""
        if "-u" in sys.argv:
            user_name = sys.argv[sys.argv.index("-u")+1]
        url_str = sys.argv[sys.argv.index("-c")+1]
        url_dict = {"page_count":page_count, "user_name":user_name}
        url_list = []
        recursive(url_str, "1", None, None, None, url_dict, url_list)
        createUrlLog(url_dict["dir_str"], url_dict["writer_name"], url_list)

    elif "-a" in sys.argv:
        page_count = "32"
        if "-n" in sys.argv:
            page_count = sys.argv[sys.argv.index("-n")+1]
        user_name = ""
        if "-u" in sys.argv:
            user_name = sys.argv[sys.argv.index("-u")+1]
        dir_str = sys.argv[sys.argv.index("-a")+1]
        topic_name = dir_str.split(os.sep)[-1]
        file_name = dir_str + os.sep + "0.log"
        file_log = file(file_name, "r")
        data_t = file_log.readlines()
        file_log.close()
        writer_name = None
        url_str = None
        url_tag = None
        url_dict = {"page_count":str(int(page_count)+1), "user_name":user_name}
        url_list = []

        # first line of the log: "writer_name = <name>"
        entry_t = data_t[0].split("=")
        writer_name = entry_t[1].strip()
        # middle lines: pages already fetched ("<url> = <page number>")
        for i in data_t[1:-1]:
            entry_t = i.split("=")
            k = entry_t[0].strip()
            v = entry_t[1].strip()
            url_dict[k] = v
            url_list.append((k, v))
        # last line: the page to resume from
        entry_t = data_t[-1].split("=")
        url_str = entry_t[0].strip()
        url_tag = entry_t[1].strip()

        recursive(url_str, url_tag, dir_str, topic_name, writer_name, url_dict, url_list)
        createUrlLog(dir_str, writer_name, url_list)

    else:
        usage()
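
A note on the parsing approach: sgmllib drives everything through callbacks.
feed() scans the HTML and calls start_<tag> for each opening tag and
handle_data for each run of text; sgmllib shipped with Python 2 and was
removed in Python 3. A minimal, self-contained sketch of the pattern
(Python 2, separate from the crawler above):

import sgmllib

class LinkCollector(sgmllib.SGMLParser):
    def reset(self):
        sgmllib.SGMLParser.reset(self)
        self.links = []
    def start_a(self, attrs):
        # called once per <a ...>; attrs is a list of (name, value) pairs
        for k, v in attrs:
            if k == "href":
                self.links.append(v)

parser = LinkCollector()
parser.feed('<a href="http://example.com">example</a>')
parser.close()
print parser.links  # ['http://example.com']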



Usage is as follows:
Put the file into a directory, e.g.
D:\Spider
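
The commands below invoke "tianya" rather than "python CrawlerTianYa.py",
which assumes a small launcher sitting next to the script. A minimal sketch
for Windows (my assumption, not part of the original post), saved as
tianya.bat in the same directory:

@echo off
rem tianya.bat -- assumed wrapper so that "tianya ..." runs the crawler
python CrawlerTianYa.py %*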

Open a command prompt in that directory, type tianya, and press Enter;
the common command lines will be printed.
For example, the first page of a thread looks like this:
http://www.tianya.cn/publicforum/content/free/1/1491738.shtml

To crawl it, use:
tianya -c http://www.tianya.cn/publicforum/content/free/1/1491738.shtml


For a more cautious start you can use
tianya -c http://www.tianya.cn/publicforum/content/free/1/1491738.shtml -n 3

to pull down just 3 pages first and have a look.
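
The script prints each URL as it fetches it and each file as it writes it,
so the console output looks roughly like this (later page URLs vary by
thread and are elided):

http://www.tianya.cn/publicforum/content/free/1/1491738.shtml
tianya\『天涯杂谈』医行天下
tianya\『天涯杂谈』医行天下\1.txt
...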

A thread you have crawled before doesn't need that many pages on a follow-up.
Run tianya again with no arguments, and the printed commands now include something like:
tianya -a tianya/『天涯杂谈』医行天下

From then on, that one command is all you need to keep crawling.
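
Resuming works off the 0.log file that createUrlLog writes into the thread's
directory. Its shape is as follows (values illustrative, not real output):

writer_name = <poster name>
http://www.tianya.cn/publicforum/content/free/1/1491738.shtml = 1
<url of page 2> = 2
...

The first line records the poster, the middle lines are pages already saved,
and the -a branch resumes from the page on the last line.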
On a poor connection the run may drop and raise an error partway through; don't worry, just run the same command again and it will carry on.
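
If that gets annoying, readUrl could be given a few retries. A minimal sketch
(my assumption, Python 2, not in the original script):

import time
import urllib

def readUrlRetry(url_str, tries=3, delay_secs=5):
    # hypothetical helper: retry a flaky fetch a few times before giving up
    for attempt in range(tries):
        try:
            socket_t = urllib.urlopen(url_str)
            data_t = socket_t.read()
            socket_t.close()
            return data_t
        except IOError:
            if attempt == tries - 1:
                raise
            time.sleep(delay_secs)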


The more specialized options:
    -n [number] - max pages to fetch per run, guards against dropped connections (default: 8 for -c, 32 for -a)
    -u [user]   - whose posts to grab; ALL means everyone; default is the original poster
-n exists because network conditions differ: set it higher on a good connection and lower on a flaky one.
-u is for reading one particular person's replies; leave it out and you get the original poster. To grab everyone (the poster plus all replies), do:
tianya -c http://www.tianya.cn/publicforum/content/free/1/1491738.shtml -u ALL

tianya -a tianya/『天涯杂谈』医行天下 -u ALL


Back when I read Kong Ergou's "The Northeast: 20 Years of the Underworld" (东北往事:黑道风云20年) on Tianya, it nearly wore me out,
so from now on it's tools for me.
May this make reading Tianya threads easier for everyone! (Not an ad.)