Python: a simple crawler that scrapes the titles and summaries of 100 Baidu Baike pages

Reader submission · 960 · 2022-11-10

The crawler below is split into five small modules: a scheduler (spider_main.py), a URL manager that keeps crawled and uncrawled URLs apart, a downloader, a parser that pulls each lemma's title and summary out of the page along with links to further /view/ pages, and an outputer that writes everything into an HTML table. It starts from a single entry page and stops after 100 pages.

spider_main.py

# coding:utf8
from baike import url_manager
from baike import html_downloader
from baike import html_outputer
from baike import html_parser


class SpiderMain(object):
    def __init__(self):
        self.urls = url_manager.UrlManager()                # tracks crawled / uncrawled URLs
        self.downloader = html_downloader.HtmlDownloader()  # fetches page HTML
        self.parser = html_parser.HtmlParser()              # extracts new URLs and page data
        self.outputer = html_outputer.HtmlOutputer()        # collects data, writes output.html

    def craw(self, root_url):
        count = 1
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()
                print 'craw %d : %s' % (count, new_url)
                html_cont = self.downloader.download(new_url)
                new_urls, new_data = self.parser.parse(new_url, html_cont)
                self.urls.add_new_urls(new_urls)
                self.outputer.collect_data(new_data)
                if count == 100:  # stop after 100 pages
                    break
                count = count + 1
            except:
                print 'craw failed'
        self.outputer.output_html()


if __name__ == "__main__":
    # The entry URL was truncated in the original post; this is the Baidu Baike
    # "Python" lemma commonly used as the seed in this tutorial's pattern.
    root_url = "http://baike.baidu.com/view/21087.htm"
    obj_spider = SpiderMain()
    obj_spider.craw(root_url)
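The imports in spider_main.py assume the four helper modules live in a baike package next to the entry script. A minimal layout that satisfies them looks like this (the empty __init__.py is what makes the directory a Python 2 package):

baike/
    __init__.py
    url_manager.py
    html_downloader.py
    html_parser.py
    html_outputer.py
spider_main.py

Running python spider_main.py then crawls up to 100 pages and writes output.html into the working directory.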

html_downloader.py

# coding:utf8
import urllib2


class HtmlDownloader(object):
    def download(self, url):
        if url is None:
            return None
        response = urllib2.urlopen(url)
        if response.getcode() != 200:
            return None
        return response.read()
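urllib2 exists only on Python 2. For reference, here is a minimal sketch of the same class on Python 3, where the module was folded into urllib.request; this port is my addition, not part of the original post:

# coding:utf8
# Python 3 sketch of HtmlDownloader: urllib2 became urllib.request in Python 3.
import urllib.request


class HtmlDownloader(object):
    def download(self, url):
        if url is None:
            return None
        response = urllib.request.urlopen(url)
        if response.getcode() != 200:
            return None
        return response.read()  # bytes; BeautifulSoup can decode them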

html_outputer.py

# coding:utf8


class HtmlOutputer(object):
    def __init__(self):
        self.datas = []

    def collect_data(self, data):
        if data is None:
            return
        self.datas.append(data)

    def output_html(self):
        fout = open('output.html', 'w')
        fout.write("<html>")
        fout.write("<body>")
        fout.write("<table>")
        # the file is opened in ascii mode, so encode the unicode fields as utf-8
        for data in self.datas:
            fout.write("<tr>")
            fout.write("<td>%s</td>" % data['url'].encode('utf-8'))
            fout.write("<td>%s</td>" % data['title'].encode('utf-8'))
            fout.write("<td>%s</td>" % data['summary'].encode('utf-8'))
            fout.write("</tr>")
        fout.write("</table>")
        fout.write("</body>")
        fout.write("</html>")
        fout.close()

html_parser.py

# coding:utf8
import re
import urlparse

from bs4 import BeautifulSoup


class HtmlParser(object):
    def _get_new_urls(self, page_url, soup):
        new_urls = set()
        # lemma links on the page look like /view/123.htm
        links = soup.find_all('a', href=re.compile(r"/view/\d+\.htm"))
        for link in links:
            new_url = link['href']
            new_full_url = urlparse.urljoin(page_url, new_url)  # resolve relative links
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):
        res_data = {}
        res_data['url'] = page_url
        # title: <dd class="lemmaWgt-lemmaTitle-title"><h1>...</h1></dd>
        title_node = soup.find('dd', class_="lemmaWgt-lemmaTitle-title").find('h1')
        res_data['title'] = title_node.get_text()
        # summary: <div class="lemma-summary">...</div>
        summary_node = soup.find('div', class_="lemma-summary")
        res_data['summary'] = summary_node.get_text()
        return res_data

    def parse(self, page_url, html_cont):
        if page_url is None or html_cont is None:
            return None, None  # keep the return shape consistent for the caller
        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data
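The two selectors are easy to check offline against a hand-written page fragment. The snippet below is invented purely to exercise them, and again assumes the baike package layout:

# coding:utf8
from baike.html_parser import HtmlParser

page = '''
<html><body>
<dd class="lemmaWgt-lemmaTitle-title"><h1>Python</h1></dd>
<div class="lemma-summary">Python is a programming language.</div>
<a href="/view/10812319.htm">Guido van Rossum</a>
</body></html>
'''

parser = HtmlParser()
new_urls, new_data = parser.parse('http://baike.baidu.com/view/21087.htm', page)
print new_urls             # set(['http://baike.baidu.com/view/10812319.htm'])
print new_data['title']    # Python
print new_data['summary']  # Python is a programming language.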

url_manager.py

# coding:utf8


class UrlManager(object):
    def __init__(self):
        self.new_urls = set()  # URLs waiting to be crawled
        self.old_urls = set()  # URLs already crawled

    def add_new_url(self, url):
        if url is None:
            return
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)

    def has_new_url(self):
        return len(self.new_urls) != 0

    def get_new_url(self):
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url
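A quick check of the de-duplication behaviour, assuming the same baike package layout:

# coding:utf8
from baike.url_manager import UrlManager

manager = UrlManager()
manager.add_new_url('http://baike.baidu.com/view/21087.htm')
manager.add_new_url('http://baike.baidu.com/view/21087.htm')  # duplicate, ignored
print manager.has_new_url()    # True
url = manager.get_new_url()    # moves the URL into old_urls
manager.add_new_url(url)       # already crawled, ignored
print manager.has_new_url()    # False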

