A simple Qiushibaike spider: build the page URLs up front, fetch each page, extract the items with XPath, and append them to a JSON-lines file.

import requests
import json
from lxml import etree


class QiubaiSpider(object):
    # Initialize the URL and headers.
    def __init__(self):
        # NOTE: the base URL was elided in the source; this is the usual
        # qiushibaike.com text-page pattern and is an assumption.
        self.origin_url = "https://www.qiushibaike.com/text/page/{}/"
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 "
                          "(KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36"
        }

    # The URL pattern is easy to spot here, so the URL list can be built in advance.
    def get_url_list(self):
        url_list = [self.origin_url.format(page_num) for page_num in range(1, 13)]
        return url_list

    # Send the request and return the decoded page source.
    def parse_url(self, url):
        response = requests.get(url, headers=self.headers)
        return response.content.decode()

    # Extract the data.
    def get_content_list(self, html_str):
        html = etree.HTML(html_str)
        # On inspection, each item sits in its own div, so group by div first.
        group_div_list = html.xpath("//div[@id='content-left']/div")
        content_list = []
        for div in group_div_list:
            content_dict = {}
            user_name = div.xpath(".//h2/text()")
            content_dict["user_name"] = user_name[0].strip() if user_name else None
            user_age = div.xpath(".//div[@class='articleGender womenIcon']/text()")
            content_dict["user_age"] = user_age[0] if user_age else None
            user_gender = div.xpath(".//div[@class='author clearfix']/div/@class")
            content_dict["user_gender"] = user_gender[0] if user_gender else None
            user_image = div.xpath(".//div[@class='author clearfix']/a/img/@src")
            # Image links are protocol-relative ("//pic..."), so prepend the scheme.
            content_dict["user_image"] = "https:" + user_image[0] if user_image else None
            comments = div.xpath(".//div[@class='content']/span/text()")
            content_dict["comments"] = [i.strip() for i in comments] if comments else None
            stats_vote = div.xpath(".//div[@class='stats']/span[@class='stats-vote']/i/text()")
            content_dict["stats-vote"] = stats_vote[0] if stats_vote else None
            stats_comments = div.xpath(".//div[@class='stats']/span[@class='stats-comments']//i/text()")
            content_dict["stats-comments"] = stats_comments[0] if stats_comments else None
            content_list.append(content_dict)
        return content_list

    # Save the data, one JSON object per line.
    def save_data(self, content_list):
        # NOTE: the output filename was elided in the source; "qiubai.json" is a placeholder.
        with open("qiubai.json", "a", encoding="utf-8") as f:
            for content_dict in content_list:
                print(content_dict)
                f.write(json.dumps(content_dict, ensure_ascii=False))
                f.write("\n")
        print("saved successfully")

    def run(self):
        # 1. Build the URL list.
        url_list = self.get_url_list()
        # 2. Send the requests and fetch each page.
        for url in url_list:
            html_str = self.parse_url(url)
            # 3. Extract the data.
            content_list = self.get_content_list(html_str)
            # 4. Save it.
            self.save_data(content_list)


if __name__ == '__main__':
    qiubai = QiubaiSpider()
    qiubai.run()
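The extract-the-first-match-or-None pattern repeats for every field above, and in the original each XPath was even evaluated twice (once for the length check, once for the value). A small helper keeps get_content_list shorter and avoids the double evaluation; a minimal sketch, where the name first_or_none is my own and not from the original:

# Hypothetical helper: return the first XPath match, or a default when there is none.
def first_or_none(element, xpath_expr, default=None):
    results = element.xpath(xpath_expr)
    return results[0] if results else default

# Usage inside get_content_list:
#   content_dict["user_name"] = first_or_none(div, ".//h2/text()")
#   content_dict["user_age"] = first_or_none(div, ".//div[@class='articleGender womenIcon']/text()")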