# I have personally tested this program and it works; it is meant to help you understand the basics of web scraping. Criticism and corrections are welcome.
```python
import requests
from lxml import etree
from multiprocessing import JoinableQueue as Queue
from multiprocessing import Process

"""Scraping target: (the site URL was stripped from the source)
Implemented with multiple processes.
"""

class QiuShi:
    def __init__(self):
        # url and headers
        # NOTE: the page-url template was truncated in the source; only '/{}' survives
        self.base_url = '/{}'
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 '
                          '(KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36'
        }

        # queues used to pass data between the worker processes
        self.url_queue = Queue()
        self.request_queue = Queue()
        self.html_queue = Queue()

    def get_url_list(self):
        """Build and enqueue every page url."""
        for i in range(1, 14):
            target_url = self.base_url.format(i)
            print(target_url)
            # each put() registers one task that main() will wait on
            self.url_queue.put(target_url)

    def request_url(self):
        """Request each url taken from the url queue."""
        while True:
            target_url = self.url_queue.get()
            response = requests.get(target_url, headers=self.headers)
            print(response)
            self.request_queue.put(response)
            self.url_queue.task_done()

    def get_content(self):
        """Extract the author/content pairs from each response."""
        while True:
            html_text = self.request_queue.get().content.decode()
            html = etree.HTML(html_text)
            div_list = html.xpath('//div[@id="content-left"]/div')
            content_list = []
            for div in div_list:
                item = {}
                item['author'] = div.xpath('.//h2/text()')[0].strip()
                item['content'] = div.xpath('.//span/text()')[0].strip()
                print(item)
                content_list.append(item)
            self.html_queue.put(content_list)
            self.request_queue.task_done()

    def save_data(self):
        """Persist the parsed items."""
        while True:
            data_list = self.html_queue.get()
            for data in data_list:
                # the output filename was truncated in the source;
                # 'qiushi.txt' is a placeholder
                with open('qiushi.txt', 'a+') as f:
                    f.write(str(data))
                    f.write('\r\n')
            self.html_queue.task_done()

    def main(self):
        # enqueue all the page urls up front
        self.get_url_list()

        # collect the worker processes in one list
        process_list = []
        p_request = Process(target=self.request_url)
        process_list.append(p_request)

        p_content = Process(target=self.get_content)
        process_list.append(p_content)

        p_save_data = Process(target=self.save_data)
        process_list.append(p_save_data)

        # start every process
        for process in process_list:
            # daemon processes die with the main process, whether or
            # not their (infinite) loops have finished
            process.daemon = True
            process.start()

        # wait until every queued task has been acknowledged, then let the
        # main process exit (like a manager who can only leave the office
        # after all the staff have gone home)
        for director in [self.url_queue, self.request_queue, self.html_queue]:
            director.join()


if __name__ == '__main__':
    qiushi = QiuShi()
    qiushi.main()
```