Crawling WeChat Official Account Articles with Python

I've been working through Python 3 Web Crawler Development in Practice (by Cui Qingcai) and just reached the chapter where he crawls official account articles through a proxy. Following his code as written, though, I ran into problems, so I patched it up using material from the earlier chapters of the book. (The author wrote that code over half a year ago, and Tencent has updated the site since then.)

Here is the code:

TIMEOUT = 20

import requests
from requests import Request, Session, PreparedRequest
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from bs4 import BeautifulSoup as bs
import pymysql

# The search keyword to crawl
keyword = '美女图片'

options = webdriver.ChromeOptions()
# Set the browser language to Chinese
options.add_argument('lang=zh_CN.UTF-8')
# Swap in a desktop User-Agent
options.add_argument(
    'user-agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36"')
# chrome_options= was current when this was written; newer Selenium renamed it to options=
browser = webdriver.Chrome(chrome_options=options)

REDIS_HOST = '192.168.1.248'
REDIS_PORT = 6379
REDIS_PASSWORD = '*****'
REDIS_KEY = 'requests'

# The proxy pool's random-proxy endpoint (requests needs the http:// scheme)
PROXY_POOL_URL = 'http://127.0.0.1:8080/random'

MAX_FAILED_TIME = 5

MYSQL_HOST = 'localhost'
MYSQL_PORT = 3306
MYSQL_USER = 'moxiao'
MYSQL_PASSWORD = '******'

class mysqlConn():
    def __init__(self, host=MYSQL_HOST, username=MYSQL_USER, password=MYSQL_PASSWORD, port=MYSQL_PORT):
        """
        Initialize the MySQL connection
        :param host:
        :param username:
        :param password:
        :param port:
        """
        try:
            self.db = pymysql.Connection(host=host, user=username, password=password,
                                         database='weixin_data', port=port)
            self.cursor = self.db.cursor()
        except pymysql.MySQLError as e:
            print(e.args)

    def insert(self, table, data):
        # Build the INSERT statement dynamically from the dict keys
        keys = ', '.join(data.keys())
        values = ', '.join(['%s'] * len(data))
        sql = 'insert into %s (%s) values (%s)' % (table, keys, values)
        try:
            self.cursor.execute(sql, tuple(data.values()))
            self.db.commit()
        except pymysql.MySQLError as e:
            print(e.args)
            self.db.rollback()
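# insert() builds its column list straight from the dict keys, so the target table
# must have matching columns. A minimal sketch of creating one for the data yielded
# by parse_detail below (the table name 'articles' and the column types are my
# assumptions -- the original never shows the schema):
def create_articles_table():
    db = pymysql.Connection(host=MYSQL_HOST, user=MYSQL_USER, password=MYSQL_PASSWORD,
                            database='weixin_data', port=MYSQL_PORT)
    db.cursor().execute(
        "CREATE TABLE IF NOT EXISTS articles ("
        "title VARCHAR(255), content TEXT, date VARCHAR(64), "
        "nickname VARCHAR(255), wechat VARCHAR(255)"
        ") DEFAULT CHARSET=utf8mb4")
    db.close()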

class WeixinRequest(Request):
    def __init__(self, url, callback, method="GET", headers=None, need_proxy=False, fail_time=0, timeout=TIMEOUT):
        super(WeixinRequest, self).__init__(url=url, method=method, headers=headers)
        self.callback = callback
        self.need_proxy = need_proxy  # request() checks this flag before using the proxy pool
        self.fail_time = fail_time
        self.timeout = timeout

    def prepare(self):
        # Session.send() accepts only a PreparedRequest, so build one from this request
        p = PreparedRequest()
        p.prepare(
            method=self.method,
            url=self.url,
            headers=self.headers,
        )
        return p

class WeixinResponse():
    def __init__(self, text):
        self.text = text

    def set_status_code(self, status_code):
        self.status_code = status_code

import pickle
from redis import StrictRedis

class RedisQueue():
    def __init__(self):
        """
        Initialize Redis
        """
        self.db = StrictRedis(host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PASSWORD, db=3)

    def add(self, request):
        """
        Serialize a request and push it onto the queue
        :param request: the request object
        :return: the push result
        """
        if isinstance(request, WeixinRequest):
            return self.db.rpush(REDIS_KEY, pickle.dumps(request))
        return False

    def pop(self):
        """
        Pop the next request and deserialize it
        :return: a Request, or False if the queue is empty
        """
        if self.db.llen(REDIS_KEY):
            return pickle.loads(self.db.lpop(REDIS_KEY))
        return False

    def empty(self):
        return self.db.llen(REDIS_KEY) == 0

    def del_all(self):
        return self.db.delete(REDIS_KEY)
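# A quick round-trip showing how the queue pickles requests in and out of Redis
# (a minimal sketch; the URL is just a placeholder):
def queue_roundtrip_demo():
    q = RedisQueue()
    q.add(WeixinRequest(url='https://example.com', callback=print))  # rpush a pickled request
    restored = q.pop()                                               # lpop and unpickle
    print(restored.url, restored.timeout)                            # https://example.com 20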

# A module-level helper (it uses no instance state): fetch a proxy from the pool
def get_proxy():
    """
    Get a proxy IP from the proxy pool
    :return: a 'host:port' string, or None
    """
    try:
        response = requests.get(PROXY_POOL_URL)
        if response.status_code == 200:
            print('Get Proxy', response.text)
            return response.text
        return None
    except requests.ConnectionError:
        return None
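# What request() below assumes get_proxy() hands back: a bare 'host:port' string
# in the response body (the sample value is hypothetical):
def proxy_demo():
    proxy = get_proxy()  # e.g. '122.72.18.34:8888'
    if proxy:
        return {'http': 'http://' + proxy, 'https': 'https://' + proxy}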

from urllib.parse import urlencode
from requests import ReadTimeout, ConnectionError
from pyquery import PyQuery as pq

VALID_STATUSES = [200]

class Spider():
    # The Sogou WeChat search endpoint (the exact URL is assumed from the Host and
    # Referer headers below)
    base_url = 'https://weixin.sogou.com/weixin?'
    # 'page' picks the results page. I had meant to fetch the total count and divide
    # by 10 so the whole set could be crawled, but this is only a test, so I didn't.
    # If you want paging, increment self.params['page'] at the bottom of the while
    # loop in schedule() -- see the sketch after that method.
    params = {'type': 2, 's_from': 'input', 'query': keyword, 'page': 1, 'ie': 'utf8', '_sug_': 'n',
              '_sug_type_': ''}
    headers = {'Host': 'weixin.sogou.com',
               'Connection': 'keep-alive',
               'Cache-Control': 'max-age=0',
               'Upgrade-Insecure-Requests': '1',
               'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
               'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
               'Accept-Encoding': 'gzip, deflate',
               'Accept-Language': 'zh-CN,zh;q=0.9',
               'Referer': 'https://weixin.sogou.com/',
               'Cookie': 'your cookie here'}  # TODO you didn't think I'd hand you mine, did you
    session = Session()
    queue = RedisQueue()
    queue.del_all()
    mysql = mysqlConn()

    def start(self):
        """
        Initialization: build and enqueue the first request
        :return:
        """
        # Update the headers globally on the session.
        # If you have ever tried changing headers this way, you'll know it seems to do
        # nothing here -- I wasted at least two hours on this. (See the sketch after
        # this method for why.)
        self.session.headers.update(self.headers)
        start_url = self.base_url + urlencode(self.params)
        # need_proxy=False, i.e. no proxy is used -- this is just a test run.
        # The headers that actually take effect are the ones passed in right here.
        weixin_request = WeixinRequest(url=start_url, callback=self.parse_index, headers=self.headers, need_proxy=False)
        # Schedule the first request
        self.queue.add(weixin_request)
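    # Why the session-level update in start() appears useless: requests merges session
    # headers into a request only inside Session.prepare_request(), which Session.request()
    # calls internally. This crawler builds its own PreparedRequest and hands it straight
    # to Session.send(), and send() does no header merging at all. A minimal sketch of
    # the difference (the X-Demo header is hypothetical, not part of the crawler):
    def headers_merge_demo(self):
        s = Session()
        s.headers.update({'X-Demo': '1'})
        p1 = Request('GET', 'https://example.com').prepare()  # like WeixinRequest.prepare()
        p2 = s.prepare_request(Request('GET', 'https://example.com'))
        print('X-Demo' in p1.headers)  # False -- send(p1) would omit the session header
        print('X-Demo' in p2.headers)  # True -- merging happens in prepare_request()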

    def schedule(self):
        """
        Scheduling loop: pop requests and dispatch their callbacks
        :return:
        """
        while not self.queue.empty():
            weixin_request = self.queue.pop()
            callback = weixin_request.callback
            print('Schedule', weixin_request.url)
            response = self.request(weixin_request)
            if response and response.status_code in VALID_STATUSES:
                results = list(callback(response))
                if results:
                    for result in results:
                        print('New Result', result)
                        if isinstance(result, WeixinRequest):
                            # Queue the new article-detail URLs as well
                            self.queue.add(result)
                        if isinstance(result, dict):
                            # Store in MySQL (the table name 'articles' is an assumption)
                            self.mysql.insert('articles', result)
                else:
                    self.error(weixin_request)
            else:
                self.error(weixin_request)
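    # Paging, as promised in the comment on params above: a hypothetical helper you
    # could call at the bottom of the while loop in schedule(). No stop condition is
    # sketched here; stop once Sogou returns an empty results page.
    def schedule_next_page(self):
        self.params['page'] += 1
        next_url = self.base_url + urlencode(self.params)
        self.queue.add(WeixinRequest(url=next_url, callback=self.parse_index,
                                     headers=self.headers, need_proxy=False))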

    def request(self, weixin_request):
        """
        Execute a request
        :param weixin_request: the request
        :return: the response
        """
        # Sogou index pages go through requests; WeChat article pages (assumed to be
        # on mp.weixin.qq.com) are rendered with Selenium below
        if not 'mp.weixin.qq.com' in weixin_request.url:
            try:
                if weixin_request.need_proxy:
                    proxy = get_proxy()
                    if proxy:
                        proxies = {
                            'http': 'http://' + proxy,
                            'https': 'https://' + proxy
                        }
                        return self.session.send(weixin_request.prepare(),
                                                 timeout=weixin_request.timeout, allow_redirects=False,
                                                 proxies=proxies)
                return self.session.send(weixin_request.prepare(), timeout=weixin_request.timeout,
                                         allow_redirects=False)
            except (ConnectionError, ReadTimeout) as e:
                print(e.args)
                return False
        else:
            print('-' * 20)
            browser.get(weixin_request.url)
            try:
                browser.find_element_by_class_name('rich_media_area_primary_inner')
                wr = WeixinResponse(browser.page_source)
                wr.set_status_code(200)
                return wr
            except NoSuchElementException:
                wr = WeixinResponse('')
                wr.set_status_code(403)
                return wr
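    # The Selenium branch above looks for the article container the instant get()
    # returns, so a slow page load raises NoSuchElementException even for a valid
    # article. A sketch of an explicit wait instead (my suggestion, using the same
    # class name as above -- not part of the original code):
    def wait_for_article(self, timeout=TIMEOUT):
        from selenium.webdriver.common.by import By
        from selenium.webdriver.support.ui import WebDriverWait
        from selenium.webdriver.support import expected_conditions as EC
        from selenium.common.exceptions import TimeoutException
        try:
            WebDriverWait(browser, timeout).until(
                EC.presence_of_element_located((By.CLASS_NAME, 'rich_media_area_primary_inner')))
            return True
        except TimeoutException:
            return False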

    def parse_index(self, response):
        """
        Parse an index (search results) page
        :param response: the response
        :return: new requests, one per article
        """
        doc = pq(response.text)
        items = doc('.news-box .news-list li .txt-box h3 a').items()
        for item in items:
            url = item.attr('href')
            weixin_request = WeixinRequest(url=url, callback=self.parse_detail)
            yield weixin_request

    def parse_detail(self, response):
        """
        Parse an article detail page
        :param response: the response
        :return: the article data
        """
        doc = pq(response.text)
        profile_inner = doc('.profile_inner')
        data = {
            'title': doc('.rich_media_title').text(),
            'content': doc('.rich_media_content').text(),
            'date': doc('#publish_time').text(),
            # 'nickname': doc('#js_profile_qrcode > div > strong').text(),
            'nickname': profile_inner.find('.profile_nickname').text(),
            'wechat':
                [ns for ns in profile_inner.find('.profile_meta').find('.profile_meta_value').items()][
                    0].text()
        }
        # Collect the images (only printed here -- see the download sketch below)
        print('#' * 30)
        soup = bs(response.text, 'lxml')
        wn = soup.find_all('img')
        for img in wn:
            if img.has_attr('_width') and img.has_attr('data-src'):
                print(img.attrs['data-src'])
        yield data
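    # parse_detail only prints the image URLs it finds; a minimal sketch of actually
    # downloading them (the folder name and numbered filenames are my choices):
    def save_images(self, urls, folder='images'):
        import os
        os.makedirs(folder, exist_ok=True)
        for i, url in enumerate(urls):
            try:
                resp = requests.get(url, timeout=TIMEOUT)
                if resp.status_code == 200:
                    # WeChat image URLs carry no usable filename, so number them
                    with open(os.path.join(folder, '%d.jpg' % i), 'wb') as f:
                        f.write(resp.content)
            except requests.RequestException as e:
                print('Image failed', url, e.args)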

    def error(self, weixin_request):
        """
        Error handling: count the failure and requeue up to MAX_FAILED_TIME
        :param weixin_request: the request
        :return:
        """
        weixin_request.fail_time = weixin_request.fail_time + 1
        print('Request Failed', weixin_request.fail_time, 'Times', weixin_request.url)
        if weixin_request.fail_time < MAX_FAILED_TIME:
            self.queue.add(weixin_request)

    def run(self):
        self.start()
        self.schedule()

if __name__ == '__main__':
    spider = Spider()
    spider.run()

Update, 2018-10-06:

Testing today, I found that a cookie no longer gets you logged in to the site. Perhaps Tencent added new security checks; there's no way to know for sure, but visiting the site in a normal browser still works fine.

That's all for this article. I hope it helps with your studies, and please keep supporting 脚本之家.
