思路
获取到微信公众号的文章之后,使用 BeautifulSoup 和正则表达式 解析出文章标题和包含正文的html.
获取到html之后,用正则表达式获取图片下载链接,下载成功后将微信的链接替换成本地图片链接。
运行效果
![在这里插入图片描述](https://img-blog.csdnimg.cn/893c9573c3d443c48f171afe62ab1ffb.png?x-oss-process=image/watermark,type_d3F5LXplbmhlaQ,shadow_50,text_Q1NETiBAWmVyb3RvZ2V0aGVy,size_20,color_FFFFFF,t_70,g_se,x_16)
代码
import requests
from re import findall
from bs4 import BeautifulSoup
import time
import os
# Article title, filled in by get_weixin_html() as a side effect.
weixin_title=""
# Article publish time (e.g. "2021-09-17 10:30"), also set by get_weixin_html().
weixin_time=""
# Fetch a WeChat official-account article; save title/time to module globals.
def get_weixin_html(url):
    """Download the article at *url* and return its body HTML.

    Side effects: sets the module-level globals ``weixin_title`` (text of
    the first <h1>) and ``weixin_time`` (first "YYYY-MM-DD ...:MM" match
    in the page). Every lazy-load ``data-src`` attribute in the body is
    rewritten to ``src`` so images render outside WeChat's viewer.
    """
    global weixin_time, weixin_title
    res = requests.get(url)
    soup = BeautifulSoup(res.text, "html.parser")
    # The article title is the first <h1> on the page.
    temp = soup.find('h1')
    weixin_title = temp.string.strip()
    # Grab the publish time with a date-like regex; first match wins.
    result = findall(r'[0-9]{4}-[0-9]{2}-[0-9]{2}.+:[0-9]{2}', res.text)
    weixin_time = result[0]
    # The body lives in <div id="js_content">, which WeChat hides by default;
    # force it visible in the extracted copy.
    content = soup.find(id='js_content')
    soup2 = BeautifulSoup((str(content)), "html.parser")
    soup2.div['style'] = 'visibility: visible;'
    html = str(soup2)
    pattern = r'http[s]?:\/\/[a-z.A-Z_0-9\/\?=-_-]+'
    result = findall(pattern, html)
    # Rewrite data-src -> src for each image URL. NOTE: the original loop
    # reused the name `url`, shadowing the function parameter; use a
    # distinct loop variable instead.
    for img_url in result:
        html = html.replace('data-src="' + img_url + '"', 'src="' + img_url + '"')
    return html
# Download the article's images and point the HTML at the local copies.
# (The original header comment said "upload to server", which contradicts
# what the code does.)
def download_pic(content):
    """Download every image URL found in *content* into ``pic/``.

    Each downloaded link is replaced in *content* by its local path
    (``pic/<index>.<ext>``); the rewritten string is returned. Each image
    is retried up to 10 times; failures are reported and the original
    URL is left in place.
    """
    pic_path = 'pic/'
    if not os.path.exists(pic_path):
        os.makedirs(pic_path)
    # Find all candidate image/download URLs in the HTML.
    pattern = r'http[s]?:\/\/[a-z.A-Z_0-9\/\?=-_-]+'
    pic_list = findall(pattern, content)
    for index, item in enumerate(pic_list, 1):
        count = 1
        flag = True
        pic_url = str(item)
        # NOTE(review): the source line here was truncated
        # ("while flag and count0:"); reconstructed as a <=10 retry loop
        # wrapping requests.get, consistent with the `count > 10` error
        # branch below and the use of `data.content` when saving.
        while flag and count <= 10:
            try:
                data = requests.get(pic_url)
                # Pick a file extension from the URL; default to .jpg.
                if pic_url.find('png') > 0:
                    file_name = str(index) + '.png'
                elif pic_url.find('gif') > 0:
                    file_name = str(index) + '.gif'
                else:
                    file_name = str(index) + '.jpg'
                with open(pic_path + file_name, "wb") as f:
                    f.write(data.content)
                # Point the HTML at the local copy of this image.
                content = content.replace(pic_url, pic_path + file_name)
                flag = False
                print('已下载第' + str(index) + '张图片.')
                count += 1
                time.sleep(1)  # be polite between downloads
            except Exception:  # narrowed from bare `except:`
                count += 1
                time.sleep(1)
                if count > 10:
                    print("下载出错:", pic_url)
    return content
if __name__ == "__main__":
#获取html
input_flag=True
while input_flag:
weixin_url=input('请输入微信文章链接后按Enter:')
re=findall(r'http[s]?:\/\/mp.weixin.qq.com\/s\/[0-9a-zA-Z_]+',weixin_url)
if len(re) |