Python Web Crawler: Scraping News Headlines from Chouti (dig.chouti.com)

# Author: Sooele

import os
import requests
from bs4 import BeautifulSoup
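# Use a browser-like User-Agent so the request looks like it comes from a regular browser.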
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
}
url = 'https://dig.chouti.com/'


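# Fetch the front page and keep the response body as text.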
r1 = requests.get(url=url, headers=headers).text
# print(r1)

# Save the raw HTML locally; make sure the output directory exists first.
os.makedirs('./Web', exist_ok=True)
with open('./Web/chouti.html', 'w', encoding='utf-8') as fp:
    fp.write(r1)

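# Parse the downloaded HTML with Python's built-in html.parser backend.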
soup = BeautifulSoup(r1, 'html.parser')

# find() returns the first matching tag object: the container that holds all news entries.
content_list = soup.find(name='div', attrs={'class': 'link-con'})

# find_all() returns a list of tag objects, one per news entry.
item_list = content_list.find_all(name='div', attrs={'class': 'link-item'})
#print(item_list)
for item in item_list:
    # Each entry's headline sits in an <a> tag with these two classes.
    title_tag = item.find(name='a', attrs={'class': 'link-title link-statistics'})
    if title_tag is not None:
        print(title_tag.text.strip())
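
# Optional extension (a minimal sketch, not part of the original script): the same <a>
# tag that carries the headline presumably also carries the article URL in its href
# attribute. The selector classes are taken from the code above; the href handling and
# the output filename are assumptions made for illustration.
from urllib.parse import urljoin

with open('./Web/chouti_titles.txt', 'w', encoding='utf-8') as out:
    for item in item_list:
        link = item.find(name='a', attrs={'class': 'link-title link-statistics'})
        if link is None:
            continue
        title = link.text.strip()
        href = urljoin(url, link.get('href', ''))  # resolve relative links against the site root
        out.write(f'{title}\t{href}\n')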