# 爬虫网站 (XPath) — XPath-based web scraper that collects m3u8 links

import lxml.etree
import requests
from fake_useragent import UserAgent  # random User-Agent header per run
# Request headers: a randomized User-Agent plus a (redacted) session cookie.
headers = {
    'Cookie': '***********',
    'user-agent': UserAgent(verify_ssl=False).random,
}

# Crawl listing pages 1..99: collect each video page's URL via XPath, then
# extract the page's m3u8 link(s) and append them to m3u8.csv, one per line.
with open('m3u8.csv', 'a+', encoding='utf-8') as file:  # open once, not once per video
    for index_url in range(1, 100):  # pagination
        url = f'https://网站.com/front?o=t&page={index_url}'  # listing-page URL
        print(f'第{index_url}页')
        try:
            # timeout prevents a hung server from stalling the whole crawl
            response = requests.get(url=url, headers=headers, timeout=10).text
        except requests.RequestException as exc:
            print('listing request failed:', url, exc)
            continue  # skip this page, keep crawling
        xpath_html = lxml.etree.HTML(response)  # parse listing page
        # Video-page URLs live in data-url attributes of the result grid
        # (~20 per page per the original author's notes).
        video_page_urls = xpath_html.xpath(
            '//*[@id="app"]/div/div/div/div[4]/div[*]/div[3]//@data-url')
        for video_page_url in video_page_urls:
            try:
                page_text = requests.get(url=video_page_url, headers=headers,
                                         timeout=10).text
            except requests.RequestException as exc:
                print('video-page request failed:', video_page_url, exc)
                continue
            # Use a distinct name for the parsed tree — the original shadowed
            # the URL string with the lxml element, which invites bugs.
            page_tree = lxml.etree.HTML(page_text)
            m3u8_links = page_tree.xpath(
                '//*[@id="app"]/div/div[2]/div[1]/div[4]/meta[4]//@content')
            # xpath() returns a list; write each link on its own line instead
            # of dumping the Python list repr (e.g. "['http://…']") into the CSV.
            for m3u8_link in m3u8_links:
                file.write(str(m3u8_link) + '\n')
                print('写入:', m3u8_link)