猫眼电影 TOP100 榜单爬虫(目标网址:https://maoyan.com/board/4)
1、分析要抓取的内容
2、抓取首页,正则匹配需要的内容
import requests
import re
from fake_useragent import UserAgent

# Target: the first page (offset=0) of the Maoyan TOP100 board.
baseurl = 'https://maoyan.com/board/4?offset=0'
headers = {
    # Random User-Agent to reduce the chance of being blocked.
    'User-Agent': UserAgent(use_cache_server=False).random
}
response = requests.get(baseurl, headers=headers)
# Raw strings so '\d' is a regex escape, not a (deprecated) string escape.
# Capture groups: rank, poster URL, title, stars, release time,
# score integer part, score fraction part.
pattern = re.compile(
    r'<dd>.*?board-index.*?(\d+)</i>.*?alt.*?src="(.*?)"'
    r'.*?name"><a.*?>(.*?)</a>.*?star">(.*?)</p>.*?releasetime">(.*?)</p>'
    r'.*?integer">(.*?)</i>.*?fraction">(.*?)</i>.*?</dd>', re.S)
items = re.findall(pattern, response.text)
print(items)
3、将爬取到的内容写入文件
import json  # required for json.dumps below; missing in the original snippet

# Append mode so repeated runs accumulate results instead of overwriting.
with open('movies.txt', 'a', encoding='utf-8') as f:
    f.write(json.dumps(items, ensure_ascii=False))
4、整合代码并进行分页爬取
完整代码
import requests
import re
from fake_useragent import UserAgent
import json
import time
def get_info(baseurl):
    """Fetch one page of the Maoyan TOP100 board and extract movie records.

    Args:
        baseurl: full page URL, i.e. the board URL with its offset query.

    Returns:
        list of 7-tuples: (rank, poster URL, title, stars, release time,
        score integer part, score fraction part).
    """
    # NOTE: relies on the module-level `headers` dict built in __main__.
    response = requests.get(baseurl, headers=headers)
    # Raw strings so '\d' is a regex escape, not a (deprecated) string escape.
    pattern = re.compile(
        r'<dd>.*?board-index.*?(\d+)</i>.*?alt.*?src="(.*?)"'
        r'.*?name"><a.*?>(.*?)</a>.*?star">(.*?)</p>.*?releasetime">(.*?)</p>'
        r'.*?integer">(.*?)</i>.*?fraction">(.*?)</i>.*?</dd>', re.S)
    items = re.findall(pattern, response.text)
    for item in items:
        print(item)
    return items
def save_info(items):
    """Append one page of scraped records to movies.txt as one JSON line.

    Args:
        items: list of tuples produced by get_info().
    """
    with open('movies.txt', 'a', encoding='utf-8') as f:
        # Trailing newline keeps each append a separate, parseable JSON
        # document (JSON-lines); without it successive arrays run together
        # into invalid JSON.
        f.write(json.dumps(items, ensure_ascii=False) + '\n')
if __name__ == '__main__':
    baseurl = 'https://maoyan.com/board/4?offset='
    headers = {
        # Random User-Agent per run to reduce the chance of being blocked.
        'User-Agent': UserAgent(use_cache_server=False).random
    }
    # (Removed an unused pattern = re.compile(...) here: get_info compiles
    # its own pattern, so the module-level copy was dead code.)
    # 10 pages of 10 movies each covers the full TOP100 board.
    for i in range(10):
        save_info(get_info(baseurl + str(i * 10)))
        time.sleep(1)  # Throttle requests: Maoyan rate-limits fast crawlers.