First, fetch the first page:
import requests

def get_one_page(url):
    """Fetch a single page and return its HTML."""
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/70.0.3538.102 Safari/537.36'
    }
    response = requests.get(url, headers=headers)
    # Check the response status code
    if response.status_code == 200:
        return response.text
    else:
        return None

if __name__ == '__main__':
    url = 'https://maoyan.com/board/4'
    html = get_one_page(url)
    print(html)
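As an aside, requests can also raise an exception (DNS failure, timeout, refused connection) before any status code exists. A slightly more defensive variant, not part of the original code, just a sketch with a shortened User-Agent:

import requests
from requests.exceptions import RequestException

def get_one_page_safe(url):
    """Hypothetical helper: like get_one_page, but also returns None on network errors."""
    headers = {'user-agent': 'Mozilla/5.0'}  # same idea as above, abbreviated here
    try:
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        # Covers connection errors, timeouts, etc.
        return None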
Next, use a regular expression to extract the content we want:
import re

def parse_one_page(html):
    # Capture ranking, poster URL, title, cast, release time,
    # and the two halves of the score from each <dd> block
    pattern = re.compile(
        '<dd>.*?board-index.*?>(.*?)</i>.*?data-src="(.*?)"'
        '.*?name.*?a.*?>(.*?)</a>.*?star.*?>(.*?)</p>'
        '.*?releasetime.*?>(.*?)</p>.*?integer.*?>(.*?)</i>'
        '.*?fraction.*?>(.*?)</i>.*?</dd>', re.S
    )
    items = re.findall(pattern, html)
    return items
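To see why the next step is needed: when a pattern contains several capture groups, re.findall returns a list of tuples, one tuple per matched <dd> block. A minimal illustration with made-up HTML in the same shape as a Maoyan entry (all values here are invented):

import re

html = ('<dd><i class="board-index">1</i>'
        '<img data-src="http://example.com/p.jpg">'
        '<p class="name"><a href="#">Some Movie</a></p>'
        '<p class="star">主演:Someone</p>'
        '<p class="releasetime">上映时间:1994-09-10</p>'
        '<i class="integer">9.</i><i class="fraction">6</i></dd>')

pattern = re.compile(
    '<dd>.*?board-index.*?>(.*?)</i>.*?data-src="(.*?)"'
    '.*?name.*?a.*?>(.*?)</a>.*?star.*?>(.*?)</p>'
    '.*?releasetime.*?>(.*?)</p>.*?integer.*?>(.*?)</i>'
    '.*?fraction.*?>(.*?)</i>.*?</dd>', re.S
)
print(re.findall(pattern, html))
# [('1', 'http://example.com/p.jpg', 'Some Movie', '主演:Someone', '上映时间:1994-09-10', '9.', '6')]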
But the raw return value is messy, so we use a generator to iterate over the results:
import requests
import re

def get_one_page(url):
    """Fetch a single page and return its HTML."""
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/70.0.3538.102 Safari/537.36'
    }
    response = requests.get(url, headers=headers)
    # Check the response status code
    if response.status_code == 200:
        return response.text
    else:
        return None

# Parse a page and yield one dict per movie
def parse_one_page(html):
    pattern = re.compile(
        '<dd>.*?board-index.*?>(.*?)</i>.*?data-src="(.*?)"'
        '.*?name.*?a.*?>(.*?)</a>.*?star.*?>(.*?)</p>'
        '.*?releasetime.*?>(.*?)</p>.*?integer.*?>(.*?)</i>'
        '.*?fraction.*?>(.*?)</i>.*?</dd>', re.S
    )
    items = re.findall(pattern, html)
    for item in items:
        yield {
            'index': item[0],
            'image': item[1],
            'title': item[2],
            'actor': item[3].strip()[3:],  # strip the "主演:" prefix
            'time': item[4][5:],           # strip the "上映时间:" prefix
            'score': item[5].strip() + item[6].strip()  # join integer and fraction parts
        }

if __name__ == '__main__':
    url = 'https://maoyan.com/board/4'
    html = get_one_page(url)
    for item in parse_one_page(html):
        print(item)
Next, write the extracted results to a file:
import json

def write_to_file(content):
    # content is a single movie dict
    with open('result.txt', 'a', encoding='utf-8') as f:
        print(type(json.dumps(content)))  # dumps() serializes the dict to a JSON string
        # ensure_ascii=False keeps the output in readable Chinese instead of \u escapes
        f.write(json.dumps(content, ensure_ascii=False) + '\n')
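Because each record is written as one JSON object per line, reading the file back is just json.loads on each line. A small sketch, assuming result.txt exists from a previous run:

import json

with open('result.txt', encoding='utf-8') as f:
    for line in f:
        movie = json.loads(line)
        print(movie['index'], movie['title'])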
The complete code for crawling the top 100 movies is as follows:
import requests
import re
import json

# Fetch a single page
def get_one_page(url):
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/70.0.3538.102 Safari/537.36'
    }
    response = requests.get(url, headers=headers)
    # Check the response status code
    if response.status_code == 200:
        return response.text
    else:
        return None

# Parse a page and yield one dict per movie
def parse_one_page(html):
    pattern = re.compile(
        '<dd>.*?board-index.*?>(.*?)</i>.*?data-src="(.*?)"'
        '.*?name.*?a.*?>(.*?)</a>.*?star.*?>(.*?)</p>'
        '.*?releasetime.*?>(.*?)</p>.*?integer.*?>(.*?)</i>'
        '.*?fraction.*?>(.*?)</i>.*?</dd>', re.S
    )
    items = re.findall(pattern, html)
    for item in items:
        yield {
            'index': item[0],
            'image': item[1],
            'title': item[2],
            'actor': item[3].strip()[3:],  # strip the "主演:" prefix
            'time': item[4][5:],           # strip the "上映时间:" prefix
            'score': item[5].strip() + item[6].strip()  # join integer and fraction parts
        }

# Append one movie dict to result.txt as a JSON line
def write_to_file(content):
    with open('result.txt', 'a', encoding='utf-8') as f:
        # ensure_ascii=False keeps the output in readable Chinese instead of \u escapes
        f.write(json.dumps(content, ensure_ascii=False) + '\n')

def main(offset):
    url = 'https://maoyan.com/board/4?offset=' + str(offset)
    html = get_one_page(url)
    for item in parse_one_page(html):
        write_to_file(item)

if __name__ == '__main__':
    # The board shows 10 movies per page, so offsets 0, 10, ..., 90 cover the top 100
    for i in range(10):
        main(i * 10)
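Sites like Maoyan may throttle or block rapid successive requests; if some pages come back empty, pausing between fetches often helps. A hedged variant of the entry point (the one-second delay is an arbitrary choice, not from the original):

import time

if __name__ == '__main__':
    for i in range(10):
        main(i * 10)
        time.sleep(1)  # pause between pages to be gentle on the server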