Scraping web page data with Python multiprocessing and the requests module

import requests, re
import json
from multiprocessing import Pool
from requests.exceptions import RequestException
def get_page_html(url):
    try:
        headers = {
            'host': 'maoyan.com',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) '
                          'AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/63.0.3239.132 Safari/537.36'
        }
        response = requests.get(url=url, headers=headers)
        if response.status_code == 200:
            return response.text
        else:
            return None
    except RequestException:
        return None
def handle_html(html):
    # Groups: ranking index, poster URL, title, star list, release time,
    # and the integer and fraction halves of the score.
    pattern = re.compile(
        r'<dd>.*?board-index.*?>(\d+)</i>.*?data-src="(.*?)".*?name"><a'
        r'.*?>(.*?)</a>.*?"star">(.*?)</p>.*?"releasetime">(.*?)</p>'
        r'.*?"integer">(.*?)</i>.*?"fraction">(.*?)</i>.*?</dd>',
        re.S)
    items = re.findall(pattern, html)
    for item in items:
        yield {
            'index': item[0],
            'image': item[1],
            'title': item[2].strip(),
            'actor': item[3].strip()[3:],   # drop the "主演：" prefix
            'time': item[4].strip()[5:],    # drop the "上映时间：" prefix
            'score': item[5] + item[6],     # e.g. "9." + "6" -> "9.6"
        }
def write_to_file(content):
    with open('./html.txt', 'a', encoding='utf-8') as f:
        f.write(json.dumps(content, ensure_ascii=False) + "\n")
def main(offset=0):
    html = get_page_html("http://maoyan.com/board/4?offset=%s" % offset)
    if html is None:
        return
    items = handle_html(html=html)
    for item in items:
        print(item)
        write_to_file(content=item)
if __name__ == '__main__':
    # Serial version:
    # for i in range(10):
    #     main(i * 10)
    pool = Pool()
    pool.map(main, [i * 10 for i in range(10)])
    pool.close()
    pool.join()
When the code above writes its data to the file, the lines can come out interleaved and corrupted. The reason: one process has not finished writing its record when another process starts writing to the same file. One fix is sketched below.
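A minimal sketch of the fix: serialize the writes with a multiprocessing.Lock shared across the pool workers, passed in through the Pool's initializer. The init_lock helper and the file_lock name are introduced here for illustration; main, handle_html, and json come from the listing above.

from multiprocessing import Pool, Lock

def init_lock(l):
    # Runs once in each worker process and stores the shared lock globally.
    global lock
    lock = l

def write_to_file(content):
    # Only one process may append at a time, so records never interleave.
    with lock:
        with open('./html.txt', 'a', encoding='utf-8') as f:
            f.write(json.dumps(content, ensure_ascii=False) + "\n")

if __name__ == '__main__':
    file_lock = Lock()
    pool = Pool(initializer=init_lock, initargs=(file_lock,))
    pool.map(main, [i * 10 for i in range(10)])
    pool.close()
    pool.join()

An alternative design that avoids the lock entirely: have main return its items instead of writing them, and let the parent process iterate over pool.map's return value and do all the file writing itself.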