import requests
from lxml import etree

# Browser-like User-Agent plus a non-persistent connection (see Problem 2 below)
headers = {
    "Connection": "close",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.106 Safari/537.36"}

response = requests.get("http://cansang.gxnongmu.com/docList.aspx?catId=183&Page=1", headers=headers)
result = response.content.decode('utf8')
html_result = etree.HTML(result)

# Each <li> under the list container holds one article link
Sericulture_titles = html_result.xpath("//div[@class='zt2listnr']/ul[1]/li")
for Sericulture_title in Sericulture_titles:
    Sericulture_title_name = Sericulture_title.xpath("./a/text()")
    Sericulture_title_href = Sericulture_title.xpath("./a/@href")
    print(Sericulture_title_name)
    print(Sericulture_title_href)
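The two xpath() calls inside the loop each return a list, so the prints above show values wrapped in brackets. As a minimal follow-up sketch (the urljoin step and variable names are my own additions, not part of the original script), the first match can be pulled out and the relative href turned into an absolute URL:

from urllib.parse import urljoin

base_url = "http://cansang.gxnongmu.com/docList.aspx?catId=183&Page=1"
for Sericulture_title in Sericulture_titles:
    names = Sericulture_title.xpath("./a/text()")
    hrefs = Sericulture_title.xpath("./a/@href")
    if names and hrefs:
        # xpath() returns lists; take the first item and build an absolute link
        print(names[0].strip(), urljoin(base_url, hrefs[0]))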
Two problems came up while running this:
Problem 1:
requests.exceptions.ProxyError: HTTPConnectionPool(host='127.0.0.1', port=8888): Max retries exceeded with url:
A quick search online showed this is a proxy problem: requests is trying to route traffic through the local proxy at 127.0.0.1:8888.
Solution: turn the proxy off.
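Besides switching the system proxy off, requests itself can be told to ignore proxy settings. A minimal sketch (using a Session is an alternative I'm adding here, not something from the original post):

import requests

session = requests.Session()
# Ignore HTTP_PROXY/HTTPS_PROXY environment variables and any system proxy
session.trust_env = False
response = session.get(
    "http://cansang.gxnongmu.com/docList.aspx?catId=183&Page=1",
    headers={"Connection": "close"},
)
print(response.status_code)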
Problem 2:
requests.exceptions.ConnectionError: ('Connection aborted.', RemoteDisconnected('Remote end…
Cause: requests keeps too many connections open, which eventually triggers Max retries exceeded.
Solution: disable persistent connections by adding the following entry to the headers:
'Connection': 'close'
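If the remote end still drops connections occasionally, another option (a common pattern I'm sketching here, not from the original post) is to mount an HTTPAdapter with automatic retries on a Session:

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

session = requests.Session()
# Retry up to 3 times with exponential backoff on connection errors and 5xx responses
retries = Retry(total=3, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
session.mount("http://", HTTPAdapter(max_retries=retries))
session.mount("https://", HTTPAdapter(max_retries=retries))

response = session.get(
    "http://cansang.gxnongmu.com/docList.aspx?catId=183&Page=1",
    headers={"Connection": "close"},
    timeout=10,
)
print(response.status_code)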