1. Source code:
import urllib.request
import urllib.parse
import sys
import io

def loadPage(url, filename):
    """
    Fetch the server response for the given url and return it as text.
    """
    print("Downloading " + filename)
    # Spoof a browser User-Agent so the server does not reject the request
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36"}
    request = urllib.request.Request(url, headers=headers)
    return urllib.request.urlopen(request).read().decode("utf-8")

def writePage(html, filename):
    """
    Write the html string to a local file.
    """
    print("Saving " + filename)
    with open(filename, "w", encoding="utf-8") as f:
        f.write(html)
    print("-" * 30)

def tiebaSpider(url, beginPage, endPage):
    # Tieba shows 50 posts per page, so page n starts at pn = (n - 1) * 50
    for page in range(beginPage, endPage + 1):
        pn = (page - 1) * 50
        filename = "page_" + str(page) + ".html"
        fullurl = url + "&pn=" + str(pn)
        html = loadPage(fullurl, filename)
        writePage(html, filename)

if __name__ == "__main__":
    # Re-wrap stdout so non-ASCII output prints correctly on consoles
    # whose default encoding is not UTF-8
    sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf8")
    kw = input("Enter the name of the Tieba forum to crawl: ")
    beginPage = int(input("Enter the start page: "))
    endPage = int(input("Enter the end page: "))
    url = "https://tieba.baidu.com/f?"
    # urlencode percent-encodes the forum name into a safe query string
    key = urllib.parse.urlencode({"kw": kw})
    fullurl = url + key
    tiebaSpider(fullurl, beginPage, endPage)
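For reference, urllib.parse.urlencode percent-encodes the forum name, so even non-ASCII keywords are safe in the query string. A minimal sketch of the URL the spider builds (the forum name "python" and page 2 are just example values, not from the original post):

from urllib.parse import urlencode

# Hypothetical example: crawling the "python" forum, page 2 (pn = (2 - 1) * 50)
key = urlencode({"kw": "python"})                      # -> "kw=python"
fullurl = "https://tieba.baidu.com/f?" + key + "&pn=50"
print(fullurl)  # https://tieba.baidu.com/f?kw=python&pn=50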
2. Result:
Open one of the saved html files and search for the keyword; finding it in the page confirms the run succeeded.
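To check programmatically instead of by hand, a minimal sketch could read one saved page back and look for the keyword (the filename "page_1.html" and keyword "python" below are example values from a hypothetical run):

# Verification sketch: confirm the keyword appears in a saved page.
with open("page_1.html", encoding="utf-8") as f:
    html = f.read()
print("keyword found" if "python" in html else "keyword not found")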