Background
I often need crawler scripts to fetch information from certain websites, so here is a reusable template: just change the request URL, the request parameters, the Cookie, or the headers and it is ready to run. I'm sharing the template below.
1. Write to a blank Excel workbook
wb = openpyxl.Workbook()
ws = wb.active                     # use the default active sheet
ws.append(['header', 'header', 'header', 'header', 'header', 'header'])   # ...write one header string per column first
2. Provide the URL, headers, cookies, and params
url = 'request URL'
cookies = {
    'Cookie': 'value'
}
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36'
}
params = {
    'xxx1': 'value',
    'xxx2': 'value',
}
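If you copy the Cookie out of the browser's developer tools as one raw string, here is a minimal sketch (with a hypothetical cookie value) of turning it into the name-to-value dict that requests expects:

raw_cookie = 'k1=v1; k2=v2'                                            # hypothetical string copied from DevTools
cookies = dict(item.split('=', 1) for item in raw_cookie.split('; '))  # {'k1': 'v1', 'k2': 'v2'}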
3. Data cleaning; everyone has their own approach here, so adapt it to the response you get
res_data = requests.post(url=url, params=params, headers=headers, cookies=cookies)
res_data.encoding = "utf-8"
data = json.loads(res_data.text)   # parse the JSON body; res_data.json() also works
value1 = data['key']
value2 = data['key']
value3 = data['key']
value4 = data['key']
value5 = data['key']
value6 = data['key']
value7 = data['key']
You can use re regular expressions, for example:
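A minimal sketch, assuming the response were plain HTML and you wanted every order number of a hypothetical form "NO." followed by digits:

import re
order_numbers = re.findall(r'NO\.\d+', res_data.text)   # hypothetical pattern, adjust to your page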
You can use XPath, for example:
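A minimal sketch with lxml, assuming an HTML response and a hypothetical div whose class is "title":

from lxml import etree
tree = etree.HTML(res_data.text)
titles = tree.xpath('//div[@class="title"]/text()')     # hypothetical XPath, adjust to your page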
You can use plain dictionary access, for example:
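A minimal sketch, assuming the JSON had a hypothetical nested layout such as {'data': {'list': [...]}}:

rows = data.get('data', {}).get('list', [])             # hypothetical key path
names = [row.get('name') for row in rows]               # hypothetical field name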
You can use bs4 (BeautifulSoup), for example:
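A minimal sketch with BeautifulSoup, again assuming an HTML response and a hypothetical table with id "result":

from bs4 import BeautifulSoup
soup = BeautifulSoup(res_data.text, 'html.parser')
cells = [td.get_text(strip=True) for td in soup.select('#result td')]   # hypothetical selector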
4. Save the results
ws.append([value1, value2, value3, value4, value5, value6, value7])   # one value per column, matching the header row
wb.save("result.xlsx")
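If the cleaning step produces a whole list of records rather than single values, a minimal sketch of writing them row by row (the rows variable and its field names are hypothetical):

for row in rows:                                                        # hypothetical list of dicts from the cleaning step
    ws.append([row.get('name'), row.get('price'), row.get('date')])    # hypothetical fields, one per column
wb.save("result.xlsx")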
5. Complete template code
'''
@Project :project name
@File    :program.py
@IDE     :PyCharm
@Author  :一晌小贪欢
@Date    :2024/01/22 15:33
'''
import json
import openpyxl
import requests
wb = openpyxl.Workbook()
ws = wb.active
ws.append(['header', 'header', 'header', 'header', 'header', 'header'])   # ...write one header string per column first
url = 'request URL'
cookies = {
    'Cookie': 'value'
}
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36'
}
params = {
    'xxx1': 'value',
    'xxx2': 'value',
}
res_data = requests.post(url=url, params=params, headers=headers, cookies=cookies)
res_data.encoding = "utf-8"
data = json.loads(res_data.text)   # parse the JSON body; res_data.json() also works
value1 = data['key']
value2 = data['key']
value3 = data['key']
value4 = data['key']
value5 = data['key']
value6 = data['key']
value7 = data['key']
'''
Data cleaning goes here: re, XPath, dictionary access, or bs4, as shown above.
'''
ws.append([value1, value2, value3, value4, value5, value6, value7])   # one value per column, matching the header row
wb.save("result.xlsx")
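If the target interface is paginated, a minimal sketch of wrapping the template in a page loop (the 'page' parameter name and the page range are hypothetical):

for page in range(1, 11):                               # hypothetical: fetch pages 1 to 10
    params['page'] = page                               # hypothetical pagination parameter
    res_data = requests.post(url=url, params=params, headers=headers, cookies=cookies)
    data = res_data.json()
    # clean the fields and ws.append([...]) exactly as above
wb.save("result.xlsx")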
Update 2: POST request (2024-4-9)
headers = {
    "Cookie": "",
    "Content-Type": "application/json"
}
params = {
    "key1": "value",
    "key2": "value",
    "key3": "value",
    "key4": "value",
    "key5": "value"
}
response = requests.post(
    url="https://xxxxxxx",
    data=json.dumps(params),
    headers=headers
)
if response.status_code == 200:
    data = response.json()
else:
    print(f"Request failed, status code: {response.status_code}")
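Since the body is JSON, requests can also serialize it for you: passing json=params sets the Content-Type header automatically, so the call above can be shortened like this:

response = requests.post("https://xxxxxxx", json=params, headers={"Cookie": ""})
data = response.json() if response.status_code == 200 else None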
Update: POST request
import http.client
import json
import openpyxl
conn = http.client.HTTPSConnection("srm.dhvisiontech.com")
payload = json.dumps({
"ettaCheckNo": "",
"ettaNo": "",
"ettaDeclareContractNo": "",
"ettaSegmentType": "",
"ettaBusinessOrderTimeFrom": "2024-8-3 00:00:00",
"ettaBusinessOrderTimeTo": "2024-9-3 00:00:00",
"ettaReqArrivalDateFrom": "",
"ettaReqArrivalDateTo": "",
"ettaTradeTerms": "",
"ettaLoadingMethod": "",
"ettaTranType": "",
"ettaOrderBelongsCode": "",
"ettaExportPortName": "",
"ettaDesCountryName": "",
"ettaDesPortName": "",
"shipDateFrom": "",
"shipDateTo": "",
"queryCarrierName": "",
"size": 1000,
"current": 1
})
headers = {
    # typically the Cookie and "Content-Type": "application/json" go here
}
conn.request("POST", "/dahua-scp-logistics/overseaTask/pageList", payload, headers)
res = conn.getresponse()
data = res.read()
json_data = json.loads(data)
count = 0
for i in json_data.get("data", {}).get("records", []):   # assumed key path; adjust to the actual response structure
    count += 1
    print(f"Record {count}: {i}")
payload2 = json.dumps({
    # request body for the detail query goes here
})
conn.request("POST", "/dahua-scp-logistics/ship/getTaskInfo", payload2, headers)
res = conn.getresponse()
data = res.read()
json_data = json.loads(data)
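openpyxl is imported in this snippet but never used; here is a minimal sketch of dumping the listed records into Excel, where the response key path and the record fields are assumptions based on the request parameters above:

wb = openpyxl.Workbook()
ws = wb.active
ws.append(["ettaNo", "ettaDesPortName"])                           # hypothetical columns
for record in json_data.get("data", {}).get("records", []):        # assumed key path, as above
    ws.append([record.get("ettaNo"), record.get("ettaDesPortName")])
wb.save("result.xlsx")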
Summary: a follow, a bookmark, and a like would be much appreciated.