Code implementation: two tool calls
The example below asks what to wear in 北京 right now. Answering it requires two tool calls in sequence: get_temperature looks up the city's temperature, and get_fit_cloth turns that temperature into a clothing recommendation.
import json
import time

from openai import OpenAI


# Tool functions
def get_temperature(city):
    """Simulate looking up a city's current temperature."""
    # A real implementation would call a weather API; mock data is used here instead.
    temperature_data = {
        "北京": 25,
        "上海": 28,
        "广州": 30,
        "深圳": 29,
        "成都": 26,
    }
    return temperature_data.get(city, 20)  # default to 20 °C for unknown cities


def get_fit_cloth(temperature):
    """Recommend suitable clothing for a given temperature (°C)."""
    if temperature > 28:
        return "短袖T恤和短裤"
    elif 24 <= temperature <= 28:
        return "短袖T恤和薄长裤"
    elif 18 <= temperature < 24:
        return "长袖衬衫和长裤"
    elif 10 <= temperature < 18:
        return "毛衣或薄外套"
    else:
        return "厚外套或羽绒服"
# Tool schemas that tell the model which tools are available
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_temperature",
            "description": "获取指定城市的当前温度",
            "parameters": {
                "type": "object",
                "properties": {
                    "city": {
                        "type": "string",
                        "description": "城市名称,如北京、上海",
                    }
                },
                "required": ["city"],
            },
        },
    },
    {
        "type": "function",
        "function": {
            "name": "get_fit_cloth",
            "description": "根据温度获取适合的衣物推荐",
            "parameters": {
                "type": "object",
                "properties": {
                    "temperature": {
                        "type": "number",
                        "description": "温度值,单位是摄氏度",
                    }
                },
                "required": ["temperature"],
            },
        },
    },
]
base_url = "https://open.bigmodel.cn/api/paas/v4/"
api_key = "..."
model_name = 'glm-4-flash'

t1 = time.time()
client = OpenAI(api_key=api_key, base_url=base_url)
def run_conversation(user_input):
    # Step 1: send the user input and the tool schemas to the model
    messages = [{"role": "user", "content": user_input}]
    response = client.chat.completions.create(
        model=model_name,
        messages=messages,
        tools=tools,
        # tool_choice="auto",
    )
    print('-- response : ', response)
    response_message = response.choices[0].message
    tool_calls = response_message.tool_calls

    # Step 2: check whether the model wants to call any tools
    if tool_calls:
        messages.append(response_message)  # add the assistant message to the history
        # Execute each requested tool call
        for tool_call in tool_calls:
            function_name = tool_call.function.name
            function_args = json.loads(tool_call.function.arguments)
            # Dispatch to the corresponding local function
            if function_name == "get_temperature":
                function_response = get_temperature(
                    city=function_args.get("city")
                )
            elif function_name == "get_fit_cloth":
                function_response = get_fit_cloth(
                    temperature=function_args.get("temperature")
                )
            else:
                function_response = "未知工具调用"
            tool_message = {
                "tool_call_id": tool_call.id,
                "role": "tool",
                "name": function_name,
                "content": str(function_response),
            }
            messages.append(tool_message)
        # Step 3: send the tool results back so the model can continue
        second_response = client.chat.completions.create(
            model=model_name,
            messages=messages,
            tools=tools,
            # tool_choice="auto",
        )
        print('\n-- second_response : ', second_response)
        return second_response.choices[0].message.content
    else:
        # No tool call requested: return the model's reply directly
        return response_message.content


# Example usage
if __name__ == "__main__":
    # Example 1: ask what to wear (requires both tools)
    print(run_conversation("北京现在适合穿什么衣服?"))
    # Example 2: ask only for the temperature
    # print(run_conversation("上海现在的温度是多少?"))
    # Example 3: the temperature is known, ask for a clothing recommendation
    # print(run_conversation("现在温度是22度,我该穿什么?"))
Response logs
-- response:
ChatCompletion(
    id='20250619170027a299743fa79c436f',
    choices=[
        Choice(
            finish_reason='tool_calls',
            index=0,
            logprobs=None,
            message=ChatCompletionMessage(
                content=None, refusal=None, role='assistant',
                annotations=None, audio=None,
                function_call=None,
                tool_calls=[
                    ChatCompletionMessageToolCall(
                        id='call_-8598006517705558361',
                        function=Function(
                            arguments='{"city": "北京"}', name='get_temperature'
                        ),
                        type='function', index=0)
                ]
            )
        )
    ],
    created=1750323628,
    model='glm-4-flash',
    object=None,
    service_tier=None,
    system_fingerprint=None,
    usage=CompletionUsage(
        completion_tokens=10,
        prompt_tokens=270,
        total_tokens=280,
        completion_tokens_details=None,
        prompt_tokens_details=None
    ),
    request_id='20250619170027a299743fa79c436f'
)
-- second_response:
ChatCompletion(
    id='202506191700284b5faf039ce14317',
    choices=[
        Choice(
            finish_reason='tool_calls',
            index=0,
            logprobs=None,
            message=ChatCompletionMessage(
                content=None,
                refusal=None,
                role='assistant',
                annotations=None, audio=None,
                function_call=None,
                tool_calls=[
                    ChatCompletionMessageToolCall(
                        id='call_-8598014111208728724',
                        function=Function(
                            arguments='{"temperature": 25}',
                            name='get_fit_cloth'
                        ),
                        type='function',
                        index=0
                    )
                ]
            )
        )
    ],
    created=1750323628,
    model='glm-4-flash',
    object=None,
    service_tier=None, system_fingerprint=None,
    usage=CompletionUsage(
        completion_tokens=12,
        prompt_tokens=283,
        total_tokens=295,
        completion_tokens_details=None,
        prompt_tokens_details=None
    ),
    request_id='202506191700284b5faf039ce14317'
)
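Note that in second_response above, finish_reason is again 'tool_calls': the model now wants to call get_fit_cloth with the 25 °C returned by the first tool, so second_response.choices[0].message.content is None and run_conversation as written returns None for Example 1. Below is a minimal sketch of a loop-based variant that keeps executing tool calls until the model returns plain text; the name run_conversation_loop and the max_rounds cap are assumptions, not part of the original code.

def run_conversation_loop(user_input, max_rounds=5):
    # Hypothetical variant: keep satisfying tool calls until the model answers in text.
    messages = [{"role": "user", "content": user_input}]
    for _ in range(max_rounds):
        response = client.chat.completions.create(
            model=model_name,
            messages=messages,
            tools=tools,
        )
        message = response.choices[0].message
        if not message.tool_calls:
            return message.content  # the model produced a normal text answer
        messages.append(message)  # keep the assistant message carrying the tool calls
        for tool_call in message.tool_calls:
            args = json.loads(tool_call.function.arguments)
            if tool_call.function.name == "get_temperature":
                result = get_temperature(city=args.get("city"))
            elif tool_call.function.name == "get_fit_cloth":
                result = get_fit_cloth(temperature=args.get("temperature"))
            else:
                result = "未知工具调用"
            messages.append({
                "tool_call_id": tool_call.id,
                "role": "tool",
                "name": tool_call.function.name,
                "content": str(result),
            })
    return None  # give up after max_rounds

With this loop, the run shown in the logs would be expected to take three model calls in total: one that requests get_temperature, one that requests get_fit_cloth, and a final one that turns the tool results into the clothing recommendation.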
response_message appended to the message history (note: this is the assistant message carrying the tool call, not the tool_message dict)
ChatCompletionMessage(
    content=None,
    refusal=None,
    role='assistant',
    annotations=None,
    audio=None,
    function_call=None,
    tool_calls=[
        ChatCompletionMessageToolCall(
            id='call_-8598006517705558361',
            function=Function(
                arguments='{"city": "北京"}',
                name='get_temperature'
            ),
            type='function',
            index=0
        )
    ]
)
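For comparison, the tool_message dict that gets appended right after this assistant message is plain JSON-serializable data. Filling it in with the values from the logs above (the tool_call_id from the first response and the 25 returned by the mock get_temperature for 北京) gives:

tool_message = {
    "tool_call_id": "call_-8598006517705558361",
    "role": "tool",
    "name": "get_temperature",
    "content": "25",
}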
2025-06-19 (Thu)