import matplotlib.pyplot as plt
# import matplotlib.figure as fgr
import pandas as pd
import os
import json
from matplotlib.ticker import MaxNLocator
import matplotlib.font_manager as fm
from lab_gpt4_call import send_chat_request,send_chat_request_Azure,send_official_call
from lab_llms_call import send_chat_request_qwen,send_chat_request_glm,send_chat_request_chatglm3_6b,send_chat_request_chatglm_6b
# from lab_llm_local_call import send_chat_request_internlm_chat
#import ast
import re
from tool import *
import tiktoken
import concurrent.futures
from PIL import Image
from io import BytesIO
import queue
# import datetime
from threading import Thread
# plt.rcParams['font.sans-serif'] = ['Arial Unicode MS']
# plt.rcParams['axes.unicode_minus'] = False
import openai
import time
# Subclass Thread to expose the return value of the target function.
# Used to run tasks concurrently and collect their results.
class MyThread(Thread):
def __init__(self, target, args):
super(MyThread, self).__init__()
self.func = target
self.args = args
def run(self):
self.result = self.func(*self.args)
def get_result(self):
return self.result
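# A minimal usage sketch (illustrative arguments, not from this module) showing how MyThread
# exposes the target's return value, which a plain threading.Thread does not:
#
#   t1 = MyThread(target=pow, args=(2, 10))
#   t2 = MyThread(target=len, args=('hello',))
#   for t in (t1, t2):
#       t.start()
#   for t in (t1, t2):
#       t.join()
#   print(t1.get_result(), t2.get_result())   # -> 1024 5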
# Parse the given task dict, execute the corresponding function, and store the result in the result buffer (no-argument version).
def parse_and_exe1(call_dict, result_buffer, parallel_step: str='1'):
"""
Parse the input and call the corresponding function to obtain the result.
:param call_dict: dict, now only including func, output, and desc
:param result_buffer: dict, storing the corresponding intermediate results
:param parallel_step: str, parallel step
:return: Returns func() and stores the corresponding result in result_buffer.
"""
func_name = call_dict['function' + parallel_step]
output = call_dict['output' + parallel_step]
desc = call_dict['description' + parallel_step]
    # Assume every function called here takes no arguments.
result = eval(func_name)()
    # Store the result together with its description.
result_buffer[output] = (result, desc)
return result_buffer
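# A minimal usage sketch for parse_and_exe1; 'get_latest_date' is a hypothetical zero-argument
# tool, and the dict keys are suffixed with the parallel step number:
#
#   call_dict = {'function1': 'get_latest_date',
#                'output1': 'result1',
#                'description1': 'the latest trading date'}
#   buffer = {}
#   parse_and_exe1(call_dict, buffer, parallel_step='1')
#   # buffer['result1'] == (get_latest_date(), 'the latest trading date')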
# Parse the given task dict, execute the corresponding function with its arguments, and store the result in the result buffer (argument-taking version).
def parse_and_exe2(call_dict, result_buffer, parallel_step: str='1'):
"""
Parse the input and call the corresponding function to obtain the result.
    :param call_dict: dict, including arg, func, output, and desc
    :param result_buffer: dict, storing the corresponding intermediate results
    :param parallel_step: str, parallel step
    :return: Returns func(arg) and stores the corresponding result in result_buffer.
"""
arg_list = call_dict['arg' + parallel_step]
    replace_arg_list = [result_buffer[item][0] if isinstance(item, str) and ('result' in item or 'input' in item) else item for item in arg_list]  # resolve placeholder args ('result*'/'input*') from result_buffer
func_name = call_dict['function' + parallel_step]
output = call_dict['output' + parallel_step]
desc = call_dict['description' + parallel_step]
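    # loop_rank receives its second argument as a string (e.g. the name of a tool function), so eval turns it into the corresponding Python object before the call.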
if func_name == 'loop_rank':
replace_arg_list[1] = eval(replace_arg_list[1])
result = eval(func_name)(*replace_arg_list)
result_buffer[output] = (result, desc) # 'result1': (df1, desc)
return result_buffer
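# A minimal usage sketch for parse_and_exe2 ('select_top_rows' is a hypothetical tool).
# String arguments containing 'result' or 'input' are looked up in result_buffer, which is
# how one step consumes the output of a previous step:
#
#   call_dict = {'arg1': ['result1', 20],
#                'function1': 'select_top_rows',
#                'output1': 'result2',
#                'description1': 'top-20 rows of the previous result'}
#   buffer = {'result1': (df, 'dataframe from an earlier step')}   # df produced elsewhere
#   parse_and_exe2(call_dict, buffer, parallel_step='1')
#   # buffer['result2'] == (select_top_rows(df, 20), 'top-20 rows of the previous result')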
# Read the tool library and tool prompt JSON files and merge them into a single,
# flattened prompt string that is easy to read and reuse.
def load_tool_and_prompt(tool_lib, tool_prompt):
'''
Read two JSON files.
:param tool_lib: Tool description
:param tool_prompt: Tool prompt
:return: Flattened prompt
'''
with open(tool_lib, 'r', encoding='utf-8') as f:
tool_lib = json.load(f)
with open(tool_prompt, 'r', encoding='utf-8') as f:
tool_prompt = json.load(f)
    # Append every tool function's description from 'tool_lib' to the "Function Library:" entry of 'tool_prompt',
    # separating each tool and its description with a blank line ('\n\n').
for key, value in tool_lib.items():
tool_prompt["Function Library:"] = tool_prompt["Function Library:"] + key + " " + value+ '\n\n'
    # Concatenate every key-value pair of 'tool_prompt' into 'prompt_flat', also separated by blank lines ('\n\n'),
    # producing a single flattened prompt string for later use.
prompt_flat = ''
for key, value in tool_prompt.items():
prompt_flat = prompt_flat + key +' '+ value + '\n\n'
return prompt_flat
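# A minimal usage sketch, assuming the JSON files live under ./prompt_lib/ (the file names here
# are illustrative). tool_lib maps each tool signature to its description, and tool_prompt must
# already contain a "Function Library:" key for the descriptions to be appended to:
#
#   prompt = load_tool_and_prompt('./prompt_lib/tool_lib.json',
#                                 './prompt_lib/tool_prompt.json')
#   print(prompt[:500])   # flattened prompt, sections separated by blank lines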
# callback function
intermediate_results = queue.Queue() # Create a queue to store intermediate results.
# Push an intermediate result onto the queue for further processing (e.g. streaming it back to the caller).
def add_to_queue(intermediate_result):
    intermediate_results.put(f"After planning, the intermediate result is {intermediate_result}")
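# A minimal sketch of how a consumer (e.g. a UI polling loop, not part of this module)
# could drain the intermediate_results queue:
#
#   while not intermediate_results.empty():
#       print(intermediate_results.get())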
# Throttle the request rate to comply with the API's rate limit (at most 3 requests per minute).
def check_RPM(run_time_list, new_time, max_RPM=1):
    # run_time_list holds the timestamps of the most recent requests (at most 3 per minute are allowed).
    # Returns 0 if no rest is needed, otherwise the number of seconds to sleep.
if len(run_time_list) < 3:
run_time_list.append(new_time)
return 0
else:
        if (new_time - run_time_list[0]).seconds < 60:  # the oldest of the last 3 requests falls within the past minute
# Calculate the required rest time.
sleep_time = 60 - (new_time - run_time_list[0]).seconds
print('sleep_time:', sleep_time)
run_time_list.pop(0)
run_time_list.append(new_time)
return sleep_time
else:
run_time_list.pop(0)
run_time_list.append(new_time)
return 0
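# A minimal usage sketch: record each request's timestamp and sleep whenever the sliding
# one-minute window already holds three requests (timestamps are illustrative):
#
#   import datetime
#   run_times = []
#   sleep_s = check_RPM(run_times, datetime.datetime.now())
#   if sleep_s > 0:
#       time.sleep(sleep_s)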
### Main pipeline: parse the instruction, plan the task, select the tools, and execute the steps that produce the final result.
def run(instruction, add_to_queue=None, send_chat_request_Azure=send_official_call, openai_key='', api_base='', engine=''):
    '''
    :param instruction: the user's instruction/query
    :param add_to_queue: callback that pushes intermediate results onto a queue so they can be fed back to the user in real time
    :param send_chat_request_Azure: function used to send requests to the (Azure) OpenAI API
    :param openai_key, api_base, engine: configuration for the OpenAI API
    :return: output_text, image, output_result, df
    '''
    output_text = ''  # Initialize the output text as an empty string
    ## Intent detection: read the intent-detection prompt library ('prompt_intent_detection.json')
    ## and combine it with the user's instruction to build a new prompt ('prompt_intent_detection').
    ################################# Step-1: Task selection ###########################################
print('===============================Intent Detecting===========================================')
with open('./prompt_lib/prompt_intent_detection.json', 'r', encoding='utf-8') as f:
        prompt_task_dict = json.load(f)  # Load the intent-detection prompt library
prompt_intent_detection = ''
    for key, value in prompt_task_dict.items():  # Iterate over the prompt library to build the intent-detection prompt
prompt_intent_detection = prompt_intent_detection + key + ": " + value+ '\n\n'
prompt_intent_detection = prompt_intent_detection + '\n\n' + 'Instruction:' + instruction + ' ###New Instruction: '
    ## Send 'prompt_intent_detection' to the language model to turn the query into a more explicit task description ('new_instruction').
response = send_chat_request("gpt", prompt_intent_detection, openai_key=openai_key, api_base=api_base, engine=engine)
    new_instruction = response  # Use the response as the new instruction
print('new_instruction:', new_instruction)
output_text = output_text + '\n======Intent Detecting Stage=====\n\n'
output_text = output_text + new_instruction +'\n\n'
if add_to_queue is not None:
        add_to_queue(output_text)  # Queue callback: push the output text onto the queue
    event_happen = True  # Flag indicating that an event has occurred
##
