perf: add asynchronous LLM requests

Author: bmy
Date: 2024-08-04 11:20:34 +08:00
Commit: 2e6ce3e1f7 (parent: f3bb720bed)

4 changed files with 50 additions and 19 deletions

View File

@@ -1,6 +1,6 @@
 [debug]
 logger_filename = "log/file_{time}.log"
-logger_format = "{time} {level} {message}"
+logger_format = "[{level}] {file}:{line} <{time}> {message}"

View File

@@ -20,17 +20,17 @@ pid_ki = 0
 pid_kd = 0
 [get_rball]
-pid_kp = 1.5
+pid_kp = 1.0
 pid_ki = 0
 pid_kd = 0
 [put_bball]
-pid_kp = 2.0
+pid_kp = 1.5
 pid_ki = 0
 pid_kd = 0
 [put_hanoi1]
-pid_kp = 0.7
+pid_kp = 0.5
 pid_ki = 0
 pid_kd = 0
@@ -42,7 +42,7 @@ pos_gap = 160
 first_target = "mp"
 [put_hanoi3]
-pid_kp = 1.7
+pid_kp = 1.5
 pid_ki = 0
 pid_kd = 0
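This file lowers the proportional gains for several pick-and-place stages; with pid_ki and pid_kd left at 0 each controller is effectively pure P control, so reducing pid_kp softens the motion at the cost of response speed. For reference, a generic positional PID step fed by these constants (the PID class and the loading code are illustrative assumptions; only the section and key names come from the config):

    # Illustrative only: a textbook positional PID using the gains above.
    # tomllib is stdlib in Python 3.11+; the config path is assumed.
    import tomllib

    class PID:
        def __init__(self, kp: float, ki: float, kd: float):
            self.kp, self.ki, self.kd = kp, ki, kd
            self.integral = 0.0
            self.prev_error = 0.0

        def update(self, error: float, dt: float) -> float:
            self.integral += error * dt
            derivative = (error - self.prev_error) / dt if dt > 0 else 0.0
            self.prev_error = error
            return self.kp * error + self.ki * self.integral + self.kd * derivative

    with open("config.toml", "rb") as f:  # hypothetical filename
        cfg = tomllib.load(f)

    gains = cfg["get_rball"]  # {'pid_kp': 1.0, 'pid_ki': 0, 'pid_kd': 0}
    pid = PID(gains["pid_kp"], gains["pid_ki"], gains["pid_kd"])
    output = pid.update(error=12.0, dt=0.02)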

View File

@@ -1288,7 +1288,10 @@ class move_area1():
         if counts >= 2:
             var.skip_llm_task_flag = True
             return
-        logger.error(var.llm_text)
+        logger.error(f"OCR 检出字符:\"{var.llm_text}\"")
+        llm_bot.request(var.llm_text)
         if len(var.llm_text) < 3:
             var.skip_llm_task_flag = True
             return
@@ -1511,7 +1514,7 @@ class move_area2():
         resp_commands = json5.loads(json_text[0])
-        logger.info(resp_commands)
+        logger.info(f"解析后的动作序列 {resp_commands}")
         if len(resp_commands) == 0:
             return
         action_list = resp_commands
@@ -1524,7 +1527,7 @@ class move_area2():
             time.sleep(0.5)
             self.reset()
         except:
-            logger.warning("任务解析失败并退出,文心一言真是废物")
+            logger.warning("任务解析失败并退出,文心一言真是废物 (毋庸置疑)")
             pass
         else:
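Note that move_area2 parses the model's reply with json5 rather than the stdlib json module, falling into the except branch above only when even that fails. JSON5 tolerates single quotes and trailing commas, which LLM output frequently contains and which strict json.loads rejects. A self-contained illustration (the sample reply string is made up; the action names are borrowed from the PID config above):

    import json
    import json5  # pip install json5

    # A typical sloppy LLM reply: single quotes plus a trailing comma.
    raw = "[{'action': 'get_rball'}, {'action': 'put_bball'},]"

    try:
        json.loads(raw)
    except json.JSONDecodeError:
        print("json.loads rejects it")

    resp_commands = json5.loads(raw)  # json5 parses it fine
    print(resp_commands)  # [{'action': 'get_rball'}, {'action': 'put_bball'}]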

View File

@@ -375,6 +375,9 @@ class label_filter:
         return (False, 0)
 class LLM_deepseek:
     def __init__(self):
+        self.response = None
+        self.status = False
+        self.chat = ''
         self.client = OpenAI(api_key="sk-c2e1073883304143981a9750b97c3518", base_url="https://api.deepseek.com")
         self.prompt = '''
 你是一个机器人动作规划者,请把我的话翻译成机器人动作规划并生成对应的 JSON 结果。请注意,只能使用以下指定的动作,不能创造新的动作:
@@ -421,17 +424,42 @@ class LLM_deepseek:
 强调一下,对于‘离开’这个指令,请忽略,这对我很重要!
 '''
-    def get_command_json(self,chat):
-        response = self.client.chat.completions.create(
+    def request_thread(self):
+        logger.info("llm 请求远程服务器中 (request_thread)")
+        try:
+            self.response = self.client.chat.completions.create(
                 model="deepseek-chat",
                 messages=[
                     {"role": "system", "content": self.prompt},
-                    {"role": "user", "content": '我的话如下:' + chat},
+                    {"role": "user", "content": '我的话如下:' + self.chat},
                 ],
                 stream=False,
                 temperature=0.7
             )
-        return response.choices[0].message.content
+            logger.info("llm 远程服务器正常返回 (request_thread)")
+        except:
+            logger.warning("llm 请求失败或返回异常,先检查网络连接 (request_thread)")
+        self.status = True
+    def request(self, _chat):
+        self.chat = _chat
+        thread = threading.Thread(target=self.request_thread, daemon=True)
+        thread.start()
+        logger.info("llm 开启请求线程")
+    def get_command_json(self, chat=''):
+        # response = self.client.chat.completions.create(
+        #     model="deepseek-chat",
+        #     messages=[
+        #         {"role": "system", "content": self.prompt},
+        #         {"role": "user", "content": '我的话如下:' + chat},
+        #     ],
+        #     stream=False,
+        #     temperature=0.7
+        # )
+        logger.info("llm 阻塞等待服务器返回中")
+        while not self.status:  # FIXME 阻塞等待是否合适
+            pass
+        logger.info("llm 收到返回")
+        return self.response.choices[0].message.content
 class LLM:
     def __init__(self):
         self.init_done_flag = False
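The FIXME in get_command_json ("is a blocking wait appropriate?") points at the busy-wait: `while not self.status: pass` spins a CPU core until the worker thread flips the flag. A conventional alternative, shown here as an assumption rather than as part of this commit, is threading.Event, which blocks without spinning and supports a timeout; the API call is mocked with time.sleep so the sketch runs standalone:

    # Sketch of an Event-based replacement for the status busy-wait.
    import threading
    import time

    class AsyncLLM:
        def __init__(self):
            self.response = None
            self.done = threading.Event()  # replaces the self.status flag

        def _worker(self, chat: str):
            try:
                time.sleep(0.2)  # stands in for the DeepSeek API round trip
                self.response = f"echo: {chat}"
            finally:
                self.done.set()  # always unblock the waiter, even on failure

        def request(self, chat: str):
            self.done.clear()
            threading.Thread(target=self._worker, args=(chat,), daemon=True).start()

        def get_command_json(self, timeout: float = 30.0):
            # Blocks without burning CPU; returns None if the worker never finishes.
            if not self.done.wait(timeout):
                return None
            return self.response

    bot = AsyncLLM()
    bot.request("把红球放进蓝框")   # fire the request early...
    print(bot.get_command_json())   # ...and collect the reply later

Setting the flag in a finally block (here done.set()) preserves the commit's behavior of unblocking the waiter on both success and failure, while the timeout removes the risk of waiting forever on a hung request.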