diff --git a/app.py b/app.py
index d667212..0a19ac6 100644
--- a/app.py
+++ b/app.py
@@ -10,7 +10,8 @@ import os
 import time
 import subprocess
 import signal
-import importlib
+import base64
+import json
 from main_upper import main_func
 server_command = [
     {"path": "/home/evan/Workplace/project_capture/build/", "script": "./capture"},
@@ -51,7 +52,7 @@ logger.add(handler, format="{time:MM-DD HH:mm:ss} {message}", level="DEBUG")
 fileOptions_path = '/home/evan/Workplace/project_main'
 fileOptions_list = ['cfg_args.toml','cfg_main.toml', 'cfg_subtask.toml']
 cfg_args_path = os.path.join(fileOptions_path, 'cfg_args.toml')
-
+cfg_move_area_path = os.path.join(fileOptions_path, 'cfg_move_area.json')
 
 @app.route('/')
 def index():
@@ -64,6 +65,17 @@ def run():
     config_args['lane_mode']['mode_index'] = int(mode_index)
     with open(cfg_args_path, 'w') as config_file:
         toml.dump(config_args, config_file)
+    try:
+        action_base64 = request.args.get('action')
+        decoded_bytes = base64.b64decode(action_base64)
+        decoded_str = decoded_bytes.decode('utf-8')
+        json_data = json.loads(decoded_str)
+        with open(cfg_move_area_path, 'w') as json_file:
+            json.dump(json_data, json_file)
+    except:
+        # 当该字段没有传入参数时 清空配置文件 该任务按照正常流程去做
+        with open(cfg_move_area_path, 'w') as json_file:
+            pass
     return render_template('index2.html')
 
 # @app.route('/csdn')
diff --git a/cfg_main.toml b/cfg_main.toml
index f32336e..a0dd885 100644
--- a/cfg_main.toml
+++ b/cfg_main.toml
@@ -15,5 +15,5 @@ PutHanoi1_counts = 7
 PutHanoi2_counts = 2
 PutHanoi3_counts = 2
 MoveArea1_counts = 6
-MoveArea2_counts = 1700
+MoveArea2_counts = 10
 KickAss_counts = 10
diff --git a/cfg_move_area.json b/cfg_move_area.json
new file mode 100644
index 0000000..e69de29
diff --git a/subtask.py b/subtask.py
index 820270d..507856b 100644
--- a/subtask.py
+++ b/subtask.py
@@ -3,6 +3,7 @@ from loguru import logger
 from utils import label_filter
 from utils import tlabel
 from utils import LLM
+from utils import LLM_deepseek
 from utils import CountRecord
 import utils
 import toml
@@ -12,10 +13,13 @@ import variable as var
 import action as act
 import re
 import math
+import json
+import json5
 # import threading
 # import ctypes
 cfg = None
 cfg_args = None
+cfg_move_area = None
 by_cmd = None
 filter = None
 llm_bot = None
@@ -48,9 +52,15 @@ def import_obj(_by_cmd, skip_queue):
     global cfg
     global cfg_args
+    global cfg_move_area
     global global_skip_queue
     cfg = toml.load('/home/evan/Workplace/project_main/cfg_subtask.toml') # 加载任务配置
     cfg_args = toml.load('/home/evan/Workplace/project_main/cfg_args.toml')
+    try:
+        with open('/home/evan/Workplace/project_main/cfg_move_area.json', 'r') as f:
+            cfg_move_area = json.load(f)
+    except:
+        cfg_move_area = None
     by_cmd = _by_cmd
     global_skip_queue = skip_queue
@@ -68,7 +78,7 @@ def import_obj(_by_cmd, skip_queue):
     filter = label_filter(socket)
     if cfg['move_area']['llm_enable']:
-        llm_bot = LLM()
+        llm_bot = LLM_deepseek()
 
 def car_stop():
     for _ in range(3):
         by_cmd.send_speed_x(0)
@@ -467,7 +477,7 @@ class get_block1():
         time.sleep(0.1)
         by_cmd.send_position_axis_x(1, 100)
         time.sleep(1)
-        by_cmd.send_position_axis_z(30, 90)
+        by_cmd.send_position_axis_z(30, 100)
         time.sleep(0.5)
         by_cmd.send_angle_claw(63)
         time.sleep(0.5)
@@ -572,7 +582,7 @@ class put_block():
         by_cmd.send_angle_claw(85)
         # by_cmd.send_angle_storage(0)
         time.sleep(1)
-        by_cmd.send_position_axis_z(30,90)
+        by_cmd.send_position_axis_z(30,100)
         time.sleep(1)
         by_cmd.send_angle_claw(25)
         by_cmd.send_distance_x(-10, 110)
@@ -703,13 +713,13 @@ class up_tower():
         by_cmd.send_distance_x(-10, 120)
         time.sleep(1)
         # 上古參數
-        by_cmd.send_distance_y(-10, 80)
+        by_cmd.send_distance_y(-10, 50) # 80
         # 6_9 模型參數
         # by_cmd.send_distance_y(-10, 40)
         # 7_12_3 模型參數
         # by_cmd.send_distance_y(-10, 50)
-        time.sleep(2)
-        car_stop()
+        # time.sleep(2)
+        # car_stop()
         # FIXME 如果下發 distance 後直接 car_stop,則 distance 執行時間僅由指令間處理延時決定
         # time.sleep(3)
         # by_cmd.send_speed_y(-10)
@@ -759,21 +769,21 @@ class get_rball():
         # 靠近塔
         by_cmd.send_angle_scoop(20)
         # 上古參數
-        by_cmd.send_distance_y(-15, 70) # 50
+        by_cmd.send_distance_y(-15, 50) # 50 # 70
         # 6_9 參數
         # by_cmd.send_distance_y(-15, 35)
         # time.sleep(2)
         # 7_12_3 參數
         # by_cmd.send_distance_y(-15, 45)
-        time.sleep(2)
-        car_stop()
+        # time.sleep(2)
+        # car_stop()
         calibrate_new(tlabel.RBALL,offset = 44, run = True)
         time.sleep(1)
         logger.info("抓红球")
         # by_cmd.send_angle_scoop(12)
         time.sleep(0.5)
         by_cmd.send_position_axis_z(30, 200)
-        time.sleep(2.5)
+        time.sleep(3)
         by_cmd.send_angle_scoop(12)
         time.sleep(0.5)
         by_cmd.send_angle_scoop(7)
@@ -1255,33 +1265,36 @@ class move_area1():
         # filter_w = (148, 560)
         # filter_h = (165, 390)
-
-        counts = 0
-        while True:
-            ocr_socket.send_string("")
-            resp = ocr_socket.recv_pyobj()
-            var.llm_text = ''
-            counts += 1
-            if resp.get('code') == 0:
-                for item in resp.get('content'):
-
-                    if item['probability']['average'] < 0.80:
-                        continue
-                    # box = item['location']
-                    # center_x = box['left'] + box['width'] / 2
-                    # center_y = box['top'] + box['height'] / 2
-                    # if center_x < filter_w[0] or center_x > filter_w[1] \
-                    #     or center_y < filter_h[0] or center_y > filter_h[1]:
-                    #     continue
-                    var.llm_text += item['words']
-                break
-            if counts >= 2:
+        if cfg_move_area == None:
+            counts = 0
+            while True:
+                ocr_socket.send_string("")
+                resp = ocr_socket.recv_pyobj()
+                var.llm_text = ''
+                counts += 1
+                if resp.get('code') == 0:
+                    for item in resp.get('content'):
+
+                        if item['probability']['average'] < 0.80:
+                            continue
+                        # box = item['location']
+                        # center_x = box['left'] + box['width'] / 2
+                        # center_y = box['top'] + box['height'] / 2
+                        # if center_x < filter_w[0] or center_x > filter_w[1] \
+                        #     or center_y < filter_h[0] or center_y > filter_h[1]:
+                        #     continue
+                        var.llm_text += item['words']
+                    break
+                if counts >= 2:
+                    var.skip_llm_task_flag = True
+                    return
+            logger.error(var.llm_text)
+            if len(var.llm_text) < 3:
                 var.skip_llm_task_flag = True
                 return
-        logger.error(var.llm_text)
-        if len(var.llm_text) < 3:
-            var.skip_llm_task_flag = True
-            return
+        else:
+            # 不需要文字识别 直接使用传入的参数执行 action
+            pass
 
         var.task_speed = 9 # 12
@@ -1325,17 +1338,18 @@ class move_area2():
         pass
     def init(self):
         logger.info("应急避险第二阶段初始化")
-        self.offset = 15
+        self.offset = 60
         self.delta_x = 0
         self.delta_y = 0
         self.delta_omage = 0
     def find(self):
-        if var.skip_llm_task_flag:
+        if var.skip_llm_task_flag and cfg_move_area == None:
            return 5000
         ret, box = filter.get(tlabel.SHELTER)
         if ret:
             error = (box[0][2] + box[0][0] - 320) / 2 + self.offset
-            if abs(error) < 20:
+            # 增加了一个宽度过滤
+            if abs(error) < 30 and abs(box[0][2] - box[0][0]) > 180:
                 return 5000
         return False
     def add_item(self, item):
@@ -1472,41 +1486,60 @@
         logger.info(f"回正后最终位置: ({self.abs_y:.2f}, {self.abs_x:.2f}), 角度: {math.degrees(self.abs_w % (2 * math.pi))}")
     def exec(self):
         var.task_speed = 0
-        if var.skip_llm_task_flag:
-            logger.error("ocr 识别出错 直接跳过改任务")
+        if var.skip_llm_task_flag and cfg_move_area == None:
+            logger.error("ocr 识别出错 直接跳过该任务")
             return
         logger.info("开始寻找停车区域")
         car_stop()
-        calibrate_new(tlabel.SHELTER, offset = 15, run = True)
+        calibrate_new(tlabel.SHELTER, offset = 30, run = True)
         time.sleep(0.5)
-
-        # 调用大模型 然后执行动作
-        try:
-            resp = llm_bot.get_command_json(var.llm_text)
-            logger.info(resp)
-        except:
-            logger.error("大模型超时,跳过任务")
-            return
-
-        try:
-            # FIXME 当前仍然可能存在文心一言分段返回和掺杂解释的问题,不确定当前条件足以过滤
-            resp_commands = eval(re.findall("```json(.*?)```", resp, re.S)[0])
-            logger.info(resp_commands)
-            if len(resp_commands) == 0:
+        if cfg_move_area == None:
+            # 调用大模型 然后执行动作
+            try:
+                resp = llm_bot.get_command_json(var.llm_text)
+                logger.info(f"llm 返回原数据 {resp}")
+            except:
+                logger.error("大模型 llm_bot 超时,跳过任务")
                 return
-            action_list = resp_commands
-            # 进入停车区域
-            by_cmd.send_distance_y(10, 450)
-            time.sleep((450 * 5 / 1000) + 0.5)
-            for action in action_list:
-                self.add_item(action)
-                time.sleep(0.1)
-            time.sleep(0.5)
-            self.reset()
-        except:
-            logger.warning("任务解析失败并退出,文心一言真是废物")
-            pass
+            try:
+                json_text = re.findall("```json(.*?)```", resp, re.S)
+                if len(json_text) == 0:
+                    # 返回的内容不带'''json
+                    resp_commands = eval(resp)
+                else:
+                    resp_commands = json5.loads(json_text[0])
+
+
+                logger.info(resp_commands)
+                if len(resp_commands) == 0:
+                    return
+                action_list = resp_commands
+                # 进入停车区域
+                by_cmd.send_distance_y(10, 450)
+                time.sleep((450 * 5 / 1000) + 0.5)
+                for action in action_list:
+                    self.add_item(action)
+                    time.sleep(0.1)
+                time.sleep(0.5)
+                self.reset()
+            except:
+                logger.warning("任务解析失败并退出,文心一言真是废物")
+                pass
+
+        else:
+            # 无需调用大模型 直接开始执行传入的参数
+            try:
+                by_cmd.send_distance_y(10, 450)
+                time.sleep((450 * 5 / 1000) + 0.5)
+                for action in cfg_move_area:
+                    self.add_item(action)
+                    time.sleep(0.1)
+                time.sleep(0.5)
+                self.reset()
+            except:
+                pass
+            pass
     def nexec(self):
         logger.warning("正在跳過大模型任務")
         time.sleep(2)
diff --git a/test/test_filter.py b/test/test_filter.py
index 0eefd78..c9825e5 100644
--- a/test/test_filter.py
+++ b/test/test_filter.py
@@ -16,7 +16,7 @@ socket.connect("tcp://localhost:6667")
 logger.info("subtask yolo client init")
 filter = label_filter(socket)
 
-filter.switch_camera(2)
+filter.switch_camera(1)
 
 # find_counts = 0
@@ -25,10 +25,12 @@ filter.switch_camera(2)
 
 # label = tlabel.TPLATFORM
 while True:
     time.sleep(0.2)
-    ret, box = filter.get(tlabel.BASE)
+    ret, box = filter.get(tlabel.SHELTER)
     if ret:
         error = (box[0][2] + box[0][0] - 320) / 2
-        logger.error(error)
+        if abs(error) < 30 and abs(box[0][2] - box[0][0]) > 180:
+            # height = box[0][3] - box[0][1]
+            logger.error(111)
 
 # label = tlabel.HOSPITAL
 # ret, box = filter.get(label)
diff --git a/test/test_ocr.py b/test/test_ocr.py
index 97a8031..8236446 100644
--- a/test/test_ocr.py
+++ b/test/test_ocr.py
@@ -17,13 +17,7 @@ while True:
     if resp.get('code') == 0:
         text = ''
         for item in resp.get('content'):
-            if item['probability']['average'] < 0.90:
-                continue
-            box = item['location']
-            center_x = box['left'] + box['width'] / 2
-            center_y = box['top'] + box['height'] / 2
-            if center_x < filter_w[0] or center_x > filter_w[1] \
-                or center_y < filter_h[0] or center_y > filter_h[1]:
+            if item['probability']['average'] < 0.80:
                 continue
             text += item['words']
         print(text)
diff --git a/utils.py b/utils.py
index 4a6ae68..d772586 100644
--- a/utils.py
+++ b/utils.py
@@ -2,6 +2,7 @@ from enum import Enum
 import numpy as np
 import erniebot
+from openai import OpenAI
 from simple_pid import PID
 from loguru import logger
 import threading
@@ -372,15 +373,72 @@ class label_filter:
             error = (boxes[center_x_index][4] + boxes[center_x_index][2] - self.img_size[0]) / 2
             return (True, error)
         return (False, 0)
-
-
+class LLM_deepseek:
+    def __init__(self):
+        self.client = OpenAI(api_key="sk-c2e1073883304143981a9750b97c3518", base_url="https://api.deepseek.com")
+        self.prompt = '''
+        你是一个机器人动作规划者,请把我的话翻译成机器人动作规划并生成对应的 JSON 结果。请注意,只能使用以下指定的动作,不能创造新的动作:
+        允许的动作及其对应格式如下:
+        [{'properties': {'index': {'title': 'Index', 'type': 'integer'}, 'action': {'title': 'Action', 'type': 'string'}, 'time': {'title': 'Time', 'type': 'number'}}, 'required': ['index', 'action', 'time'], 'title': 'Action', 'type': 'object'}]
+        我不允许你自我创造出新的 action,action 字段仅仅包括以下内容:
+        go_right 向右移动
+        go_left 向左移动
+        go_front 向前移动
+        go_back 向后移动
+        go_left_rotate 向左旋转
+        go_right_rotate 向右旋转
+        beep_seconds 蜂鸣器鸣叫的时间
+        beep_counts 蜂鸣器鸣叫的次数
+        light_seconds 灯光发光的时间
+        light_counts 灯光闪烁的次数
+        beep_light_counts 灯光和蜂鸣器一起闪烁的次数
+        go_sleep 什么都不做
+        我的话和你的回复示例如下:
+        我的话:向左移 0.1m, 向左转弯 85 度
+        你的回复:[{"index":0,"action":"go_left","time":0.1},{"index":1,"action":"go_left_rotate","time":85}]
+        我的话:向右移 0.2m, 向前 0.1m
+        你的回复:[{"index":0,"action":"go_right","time":0.2},{"index":1,"action":"go_front","time":0.1}]
+        我的话:向右转 90 度,向右移 0.1m
+        你的回复:[{"index":0,"action":"go_right_rotate","time":90},{"index":1,"action":"go_right","time":0.1}]
+        我的话:原地左转 38 度
+        你的回复:[{"index":0,"action":"go_left_rotate","time":38}]
+        我的话:蜂鸣器发声 5 秒
+        你的回复:[{"index":0,"action":"beep_seconds","time":5}]
+        我的话:发光或者照亮 5 秒
+        你的回复:[{"index":0,"action":"light_seconds","time":5}]
+        我的话:向右走 30cm,照亮 2s
+        你的回复:[{"index":0,"action":"go_right","time":0.3},{"index":1,"action":"light_seconds","time":2}]
+        我的话:向左移 0.2m, 向后 0.1m
+        你的回复:[{"index":0,"action":"go_left","time":0.2},{"index":1,"action":"go_back","time":0.1}]
+        我的话:鸣叫 3 声
+        你的回复:[{"index":0,"action":"beep_counts","time":3}]
+        我的话:前行零点五米
+        你的回复:[{"index":0,"action":"go_front","time":0.5}]
+        我的话:闪烁灯光 1 次并伴有蜂鸣器
+        你的回复:[{"index":0,"action":"beep_light_counts","time": 1}]
+        我的话:灯光闪烁 3 次同时蜂鸣器也叫 3 次
+        你的回复:[{"index":0,"action":"beep_light_counts","time": 3}]
+
+        强调一下,对于'离开'这个指令,请忽略,这对我很重要!
+        '''
+    def get_command_json(self,chat):
+        response = self.client.chat.completions.create(
+            model="deepseek-chat",
+            messages=[
+                {"role": "system", "content": self.prompt},
+                {"role": "user", "content": '我的话如下:' + chat},
+            ],
+            stream=False,
+            temperature=0.7
+        )
+        return response.choices[0].message.content
 class LLM:
     def __init__(self):
         self.init_done_flag = False
         erniebot.api_type = "qianfan"
         erniebot.ak = "jReawMtWhPu0wrxN9Rp1MzZX"
         erniebot.sk = "eowS1BqsNgD2i0C9xNnHUVOSNuAzVTh6"
-        self.model = 'ernie-3.5'
+        self.model = 'ernie-lite'
         # self.prompt = '''你是一个机器人动作规划者,需要把我的话翻译成机器人动作规划并生成对应的 json 结果,机器人工作空间参考右手坐标系。
         # 严格按照下面的描述生成给定格式 json,从现在开始你仅仅给我返回 json 数据!'''
         # self.prompt += '''正确的示例如下:
         #
@@ -395,73 +453,122 @@ class LLM:
         # 鸣叫 3 声 [{'func': 'beep', 'time': 3}]
         # 前行零点五米 [{'func': 'move', 'x': 0.5, 'y': 0}]
         # '''
+        # self.prompt = '''
+        # 你是一个机器人动作规划者,需要把我的话翻译成机器人动作规划并生成对应的 JSON 结果。请注意,只能使用以下指定的动作,不能创造新的动作:
+        # 允许的动作及其对应格式如下:
+        # - 向左移:{"index":N,"action":"go_left","time":T}
+        # - 向右移:{"index":N,"action":"go_right","time":T}
+        # - 向前移:{"index":N,"action":"go_front","time":T}
+        # - 向后移:{"index":N,"action":"go_back","time":T}
+        # - 向左转:{"index":N,"action":"go_left_rotate","time":T}
+        # - 向右转:{"index":N,"action":"go_right_rotate","time":T}
+        # - 蜂鸣器发声:{"index":N,"action":"beep_seconds","time":T}
+        # - 蜂鸣器发声次数:{"index":N,"action":"beep_counts","time":T}
+        # - 发光或者照亮:{"index":N,"action":"light_seconds","time":T}
+        # - 发光次数或者闪烁次数:{"index":N,"action":"light_counts","time":T}
+        # - 发光并伴随蜂鸣器:{"index":N,"action":"beep_light_counts","time":T}
+        # - 等待{"index":N,"action":"go_sleep","time":T}
+        # 示例输入输出如下:
+        # 输入:向左移 0.1m, 向左转弯 85 度
+        # 输出:[{"index":0,"action":"go_left","time":0.1},{"index":1,"action":"go_left_rotate","time":85}]
+        # 输入:向右移 0.2m, 向前 0.1m
+        # 输出:[{"index":0,"action":"go_right","time":0.2},{"index":1,"action":"go_front","time":0.1}]
+        # 输入:向右转 90 度,向右移 0.1m
+        # 输出:[{"index":0,"action":"go_right_rotate","time":90},{"index":1,"action":"go_right","time":0.1}]
+        # 输入:原地左转 38 度
+        # 输出:[{"index":0,"action":"go_left_rotate","time":38}]
+        # 输入:蜂鸣器发声 5 秒
+        # 输出:[{"index":0,"action":"beep_seconds","time":5}]
+        # 输入:发光或者照亮 5 秒
+        # 输出:[{"index":0,"action":"light_seconds","time":5}]
+        # 输入:向右走 30cm, 照亮 2s
+        # 输出:[{"index":0,"action":"go_right","time":0.3},{"index":1,"action":"light_seconds","time":2}]
+        # 输入:向左移 0.2m, 向后 0.1m
+        # 输出:[{"index":0,"action":"go_left","time":0.2},{"index":1,"action":"go_back","time":0.1}]
+        # 输入:鸣叫 3 声
+        # 输出:[{"index":0,"action":"beep_counts","time":3}]
+        # 输入:前行零点五米
+        # 输出:[{"index":0,"action":"go_front","time":0.5}]
+        # 输入:闪烁灯光 1 次并伴有蜂鸣器
+        # 输出:[{"index":0,"action":"beep_light_counts","time": 1}]
+        # 输入:灯光闪烁 3 次同时蜂鸣器也叫 3 次
+        # 输出:[{"index":0,"action":"beep_light_counts","time": 3}]
+        # '''
+        # self.prompt += '''请根据上面的示例,解析该任务文本,并返回相应的 JSON 字段。确保 JSON 中包含了键 index action 和 time 以及相应的值。不要附带其他的解释和注释,只需要 JSON 字段。'''
         self.prompt = '''
         你是一个机器人动作规划者,需要把我的话翻译成机器人动作规划并生成对应的 JSON 结果。请注意,只能使用以下指定的动作,不能创造新的动作:
         允许的动作及其对应格式如下:
-        - 向左移:{"index":N,"action":"go_left","time":T}
-        - 向右移:{"index":N,"action":"go_right","time":T}
-        - 向前移:{"index":N,"action":"go_front","time":T}
-        - 向后移:{"index":N,"action":"go_back","time":T}
-        - 向左转:{"index":N,"action":"go_left_rotate","time":T}
-        - 向右转:{"index":N,"action":"go_right_rotate","time":T}
-        - 蜂鸣器发声:{"index":N,"action":"beep_seconds","time":T}
-        - 蜂鸣器发声次数:{"index":N,"action":"beep_counts","time":T}
-        - 发光或者照亮:{"index":N,"action":"light_seconds","time":T}
-        - 发光次数或者闪烁次数:{"index":N,"action":"light_counts","time":T}
-        - 发光并伴随蜂鸣器:{"index":N,"action":"beep_light_counts","time":T}
-        - 等待{"index":N,"action":"go_sleep","time":T}
-        示例输入输出如下:
-        输入:向左移 0.1m, 向左转弯 85 度
-        输出:[{"index":0,"action":"go_left","time":0.1},{"index":1,"action":"go_left_rotate","time":85}]
-        输入:向右移 0.2m, 向前 0.1m
-        输出:[{"index":0,"action":"go_right","time":0.2},{"index":1,"action":"go_front","time":0.1}]
-        输入:向右转 90 度,向右移 0.1m
-        输出:[{"index":0,"action":"go_right_rotate","time":90},{"index":1,"action":"go_right","time":0.1}]
-        输入:原地左转 38 度
-        输出:[{"index":0,"action":"go_left_rotate","time":38}]
-        输入:蜂鸣器发声 5 秒
-        输出:[{"index":0,"action":"beep_seconds","time":5}]
-        输入:发光或者照亮 5 秒
-        输出:[{"index":0,"action":"light_seconds","time":5}]
-        输入:向右走 30cm, 照亮 2s
-        输出:[{"index":0,"action":"go_right","time":0.3},{"index":1,"action":"light_seconds","time":2}]
-        输入:向左移 0.2m, 向后 0.1m
-        输出:[{"index":0,"action":"go_left","time":0.2},{"index":1,"action":"go_back","time":0.1}]
-        输入:鸣叫 3 声
-        输出:[{"index":0,"action":"beep_counts","time":3}]
-        输入:前行零点五米
-        输出:[{"index":0,"action":"go_front","time":0.5}]
-        输入:闪烁灯光 1 次并伴有蜂鸣器
-        输出:[{"index":0,"action":"beep_light_counts","time": 1}]
-        输入:灯光闪烁 3 次同时蜂鸣器也叫 3 次
-        输出:[{"index":0,"action":"beep_light_counts","time": 3}]
+        [{'properties': {'index': {'title': 'Index', 'type': 'integer'}, 'action': {'title': 'Action', 'type': 'string'}, 'time': {'title': 'Time', 'type': 'number'}}, 'required': ['index', 'action', 'time'], 'title': 'Action', 'type': 'object'}]
+        我不允许你自我创造出新的 action,action 字段仅仅包括以下内容:
+        go_right 向右移动
+        go_left 向左移动
+        go_front 向前移动
+        go_back 向后移动
+        go_left_rotate 向左旋转
+        go_right_rotate 向右旋转
+        beep_seconds 蜂鸣器鸣叫的时间
+        beep_counts 蜂鸣器鸣叫的次数
+        light_seconds 灯光发光的时间
+        light_counts 灯光闪烁的次数
+        beep_light_counts 灯光和蜂鸣器一起闪烁的次数
+        go_sleep 什么都不做
+        我的话和你的回复示例如下:
+        我的话:向左移 0.1m, 向左转弯 85 度
+        你的回复:[{"index":0,"action":"go_left","time":0.1},{"index":1,"action":"go_left_rotate","time":85}]
+        我的话:向右移 0.2m, 向前 0.1m
+        你的回复:[{"index":0,"action":"go_right","time":0.2},{"index":1,"action":"go_front","time":0.1}]
+        我的话:向右转 90 度,向右移 0.1m
+        你的回复:[{"index":0,"action":"go_right_rotate","time":90},{"index":1,"action":"go_right","time":0.1}]
+        我的话:原地左转 38 度
+        你的回复:[{"index":0,"action":"go_left_rotate","time":38}]
+        我的话:蜂鸣器发声 5 秒
+        你的回复:[{"index":0,"action":"beep_seconds","time":5}]
+        我的话:发光或者照亮 5 秒
+        你的回复:[{"index":0,"action":"light_seconds","time":5}]
+        我的话:向右走 30cm,照亮 2s
+        你的回复:[{"index":0,"action":"go_right","time":0.3},{"index":1,"action":"light_seconds","time":2}]
+        我的话:向左移 0.2m, 向后 0.1m
+        你的回复:[{"index":0,"action":"go_left","time":0.2},{"index":1,"action":"go_back","time":0.1}]
+        我的话:鸣叫 3 声
+        你的回复:[{"index":0,"action":"beep_counts","time":3}]
+        我的话:前行零点五米
+        你的回复:[{"index":0,"action":"go_front","time":0.5}]
+        我的话:闪烁灯光 1 次并伴有蜂鸣器
+        你的回复:[{"index":0,"action":"beep_light_counts","time": 1}]
+        我的话:灯光闪烁 3 次同时蜂鸣器也叫 3 次
+        你的回复:[{"index":0,"action":"beep_light_counts","time": 3}]
+
+        我的话如下:
         '''
-        self.prompt += '''请根据上面的示例,解析该任务文本,并返回相应的 JSON 字段。确保 JSON 中包含了键 index action 和 time 以及相应的值。不要附带其他的解释和注释,只需要 JSON 字段。'''
         self.messages = []
         self.resp = None
         worker = threading.Thread(target=self.reset, daemon=True)
         worker.start()
     def reset(self):
-        self.messages = [self.make_message(self.prompt)]
-        self.resp = erniebot.ChatCompletion.create(
-            model=self.model,
-            messages=self.messages,
-        )
-        self.messages.append(self.resp.to_message())
-        self.init_done_flag = True
-        logger.info("LLM init done")
+        try:
+            self.messages = [self.make_message(self.prompt)]
+            self.resp = erniebot.ChatCompletion.create(
+                model=self.model,
+                messages=self.messages,
+            )
+            self.messages.append(self.resp.to_message())
+            self.init_done_flag = True
+            logger.info("LLM init done")
+        except:
+            logger.error("LLM init error")
     def make_message(self,content):
         return {'role': 'user', 'content': content}
     def get_command_json(self,chat):
         while self.init_done_flag == False: # 等待初始化 (要是等到调用还没初始化,那就是真寄了)
             pass
+        chat = '我的话如下:' + chat
         self.messages.append(self.make_message(chat))
         self.resp = erniebot.ChatCompletion.create(
             model=self.model,
             messages=self.messages,
         )
         self.messages.append(self.resp.to_message())
-        resp = self.resp.get_result().replace(' ', '').replace('\n', '').replace('\t', '')
+        resp = self.resp.get_result().replace(' ', '')
         return resp
 class CountRecord:
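
For reference, the run() view in the app.py hunk accepts an optional action query parameter: a base64-encoded JSON list of items in the same {"index": ..., "action": ..., "time": ...} format the prompts describe. The server decodes it into cfg_move_area.json, and move_area2 later replays that list without doing OCR or calling the LLM. Below is a minimal client-side sketch of that call; the host, port and the /run route are assumptions (the hunk only shows the view function and the mode_index/action parameters it reads), and requests is just one convenient HTTP client.

import base64
import json

import requests  # assumption: any HTTP client would do

ROBOT_URL = "http://127.0.0.1:5000"   # assumption: Flask default host/port
RUN_ROUTE = "/run"                    # assumption: route of the run() view shown in the diff

# Action items in the format used throughout this patch.
actions = [
    {"index": 0, "action": "go_left", "time": 0.2},
    {"index": 1, "action": "go_left_rotate", "time": 85},
    {"index": 2, "action": "beep_counts", "time": 3},
]

# Same encoding the server reverses: JSON text -> UTF-8 bytes -> base64 string.
encoded = base64.b64encode(json.dumps(actions).encode("utf-8")).decode("ascii")

resp = requests.get(ROBOT_URL + RUN_ROUTE, params={"mode_index": 1, "action": encoded})
print(resp.status_code)

Omitting the action parameter (or sending something that fails to decode) makes the server truncate cfg_move_area.json, so subtask.py loads cfg_move_area as None and falls back to the OCR + LLM path.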
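The move_area2.exec() hunk parses the model reply by first looking for a ```json fenced block and only falling back to evaluating the raw text, with json5 tolerating loosely formatted JSON. Below is a standalone sketch of that parsing step under those assumptions: parse_action_list is a hypothetical helper (not part of the repo), it uses json5 in both branches instead of the eval() fallback, and the whitelist mirrors the action names defined in the prompts.

import re

import json5  # same tolerant parser the patch adds to subtask.py

ALLOWED_ACTIONS = {
    "go_right", "go_left", "go_front", "go_back",
    "go_left_rotate", "go_right_rotate",
    "beep_seconds", "beep_counts",
    "light_seconds", "light_counts", "beep_light_counts",
    "go_sleep",
}

def parse_action_list(reply):
    # Prefer a ```json fenced block if the model wrapped its answer in one.
    fenced = re.findall(r"```json(.*?)```", reply, re.S)
    text = fenced[0] if fenced else reply
    actions = json5.loads(text)
    # Keep only well-formed items whose action name is in the whitelist.
    return [a for a in actions
            if isinstance(a, dict)
            and a.get("action") in ALLOWED_ACTIONS
            and isinstance(a.get("time"), (int, float))]

# Example:
# parse_action_list('```json\n[{"index": 0, "action": "go_left", "time": 0.1}]\n```')
# -> [{'index': 0, 'action': 'go_left', 'time': 0.1}]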