From cf167180b5d74af0ec46f6a68d7d0f17f3b35654 Mon Sep 17 00:00:00 2001
From: 2024snow <2103200855@qq.com>
Date: Sat, 13 Jul 2024 15:24:03 +0800
Subject: [PATCH] feat: add a second lane-tracking regression inference server
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 cfg_infer_server.toml                    |  1 +
 lane_server/infer.py                     |  6 +-
 lane_server/lane_infer_server.py         |  2 +-
 lane_server/lane_infer_server1.py        | 99 ++++++++++++++++++++++++
 lane_server/lane_model/6_9/说明.txt       |  4 +
 lane_server/lane_model/7_12_3/readme.txt |  1 +
 lane_server/lane_model/说明.txt           |  8 ++
 ocr_server/ocr_infer_server.py           | 43 +++++++---
 yolo_server/readme.txt                   |  1 +
 9 files changed, 153 insertions(+), 12 deletions(-)
 create mode 100644 lane_server/lane_infer_server1.py
 create mode 100644 lane_server/lane_model/6_9/说明.txt
 create mode 100644 lane_server/lane_model/7_12_3/readme.txt
 create mode 100644 lane_server/lane_model/说明.txt
 create mode 100644 yolo_server/readme.txt

diff --git a/cfg_infer_server.toml b/cfg_infer_server.toml
index 2361342..6deaa3c 100644
--- a/cfg_infer_server.toml
+++ b/cfg_infer_server.toml
@@ -7,6 +7,7 @@ logger_format = "{time} {level} {message}"
 lane_infer_port = 6666
 yolo_infer_port = 6667
 ocr_infer_port = 6668
+lane_infer_port1 = 6669
 
 [camera]
 front_camera_port = 5555
diff --git a/lane_server/infer.py b/lane_server/infer.py
index 1f7d13d..f66eb03 100644
--- a/lane_server/infer.py
+++ b/lane_server/infer.py
@@ -2,7 +2,8 @@ import paddle.inference as paddle_infer
 import numpy as np
 import paddle.vision.transforms as T
 class Lane_model_infer:
-    def __init__(self, model_dir="./lane_model"):
+    # def __init__(self, model_dir="./lane_model/7_12_6"):
+    def __init__(self, model_dir="./lane_model/6_9"):
         # initialize Paddle inference
         self.model_dir = model_dir
         self.config = paddle_infer.Config(model_dir + "/model.pdmodel", model_dir + "/model.pdiparams")
@@ -21,6 +22,9 @@ class Lane_model_infer:
         self.normalize_transform = T.Normalize(mean=[127.5], std=[127.5])
         # print(self.config.summary())
     def infer(self,src) -> np.ndarray:
+        # 7_12_3 [60:240,:]
+        # crop_src = src[60:240,:]
+        # image = self.normalize_transform(crop_src)
         image = self.normalize_transform(src)
         image = image.transpose(2, 0, 1)
         image = np.expand_dims(image, axis=0)
diff --git a/lane_server/lane_infer_server.py b/lane_server/lane_infer_server.py
index 8f0e50b..ff468f1 100644
--- a/lane_server/lane_infer_server.py
+++ b/lane_server/lane_infer_server.py
@@ -16,8 +16,8 @@ response = {'code': 0, 'data': 0}
 
 # handle server responses
 def server_resp(lane_infer_port):
-    logger.info("lane server thread init success")
     global response
+    logger.info("lane server thread init success")
 
     context = zmq.Context()
     # start the server
diff --git a/lane_server/lane_infer_server1.py b/lane_server/lane_infer_server1.py
new file mode 100644
index 0000000..f402e7d
--- /dev/null
+++ b/lane_server/lane_infer_server1.py
@@ -0,0 +1,99 @@
+import toml
+import threading
+from loguru import logger
+import logging
+import zmq
+from infer import Lane_model_infer
+import numpy as np
+import cv2
+
+
+lock = threading.Lock()
+response = {'code': 0, 'data': 0}
+
+
+
+
+# handle server responses
+def server_resp(lane_infer_port):
+    global response
+    global model_id
+    logger.info("lane server1 thread init success")
+
+    context = zmq.Context()
+    # start the server
+    socket = context.socket(zmq.REP)
+    socket.bind(f"tcp://*:{lane_infer_port}")
+    logger.info("lane infer1 server init success")
+    while True:
+        message = socket.recv_string()
+        with lock:
+            socket.send_pyobj(response)
+
+
+def lane_infer1_server_main(queue):
+
+    # context2 = zmq.Context()
+    # socket_server = context2.socket(zmq.PUB)
+    # socket_server.setsockopt(zmq.SNDHWM,10)
+    # socket_server.bind("tcp://*:7778")
+
+    if queue != None:
+        class Handler(logging.Handler):
+            def emit(self, record):
+                log_entry = self.format(record)
+                queue.put({'level': record.levelname.lower(), 'content': log_entry})
+        # logger.remove()
+        handler = Handler()
+        logger.add(handler, format="{time:MM-DD HH:mm:ss} {message}", level="DEBUG")
+
+    cfg = toml.load('/home/evan/Workplace/project_infer/cfg_infer_server.toml')
+
+    # configure log output
+    logger.add(cfg['debug']['logger_filename'], format=cfg['debug']['logger_format'], retention = 5, level="INFO")
+
+    # connect to the camera server; lane tracking only needs the front camera
+    context = zmq.Context()
+    camera_socket = context.socket(zmq.REQ)
+    camera_socket.connect(f"tcp://localhost:{cfg['camera']['front_camera_port']}")
+    logger.info("connect camera success")
+
+    # initialize the Paddle predictor
+    predictor = Lane_model_infer(model_dir="./lane_model/7_12_6")
+    logger.info("lane model load success")
+    # start the lane_infer_server thread
+    mythread = threading.Thread(target=server_resp,
+                                args=(cfg['server']['lane_infer_port1'],),
+                                daemon=True)
+
+    mythread.start()
+    import signal
+    import sys
+    def signal_handler(signum, frame):
+        logger.info("Received signal, exiting...")
+        camera_socket.close()
+        context.term()
+        sys.exit(0)
+
+    # register signal handlers
+    signal.signal(signal.SIGTERM, signal_handler)
+    signal.signal(signal.SIGINT, signal_handler)
+
+
+    while True:
+        camera_socket.send_string("")
+        message = camera_socket.recv()
+        np_array = np.frombuffer(message, dtype=np.uint8)
+        frame = cv2.imdecode(np_array, cv2.IMREAD_COLOR)
+        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
+        frame = frame[60:240,:]
+        result = predictor.infer(frame)
+        with lock:
+            response['data'] = result
+        if cv2.waitKey(1) == 27:
+            break
+    mythread.join()
+    logger.info("lane infer1 server exit")
+
+if __name__ == "__main__":
+    lane_infer1_server_main(None)
\ No newline at end of file
diff --git a/lane_server/lane_model/6_9/说明.txt b/lane_server/lane_model/6_9/说明.txt
new file mode 100644
index 0000000..7a64951
--- /dev/null
+++ b/lane_server/lane_model/6_9/说明.txt
@@ -0,0 +1,4 @@
+Crossroad augmentation
+Supplementary crossroad data (original data backed up)
+
+Similar to the first version: cuts the inside of right-angle turns, handles crossroads normally, touches the line on the curve after the red ball
\ No newline at end of file
diff --git a/lane_server/lane_model/7_12_3/readme.txt b/lane_server/lane_model/7_12_3/readme.txt
new file mode 100644
index 0000000..c45ffe8
--- /dev/null
+++ b/lane_server/lane_model/7_12_3/readme.txt
@@ -0,0 +1 @@
+Works with crop_frame = frame[60:240,:]
\ No newline at end of file
diff --git a/lane_server/lane_model/说明.txt b/lane_server/lane_model/说明.txt
new file mode 100644
index 0000000..1dd6254
--- /dev/null
+++ b/lane_server/lane_model/说明.txt
@@ -0,0 +1,8 @@
+Model in the root directory: the earliest version, not tuned for a long time
+2024-04-28-19-19-15
+
+
+6_9: 2024-04-28-19-19-15 data plus extra crossroad samples, mobilenet v3 small, full image
+7_10_2: newly annotated dataset, mobilenet v3 small, full image
+7_12_3: same dataset as 6_9, mobilenet v3 large, top 60 rows of pixels cropped off [60:240,:]
+7_12_6: same dataset as 6_9, mobilenet v3 small, top 60 rows of pixels cropped off [60:240,:]
\ No newline at end of file
diff --git a/ocr_server/ocr_infer_server.py b/ocr_server/ocr_infer_server.py
index 0e0e290..9f2406b 100644
--- a/ocr_server/ocr_infer_server.py
+++ b/ocr_server/ocr_infer_server.py
@@ -5,6 +5,7 @@ import cv2
 import numpy as np
 import requests
 import base64
+import datetime
 
 
 
@@ -58,6 +59,7 @@ def ocr_api_request(image_base64):
 
 
 if __name__ == "__main__":
+    logger.info("ocr server loading")
     cfg = toml.load('/home/evan/Workplace/project_infer/cfg_infer_server.toml')
 
     # configure log output
@@ -68,11 +70,11 @@ if __name__ == "__main__":
     # camera_socket.connect(f"tcp://localhost:{cfg['camera']['camera2_port']}")
     # logger.info("connect camera success")
 
-    cap = cv2.VideoCapture(20)
-    cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter.fourcc('M','J','P','G'))
-    cap.set(cv2.CAP_PROP_FPS, 20)
-    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 960)
-    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 540)
+    # cap = cv2.VideoCapture(20)
+    # cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter.fourcc('M','J','P','G'))
+    # cap.set(cv2.CAP_PROP_FPS, 20)
+    # cap.set(cv2.CAP_PROP_FRAME_WIDTH, 960)
+    # cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 540)
 
     # initialize the server
     context = zmq.Context()
@@ -83,7 +85,7 @@
     import signal
     import sys
     def signal_handler(signum, frame):
-        logger.info("Received signal, exiting...")
+        logger.info(f"received exit signal {signum}, exiting")
         socket.close()
         context.term()
         sys.exit(0)
@@ -92,15 +94,30 @@ if __name__ == "__main__":
     signal.signal(signal.SIGTERM, signal_handler)
     signal.signal(signal.SIGINT, signal_handler)
 
+    logger.info("ocr server loaded")
+
     while True:
         message1 = socket.recv_string()
-        logger.info("recv client request")
-        ret, frame = cap.read()
+        logger.info("received client request")
+
+        logger.info("opening camera")
+        cap = cv2.VideoCapture(20)
+        cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter.fourcc('M','J','P','G'))
+        cap.set(cv2.CAP_PROP_FPS, 20)
+        cap.set(cv2.CAP_PROP_FRAME_WIDTH, 960)
+        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 540)
+
+        for _ in range(5):
+            ret, frame = cap.read()
+            cv2.waitKey(1)
+
         if ret:
             frame = frame[:,0:480]
             frame = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)
-            output_file_path = 'rotate.jpg'
+            now = datetime.datetime.now()
+            timestamp = now.strftime("%Y%m%d_%H%M%S")
+            output_file_path = f"./saved_picture/{timestamp}.jpg"
             success = cv2.imwrite(output_file_path, frame)
 
             _, frame = cv2.imencode('.jpg', frame)
 
@@ -108,13 +125,19 @@ if __name__ == "__main__":
             encoded_image = base64.b64encode(frame).decode('utf-8')
 
             result = ocr_api_request(encoded_image)
-            print(result)
+            # print(result)
             if result != None:
                 socket.send_pyobj({'code': 0, 'content': result.get('words_result')})
+                logger.info(f"ocr returned {result.get('words_result')}")
             else:
                 socket.send_pyobj({'code': -1, 'content': "ocr found no text"})
+                logger.error("ocr found no text")
         else:
             socket.send_pyobj({'code': -1, 'content': "ocr camera read error"})
+            logger.critical("ocr camera read error")
+
+        cap.release()
+
         if cv2.waitKey(1) == 27:
             break
     logger.info("ocr infer server exit")
diff --git a/yolo_server/readme.txt b/yolo_server/readme.txt
new file mode 100644
index 0000000..5449da4
--- /dev/null
+++ b/yolo_server/readme.txt
@@ -0,0 +1 @@
+Model last used: 0622
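
For reference, a minimal client sketch for the new lane_infer1 endpoint (not part of the patch): it assumes the defaults above, i.e. lane_infer_port1 = 6669 in cfg_infer_server.toml and a running lane_server/lane_infer_server1.py. Because server_resp() only calls recv_string() before replying with the shared response dict via send_pyobj(), any string works as the request trigger.

    # hypothetical client sketch, not included in this patch
    import zmq

    context = zmq.Context()
    socket = context.socket(zmq.REQ)
    socket.connect("tcp://localhost:6669")  # lane_infer_port1 from cfg_infer_server.toml

    socket.send_string("")                  # the server only expects a string trigger
    reply = socket.recv_pyobj()             # {'code': 0, 'data': <lane regression output>}
    print(reply['code'], reply['data'])

    socket.close()
    context.term()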