feat: add dual lane-tracking regression servers

2024-07-13 15:24:03 +08:00
parent d81d0d7cff
commit cf167180b5
9 changed files with 153 additions and 12 deletions

View File

@@ -7,6 +7,7 @@ logger_format = "{time} {level} {message}"
lane_infer_port = 6666
yolo_infer_port = 6667
ocr_infer_port = 6668
lane_infer_port1 = 6669
[camera]
front_camera_port = 5555
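
The new lane_infer_port1 key gives the second lane-regression server its own REQ/REP endpoint next to the existing lane_infer_port. A minimal client sketch against that endpoint (only the config path and key names come from this commit; the polling code itself is illustrative):

import toml
import zmq

# load the same config file the servers read (path taken from this commit)
cfg = toml.load('/home/evan/Workplace/project_infer/cfg_infer_server.toml')

context = zmq.Context()
socket = context.socket(zmq.REQ)
# connect to the second lane server (lane_infer_port1 = 6669)
socket.connect(f"tcp://localhost:{cfg['server']['lane_infer_port1']}")

# the server replies to any request with the latest {'code': ..., 'data': ...} dict
socket.send_string("")
reply = socket.recv_pyobj()
print(reply['code'], reply['data'])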

View File

@@ -2,7 +2,8 @@ import paddle.inference as paddle_infer
import numpy as np
import paddle.vision.transforms as T
class Lane_model_infer:
def __init__(self, model_dir="./lane_model"):
# def __init__(self, model_dir="./lane_model/7_12_6"):
def __init__(self, model_dir="./lane_model/6_9"):
# initialize Paddle inference
self.model_dir = model_dir
self.config = paddle_infer.Config(model_dir + "/model.pdmodel", model_dir + "/model.pdiparams")
@@ -21,6 +22,9 @@ class Lane_model_infer:
self.normalize_transform = T.Normalize(mean=[127.5], std=[127.5])
# print(self.config.summary())
def infer(self,src) -> np.ndarray:
# 7_12_3 [60:240,:]
# crop_src = src[60:240,:]
# image = self.normalize_transform(crop_src)
image = self.normalize_transform(src)
image = image.transpose(2, 0, 1)
image = np.expand_dims(image, axis=0)
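
The commented-out lines keep both preprocessing variants side by side: the 7_12_* models expect the frame already cropped to rows 60–240, while 6_9 takes the full image. The same HWC-to-NCHW pipeline written as plain NumPy, for reference (this standalone helper is a sketch, not code from the repo; the 127.5 values mirror the T.Normalize call above):

import numpy as np

def preprocess(frame: np.ndarray, crop: bool = False) -> np.ndarray:
    # optionally drop the top 60 rows, as the 7_12_* models expect ([60:240,:])
    if crop:
        frame = frame[60:240, :]
    # scale to roughly [-1, 1], equivalent to T.Normalize(mean=[127.5], std=[127.5])
    image = (frame.astype(np.float32) - 127.5) / 127.5
    # HWC -> CHW, then add a batch dimension -> NCHW
    image = image.transpose(2, 0, 1)
    return np.expand_dims(image, axis=0)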

View File

@@ -16,8 +16,8 @@ response = {'code': 0, 'data': 0}
# handle server response data
def server_resp(lane_infer_port):
logger.info("lane server thread init success")
global response
logger.info("lane server thread init success")
context = zmq.Context()
# start the server

View File

@@ -0,0 +1,99 @@
import toml
import threading
from loguru import logger
import logging
import zmq
from infer import Lane_model_infer
import numpy as np
import cv2
lock = threading.Lock()
response = {'code': 0, 'data': 0}

# handle server response data
def server_resp(lane_infer_port):
    global response
    global model_id
    logger.info("lane server1 thread init success")
    context = zmq.Context()
    # start the server
    socket = context.socket(zmq.REP)
    socket.bind(f"tcp://*:{lane_infer_port}")
    logger.info("lane infer1 server init success")
    while True:
        message = socket.recv_string()
        with lock:
            socket.send_pyobj(response)

def lane_infer1_server_main(queue):
    # context2 = zmq.Context()
    # socket_server = context2.socket(zmq.PUB)
    # socket_server.setsockopt(zmq.SNDHWM,10)
    # socket_server.bind("tcp://*:7778")
    if queue != None:
        class Handler(logging.Handler):
            def emit(self, record):
                log_entry = self.format(record)
                queue.put({'level': record.levelname.lower(), 'content': log_entry})
        # logger.remove()
        handler = Handler()
        logger.add(handler, format="{time:MM-DD HH:mm:ss} {message}", level="DEBUG")
    cfg = toml.load('/home/evan/Workplace/project_infer/cfg_infer_server.toml')
    # configure log output
    logger.add(cfg['debug']['logger_filename'], format=cfg['debug']['logger_format'], retention = 5, level="INFO")
    # connect to the camera server; lane following only needs the front camera
    context = zmq.Context()
    camera_socket = context.socket(zmq.REQ)
    camera_socket.connect(f"tcp://localhost:{cfg['camera']['front_camera_port']}")
    logger.info("connect camera success")
    # initialize the Paddle predictor
    predictor = Lane_model_infer(model_dir="./lane_model/7_12_6")
    logger.info("lane model load success")
    # start the lane_infer_server thread
    mythread = threading.Thread(target=server_resp,
                                args=(cfg['server']['lane_infer_port1'],),
                                daemon=True)
    mythread.start()
    import signal
    import sys
    def signal_handler(signum, frame):
        logger.info("Received signal, exiting...")
        camera_socket.close()
        context.term()
        sys.exit(0)
    # register the signal handlers
    signal.signal(signal.SIGTERM, signal_handler)
    signal.signal(signal.SIGINT, signal_handler)
    while True:
        camera_socket.send_string("")
        message = camera_socket.recv()
        np_array = np.frombuffer(message, dtype=np.uint8)
        frame = cv2.imdecode(np_array, cv2.IMREAD_COLOR)
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        frame = frame[60:240,:]
        result = predictor.infer(frame)
        with lock:
            response['data'] = result
        if cv2.waitKey(1) == 27:
            break
    mythread.join()
    logger.info("lane infer1 server exit")

if __name__ == "__main__":
    lane_infer1_server_main(None)

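lane_infer1_server_main mirrors the original lane server but binds lane_infer_port1 and loads the 7_12_6 model, so both regressors can run at the same time. One way to launch the pair with a shared log queue, sketched below (the module and function names for the first server are assumptions based on this commit's naming, not verified against the repo):

import multiprocessing as mp

# hypothetical module names; the first server's entry point is assumed to mirror
# the lane_infer1_server_main added in this commit
from lane_infer_server import lane_infer_server_main
from lane_infer1_server import lane_infer1_server_main

if __name__ == "__main__":
    log_queue = mp.Queue()  # both servers put {'level': ..., 'content': ...} dicts here
    p1 = mp.Process(target=lane_infer_server_main, args=(log_queue,), daemon=True)
    p2 = mp.Process(target=lane_infer1_server_main, args=(log_queue,), daemon=True)
    p1.start()
    p2.start()
    while True:
        entry = log_queue.get()  # drain and print log records from either server
        print(entry['level'], entry['content'])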
View File

@@ -0,0 +1,4 @@
Crossroad augmentation
Supplemented with crossroad samples; original data backed up
Similar to the first version; cuts the inside of right-angle turns; crossroads are fine; presses the line on the bend after the red ball

View File

@@ -0,0 +1 @@
crop_frame = frame[60:240,:] is the one that works

View File

@@ -0,0 +1,8 @@
The model in the root directory is the earliest version and has not been tuned in a long time
2024-04-28-19-19-15
6_9: based on the 2024-04-28-19-19-15 data with added crossroad samples, mobilenet v3 small, full image
7_10_2: newly annotated dataset, mobilenet v3 small, full image
7_12_3: same dataset as 6_9, mobilenet v3 large, top 60 rows of pixels cropped off [60:240,:]
7_12_6: same dataset as 6_9, mobilenet v3 small, top 60 rows of pixels cropped off [60:240,:]

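Since the cropped and full-image models expect different inputs, the caller has to pair each model directory with its crop. A small lookup sketch of that pairing (directory names come from the list above; the helper itself is illustrative, not code from this repo):

# crop policy per model directory (names from the readme above)
CROP_BY_MODEL = {
    "./lane_model/6_9": False,      # full image
    "./lane_model/7_10_2": False,   # full image
    "./lane_model/7_12_3": True,    # expects frame[60:240, :]
    "./lane_model/7_12_6": True,    # expects frame[60:240, :]
}

def prepare(frame, model_dir):
    # drop the top 60 rows only for the cropped models
    return frame[60:240, :] if CROP_BY_MODEL.get(model_dir, False) else frame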
View File

@@ -5,6 +5,7 @@ import cv2
import numpy as np
import requests
import base64
import datetime
@@ -58,6 +59,7 @@ def ocr_api_request(image_base64):
if __name__ == "__main__":
logger.info("ocr server 开始加载")
cfg = toml.load('/home/evan/Workplace/project_infer/cfg_infer_server.toml')
# configure log output
@@ -68,11 +70,11 @@ if __name__ == "__main__":
# camera_socket.connect(f"tcp://localhost:{cfg['camera']['camera2_port']}")
# logger.info("connect camera success")
cap = cv2.VideoCapture(20)
cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter.fourcc('M','J','P','G'))
cap.set(cv2.CAP_PROP_FPS, 20)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 960)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 540)
# cap = cv2.VideoCapture(20)
# cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter.fourcc('M','J','P','G'))
# cap.set(cv2.CAP_PROP_FPS, 20)
# cap.set(cv2.CAP_PROP_FRAME_WIDTH, 960)
# cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 540)
# initialize the server
context = zmq.Context()
@@ -83,7 +85,7 @@ if __name__ == "__main__":
import signal
import sys
def signal_handler(signum, frame):
logger.info("Received signal, exiting...")
logger.info(f"接收到退出信号 {signum}, 退出中")
socket.close()
context.term()
sys.exit(0)
@@ -92,15 +94,30 @@ if __name__ == "__main__":
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
logger.info("ocr server 加载完成")
while True:
message1 = socket.recv_string()
logger.info("recv client request")
logger.info("收到客户端请求")
logger.info("构造摄像头")
cap = cv2.VideoCapture(20)
cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter.fourcc('M','J','P','G'))
cap.set(cv2.CAP_PROP_FPS, 20)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 960)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 540)
for _ in range(5):
ret, frame = cap.read()
cv2.waitKey(1)
if ret:
frame = frame[:,0:480]
frame = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)
output_file_path = 'rotate.jpg'
now = datetime.datetime.now()
timestamp = now.strftime("%Y%m%d_%H%M%S")
output_file_path = f"./saved_picture/{timestamp}.jpg"
success = cv2.imwrite(output_file_path, frame)
_, frame = cv2.imencode('.jpg', frame)
@@ -108,13 +125,19 @@ if __name__ == "__main__":
encoded_image = base64.b64encode(frame).decode('utf-8')
result = ocr_api_request(encoded_image)
print(result)
# print(result)
if result != None:
socket.send_pyobj({'code': 0, 'content': result.get('words_result')})
logger.info(f"ocr 返回 {result.get('words_result')}")
else:
socket.send_pyobj({'code': -1, 'content': " ocr 没找到文字"})
logger.error("ocr 没找到文字")
else:
socket.send_pyobj({'code': -1, 'content': "ocr 摄像头读取出错"})
logger.critical("ocr 摄像头读取出错")
cap.release()
if cv2.waitKey(1) == 27:
break
logger.info("ocr infer server exit")

yolo_server/readme.txt Normal file
View File

@@ -0,0 +1 @@
Model used last time: 0622