project_infer/ocr_server/ocr_infer_server.py

import toml
from loguru import logger
import logging
import zmq
from paddleocr import PaddleOCR
import cv2
logging.getLogger('paddleocr').setLevel(logging.CRITICAL)  # silence PaddleOCR's internal logging
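
# The script expects ../cfg_infer_server.toml to provide at least the keys read
# below. A minimal sketch of that file (all values here are hypothetical, not
# taken from the repository):
#
#   [debug]
#   logger_filename = "ocr_infer_server.log"
#   logger_format = "{time} | {level} | {message}"
#
#   [server]
#   ocr_infer_port = 5555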
if __name__ == "__main__":
    cfg = toml.load('../cfg_infer_server.toml')
    # Configure log output
    logger.add(cfg['debug']['logger_filename'], format=cfg['debug']['logger_format'], retention=5, level="INFO")
    # Open the camera (device index 4) and request a 320x240 capture size
    cap = cv2.VideoCapture(4)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)
    # Initialize the PaddleOCR predictor
    predictor = PaddleOCR(use_angle_cls=False, use_gpu=True)
    logger.info("ocr model load success")
    # Initialize the server context
    context = zmq.Context()
    # Start the server: bind a REP socket to the configured port
    socket = context.socket(zmq.REP)
    socket.bind(f"tcp://*:{cfg['server']['ocr_infer_port']}")
    while True:
        # Block until a client sends a request
        socket.recv_string()
        ret, frame = cap.read()
        try:
            if ret:
                result = predictor.ocr(frame)
                response = {'code': 0, 'data': result}
                socket.send_pyobj(response)
            else:
                socket.send_pyobj({'code': -1, 'data': None})
        except Exception:
            logger.exception("ocr inference failed")
            socket.send_pyobj({'code': -1, 'data': None})
        # Exit on ESC (only effective while an OpenCV window has focus)
        if cv2.waitKey(1) == 27:
            break
    logger.info("ocr infer server exit")
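
# --- Usage sketch (not part of this script) ---
# A minimal ZeroMQ REQ client for this server; the host and port value below
# are assumptions for illustration and should match cfg['server']['ocr_infer_port']:
#
#   import zmq
#
#   ctx = zmq.Context()
#   sock = ctx.socket(zmq.REQ)
#   sock.connect("tcp://127.0.0.1:5555")
#   sock.send_string("ocr")        # any request string triggers one capture + OCR pass
#   reply = sock.recv_pyobj()      # {'code': 0, 'data': <PaddleOCR result>} on success
#   if reply['code'] == 0:
#       print(reply['data'])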