initial commit

commit 381d05efa1
parent 5840e5883f
2024-06-07 20:19:04 +08:00
15 changed files with 2146 additions and 15 deletions

.gitignore vendored Normal file

@@ -0,0 +1,202 @@
# Created by https://www.toptal.com/developers/gitignore/api/python,visualstudiocode
# Edit at https://www.toptal.com/developers/gitignore?templates=python,visualstudiocode
### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
### Python Patch ###
# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
poetry.toml
# ruff
.ruff_cache/
# LSP config files
pyrightconfig.json
### VisualStudioCode ###
.vscode/*
!.vscode/settings.json
!.vscode/tasks.json
!.vscode/launch.json
!.vscode/extensions.json
!.vscode/*.code-snippets
# Local History for Visual Studio Code
.history/
# Built Visual Studio Code Extensions
*.vsix
### VisualStudioCode Patch ###
# Ignore all local history of files
.history
.ionide
# End of https://www.toptal.com/developers/gitignore/api/python,visualstudiocode
# yolo model path
yolo_server/ppyoloe_plus_crn_t_auxhead_320_300e_coco
yolo_server/*.zip
# task-recognition model path
person_yolo_server/model

cfg_infer_server.toml

@@ -5,6 +5,10 @@ logger_format = "{time} {level} {message}"
[server]
lane_infer_port = 6666
yolo_infer_port = 6667
ocr_infer_port = 6668
[camera]
front_camera_port = 5555
camera1_port = 5556
camera2_port = 5557
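
Every service in this commit loads this file with toml.load('../cfg_infer_server.toml') and picks its port out of the sections above; a minimal sketch of that lookup:

import toml

cfg = toml.load('../cfg_infer_server.toml')
print(cfg['server']['lane_infer_port'])    # 6666
print(cfg['server']['ocr_infer_port'])     # 6668
print(cfg['camera']['front_camera_port'])  # 5555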


@@ -1,20 +1,25 @@
import paddle.inference as paddle_infer
import numpy as np
import paddle.vision.transforms as T
class Lane_model_infer:
def __init__(self, model_dir="./lane_model"):
# initialize paddle inference
self.model_dir = model_dir
self.config = paddle_infer.Config(model_dir + "/model.pdmodel", model_dir + "/model.pdiparams")
self.config.disable_glog_info()
self.config.enable_use_gpu(200, 0)
# self.config.enable_memory_optim(True)
# self.config.switch_ir_optim(True)
# self.config.switch_use_feed_fetch_ops(False)
# self.config.delete_pass("conv_elementwise_add_act_fuse_pass")
# self.config.delete_pass("conv_elementwise_add_fuse_pass")
self.predictor = paddle_infer.create_predictor(self.config)
self.input_names = self.predictor.get_input_names()
self.input_handle = self.predictor.get_input_handle(self.input_names[0])
self.output_names = self.predictor.get_output_names()
self.output_handle = self.predictor.get_output_handle(self.output_names[0])
self.normalize_transform = T.Normalize(mean=[127.5], std=[127.5])
# print(self.config.summary())
def infer(self,src) -> np.ndarray:
image = self.normalize_transform(src)
image = image.transpose(2, 0, 1)
@@ -22,4 +27,11 @@ class Lane_model_infer:
self.input_handle.copy_from_cpu(image)
self.predictor.run()
results = self.output_handle.copy_to_cpu()[0]
return results
# if __name__ == "__main__":
# predictor = Lane_model_infer()
# import time
# while True:
# time.sleep(1)
# print('123')
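
A minimal offline sketch of driving this predictor the way main.py (below) does, with a dummy 240x320 frame standing in for a camera capture; it assumes the ./lane_model directory is present:

import numpy as np
from infer import Lane_model_infer

predictor = Lane_model_infer()                   # loads ./lane_model/model.pdmodel
frame = np.zeros((240, 320, 3), dtype=np.uint8)  # dummy camera-sized frame
result = predictor.infer(frame)                  # lane prediction, e.g. a target point
print(result)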


@@ -5,16 +5,22 @@ import zmq
from infer import Lane_model_infer
import numpy as np
import cv2
lock = threading.Lock()
response = {'code': 0, 'data': 0}
# context2 = zmq.Context()
# socket_server = context2.socket(zmq.PUB)
# socket_server.bind("tcp://*:7778")
# handle server response data
def server_resp(lane_infer_port):
logger.info("lane server thread init success")
global response
context = zmq.Context()
# start server
socket = context.socket(zmq.REP)
socket.bind(f"tcp://*:{lane_infer_port}")
logger.info("lane infer server init success")
@@ -23,8 +29,6 @@ def server_resp(lane_infer_port):
with lock:
socket.send_pyobj(response)
if __name__ == "__main__":
cfg = toml.load('../cfg_infer_server.toml')
@@ -32,29 +36,36 @@ if __name__ == "__main__":
# configure log output
logger.add(cfg['debug']['logger_filename'], format=cfg['debug']['logger_format'], retention = 5, level="INFO")
# connect to the camera server; lane following only needs the front camera
context = zmq.Context()
camera_socket = context.socket(zmq.REQ)
camera_socket.connect(f"tcp://localhost:{cfg['camera']['front_camera_port']}")
logger.info("connect camera success")
# initialize the paddle predictor
predictor = Lane_model_infer()
logger.info("lane model load success")
# start the lane_infer_server thread
mythread = threading.Thread(target=server_resp,
args=(cfg['server']['lane_infer_port'],),
daemon=True)
mythread.start()
while True:
camera_socket.send_string("")
message = camera_socket.recv()
np_array = np.frombuffer(message, dtype=np.uint8)
frame = cv2.imdecode(np_array, cv2.IMREAD_COLOR)
frame = cv2.resize(frame,(320,240))
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
result = predictor.infer(frame)
with lock:
response['data'] = result
# print(result)
# cv2.circle(frame,(int(result[0]),int(result[1])),5,(0,255,0),-1)
# socket_server.send_pyobj(frame)
if cv2.waitKey(1) == 27:
break
mythread.join()
logger.info("lane infer server exit")


@@ -5,7 +5,7 @@ import cv2
context = zmq.Context()
socket = context.socket(zmq.SUB)
socket.connect("tcp://localhost:5555")
socket.connect("tcp://localhost:5556")
socket.setsockopt_string(zmq.SUBSCRIBE, '')
while True:


@@ -0,0 +1,46 @@
import toml
from loguru import logger
import logging
import zmq
from paddleocr import PaddleOCR
import cv2
logging.getLogger('paddleocr').setLevel(logging.CRITICAL)
if __name__ == "__main__":
cfg = toml.load('../cfg_infer_server.toml')
# configure log output
logger.add(cfg['debug']['logger_filename'], format=cfg['debug']['logger_format'], retention = 5, level="INFO")
# connect camera
cap = cv2.VideoCapture(4)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)
# initialize the paddle predictor
predictor = PaddleOCR(use_angle_cls=False, use_gpu=True)
logger.info("ocr model load success")
# initialize server
context = zmq.Context()
# start server
socket = context.socket(zmq.REP)
socket.bind(f"tcp://*:{cfg['server']['ocr_infer_port']}")
while True:
socket.recv_string()
ret, frame = cap.read()
try:
if ret:
result = predictor.ocr(frame)
response = {'code': 0, 'data': result}
socket.send_pyobj(response)
else:
socket.send_pyobj({'code': -1, 'data': None})
except Exception:
socket.send_pyobj({'code': -1, 'data': None})
if cv2.waitKey(1) == 27:
break
logger.info("ocr infer server exit")


@@ -0,0 +1,51 @@
import paddle.inference as paddle_infer
import numpy as np
import cv2
class Person_model_infer:
def __init__(self, model_dir="./model", target_size=[640, 640]):
# initialize paddle inference
self.model_dir = model_dir
self.config = paddle_infer.Config(model_dir + "/model.pdmodel", model_dir + "/model.pdiparams")
self.config.disable_glog_info()
self.config.enable_use_gpu(200, 0)
self.predictor = paddle_infer.create_predictor(self.config)
self.input_names = self.predictor.get_input_names()
self.input_handle = self.predictor.get_input_handle(self.input_names[0])
self.input_handle1 = self.predictor.get_input_handle(self.input_names[1])
self.output_names = self.predictor.get_output_names()
self.output_handle = self.predictor.get_output_handle(self.output_names[0])
self.target_size = target_size
origin_shape = (240, 320)
resize_h, resize_w = self.target_size
self.im_scale_y = resize_h / float(origin_shape[0])
self.im_scale_x = resize_w / float(origin_shape[1])
self.scale_info = np.array([[self.im_scale_y, self.im_scale_x]]).astype('float32')
def infer(self,src) -> np.ndarray:
image = self.preprocess(src)
self.input_handle.copy_from_cpu(image)
self.input_handle1.copy_from_cpu(self.scale_info)
self.predictor.run()
results = self.output_handle.copy_to_cpu()
return results
def preprocess(self, src):
# resize
# keep_ratio=0
img = cv2.resize(
src,
None,
None,
fx=self.im_scale_x,
fy=self.im_scale_y,
interpolation=2)
# Permute
img = img.astype(np.float32, copy=False)
img = img.transpose((2, 0, 1))
img = np.array((img, ))
return img
if __name__ == "__main__":
predictor = Person_model_infer()
# import time
# while True:
# time.sleep(1)
# print('123')
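
The predictor returns an [N, 6] array of [class, score, x_min, y_min, x_max, y_max] rows (see the visualize.py docstrings below); a filtering sketch matching the pattern used in yolo_server/test.py, with a dummy frame in place of a real capture:

import numpy as np
from infer import Person_model_infer

predictor = Person_model_infer()
frame = np.zeros((240, 320, 3), dtype=np.uint8)      # dummy camera-sized frame
results = predictor.infer(frame)                     # shape [N, 6]
keep = (results[:, 1] > 0.5) & (results[:, 0] > -1)  # score > 0.5, valid class id
for cls, score, x0, y0, x1, y1 in results[keep, :]:
    print(int(cls), round(float(score), 3), (x0, y0, x1, y1))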


@@ -0,0 +1,53 @@
import toml
from loguru import logger
import zmq
import numpy as np
import cv2
from infer import Person_model_infer
from visualize import visualize_box_mask
labels = [
"pedestrian"
]
if __name__ == "__main__":
cfg = toml.load('../cfg_infer_server.toml')
# Configure log output
logger.add(cfg['debug']['logger_filename'], format=cfg['debug']['logger_format'], retention=5, level="INFO")
# Initialize YOLO inference model
predictor = Person_model_infer()
logger.info("person yolo model load success")
context1 = zmq.Context()
camera1_socket = context1.socket(zmq.REQ)
camera1_socket.connect(f"tcp://localhost:{cfg['camera']['camera1_port']}")
logger.info("connect camera1 success")
context2 = zmq.Context()
socket = context2.socket(zmq.REP)
socket.bind("tcp://*:7778")
logger.info("bind server success")
while True:
message = socket.recv()
camera1_socket.send_string("")
message = camera1_socket.recv()
np_array = np.frombuffer(message, dtype=np.uint8)
frame = cv2.imdecode(np_array, cv2.IMREAD_COLOR)
result = predictor.infer(frame)
img = visualize_box_mask(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR),result,labels)
showim = np.array(img)
socket.send_pyobj(showim)
if cv2.waitKey(1) == 27:
break
camera1_socket.close()
socket.close()
context1.term()
context2.term()
logger.info("Interrupt received, stopping...")
logger.info("person yolo infer server exit")


@@ -0,0 +1,649 @@
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import os
import cv2
import math
import numpy as np
import PIL
from PIL import Image, ImageDraw, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
def imagedraw_textsize_c(draw, text):
if int(PIL.__version__.split('.')[0]) < 10:
tw, th = draw.textsize(text)
else:
left, top, right, bottom = draw.textbbox((0, 0), text)
tw, th = right - left, bottom - top
return tw, th
def visualize_box_mask(im, results, labels, threshold=0.5):
"""
Args:
im (str/np.ndarray): path of image/np.ndarray read by cv2
results (dict): include 'boxes': np.ndarray: shape:[N,6], N: number of box,
matrix element: [class, score, x_min, y_min, x_max, y_max]
MaskRCNN's results include 'masks': np.ndarray:
shape:[N, im_h, im_w]
labels (list): labels:['class1', ..., 'classn']
threshold (float): Threshold of score.
Returns:
im (PIL.Image.Image): visualized image
"""
if isinstance(im, str):
im = Image.open(im).convert('RGB')
elif isinstance(im, np.ndarray):
im = Image.fromarray(im)
# if 'masks' in results and 'boxes' in results and len(results['boxes']) > 0:
# im = draw_mask(
# im, results['boxes'], results['masks'], labels, threshold=threshold)
# if 'boxes' in results and len(results['boxes']) > 0:
im = draw_box(im, results, labels, threshold=threshold)
# if 'segm' in results:
# im = draw_segm(
# im,
# results['segm'],
# results['label'],
# results['score'],
# labels,
# threshold=threshold)
return im
def get_color_map_list(num_classes):
"""
Args:
num_classes (int): number of class
Returns:
color_map (list): RGB color list
"""
color_map = num_classes * [0, 0, 0]
for i in range(0, num_classes):
j = 0
lab = i
while lab:
color_map[i * 3] |= (((lab >> 0) & 1) << (7 - j))
color_map[i * 3 + 1] |= (((lab >> 1) & 1) << (7 - j))
color_map[i * 3 + 2] |= (((lab >> 2) & 1) << (7 - j))
j += 1
lab >>= 3
color_map = [color_map[i:i + 3] for i in range(0, len(color_map), 3)]
return color_map
def draw_mask(im, np_boxes, np_masks, labels, threshold=0.5):
"""
Args:
im (PIL.Image.Image): PIL image
np_boxes (np.ndarray): shape:[N,6], N: number of box,
matrix element: [class, score, x_min, y_min, x_max, y_max]
np_masks (np.ndarray): shape:[N, im_h, im_w]
labels (list): labels:['class1', ..., 'classn']
threshold (float): threshold of mask
Returns:
im (PIL.Image.Image): visualized image
"""
color_list = get_color_map_list(len(labels))
w_ratio = 0.4
alpha = 0.7
im = np.array(im).astype('float32')
clsid2color = {}
expect_boxes = (np_boxes[:, 1] > threshold) & (np_boxes[:, 0] > -1)
np_boxes = np_boxes[expect_boxes, :]
np_masks = np_masks[expect_boxes, :, :]
im_h, im_w = im.shape[:2]
np_masks = np_masks[:, :im_h, :im_w]
for i in range(len(np_masks)):
clsid, score = int(np_boxes[i][0]), np_boxes[i][1]
mask = np_masks[i]
if clsid not in clsid2color:
clsid2color[clsid] = color_list[clsid]
color_mask = clsid2color[clsid]
for c in range(3):
color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio * 255
idx = np.nonzero(mask)
color_mask = np.array(color_mask)
im[idx[0], idx[1], :] *= 1.0 - alpha
im[idx[0], idx[1], :] += alpha * color_mask
return Image.fromarray(im.astype('uint8'))
def draw_box(im, np_boxes, labels, threshold=0.5):
"""
Args:
im (PIL.Image.Image): PIL image
np_boxes (np.ndarray): shape:[N,6], N: number of box,
matrix element: [class, score, x_min, y_min, x_max, y_max]
labels (list): labels:['class1', ..., 'classn']
threshold (float): threshold of box
Returns:
im (PIL.Image.Image): visualized image
"""
draw_thickness = min(im.size) // 320
draw = ImageDraw.Draw(im)
clsid2color = {}
color_list = get_color_map_list(len(labels))
expect_boxes = (np_boxes[:, 1] > threshold) & (np_boxes[:, 0] > -1)
np_boxes = np_boxes[expect_boxes, :]
for dt in np_boxes:
clsid, bbox, score = int(dt[0]), dt[2:], dt[1]
if clsid not in clsid2color:
clsid2color[clsid] = color_list[clsid]
color = tuple(clsid2color[clsid])
if len(bbox) == 4:
xmin, ymin, xmax, ymax = bbox
# print('class_id:{:d}, confidence:{:.4f}, left_top:[{:.2f},{:.2f}],'
# 'right_bottom:[{:.2f},{:.2f}]'.format(
# int(clsid), score, xmin, ymin, xmax, ymax))
# draw bbox
draw.line(
[(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin),
(xmin, ymin)],
width=draw_thickness,
fill=color)
elif len(bbox) == 8:
x1, y1, x2, y2, x3, y3, x4, y4 = bbox
draw.line(
[(x1, y1), (x2, y2), (x3, y3), (x4, y4), (x1, y1)],
width=2,
fill=color)
xmin = min(x1, x2, x3, x4)
ymin = min(y1, y2, y3, y4)
# draw label
text = "{} {} {:.4f}".format(clsid, labels[clsid], score)
tw, th = imagedraw_textsize_c(draw, text)
draw.rectangle(
[(xmin + 1, ymin - th), (xmin + tw + 1, ymin)], fill=color)
draw.text((xmin + 1, ymin - th), text, fill=(255, 255, 255))
return im
def draw_segm(im,
np_segms,
np_label,
np_score,
labels,
threshold=0.5,
alpha=0.7):
"""
Draw segmentation on image
"""
mask_color_id = 0
w_ratio = .4
color_list = get_color_map_list(len(labels))
im = np.array(im).astype('float32')
clsid2color = {}
np_segms = np_segms.astype(np.uint8)
for i in range(np_segms.shape[0]):
mask, score, clsid = np_segms[i], np_score[i], np_label[i]
if score < threshold:
continue
if clsid not in clsid2color:
clsid2color[clsid] = color_list[clsid]
color_mask = clsid2color[clsid]
for c in range(3):
color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio * 255
idx = np.nonzero(mask)
color_mask = np.array(color_mask)
idx0 = np.minimum(idx[0], im.shape[0] - 1)
idx1 = np.minimum(idx[1], im.shape[1] - 1)
im[idx0, idx1, :] *= 1.0 - alpha
im[idx0, idx1, :] += alpha * color_mask
sum_x = np.sum(mask, axis=0)
x = np.where(sum_x > 0.5)[0]
sum_y = np.sum(mask, axis=1)
y = np.where(sum_y > 0.5)[0]
x0, x1, y0, y1 = x[0], x[-1], y[0], y[-1]
cv2.rectangle(im, (x0, y0), (x1, y1),
tuple(color_mask.astype('int32').tolist()), 1)
bbox_text = '%s %.2f' % (labels[clsid], score)
t_size = cv2.getTextSize(bbox_text, 0, 0.3, thickness=1)[0]
cv2.rectangle(im, (x0, y0), (x0 + t_size[0], y0 - t_size[1] - 3),
tuple(color_mask.astype('int32').tolist()), -1)
cv2.putText(
im,
bbox_text, (x0, y0 - 2),
cv2.FONT_HERSHEY_SIMPLEX,
0.3, (0, 0, 0),
1,
lineType=cv2.LINE_AA)
return Image.fromarray(im.astype('uint8'))
def get_color(idx):
idx = idx * 3
color = ((37 * idx) % 255, (17 * idx) % 255, (29 * idx) % 255)
return color
def visualize_pose(imgfile,
results,
visual_thresh=0.6,
save_name='pose.jpg',
save_dir='output',
returnimg=False,
ids=None):
try:
import matplotlib.pyplot as plt
import matplotlib
plt.switch_backend('agg')
except Exception as e:
print('Matplotlib not found, please install matplotlib, '
'for example: `pip install matplotlib`.')
raise e
skeletons, scores = results['keypoint']
skeletons = np.array(skeletons)
kpt_nums = 17
if len(skeletons) > 0:
kpt_nums = skeletons.shape[1]
if kpt_nums == 17: #plot coco keypoint
EDGES = [(0, 1), (0, 2), (1, 3), (2, 4), (3, 5), (4, 6), (5, 7), (6, 8),
(7, 9), (8, 10), (5, 11), (6, 12), (11, 13), (12, 14),
(13, 15), (14, 16), (11, 12)]
else: #plot mpii keypoint
EDGES = [(0, 1), (1, 2), (3, 4), (4, 5), (2, 6), (3, 6), (6, 7), (7, 8),
(8, 9), (10, 11), (11, 12), (13, 14), (14, 15), (8, 12),
(8, 13)]
NUM_EDGES = len(EDGES)
colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \
[0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \
[170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
cmap = matplotlib.cm.get_cmap('hsv')
plt.figure()
img = cv2.imread(imgfile) if type(imgfile) == str else imgfile
color_set = results['colors'] if 'colors' in results else None
if 'bbox' in results and ids is None:
bboxs = results['bbox']
for j, rect in enumerate(bboxs):
xmin, ymin, xmax, ymax = rect
color = colors[0] if color_set is None else colors[color_set[j] %
len(colors)]
cv2.rectangle(img, (xmin, ymin), (xmax, ymax), color, 1)
canvas = img.copy()
for i in range(kpt_nums):
for j in range(len(skeletons)):
if skeletons[j][i, 2] < visual_thresh:
continue
if ids is None:
color = colors[i] if color_set is None else colors[color_set[j]
%
len(colors)]
else:
color = get_color(ids[j])
cv2.circle(
canvas,
tuple(skeletons[j][i, 0:2].astype('int32')),
2,
color,
thickness=-1)
to_plot = cv2.addWeighted(img, 0.3, canvas, 0.7, 0)
fig = matplotlib.pyplot.gcf()
stickwidth = 2
for i in range(NUM_EDGES):
for j in range(len(skeletons)):
edge = EDGES[i]
if skeletons[j][edge[0], 2] < visual_thresh or skeletons[j][edge[
1], 2] < visual_thresh:
continue
cur_canvas = canvas.copy()
X = [skeletons[j][edge[0], 1], skeletons[j][edge[1], 1]]
Y = [skeletons[j][edge[0], 0], skeletons[j][edge[1], 0]]
mX = np.mean(X)
mY = np.mean(Y)
length = ((X[0] - X[1])**2 + (Y[0] - Y[1])**2)**0.5
angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
polygon = cv2.ellipse2Poly((int(mY), int(mX)),
(int(length / 2), stickwidth),
int(angle), 0, 360, 1)
if ids is None:
color = colors[i] if color_set is None else colors[color_set[j]
%
len(colors)]
else:
color = get_color(ids[j])
cv2.fillConvexPoly(cur_canvas, polygon, color)
canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)
if returnimg:
return canvas
save_name = os.path.join(
save_dir, os.path.splitext(os.path.basename(imgfile))[0] + '_vis.jpg')
plt.imsave(save_name, canvas[:, :, ::-1])
print("keypoint visualize image saved to: " + save_name)
plt.close()
def visualize_attr(im, results, boxes=None, is_mtmct=False):
if isinstance(im, str):
im = Image.open(im)
im = np.ascontiguousarray(np.copy(im))
im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
else:
im = np.ascontiguousarray(np.copy(im))
im_h, im_w = im.shape[:2]
text_scale = max(0.5, im.shape[0] / 3000.)
text_thickness = 1
line_inter = im.shape[0] / 40.
for i, res in enumerate(results):
if boxes is None:
text_w = 3
text_h = 1
elif is_mtmct:
box = boxes[i] # multi camera, bbox shape is x,y, w,h
text_w = int(box[0]) + 3
text_h = int(box[1])
else:
box = boxes[i] # single camera, bbox shape is 0, 0, x,y, w,h
text_w = int(box[2]) + 3
text_h = int(box[3])
for text in res:
text_h += int(line_inter)
text_loc = (text_w, text_h)
cv2.putText(
im,
text,
text_loc,
cv2.FONT_ITALIC,
text_scale, (0, 255, 255),
thickness=text_thickness)
return im
def visualize_action(im,
mot_boxes,
action_visual_collector=None,
action_text="",
video_action_score=None,
video_action_text=""):
im = cv2.imread(im) if isinstance(im, str) else im
im_h, im_w = im.shape[:2]
text_scale = max(1, im.shape[1] / 400.)
text_thickness = 2
if action_visual_collector:
id_action_dict = {}
for collector, action_type in zip(action_visual_collector, action_text):
id_detected = collector.get_visualize_ids()
for pid in id_detected:
id_action_dict[pid] = id_action_dict.get(pid, [])
id_action_dict[pid].append(action_type)
for mot_box in mot_boxes:
# mot_box is a format with [mot_id, class, score, xmin, ymin, w, h]
if mot_box[0] in id_action_dict:
text_position = (int(mot_box[3] + mot_box[5] * 0.75),
int(mot_box[4] - 10))
display_text = ', '.join(id_action_dict[mot_box[0]])
cv2.putText(im, display_text, text_position,
cv2.FONT_HERSHEY_PLAIN, text_scale, (0, 0, 255), 2)
if video_action_score:
cv2.putText(
im,
video_action_text + ': %.2f' % video_action_score,
(int(im_w / 2), int(15 * text_scale) + 5),
cv2.FONT_ITALIC,
text_scale, (0, 0, 255),
thickness=text_thickness)
return im
def visualize_vehicleplate(im, results, boxes=None):
if isinstance(im, str):
im = Image.open(im)
im = np.ascontiguousarray(np.copy(im))
im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
else:
im = np.ascontiguousarray(np.copy(im))
im_h, im_w = im.shape[:2]
text_scale = max(1.0, im.shape[0] / 400.)
text_thickness = 2
line_inter = im.shape[0] / 40.
for i, res in enumerate(results):
if boxes is None:
text_w = 3
text_h = 1
else:
box = boxes[i]
text = res
if text == "":
continue
text_w = int(box[2])
text_h = int(box[5] + box[3])
text_loc = (text_w, text_h)
cv2.putText(
im,
"LP: " + text,
text_loc,
cv2.FONT_ITALIC,
text_scale, (0, 255, 255),
thickness=text_thickness)
return im
def draw_press_box_lanes(im, np_boxes, labels, threshold=0.5):
"""
Args:
im (PIL.Image.Image): PIL image
np_boxes (np.ndarray): shape:[N,6], N: number of box,
matrix element: [class, score, x_min, y_min, x_max, y_max]
labels (list): labels:['class1', ..., 'classn']
threshold (float): threshold of box
Returns:
im (PIL.Image.Image): visualized image
"""
if isinstance(im, str):
im = Image.open(im).convert('RGB')
elif isinstance(im, np.ndarray):
im = Image.fromarray(im)
draw_thickness = min(im.size) // 320
draw = ImageDraw.Draw(im)
clsid2color = {}
color_list = get_color_map_list(len(labels))
if np_boxes.shape[1] == 7:
np_boxes = np_boxes[:, 1:]
expect_boxes = (np_boxes[:, 1] > threshold) & (np_boxes[:, 0] > -1)
np_boxes = np_boxes[expect_boxes, :]
for dt in np_boxes:
clsid, bbox, score = int(dt[0]), dt[2:], dt[1]
if clsid not in clsid2color:
clsid2color[clsid] = color_list[clsid]
color = tuple(clsid2color[clsid])
if len(bbox) == 4:
xmin, ymin, xmax, ymax = bbox
# draw bbox
draw.line(
[(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin),
(xmin, ymin)],
width=draw_thickness,
fill=(0, 0, 255))
elif len(bbox) == 8:
x1, y1, x2, y2, x3, y3, x4, y4 = bbox
draw.line(
[(x1, y1), (x2, y2), (x3, y3), (x4, y4), (x1, y1)],
width=2,
fill=color)
xmin = min(x1, x2, x3, x4)
ymin = min(y1, y2, y3, y4)
# draw label
text = "{}".format(labels[clsid])
tw, th = imagedraw_textsize_c(draw, text)
draw.rectangle(
[(xmin + 1, ymax - th), (xmin + tw + 1, ymax)], fill=color)
draw.text((xmin + 1, ymax - th), text, fill=(0, 0, 255))
return im
def visualize_vehiclepress(im, results, threshold=0.5):
results = np.array(results)
labels = ['violation']
im = draw_press_box_lanes(im, results, labels, threshold=threshold)
return im
def visualize_lane(im, lanes):
if isinstance(im, str):
im = Image.open(im).convert('RGB')
elif isinstance(im, np.ndarray):
im = Image.fromarray(im)
draw_thickness = min(im.size) // 320
draw = ImageDraw.Draw(im)
if len(lanes) > 0:
for lane in lanes:
draw.line(
[(lane[0], lane[1]), (lane[2], lane[3])],
width=draw_thickness,
fill=(0, 0, 255))
return im
def visualize_vehicle_retrograde(im, mot_res, vehicle_retrograde_res):
if isinstance(im, str):
im = Image.open(im).convert('RGB')
elif isinstance(im, np.ndarray):
im = Image.fromarray(im)
draw_thickness = min(im.size) // 320
draw = ImageDraw.Draw(im)
lane = vehicle_retrograde_res['fence_line']
if lane is not None:
draw.line(
[(lane[0], lane[1]), (lane[2], lane[3])],
width=draw_thickness,
fill=(0, 0, 0))
mot_id = vehicle_retrograde_res['output']
if mot_id is None or len(mot_id) == 0:
return im
if mot_res is None:
return im
np_boxes = mot_res['boxes']
if np_boxes is not None:
for dt in np_boxes:
if dt[0] not in mot_id:
continue
bbox = dt[3:]
if len(bbox) == 4:
xmin, ymin, xmax, ymax = bbox
# draw bbox
draw.line(
[(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin),
(xmin, ymin)],
width=draw_thickness,
fill=(0, 255, 0))
# draw label
text = "retrograde"
tw, th = imagedraw_textsize_c(draw, text)
draw.rectangle(
[(xmax + 1, ymin - th), (xmax + tw + 1, ymin)],
fill=(0, 255, 0))
draw.text((xmax + 1, ymin - th), text, fill=(0, 255, 0))
return im
COLORS = [
(255, 0, 0),
(0, 255, 0),
(0, 0, 255),
(255, 255, 0),
(255, 0, 255),
(0, 255, 255),
(128, 255, 0),
(255, 128, 0),
(128, 0, 255),
(255, 0, 128),
(0, 128, 255),
(0, 255, 128),
(128, 255, 255),
(255, 128, 255),
(255, 255, 128),
(60, 180, 0),
(180, 60, 0),
(0, 60, 180),
(0, 180, 60),
(60, 0, 180),
(180, 0, 60),
(255, 0, 0),
(0, 255, 0),
(0, 0, 255),
(255, 255, 0),
(255, 0, 255),
(0, 255, 255),
(128, 255, 0),
(255, 128, 0),
(128, 0, 255),
]
def imshow_lanes(img, lanes, show=False, out_file=None, width=4):
lanes_xys = []
for _, lane in enumerate(lanes):
xys = []
for x, y in lane:
if x <= 0 or y <= 0:
continue
x, y = int(x), int(y)
xys.append((x, y))
lanes_xys.append(xys)
lanes_xys.sort(key=lambda xys: xys[0][0] if len(xys) > 0 else 0)
for idx, xys in enumerate(lanes_xys):
for i in range(1, len(xys)):
cv2.line(img, xys[i - 1], xys[i], COLORS[idx], thickness=width)
if show:
cv2.imshow('view', img)
cv2.waitKey(0)
if out_file:
if not os.path.exists(os.path.dirname(out_file)):
os.makedirs(os.path.dirname(out_file))
cv2.imwrite(out_file, img)


@@ -0,0 +1,65 @@
import paddle.inference as paddle_infer
import numpy as np
import cv2
class Yolo_model_infer:
def __init__(self, model_dir="./yolo_model", target_size=[640, 640]):
# initialize paddle inference
self.model_dir = model_dir
self.config = paddle_infer.Config(model_dir + "/model.pdmodel", model_dir + "/model.pdiparams")
self.config.enable_memory_optim()
self.config.switch_ir_optim()
self.config.enable_use_gpu(1000, 0)
self.predictor = paddle_infer.create_predictor(self.config)
self.input_names = self.predictor.get_input_names()
self.input_handle = self.predictor.get_input_handle(self.input_names[0])
self.input_handle1 = self.predictor.get_input_handle(self.input_names[1])
self.output_names = self.predictor.get_output_names()
self.output_handle = self.predictor.get_output_handle(self.output_names[0])
self.target_size = target_size
self.fill_value = [114.0, 114.0, 114.0]
def infer(self,src) -> np.ndarray:
image, scale_info = self.preprocess(src)
self.input_handle.copy_from_cpu(image)
self.input_handle1.copy_from_cpu(scale_info)
self.predictor.run()
results = self.output_handle.copy_to_cpu()
return results
def preprocess(self,src):
# resize
origin_shape = src.shape[:2]
# keep_ratio==1
im_size_min = np.min(origin_shape)
im_size_max = np.max(origin_shape)
target_size_min = np.min(self.target_size)
target_size_max = np.max(self.target_size)
im_scale = float(target_size_min) / float(im_size_min)
if np.round(im_scale * im_size_max) > target_size_max:
im_scale = float(target_size_max) / float(im_size_max)
im_scale_x = im_scale
im_scale_y = im_scale
img = cv2.resize(
src,
None,
None,
fx=im_scale_x,
fy=im_scale_y,
interpolation=1)
# pad
# pad = Pad((640, 640))
# img = pad(img)
im_h, im_w = img.shape[:2]
h, w = self.target_size
canvas = np.ones((h, w, 3), dtype=np.float32)
canvas *= np.array(self.fill_value, dtype=np.float32)
canvas[0:im_h, 0:im_w, :] = img.astype(np.float32)
img = canvas
# Permute
img = img.transpose((2, 0, 1)).copy()
img = np.array((img, )).astype('float32')
return img, np.array([im_scale_y, im_scale_x]).astype('float32')
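
A worked example of the keep_ratio branch above for the 240x320 camera frames used elsewhere in this commit:

import numpy as np

origin, target = (240, 320), (640, 640)
im_scale = min(target) / min(origin)                # 640 / 240 ≈ 2.667
if np.round(im_scale * max(origin)) > max(target):  # round(2.667 * 320) = 853 > 640
    im_scale = max(target) / max(origin)            # 640 / 320 = 2.0
print(im_scale)  # 2.0 -> resize to 480x640, pad with fill_value 114 to 640x640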

yolo_server/infer_new.py Normal file

@@ -0,0 +1,53 @@
import paddle.inference as paddle_infer
import numpy as np
import cv2
class Yolo_model_infer:
def __init__(self, model_dir="./ppyoloe_plus_crn_t_auxhead_320_300e_coco", target_size=[320, 320]):
# initialize paddle inference
self.model_dir = model_dir
self.config = paddle_infer.Config(model_dir + "/model.pdmodel", model_dir + "/model.pdiparams")
self.config.disable_glog_info()
self.config.enable_use_gpu(500, 0)
self.config.enable_memory_optim()
self.config.switch_ir_optim()
self.config.switch_use_feed_fetch_ops(False)
self.predictor = paddle_infer.create_predictor(self.config)
self.input_names = self.predictor.get_input_names()
self.input_handle = self.predictor.get_input_handle(self.input_names[0])
self.input_handle1 = self.predictor.get_input_handle(self.input_names[1])
self.output_names = self.predictor.get_output_names()
self.output_handle = self.predictor.get_output_handle(self.output_names[0])
self.target_size = target_size
origin_shape = (240, 320)
resize_h, resize_w = self.target_size
self.im_scale_y = resize_h / float(origin_shape[0])
self.im_scale_x = resize_w / float(origin_shape[1])
self.scale_info = np.array([[self.im_scale_y, self.im_scale_x]]).astype('float32')
def infer(self,src) -> np.ndarray:
image = self.preprocess(src)
self.input_handle.copy_from_cpu(image)
self.input_handle1.copy_from_cpu(self.scale_info)
self.predictor.run()
results = self.output_handle.copy_to_cpu()
return results
def preprocess(self,src):
# resize
# keep_ratio=0
img = cv2.resize(
src,
None,
None,
fx=self.im_scale_x,
fy=self.im_scale_y,
interpolation=2)
# NormalizeImage
img = img.astype(np.float32, copy=False)
scale = 1.0 / 255.0
img *= scale
# Permute
img = img.transpose((2, 0, 1))
img = np.array((img, ))
# .astype('float32')
return img
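
Unlike infer.py above, this variant stretches the fixed 240x320 frame straight to 320x320 (keep_ratio=0) and scales pixels to [0, 1]; a quick shape check, assuming the model directory is present:

import numpy as np
from infer_new import Yolo_model_infer

infer = Yolo_model_infer()
frame = np.zeros((240, 320, 3), dtype=np.uint8)  # dummy camera-sized frame
batch = infer.preprocess(frame)
print(batch.shape, batch.dtype)                  # (1, 3, 320, 320) float32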

yolo_server/visualize.py Normal file

@@ -0,0 +1,649 @@
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import os
import cv2
import math
import numpy as np
import PIL
from PIL import Image, ImageDraw, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
def imagedraw_textsize_c(draw, text):
if int(PIL.__version__.split('.')[0]) < 10:
tw, th = draw.textsize(text)
else:
left, top, right, bottom = draw.textbbox((0, 0), text)
tw, th = right - left, bottom - top
return tw, th
def visualize_box_mask(im, results, labels, threshold=0.5):
"""
Args:
im (str/np.ndarray): path of image/np.ndarray read by cv2
results (dict): include 'boxes': np.ndarray: shape:[N,6], N: number of box,
matrix element: [class, score, x_min, y_min, x_max, y_max]
MaskRCNN's results include 'masks': np.ndarray:
shape:[N, im_h, im_w]
labels (list): labels:['class1', ..., 'classn']
threshold (float): Threshold of score.
Returns:
im (PIL.Image.Image): visualized image
"""
if isinstance(im, str):
im = Image.open(im).convert('RGB')
elif isinstance(im, np.ndarray):
im = Image.fromarray(im)
# if 'masks' in results and 'boxes' in results and len(results['boxes']) > 0:
# im = draw_mask(
# im, results['boxes'], results['masks'], labels, threshold=threshold)
# if 'boxes' in results and len(results['boxes']) > 0:
im = draw_box(im, results, labels, threshold=threshold)
# if 'segm' in results:
# im = draw_segm(
# im,
# results['segm'],
# results['label'],
# results['score'],
# labels,
# threshold=threshold)
return im
def get_color_map_list(num_classes):
"""
Args:
num_classes (int): number of class
Returns:
color_map (list): RGB color list
"""
color_map = num_classes * [0, 0, 0]
for i in range(0, num_classes):
j = 0
lab = i
while lab:
color_map[i * 3] |= (((lab >> 0) & 1) << (7 - j))
color_map[i * 3 + 1] |= (((lab >> 1) & 1) << (7 - j))
color_map[i * 3 + 2] |= (((lab >> 2) & 1) << (7 - j))
j += 1
lab >>= 3
color_map = [color_map[i:i + 3] for i in range(0, len(color_map), 3)]
return color_map
def draw_mask(im, np_boxes, np_masks, labels, threshold=0.5):
"""
Args:
im (PIL.Image.Image): PIL image
np_boxes (np.ndarray): shape:[N,6], N: number of box,
matrix element: [class, score, x_min, y_min, x_max, y_max]
np_masks (np.ndarray): shape:[N, im_h, im_w]
labels (list): labels:['class1', ..., 'classn']
threshold (float): threshold of mask
Returns:
im (PIL.Image.Image): visualized image
"""
color_list = get_color_map_list(len(labels))
w_ratio = 0.4
alpha = 0.7
im = np.array(im).astype('float32')
clsid2color = {}
expect_boxes = (np_boxes[:, 1] > threshold) & (np_boxes[:, 0] > -1)
np_boxes = np_boxes[expect_boxes, :]
np_masks = np_masks[expect_boxes, :, :]
im_h, im_w = im.shape[:2]
np_masks = np_masks[:, :im_h, :im_w]
for i in range(len(np_masks)):
clsid, score = int(np_boxes[i][0]), np_boxes[i][1]
mask = np_masks[i]
if clsid not in clsid2color:
clsid2color[clsid] = color_list[clsid]
color_mask = clsid2color[clsid]
for c in range(3):
color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio * 255
idx = np.nonzero(mask)
color_mask = np.array(color_mask)
im[idx[0], idx[1], :] *= 1.0 - alpha
im[idx[0], idx[1], :] += alpha * color_mask
return Image.fromarray(im.astype('uint8'))
def draw_box(im, np_boxes, labels, threshold=0.5):
"""
Args:
im (PIL.Image.Image): PIL image
np_boxes (np.ndarray): shape:[N,6], N: number of box,
matrix element: [class, score, x_min, y_min, x_max, y_max]
labels (list): labels:['class1', ..., 'classn']
threshold (float): threshold of box
Returns:
im (PIL.Image.Image): visualized image
"""
draw_thickness = min(im.size) // 320
draw = ImageDraw.Draw(im)
clsid2color = {}
color_list = get_color_map_list(len(labels))
expect_boxes = (np_boxes[:, 1] > threshold) & (np_boxes[:, 0] > -1)
np_boxes = np_boxes[expect_boxes, :]
for dt in np_boxes:
clsid, bbox, score = int(dt[0]), dt[2:], dt[1]
if clsid not in clsid2color:
clsid2color[clsid] = color_list[clsid]
color = tuple(clsid2color[clsid])
if len(bbox) == 4:
xmin, ymin, xmax, ymax = bbox
# print('class_id:{:d}, confidence:{:.4f}, left_top:[{:.2f},{:.2f}],'
# 'right_bottom:[{:.2f},{:.2f}]'.format(
# int(clsid), score, xmin, ymin, xmax, ymax))
# draw bbox
draw.line(
[(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin),
(xmin, ymin)],
width=draw_thickness,
fill=color)
elif len(bbox) == 8:
x1, y1, x2, y2, x3, y3, x4, y4 = bbox
draw.line(
[(x1, y1), (x2, y2), (x3, y3), (x4, y4), (x1, y1)],
width=2,
fill=color)
xmin = min(x1, x2, x3, x4)
ymin = min(y1, y2, y3, y4)
# draw label
text = "{} {} {:.4f}".format(clsid, labels[clsid], score)
tw, th = imagedraw_textsize_c(draw, text)
draw.rectangle(
[(xmin + 1, ymin - th), (xmin + tw + 1, ymin)], fill=color)
draw.text((xmin + 1, ymin - th), text, fill=(255, 255, 255))
return im
def draw_segm(im,
np_segms,
np_label,
np_score,
labels,
threshold=0.5,
alpha=0.7):
"""
Draw segmentation on image
"""
mask_color_id = 0
w_ratio = .4
color_list = get_color_map_list(len(labels))
im = np.array(im).astype('float32')
clsid2color = {}
np_segms = np_segms.astype(np.uint8)
for i in range(np_segms.shape[0]):
mask, score, clsid = np_segms[i], np_score[i], np_label[i]
if score < threshold:
continue
if clsid not in clsid2color:
clsid2color[clsid] = color_list[clsid]
color_mask = clsid2color[clsid]
for c in range(3):
color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio * 255
idx = np.nonzero(mask)
color_mask = np.array(color_mask)
idx0 = np.minimum(idx[0], im.shape[0] - 1)
idx1 = np.minimum(idx[1], im.shape[1] - 1)
im[idx0, idx1, :] *= 1.0 - alpha
im[idx0, idx1, :] += alpha * color_mask
sum_x = np.sum(mask, axis=0)
x = np.where(sum_x > 0.5)[0]
sum_y = np.sum(mask, axis=1)
y = np.where(sum_y > 0.5)[0]
x0, x1, y0, y1 = x[0], x[-1], y[0], y[-1]
cv2.rectangle(im, (x0, y0), (x1, y1),
tuple(color_mask.astype('int32').tolist()), 1)
bbox_text = '%s %.2f' % (labels[clsid], score)
t_size = cv2.getTextSize(bbox_text, 0, 0.3, thickness=1)[0]
cv2.rectangle(im, (x0, y0), (x0 + t_size[0], y0 - t_size[1] - 3),
tuple(color_mask.astype('int32').tolist()), -1)
cv2.putText(
im,
bbox_text, (x0, y0 - 2),
cv2.FONT_HERSHEY_SIMPLEX,
0.3, (0, 0, 0),
1,
lineType=cv2.LINE_AA)
return Image.fromarray(im.astype('uint8'))
def get_color(idx):
idx = idx * 3
color = ((37 * idx) % 255, (17 * idx) % 255, (29 * idx) % 255)
return color
def visualize_pose(imgfile,
results,
visual_thresh=0.6,
save_name='pose.jpg',
save_dir='output',
returnimg=False,
ids=None):
try:
import matplotlib.pyplot as plt
import matplotlib
plt.switch_backend('agg')
except Exception as e:
print('Matplotlib not found, please install matplotlib, '
'for example: `pip install matplotlib`.')
raise e
skeletons, scores = results['keypoint']
skeletons = np.array(skeletons)
kpt_nums = 17
if len(skeletons) > 0:
kpt_nums = skeletons.shape[1]
if kpt_nums == 17: #plot coco keypoint
EDGES = [(0, 1), (0, 2), (1, 3), (2, 4), (3, 5), (4, 6), (5, 7), (6, 8),
(7, 9), (8, 10), (5, 11), (6, 12), (11, 13), (12, 14),
(13, 15), (14, 16), (11, 12)]
else: #plot mpii keypoint
EDGES = [(0, 1), (1, 2), (3, 4), (4, 5), (2, 6), (3, 6), (6, 7), (7, 8),
(8, 9), (10, 11), (11, 12), (13, 14), (14, 15), (8, 12),
(8, 13)]
NUM_EDGES = len(EDGES)
colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \
[0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \
[170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
cmap = matplotlib.cm.get_cmap('hsv')
plt.figure()
img = cv2.imread(imgfile) if type(imgfile) == str else imgfile
color_set = results['colors'] if 'colors' in results else None
if 'bbox' in results and ids is None:
bboxs = results['bbox']
for j, rect in enumerate(bboxs):
xmin, ymin, xmax, ymax = rect
color = colors[0] if color_set is None else colors[color_set[j] %
len(colors)]
cv2.rectangle(img, (xmin, ymin), (xmax, ymax), color, 1)
canvas = img.copy()
for i in range(kpt_nums):
for j in range(len(skeletons)):
if skeletons[j][i, 2] < visual_thresh:
continue
if ids is None:
color = colors[i] if color_set is None else colors[color_set[j]
%
len(colors)]
else:
color = get_color(ids[j])
cv2.circle(
canvas,
tuple(skeletons[j][i, 0:2].astype('int32')),
2,
color,
thickness=-1)
to_plot = cv2.addWeighted(img, 0.3, canvas, 0.7, 0)
fig = matplotlib.pyplot.gcf()
stickwidth = 2
for i in range(NUM_EDGES):
for j in range(len(skeletons)):
edge = EDGES[i]
if skeletons[j][edge[0], 2] < visual_thresh or skeletons[j][edge[
1], 2] < visual_thresh:
continue
cur_canvas = canvas.copy()
X = [skeletons[j][edge[0], 1], skeletons[j][edge[1], 1]]
Y = [skeletons[j][edge[0], 0], skeletons[j][edge[1], 0]]
mX = np.mean(X)
mY = np.mean(Y)
length = ((X[0] - X[1])**2 + (Y[0] - Y[1])**2)**0.5
angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
polygon = cv2.ellipse2Poly((int(mY), int(mX)),
(int(length / 2), stickwidth),
int(angle), 0, 360, 1)
if ids is None:
color = colors[i] if color_set is None else colors[color_set[j]
%
len(colors)]
else:
color = get_color(ids[j])
cv2.fillConvexPoly(cur_canvas, polygon, color)
canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)
if returnimg:
return canvas
save_name = os.path.join(
save_dir, os.path.splitext(os.path.basename(imgfile))[0] + '_vis.jpg')
plt.imsave(save_name, canvas[:, :, ::-1])
print("keypoint visualize image saved to: " + save_name)
plt.close()
def visualize_attr(im, results, boxes=None, is_mtmct=False):
if isinstance(im, str):
im = Image.open(im)
im = np.ascontiguousarray(np.copy(im))
im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
else:
im = np.ascontiguousarray(np.copy(im))
im_h, im_w = im.shape[:2]
text_scale = max(0.5, im.shape[0] / 3000.)
text_thickness = 1
line_inter = im.shape[0] / 40.
for i, res in enumerate(results):
if boxes is None:
text_w = 3
text_h = 1
elif is_mtmct:
box = boxes[i] # multi camera, bbox shape is x,y, w,h
text_w = int(box[0]) + 3
text_h = int(box[1])
else:
box = boxes[i] # single camera, bbox shape is 0, 0, x,y, w,h
text_w = int(box[2]) + 3
text_h = int(box[3])
for text in res:
text_h += int(line_inter)
text_loc = (text_w, text_h)
cv2.putText(
im,
text,
text_loc,
cv2.FONT_ITALIC,
text_scale, (0, 255, 255),
thickness=text_thickness)
return im
def visualize_action(im,
mot_boxes,
action_visual_collector=None,
action_text="",
video_action_score=None,
video_action_text=""):
im = cv2.imread(im) if isinstance(im, str) else im
im_h, im_w = im.shape[:2]
text_scale = max(1, im.shape[1] / 400.)
text_thickness = 2
if action_visual_collector:
id_action_dict = {}
for collector, action_type in zip(action_visual_collector, action_text):
id_detected = collector.get_visualize_ids()
for pid in id_detected:
id_action_dict[pid] = id_action_dict.get(pid, [])
id_action_dict[pid].append(action_type)
for mot_box in mot_boxes:
# mot_box is a format with [mot_id, class, score, xmin, ymin, w, h]
if mot_box[0] in id_action_dict:
text_position = (int(mot_box[3] + mot_box[5] * 0.75),
int(mot_box[4] - 10))
display_text = ', '.join(id_action_dict[mot_box[0]])
cv2.putText(im, display_text, text_position,
cv2.FONT_HERSHEY_PLAIN, text_scale, (0, 0, 255), 2)
if video_action_score:
cv2.putText(
im,
video_action_text + ': %.2f' % video_action_score,
(int(im_w / 2), int(15 * text_scale) + 5),
cv2.FONT_ITALIC,
text_scale, (0, 0, 255),
thickness=text_thickness)
return im
def visualize_vehicleplate(im, results, boxes=None):
if isinstance(im, str):
im = Image.open(im)
im = np.ascontiguousarray(np.copy(im))
im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
else:
im = np.ascontiguousarray(np.copy(im))
im_h, im_w = im.shape[:2]
text_scale = max(1.0, im.shape[0] / 400.)
text_thickness = 2
line_inter = im.shape[0] / 40.
for i, res in enumerate(results):
if boxes is None:
text_w = 3
text_h = 1
else:
box = boxes[i]
text = res
if text == "":
continue
text_w = int(box[2])
text_h = int(box[5] + box[3])
text_loc = (text_w, text_h)
cv2.putText(
im,
"LP: " + text,
text_loc,
cv2.FONT_ITALIC,
text_scale, (0, 255, 255),
thickness=text_thickness)
return im
def draw_press_box_lanes(im, np_boxes, labels, threshold=0.5):
"""
Args:
im (PIL.Image.Image): PIL image
np_boxes (np.ndarray): shape:[N,6], N: number of box,
matrix element: [class, score, x_min, y_min, x_max, y_max]
labels (list): labels:['class1', ..., 'classn']
threshold (float): threshold of box
Returns:
im (PIL.Image.Image): visualized image
"""
if isinstance(im, str):
im = Image.open(im).convert('RGB')
elif isinstance(im, np.ndarray):
im = Image.fromarray(im)
draw_thickness = min(im.size) // 320
draw = ImageDraw.Draw(im)
clsid2color = {}
color_list = get_color_map_list(len(labels))
if np_boxes.shape[1] == 7:
np_boxes = np_boxes[:, 1:]
expect_boxes = (np_boxes[:, 1] > threshold) & (np_boxes[:, 0] > -1)
np_boxes = np_boxes[expect_boxes, :]
for dt in np_boxes:
clsid, bbox, score = int(dt[0]), dt[2:], dt[1]
if clsid not in clsid2color:
clsid2color[clsid] = color_list[clsid]
color = tuple(clsid2color[clsid])
if len(bbox) == 4:
xmin, ymin, xmax, ymax = bbox
# draw bbox
draw.line(
[(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin),
(xmin, ymin)],
width=draw_thickness,
fill=(0, 0, 255))
elif len(bbox) == 8:
x1, y1, x2, y2, x3, y3, x4, y4 = bbox
draw.line(
[(x1, y1), (x2, y2), (x3, y3), (x4, y4), (x1, y1)],
width=2,
fill=color)
xmin = min(x1, x2, x3, x4)
ymin = min(y1, y2, y3, y4)
# draw label
text = "{}".format(labels[clsid])
tw, th = imagedraw_textsize_c(draw, text)
draw.rectangle(
[(xmin + 1, ymax - th), (xmin + tw + 1, ymax)], fill=color)
draw.text((xmin + 1, ymax - th), text, fill=(0, 0, 255))
return im
def visualize_vehiclepress(im, results, threshold=0.5):
results = np.array(results)
labels = ['violation']
im = draw_press_box_lanes(im, results, labels, threshold=threshold)
return im
def visualize_lane(im, lanes):
if isinstance(im, str):
im = Image.open(im).convert('RGB')
elif isinstance(im, np.ndarray):
im = Image.fromarray(im)
draw_thickness = min(im.size) // 320
draw = ImageDraw.Draw(im)
if len(lanes) > 0:
for lane in lanes:
draw.line(
[(lane[0], lane[1]), (lane[2], lane[3])],
width=draw_thickness,
fill=(0, 0, 255))
return im
def visualize_vehicle_retrograde(im, mot_res, vehicle_retrograde_res):
if isinstance(im, str):
im = Image.open(im).convert('RGB')
elif isinstance(im, np.ndarray):
im = Image.fromarray(im)
draw_thickness = min(im.size) // 320
draw = ImageDraw.Draw(im)
lane = vehicle_retrograde_res['fence_line']
if lane is not None:
draw.line(
[(lane[0], lane[1]), (lane[2], lane[3])],
width=draw_thickness,
fill=(0, 0, 0))
mot_id = vehicle_retrograde_res['output']
if mot_id is None or len(mot_id) == 0:
return im
if mot_res is None:
return im
np_boxes = mot_res['boxes']
if np_boxes is not None:
for dt in np_boxes:
if dt[0] not in mot_id:
continue
bbox = dt[3:]
if len(bbox) == 4:
xmin, ymin, xmax, ymax = bbox
# draw bbox
draw.line(
[(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin),
(xmin, ymin)],
width=draw_thickness,
fill=(0, 255, 0))
# draw label
text = "retrograde"
tw, th = imagedraw_textsize_c(draw, text)
draw.rectangle(
[(xmax + 1, ymin - th), (xmax + tw + 1, ymin)],
fill=(0, 255, 0))
draw.text((xmax + 1, ymin - th), text, fill=(0, 255, 0))
return im
COLORS = [
(255, 0, 0),
(0, 255, 0),
(0, 0, 255),
(255, 255, 0),
(255, 0, 255),
(0, 255, 255),
(128, 255, 0),
(255, 128, 0),
(128, 0, 255),
(255, 0, 128),
(0, 128, 255),
(0, 255, 128),
(128, 255, 255),
(255, 128, 255),
(255, 255, 128),
(60, 180, 0),
(180, 60, 0),
(0, 60, 180),
(0, 180, 60),
(60, 0, 180),
(180, 0, 60),
(255, 0, 0),
(0, 255, 0),
(0, 0, 255),
(255, 255, 0),
(255, 0, 255),
(0, 255, 255),
(128, 255, 0),
(255, 128, 0),
(128, 0, 255),
]
def imshow_lanes(img, lanes, show=False, out_file=None, width=4):
lanes_xys = []
for _, lane in enumerate(lanes):
xys = []
for x, y in lane:
if x <= 0 or y <= 0:
continue
x, y = int(x), int(y)
xys.append((x, y))
lanes_xys.append(xys)
lanes_xys.sort(key=lambda xys: xys[0][0] if len(xys) > 0 else 0)
for idx, xys in enumerate(lanes_xys):
for i in range(1, len(xys)):
cv2.line(img, xys[i - 1], xys[i], COLORS[idx], thickness=width)
if show:
cv2.imshow('view', img)
cv2.waitKey(0)
if out_file:
if not os.path.exists(os.path.dirname(out_file)):
os.makedirs(os.path.dirname(out_file))
cv2.imwrite(out_file, img)


@@ -0,0 +1,151 @@
import toml
import threading
from loguru import logger
import zmq
import numpy as np
import cv2
import time
# Custom imports
from infer_new import Yolo_model_infer
from visualize import visualize_box_mask
# Initialize locks
lock1 = threading.Lock()
lock2 = threading.Lock()
lock3 = threading.Lock()
# Global variables
src_camera_id = 1
response = {'code': 0, 'data': []}
frame = None
start = False
exit_event = threading.Event()
context2 = zmq.Context()
socket_server = context2.socket(zmq.PUB)
socket_server.setsockopt(zmq.SNDHWM,10)
socket_server.bind("tcp://*:7777")
labels = [
"tplatform", "tower", "sign", "shelter", "hospital", "basket", "base",
"Yball", "Spiller", "Rmark", "Rblock", "Rball", "Mpiller",
"Lpiller", "Lmark", "Bblock", "Bball"
]
# Handle server response data
def server_resp(yolo_infer_port):
logger.info("yolo server thread init success")
global response
global src_camera_id
context = zmq.Context()
# Start server
socket = context.socket(zmq.REP)
socket.bind(f"tcp://*:{yolo_infer_port}")
logger.info("yolo infer server init success")
while not exit_event.is_set():
try:
message = socket.recv_string()
# Sending '1' or '2' switches the camera; an empty string requests inference data
if message != '':
with lock1:
logger.error(message)
src_camera_id = int(message)
logger.info("switch camera")
socket.send_pyobj(response)
else:
with lock2:
socket.send_pyobj(response)
response['data'] = np.array([])
except zmq.Again:
time.sleep(0.01)
socket.close()
context.term()
# Handle camera data
def camera_resp(camera1_port, camera2_port):
global frame
global src_camera_id
global start
context = zmq.Context()
camera1_socket = context.socket(zmq.REQ)
camera1_socket.connect(f"tcp://localhost:{camera1_port}")
logger.info("connect camera1 success")
context1 = zmq.Context()
camera2_socket = context1.socket(zmq.REQ)
camera2_socket.connect(f"tcp://localhost:{camera2_port}")
logger.info("connect camera2 success")
while not exit_event.is_set():
with lock1:
try:
if src_camera_id == 1:
camera1_socket.send_string("")
message = camera1_socket.recv()
else:
camera2_socket.send_string("")
message = camera2_socket.recv()
np_array = np.frombuffer(message, dtype=np.uint8)
with lock3:
frame = cv2.imdecode(np_array, cv2.IMREAD_COLOR)
start = True
except Exception:
time.sleep(0.01)
camera1_socket.close()
camera2_socket.close()
context.term()
context1.term()
if __name__ == "__main__":
cfg = toml.load('../cfg_infer_server.toml')
# Configure log output
logger.add(cfg['debug']['logger_filename'], format=cfg['debug']['logger_format'], retention=5, level="INFO")
# Initialize YOLO inference model
predictor = Yolo_model_infer()
logger.info("yolo model load success")
# Start threads
mythread1 = threading.Thread(target=server_resp, args=(cfg['server']['yolo_infer_port'],), daemon=True)
mythread2 = threading.Thread(target=camera_resp, args=(cfg['camera']['camera1_port'], cfg['camera']['front_camera_port']), daemon=True)
mythread1.start()
mythread2.start()
while not exit_event.is_set():
with lock3:
if start:
result = predictor.infer(frame)
img = visualize_box_mask(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR),result,labels)
showim = np.array(img)
socket_server.send_pyobj(showim)
with lock2:
response['data'] = result
# time.sleep(0.01)
if cv2.waitKey(1) == 27:
break
logger.info("Interrupt received, stopping...")
exit_event.set()
mythread1.join()
mythread2.join()
logger.info("yolo infer server exit")
# try:
# while not exit_event.is_set():
# with lock3:
# if start:
# result = predictor.infer(frame)
# img = visualize_box_mask(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR),result,labels)
# showim = np.array(img)
# socket_server.send_pyobj(showim)
# with lock2:
# response['data'] = result
# time.sleep(0.01)
# except KeyboardInterrupt:
# logger.info("Interrupt received, stopping...")
# exit_event.set()
# mythread1.join()
# mythread2.join()
# logger.info("yolo infer server exit")


@@ -0,0 +1,101 @@
import toml
import threading
from loguru import logger
import zmq
from infer_new import Yolo_model_infer
import numpy as np
import cv2
import time
lock1 = threading.Lock()
lock2 = threading.Lock()
lock3 = threading.Lock()
src_camera_id = 1
response = {'code': 0, 'data': []}
frame = None
start = False
# handle server response data
def server_resp(yolo_infer_port):
logger.info("yolo server thread init success")
global response
global src_camera_id
context = zmq.Context()
# start server
socket = context.socket(zmq.REP)
socket.bind(f"tcp://*:{yolo_infer_port}")
logger.info("yolo infer server init success")
while True:
message = socket.recv_string()
# sending '1' or '2' switches the camera; an empty string requests inference data
if message != '':
with lock1:
src_camera_id = int(message)
socket.send_pyobj({'code': 0, 'data': []})
else:
with lock2:
socket.send_pyobj(response)
# handle camera data
def camera_resp(camera1_port, camera2_port):
global frame
global src_camera_id
global start
context = zmq.Context()
camera1_socket = context.socket(zmq.SUB)
camera1_socket.connect(f"tcp://localhost:{camera1_port}")
camera1_socket.setsockopt_string(zmq.SUBSCRIBE, "")
logger.info("connect camera1 success")
context1 = zmq.Context()
camera2_socket = context1.socket(zmq.SUB)
camera2_socket.connect(f"tcp://localhost:{camera2_port}")
camera2_socket.setsockopt_string(zmq.SUBSCRIBE, "")
logger.info("connect camera2 success")
while True:
logger.info('111')
with lock1:
if src_camera_id == 1:
message = camera1_socket.recv()
else:
message = camera2_socket.recv()
# logger.info('111')
np_array = np.frombuffer(message, dtype=np.uint8)
with lock3:
frame = cv2.imdecode(np_array, cv2.IMREAD_COLOR)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
start = True
if __name__ == "__main__":
cfg = toml.load('../cfg_infer_server.toml')
# configure log output
logger.add(cfg['debug']['logger_filename'], format=cfg['debug']['logger_format'], retention = 5, level="INFO")
# initialize the paddle predictor
predictor = Yolo_model_infer()
logger.info("yolo model load success")
# start threads
mythread1 = threading.Thread(target=server_resp,
args=(cfg['server']['yolo_infer_port'],),
daemon=True)
mythread2 = threading.Thread(target=camera_resp,
args=(
cfg['camera']['camera1_port'],
cfg['camera']['camera2_port']
),
daemon=True)
mythread1.start()
mythread2.start()
while True:
with lock3:
if start:
result = predictor.infer(frame)
with lock2:
response['data'] = result
mythread1.join()
mythread2.join()
logger.info("yolo infer server exit")


@@ -0,0 +1,84 @@
# from infer import Yolo_model_infer
# import cv2
# infer = Yolo_model_infer()
# image = cv2.imread("ball_0094.png")
# image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# results = infer.infer(image)
# expect_boxes = (results[:, 1] > 0.5) & (results[:, 0] > -1)
# np_boxes = results[expect_boxes, :]
# print(np_boxes)
from infer_new import Yolo_model_infer
import cv2
from visualize import visualize_box_mask
import zmq
import numpy as np
from loguru import logger
import time
# infer = Yolo_model_infer()
# image = cv2.imread("20240108161722.jpg")
# image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# results = infer.infer(image)
# print(results)
# expect_boxes = (results[:, 1] > 0.5) & (results[:, 0] > -1)
# np_boxes = results[expect_boxes, :]
# print(np_boxes)
# context = zmq.Context()
# camera1_socket = context.socket(zmq.SUB)
# hwm = 5
# camera1_socket.setsockopt(zmq.RCVHWM, hwm)
# camera1_socket.connect("tcp://localhost:5556")
# camera1_socket.setsockopt_string(zmq.SUBSCRIBE, "")
# camera1_socket.set_hwm(1)
context1 = zmq.Context()
socket_server = context1.socket(zmq.PUB)
socket_server.bind("tcp://*:7777")
labels = [
"tower", "sign", "shelter", "hospital", "basket", "base",
"Yball", "Spiller", "Rmark", "Rblock", "Rball", "Mpiller",
"Lpiller", "Lmark", "Bblock", "Bball"
]
infer = Yolo_model_infer()
cap = cv2.VideoCapture(2)
cap.set(cv2.CAP_PROP_FRAME_WIDTH,320)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT,240)
ret = True
while True:
# message = camera1_socket.recv()
# np_array = np.frombuffer(message, dtype=np.uint8)
# frame = cv2.imdecode(np_array, cv2.IMREAD_COLOR)
ret, frame = cap.read()
if ret:
results = infer.infer(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
# logger.info("111")
img = visualize_box_mask(frame,results,labels)
showim = np.array(img)
# cv2.imshow("Received", showim)
_, encode_frame = cv2.imencode(".jpg", showim)
socket_server.send(encode_frame.tobytes())
# if cv2.waitKey(1) == 27:
# break
# image = cv2.imread("20240525_170248.jpg")
# image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# results = infer.infer(image)
# # expect_boxes = (results[:, 1] > 0.5) & (results[:, 0] > -1)
# # np_boxes = results[expect_boxes, :]
# # print(np_boxes)
# # img = visualize_box_mask(image,results,labels)
# # img.save('20240525_170248_box.jpg', quality=95)
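
This test script publishes JPEG bytes on port 7777, so a matching viewer, sketched along the lines of the SUB client earlier in this commit, would be:

import zmq
import numpy as np
import cv2

context = zmq.Context()
sock = context.socket(zmq.SUB)
sock.connect("tcp://localhost:7777")
sock.setsockopt_string(zmq.SUBSCRIBE, "")
while True:
    buf = sock.recv()                # JPEG bytes from the PUB above
    frame = cv2.imdecode(np.frombuffer(buf, np.uint8), cv2.IMREAD_COLOR)
    cv2.imshow("yolo test stream", frame)
    if cv2.waitKey(1) == 27:         # Esc to quit
        break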