您现在的位置是:首页 > 技术教程 正文

Yolov5(v5.0) + pyqt5界面设计

admin 阅读: 2024-03-21
后台-插件-广告管理-内容页头部广告(手机)

1.下载安装pyqt5工具包以及配置ui界面开发环境

  1. pip install PyQt5
  2. pip install PyQt5-tools

2.点击File->Settings->External Tools进行工具添加,依次进行Qt Designer、PyUIC环境配置.

 2.1 添加QtDesigner

 Qt Designer 是通过拖拽的方式放置控件,并实时查看控件效果进行快速UI设计

位置内容
name可以随便命名,只要便于记忆就可以,本次采取通用命名:Qt Designer
Program:designer.exe路径,一般在python中.\Library\bin\designer.exe
Arguments固定格式,直接复制也可:$FileDir$\$FileName$
Working directory固定格式,直接复制也可:$FileDir$

2.2 添加PyUIC

 PyUIC主要是把Qt Designer生成的.ui文件转换成.py文件

位置内容
name可以随便命名,只要便于记忆就可以,本次采取通用命名:PyUiC
Program:python.exe路径,一般在python安装根目录中
Arguments固定格式,直接复制也可:-m PyQt5.uic.pyuic $FileName$ -o $FileNameWithoutExtension$.py
Working directory固定格式,直接复制也可:$FileDir$

3. QtDesigner建立图形化窗口界面 

3.1 在根目录下新建UI文件夹进行UI文件的专门存储,点击Tools->External Tools->Qt Designer进行图形界面创建.

 3.2 创建一个Main Window窗口

3.3 完成基本界面开发后,保存其为Detect.ui,放置在UI文件夹下,利用PyUic工具将其转化为Detect.py文件。

转换完成后,进行相应的槽函数的建立与修改,此处建议直接看我后面给出的demo。

4. demo

使用时只需将parser.add_argument中的'--weights'设为相应的权重文件路径即可。

  1. # -*- coding: utf-8 -*-
  2. # Form implementation generated from reading ui file '.\project.ui'
  3. #
  4. # Created by: PyQt5 UI code generator 5.9.2
  5. #
  6. # WARNING! All changes made in this file will be lost!
  7. import sys
  8. import cv2
  9. import argparse
  10. import random
  11. import torch
  12. import numpy as np
  13. import torch.backends.cudnn as cudnn
  14. from PyQt5 import QtCore, QtGui, QtWidgets
  15. from utils.torch_utils import select_device
  16. from models.experimental import attempt_load
  17. from utils.general import check_img_size, non_max_suppression, scale_coords
  18. from utils.datasets import letterbox
  19. from utils.plots import plot_one_box
  20. class Ui_MainWindow(QtWidgets.QMainWindow):
  21. def __init__(self, parent=None):
  22. super(Ui_MainWindow, self).__init__(parent)
  23. self.timer_video = QtCore.QTimer()
  24. self.setupUi(self)
  25. self.init_logo()
  26. self.init_slots()
  27. self.cap = cv2.VideoCapture()
  28. self.out = None
  29. # self.out = cv2.VideoWriter('prediction.avi', cv2.VideoWriter_fourcc(*'XVID'), 20.0, (640, 480))
  30. parser = argparse.ArgumentParser()
  31. parser.add_argument('--weights', nargs='+', type=str,
  32. default='weights/best.pt', help='model.pt path(s)')
  33. # file/folder, 0 for webcam
  34. parser.add_argument('--source', type=str,
  35. default='data/images', help='source')
  36. parser.add_argument('--img-size', type=int,
  37. default=640, help='inference size (pixels)')
  38. parser.add_argument('--conf-thres', type=float,
  39. default=0.25, help='object confidence threshold')
  40. parser.add_argument('--iou-thres', type=float,
  41. default=0.45, help='IOU threshold for NMS')
  42. parser.add_argument('--device', default='',
  43. help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
  44. parser.add_argument(
  45. '--view-img', action='store_true', help='display results')
  46. parser.add_argument('--save-txt', action='store_true',
  47. help='save results to *.txt')
  48. parser.add_argument('--save-conf', action='store_true',
  49. help='save confidences in --save-txt labels')
  50. parser.add_argument('--nosave', action='store_true',
  51. help='do not save images/videos')
  52. parser.add_argument('--classes', nargs='+', type=int,
  53. help='filter by class: --class 0, or --class 0 2 3')
  54. parser.add_argument(
  55. '--agnostic-nms', action='store_true', help='class-agnostic NMS')
  56. parser.add_argument('--augment', action='store_true',
  57. help='augmented inference')
  58. parser.add_argument('--update', action='store_true',
  59. help='update all models')
  60. parser.add_argument('--project', default='runs/detect',
  61. help='save results to project/name')
  62. parser.add_argument('--name', default='exp',
  63. help='save results to project/name')
  64. parser.add_argument('--exist-ok', action='store_true',
  65. help='existing project/name ok, do not increment')
  66. self.opt = parser.parse_args()
  67. print(self.opt)
  68. source, weights, view_img, save_txt, imgsz = self.opt.source, self.opt.weights, self.opt.view_img, self.opt.save_txt, self.opt.img_size
  69. self.device = select_device(self.opt.device)
  70. self.half = self.device.type != 'cpu' # half precision only supported on CUDA
  71. cudnn.benchmark = True
  72. # Load model
  73. self.model = attempt_load(
  74. weights, map_location=self.device) # load FP32 model
  75. stride = int(self.model.stride.max()) # model stride
  76. self.imgsz = check_img_size(imgsz, s=stride) # check img_size
  77. if self.half:
  78. self.model.half() # to FP16
  79. # Get names and colors
  80. self.names = self.model.module.names if hasattr(
  81. self.model, 'module') else self.model.names
  82. self.colors = [[random.randint(0, 255)
  83. for _ in range(3)] for _ in self.names]
  84. def setupUi(self, MainWindow):
  85. MainWindow.setObjectName("MainWindow")
  86. MainWindow.resize(800, 600)
  87. self.centralwidget = QtWidgets.QWidget(MainWindow)
  88. self.centralwidget.setObjectName("centralwidget")
  89. self.pushButton = QtWidgets.QPushButton(self.centralwidget)
  90. self.pushButton.setGeometry(QtCore.QRect(20, 130, 112, 34))
  91. self.pushButton.setObjectName("pushButton")
  92. self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
  93. self.pushButton_2.setGeometry(QtCore.QRect(20, 220, 112, 34))
  94. self.pushButton_2.setObjectName("pushButton_2")
  95. self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
  96. self.pushButton_3.setGeometry(QtCore.QRect(20, 300, 112, 34))
  97. self.pushButton_3.setObjectName("pushButton_3")
  98. self.groupBox = QtWidgets.QGroupBox(self.centralwidget)
  99. self.groupBox.setGeometry(QtCore.QRect(160, 90, 611, 411))
  100. self.groupBox.setObjectName("groupBox")
  101. self.label = QtWidgets.QLabel(self.groupBox)
  102. self.label.setGeometry(QtCore.QRect(10, 40, 561, 331))
  103. self.label.setObjectName("label")
  104. self.textEdit = QtWidgets.QTextEdit(self.centralwidget)
  105. self.textEdit.setGeometry(QtCore.QRect(150, 10, 471, 51))
  106. self.textEdit.setObjectName("textEdit")
  107. MainWindow.setCentralWidget(self.centralwidget)
  108. self.menubar = QtWidgets.QMenuBar(MainWindow)
  109. self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 30))
  110. self.menubar.setObjectName("menubar")
  111. MainWindow.setMenuBar(self.menubar)
  112. self.statusbar = QtWidgets.QStatusBar(MainWindow)
  113. self.statusbar.setObjectName("statusbar")
  114. MainWindow.setStatusBar(self.statusbar)
  115. self.retranslateUi(MainWindow)
  116. QtCore.QMetaObject.connectSlotsByName(MainWindow)
  117. def retranslateUi(self, MainWindow):
  118. _translate = QtCore.QCoreApplication.translate
  119. MainWindow.setWindowTitle(_translate("MainWindow", "演示系统"))
  120. self.pushButton.setText(_translate("MainWindow", "图片检测"))
  121. self.pushButton_2.setText(_translate("MainWindow", "摄像头检测"))
  122. self.pushButton_3.setText(_translate("MainWindow", "视频检测"))
  123. self.groupBox.setTitle(_translate("MainWindow", "检测结果"))
  124. self.label.setText(_translate("MainWindow", "TextLabel"))
  125. self.textEdit.setHtml(_translate("MainWindow",
  126. "\n"
  127. "\n"
  128. "

    演示系统

    "
    ))
  129. def init_slots(self):
  130. self.pushButton.clicked.connect(self.button_image_open)
  131. self.pushButton_3.clicked.connect(self.button_video_open)
  132. self.pushButton_2.clicked.connect(self.button_camera_open)
  133. self.timer_video.timeout.connect(self.show_video_frame)
  134. def init_logo(self):
  135. pix = QtGui.QPixmap('wechat.jpg')
  136. self.label.setScaledContents(True)
  137. self.label.setPixmap(pix)
  138. def button_image_open(self):
  139. print('button_image_open')
  140. name_list = []
  141. img_name, _ = QtWidgets.QFileDialog.getOpenFileName(
  142. self, "打开图片", "", "*.jpg;;*.png;;All Files(*)")
  143. if not img_name:
  144. return
  145. img = cv2.imread(img_name)
  146. print(img_name)
  147. showimg = img
  148. with torch.no_grad():
  149. img = letterbox(img, new_shape=self.opt.img_size)[0]
  150. # Convert
  151. # BGR to RGB, to 3x416x416
  152. img = img[:, :, ::-1].transpose(2, 0, 1)
  153. img = np.ascontiguousarray(img)
  154. img = torch.from_numpy(img).to(self.device)
  155. img = img.half() if self.half else img.float() # uint8 to fp16/32
  156. img /= 255.0 # 0 - 255 to 0.0 - 1.0
  157. if img.ndimension() == 3:
  158. img = img.unsqueeze(0)
  159. # Inference
  160. pred = self.model(img, augment=self.opt.augment)[0]
  161. # Apply NMS
  162. pred = non_max_suppression(pred, self.opt.conf_thres, self.opt.iou_thres, classes=self.opt.classes,
  163. agnostic=self.opt.agnostic_nms)
  164. print(pred)
  165. # Process detections
  166. for i, det in enumerate(pred):
  167. if det is not None and len(det):
  168. # Rescale boxes from img_size to im0 size
  169. det[:, :4] = scale_coords(
  170. img.shape[2:], det[:, :4], showimg.shape).round()
  171. for *xyxy, conf, cls in reversed(det):
  172. label = '%s %.2f' % (self.names[int(cls)], conf)
  173. name_list.append(self.names[int(cls)])
  174. plot_one_box(xyxy, showimg, label=label,
  175. color=self.colors[int(cls)], line_thickness=2)
  176. cv2.imwrite('prediction.jpg', showimg)
  177. self.result = cv2.cvtColor(showimg, cv2.COLOR_BGR2BGRA)
  178. self.result = cv2.resize(
  179. self.result, (640, 480), interpolation=cv2.INTER_AREA)
  180. self.QtImg = QtGui.QImage(
  181. self.result.data, self.result.shape[1], self.result.shape[0], QtGui.QImage.Format_RGB32)
  182. self.label.setPixmap(QtGui.QPixmap.fromImage(self.QtImg))
  183. def button_video_open(self):
  184. video_name, _ = QtWidgets.QFileDialog.getOpenFileName(
  185. self, "打开视频", "", "*.mp4;;*.avi;;All Files(*)")
  186. if not video_name:
  187. return
  188. flag = self.cap.open(video_name)
  189. if flag == False:
  190. QtWidgets.QMessageBox.warning(
  191. self, u"Warning", u"打开视频失败", buttons=QtWidgets.QMessageBox.Ok, defaultButton=QtWidgets.QMessageBox.Ok)
  192. else:
  193. self.out = cv2.VideoWriter('prediction.avi', cv2.VideoWriter_fourcc(
  194. *'MJPG'), 20, (int(self.cap.get(3)), int(self.cap.get(4))))
  195. self.timer_video.start(30)
  196. self.pushButton_3.setDisabled(True)
  197. self.pushButton.setDisabled(True)
  198. self.pushButton_2.setDisabled(True)
  199. def button_camera_open(self):
  200. if not self.timer_video.isActive():
  201. # 默认使用第一个本地camera
  202. flag = self.cap.open(0)
  203. if flag == False:
  204. QtWidgets.QMessageBox.warning(
  205. self, u"Warning", u"打开摄像头失败", buttons=QtWidgets.QMessageBox.Ok,
  206. defaultButton=QtWidgets.QMessageBox.Ok)
  207. else:
  208. self.out = cv2.VideoWriter('prediction.avi', cv2.VideoWriter_fourcc(
  209. *'MJPG'), 20, (int(self.cap.get(3)), int(self.cap.get(4))))
  210. self.timer_video.start(30)
  211. self.pushButton_3.setDisabled(True)
  212. self.pushButton.setDisabled(True)
  213. self.pushButton_2.setText(u"关闭摄像头")
  214. else:
  215. self.timer_video.stop()
  216. self.cap.release()
  217. self.out.release()
  218. self.label.clear()
  219. self.init_logo()
  220. self.pushButton_3.setDisabled(False)
  221. self.pushButton.setDisabled(False)
  222. self.pushButton_2.setText(u"摄像头检测")
  223. def show_video_frame(self):
  224. name_list = []
  225. flag, img = self.cap.read()
  226. if img is not None:
  227. showimg = img
  228. with torch.no_grad():
  229. img = letterbox(img, new_shape=self.opt.img_size)[0]
  230. # Convert
  231. # BGR to RGB, to 3x416x416
  232. img = img[:, :, ::-1].transpose(2, 0, 1)
  233. img = np.ascontiguousarray(img)
  234. img = torch.from_numpy(img).to(self.device)
  235. img = img.half() if self.half else img.float() # uint8 to fp16/32
  236. img /= 255.0 # 0 - 255 to 0.0 - 1.0
  237. if img.ndimension() == 3:
  238. img = img.unsqueeze(0)
  239. # Inference
  240. pred = self.model(img, augment=self.opt.augment)[0]
  241. # Apply NMS
  242. pred = non_max_suppression(pred, self.opt.conf_thres, self.opt.iou_thres, classes=self.opt.classes,
  243. agnostic=self.opt.agnostic_nms)
  244. # Process detections
  245. for i, det in enumerate(pred): # detections per image
  246. if det is not None and len(det):
  247. # Rescale boxes from img_size to im0 size
  248. det[:, :4] = scale_coords(
  249. img.shape[2:], det[:, :4], showimg.shape).round()
  250. # Write results
  251. for *xyxy, conf, cls in reversed(det):
  252. label = '%s %.2f' % (self.names[int(cls)], conf)
  253. name_list.append(self.names[int(cls)])
  254. print(label)
  255. plot_one_box(
  256. xyxy, showimg, label=label, color=self.colors[int(cls)], line_thickness=2)
  257. self.out.write(showimg)
  258. show = cv2.resize(showimg, (640, 480))
  259. self.result = cv2.cvtColor(show, cv2.COLOR_BGR2RGB)
  260. showImage = QtGui.QImage(self.result.data, self.result.shape[1], self.result.shape[0],
  261. QtGui.QImage.Format_RGB888)
  262. self.label.setPixmap(QtGui.QPixmap.fromImage(showImage))
  263. else:
  264. self.timer_video.stop()
  265. self.cap.release()
  266. self.out.release()
  267. self.label.clear()
  268. self.pushButton_3.setDisabled(False)
  269. self.pushButton.setDisabled(False)
  270. self.pushButton_2.setDisabled(False)
  271. self.init_logo()
  272. if __name__ == '__main__':
  273. app = QtWidgets.QApplication(sys.argv)
  274. ui = Ui_MainWindow()
  275. ui.show()
  276. sys.exit(app.exec_())

5.添加背景图片

将demo中最后一段代码改为如下,其中background-image为背景图片地址。

  1. if __name__ == '__main__':
  2. stylesheet = """
  3. Ui_MainWindow {
  4. background-image: url("4K.jpg");
  5. background-repeat: no-repeat;
  6. background-position: center;
  7. }
  8. """
  9. app = QtWidgets.QApplication(sys.argv)
  10. app.setStyleSheet(stylesheet)
  11. ui = Ui_MainWindow()
  12. ui.show()
  13. sys.exit(app.exec_())

 

6.reference

http://t.csdn.cn/ZVtSKicon-default.png?t=M85Bhttp://t.csdn.cn/ZVtSKPyQt5系列教程(三)利用QtDesigner设计UI界面 - 迷途小书童的Note迷途小书童的Note (xugaoxiang.com)icon-default.png?t=M85Bhttps://xugaoxiang.com/2019/12/04/pyqt5-3-qtdesigner/ 

标签:
声明

1.本站遵循行业规范,任何转载的稿件都会明确标注作者和来源;2.本站的原创文章,请转载时务必注明文章作者和来源,不尊重原创的行为我们将追究责任;3.作者投稿可能会经我们编辑修改或补充。

在线投稿:投稿 站长QQ:1888636

后台-插件-广告管理-内容页尾部广告(手机)
关注我们

扫一扫关注我们,了解最新精彩内容

搜索
排行榜