[英]How do I run a video in a PyQt container?
在 PyQt 上的 QVideoWidget 容器中,您需要從計算機啟動一個視頻,通過 TensorFlow (openCV, cv2) 搜索對象。 問題是當按下按鈕時,視頻只顯示一幀而沒有其他內容。 可能是什么問題呢? 采用 PyCharm、Python 3.7 制造。
# Imports for the PyQt5 video-player example.
# BUG FIX: the original first line imported `uiс` — the final letter was a
# Cyrillic "с", not ASCII "c" — which raises ImportError before anything runs.
# Duplicated PyQt5 imports were also merged.
import os
import sys

import cv2
import numpy as np

from PyQt5 import QtCore, QtGui, QtWidgets, uic
from PyQt5.QtCore import QThread, Qt, pyqtSignal
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QLabel, QVBoxLayout
class ThreadOpenCV(QThread):
    """Worker thread that decodes a video file and emits frames as QImage.

    Fixes over the original (mis-indented) version:
    - ``cap.release()`` was executed inside the while-loop, so the stream
      was released right after the first frame — the reason only one
      frame ever appeared in the QLabel.
    - ``__init__`` ignored its ``source`` argument; it is now stored and used.
    - ``cv2.waitKey`` / ``cv2.destroyAllWindows`` were removed: no cv2
      window is ever created, so they did nothing useful here.
    """

    # Emitted once per decoded frame, ready for QPixmap.fromImage().
    changePixmap = pyqtSignal(QImage)

    def __init__(self, source):
        super().__init__()
        # Remember the video file name instead of discarding it.
        self.source = source

    def run(self):
        # Resolve the video relative to the current working directory.
        path_to_video = os.path.join(os.getcwd(), self.source)
        cap = cv2.VideoCapture(path_to_video)
        while True:
            ret, frame = cap.read()
            if not ret:
                # End of stream: stop reading instead of spinning forever.
                break
            # OpenCV delivers BGR; Qt's Format_RGB888 expects RGB.
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            h, w, ch = rgb.shape
            image = QImage(rgb.data, w, h, ch * w, QImage.Format_RGB888)
            # scaled() returns a copy, so the numpy buffer may be freed safely.
            self.changePixmap.emit(image.scaled(640, 480, Qt.KeepAspectRatio))
        # Release only AFTER the loop — the original released inside it.
        cap.release()
class Widget(QtWidgets.QMainWindow):
    """Main window: loads the Designer UI and shows video frames in a QLabel."""

    def __init__(self):
        super().__init__()
        uic.loadUi('fire_detection.ui', self)
        self.show()
        # Put a QLabel inside the `widget` container declared in the .ui file;
        # it will receive the decoded frames.
        self.label_video = QLabel()
        video_layout = QVBoxLayout()
        video_layout.addWidget(self.label_video)
        self.widget.setLayout(video_layout)
        # Background reader thread; frames arrive via the changePixmap signal.
        self.thread = ThreadOpenCV('20201024161726.mp4')
        self.thread.changePixmap.connect(self.setImage)
        self.btn1.clicked.connect(self.playVideo)

    def playVideo(self):
        """Start the frame-reading thread when the button is pressed."""
        self.thread.start()

    def setImage(self, image):
        """Slot: display one decoded frame in the label."""
        self.label_video.setPixmap(QPixmap.fromImage(image))
if __name__ == '__main__':
    # Standard Qt bootstrap: one application object, one main window,
    # then hand control to the event loop.
    app = QtWidgets.QApplication(sys.argv)
    window = Widget()
    window.show()
    sys.exit(app.exec_())
所有問題都出在錯誤的縮進上——你把 cap.release() 放在了 while 循環內部，
所以讀取第一幀之後視頻流就被釋放了。
# Corrected indentation (restored here, since the scrape flattened it):
# cap.release() belongs AFTER the while-loop, not inside it — releasing
# inside the loop closed the stream after the very first frame.
while True:
    ret, frame = cap.read()
    if ret:
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame_expanded = np.expand_dims(frame_rgb, axis=0)
        rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        h, w, ch = rgbImage.shape
        bytesPerLine = ch * w
        convertToQtFormat = QImage(rgbImage.data, w, h, bytesPerLine, QImage.Format_RGB888)
        p = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
        self.changePixmap.emit(p)

# --- after `while` ---
cap.release()
你並沒有用 cv2 創建任何窗口，因此不需要調用 cv2.destroyAllWindows()。
cv2.waitKey(1) 也不起作用，因為只有在顯示了 cv2 窗口、且該窗口處於活動（獲得焦點）
狀態時，系統才會把鍵盤/鼠標事件發送給 cv2。
編輯:
完整的工作代碼。 它使用網絡攝像頭。
它不需要UI
文件。
它有開始和停止流媒體的按鈕,
它還有用於切換的按鈕:RGB <-> GRAYSCALE、NORMAL <-> BLURED。
import os
import sys
import numpy as np
import cv2
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import Qt, QThread, pyqtSignal
from PyQt5.QtWidgets import QLabel, QVBoxLayout, QPushButton, QWidget
from PyQt5.QtGui import QImage, QPixmap
class ThreadOpenCV(QThread):
    """Background thread that reads frames from ``source`` (a video file path
    or a webcam index) and emits each one as a display-ready QImage.

    The ``grayscale`` and ``blur`` flags may be toggled from the GUI thread
    at any time; each frame is processed with the current settings.
    """

    # Emitted once per frame, already converted/scaled for QPixmap.fromImage().
    changePixmap = pyqtSignal(QImage)

    def __init__(self, source):
        super().__init__()
        self.source = source      # video file path or webcam index (e.g. 0)
        self.running = True       # loop guard, cleared by stop()
        self.grayscale = False    # RGB <-> GRAYSCALE toggle
        self.blur = False         # NORMAL <-> BLURED toggle

    def run(self):
        print('start')
        cap = cv2.VideoCapture(self.source)
        self.running = True  # allow re-starting the thread after stop()
        while self.running:
            ret, frame = cap.read()
            if not ret:
                # FIX: the original looped forever at 100% CPU once the
                # stream ended (ret stays False for a file at EOF or an
                # unavailable camera); leave the loop instead.
                break
            if self.grayscale:
                # Convert to gray, then back to 3 channels so the
                # Format_RGB888 QImage below stays valid.
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
            else:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            if self.blur:
                frame = cv2.blur(frame, (15, 15))
            h, w, ch = frame.shape
            bytes_per_line = ch * w  # PEP8: `lower_case_names` for variables
            image = QImage(frame.data, w, h, bytes_per_line, QImage.Format_RGB888)
            # scaled() returns a copy, detaching from the numpy frame buffer.
            image = image.scaled(640, 480, Qt.KeepAspectRatio)
            self.changePixmap.emit(image)
        cap.release()
        print('stop')

    def stop(self):
        """Ask the run-loop to finish; safe to call from the GUI thread."""
        self.running = False
class Widget(QtWidgets.QMainWindow):
    """Main window built purely in code (no .ui file): a video QLabel plus
    PLAY / STOP / grayscale / blur buttons wired to a ThreadOpenCV worker."""

    def __init__(self):
        super().__init__()
        # VIDEO_NAME / PATH_TO_VIDEO show how a file would be played;
        # PATH_TO_VIDEO is then overridden with 0 to use the default webcam.
        VIDEO_NAME = '20201024161726.mp4'
        CWD_PATH = os.getcwd()
        PATH_TO_VIDEO = os.path.join(CWD_PATH, VIDEO_NAME)
        # webcam
        PATH_TO_VIDEO = 0

        self.thread = ThreadOpenCV(PATH_TO_VIDEO)
        self.thread.changePixmap.connect(self.setImage)

        layout = QVBoxLayout()
        self.label_video = QLabel()
        layout.addWidget(self.label_video)

        self.btn1 = QPushButton("PLAY")
        self.btn1.clicked.connect(self.playVideo)
        layout.addWidget(self.btn1)

        self.btn_stop = QPushButton("STOP")
        self.btn_stop.clicked.connect(self.stopVideo)
        layout.addWidget(self.btn_stop)

        self.btn_gray = QPushButton("RGB <-> GRAYSCALE")
        self.btn_gray.clicked.connect(self.grayVideo)
        layout.addWidget(self.btn_gray)

        self.btn_blur = QPushButton("NORMAL <-> BLURED")
        self.btn_blur.clicked.connect(self.blurVideo)
        layout.addWidget(self.btn_blur)

        self.widget = QWidget()
        self.widget.setLayout(layout)
        self.setCentralWidget(self.widget)

    def playVideo(self):
        """Start the capture thread."""
        self.thread.start()

    def stopVideo(self):
        # Consistency fix: use the thread's own stop() API instead of
        # reaching into its `running` attribute directly.
        self.thread.stop()

    def grayVideo(self):
        """Toggle RGB <-> grayscale rendering."""
        self.thread.grayscale = not self.thread.grayscale

    def blurVideo(self):
        """Toggle normal <-> blurred rendering."""
        self.thread.blur = not self.thread.blur

    def setImage(self, image):
        """Slot: show one frame in the video label."""
        self.label_video.setPixmap(QPixmap.fromImage(image))
if __name__ == '__main__':
    # Create the application, show the main window, run the event loop.
    app = QtWidgets.QApplication([])
    main_window = Widget()
    main_window.show()
    app.exec()
RGB,正常:
灰度,模糊:
聲明:本站的技術帖子網頁,遵循CC BY-SA 4.0協議,如果您需要轉載,請注明本站網址或者原文地址。任何問題請咨詢:yoyou2525@163.com.