[英]Python error while running opencv (Blinking detection)
我使用Visual Studio 2019,(Python3.6 / 64bit)
这是一个使用 opencv 和 dlib 检测眨眼和张嘴的代码。
Traceback (most recent call last):
File "C:\Users\이시우\source\repos\planners facedetect\test1.py", line 132, in <module>
mouths = get_mouth_pen_ratio( mouth_points, landmarks)
NameError: name 'landmarks' is not defined
[ WARN:0] global D:\bld\libopencv_1632857399304\work\modules\videoio\src\cap_msmf.cpp (438) `anonymous-namespace'::SourceReaderCB::~SourceReaderCB terminating async callback
转码时出现上述错误。
起初,它运行良好,但突然出现此错误。 有什么问题?
for face in faces:
landmarks = predictor(gray, face)
我一边执行语句一边找问题,发现上面的代码跳过了for语句中的语句。
以下是我编写的代码的全文。
在while语句中,我们通过if、elif语句测量了睁眼和闭眼时的眼睛比例,并通过中间值写入检测眨眼。
import cv2
import dlib
from math import hypot
import timeit
import winsound as sd
def beepsound():
fr = 2000
du = 1000
sd.Beep(fr, du)
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("./shape_predictor_68_face_landmarks.dat")
font = cv2.FONT_HERSHEY_SIMPLEX
mouth_points = [48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67]
r_eye_points = [42, 43, 44, 45, 46, 47]
l_eye_points = [36, 37, 38, 39, 40, 41]
count_mouth_open = 0
def midpoint(p1, p2):
return int((p1.x + p2.x)/2), int((p1.y + p2.y)/2)
def get_mouth_pen_ratio(mouth_points, facial_landmarks):
left_point = (facial_landmarks.part( mouth_points[12]).x, facial_landmarks.part(mouth_points[12]).y)
right_point = (facial_landmarks.part( mouth_points[16]).x, facial_landmarks.part(mouth_points[16]).y)
center_top = midpoint(facial_landmarks.part( mouth_points[13]), facial_landmarks.part(mouth_points[14]))
center_bottom = midpoint(facial_landmarks.part( mouth_points[19]), facial_landmarks.part(mouth_points[18]))
hor_line = cv2.line(image, left_point, right_point, (0, 255, 0), 2)
ver_line = cv2.line(image, center_top, center_bottom, (0, 255, 0), 2)
hor_line_lenght = hypot( (left_point[0] - right_point[0]), (left_point[1] - right_point[1]))
ver_line_lenght = hypot( (center_top[0] - center_bottom[0]), (center_top[1] - center_bottom[1]))
if ver_line_lenght != 0:
ratio = hor_line_lenght / ver_line_lenght
else:
ratio = 60
return ratio
def get_blinking_ratio(eye_points, facial_landmarks):
left_point = (facial_landmarks.part( eye_points[0]).x, facial_landmarks.part(eye_points[0]).y)
right_point = (facial_landmarks.part( eye_points[3]).x, facial_landmarks.part(eye_points[3]).y)
center_top = midpoint(facial_landmarks.part( eye_points[1]), facial_landmarks.part(eye_points[2]))
center_bottom = midpoint(facial_landmarks.part( eye_points[5]), facial_landmarks.part(eye_points[4]))
hor_line = cv2.line(image, left_point, right_point, (0, 255, 0), 2)
ver_line = cv2.line(image, center_top, center_bottom, (0, 255, 0), 2)
hor_line_lenght = hypot( (left_point[0] - right_point[0]), (left_point[1] - right_point[1]))
ver_line_lenght = hypot( (center_top[0] - center_bottom[0]), (center_top[1] - center_bottom[1]))
ratio = ver_line_lenght / hor_line_lenght
return ratio
capture = cv2.VideoCapture(0)
capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
frame = 0
sum =0
open_average_ear = 0
close_average_ear = 0
step_count = 0
sleep_frame = 0
while True :
if step_count == 0:
print("3초간 평소 눈의 크기를 측정합니다\n")
while True:
ready = input("편안하게 눈을 떠주시고 준비가 되셨으면 r을 입력해주세요: ")
if ready == 'r':
step_count += 1
break
else:
print("잘못된 입력 값입니다. 다시 입력해주세요\n")
elif step_count == 2:
print("3초간 감은 눈의 크기를 측정합니다\n")
print("부저소리가 날때까지 눈을 편안하게 감아주세요\n")
while True:
ready = input("준비가 되셨으면 r을 입력해주세요: ")
if ready == 'r':
step_count += 1
break
else:
print("잘못된 입력 값입니다. 다시 입력해주세요\n")
elif step_count == 4:
print("s를 입력하면 졸음 인식 시스템이 시작됩니다.")
while True:
ready = input("준비가 되셨으면 s을 입력해주세요: ")
if ready == 's':
break
else:
print("잘못된 입력 값입니다. 다시 입력해주세요\n")
print("q를 누르면 시스템이 종료됩니다.")
step_count += 1
_, image = capture.read()
# convert frame to gray
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
faces = detector(gray)
for face in faces:
landmarks = predictor(gray, face)
mouths = get_mouth_pen_ratio( mouth_points, landmarks)
if mouths <= 5.0:
count_mouth_open += 1
left_eye_ratio = get_blinking_ratio( l_eye_points, landmarks)
right_eye_ratio = get_blinking_ratio( r_eye_points, landmarks)
blinking_ratio = (left_eye_ratio + right_eye_ratio) / 2
if frame < 100:
frame = frame + 1
sum = sum + blinking_ratio
elif frame == 100:
open_average_ear = sum / frame
print("ear = %f" %open_average_ear)
sum = 0
step_count += 1
frame +=1
continue
elif frame > 100 and frame < 200:
frame = frame + 1
sum = sum + blinking_ratio
elif frame == 200:
close_average_ear = sum / (frame-100)
print("ear = %f" %close_average_ear)
step_count += 1
frame += 1
beepsound()
continue
if close_average_ear != 0:
cv2.putText(image, "Mouth open: " + str(count_mouth_open), (50, 50), font, 2, (255, 0, 0))
print("Mouth open: " + str(count_mouth_open))
if blinking_ratio <= (open_average_ear + close_average_ear)/2:
cv2.putText(image, "blinking", (50, 50), font, 2, (255, 0, 0))
print("blink")
sleep_frame += 1
if sleep_frame > 2:
frame += 1
if frame - 201 == 15:
if sleep_frame > 13:
print("졸음운전 판단")
beepsound()
sleep_frame = 0
frame = 201
pass
else:
sleep_frame = 0
frame = 201
cv2.imshow("Frame", image)
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
想象一下,如果面部检测器没有找到任何东西会发生什么:
faces = detector(gray)
for face in faces:
landmarks = predictor(gray, face)
mouths = get_mouth_pen_ratio( mouth_points, landmarks)
那么您的landmarks
确实没有在这里定义,因此存在错误。
现在,这真的取决于当没有检测到人脸时你想要做什么。 例如跳到下一帧? 像这样的东西?
faces = detector(gray)
if not faces:
continue
for face in faces:
landmarks = predictor(gray, face)
if not landmarks:
continue
mouths = get_mouth_pen_ratio( mouth_points, landmarks)
我也相信你的代码中的逻辑是错误的。 如果你有更多的面孔,你只保留最后一张的地标,只检测最后一张脸上的嘴巴。 怎么样:
faces = detector(gray)
for face in faces:
landmarks = predictor(gray, face)
if landmarks:
mouths = get_mouth_pen_ratio( mouth_points, landmarks)
# do something with mouths...
声明:本站的技术帖子网页,遵循CC BY-SA 4.0协议,如果您需要转载,请注明本站网址或者原文地址。任何问题请咨询:yoyou2525@163.com.