[英]How to execute if else statement in for loop only once in while loop
早上好,我是python語言的初學者,我想問一個關於python代碼的問題。 Fyi,目前我正在研究帶語音的人臉識別。 目前我在調用get_frame()函數時遇到的問題。 talk.tts(“您的名字” + name,lang)代碼反復執行且不停。 我的問題是,當我在app.py中調用此函數時,我將僅執行一次,因此不會重復發出聲音。 在下面我分享我的代碼,如果您不理解該代碼,請告訴我,我將盡力解釋,也許可以添加更多詳細信息代碼。 希望有人可以幫助謝謝。
app.py
def gen(camera):
while True:
frame = camera.get_frame()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
camera.py
class VideoCamera:
def __init__(self,app):
self.known_encoding_faces = aface.known_encoding_faces
self.user_id = aface.face_user_keys
self.faces = []
self.test = []
self.video_capture = cv2.VideoCapture(0)
self.face_user_keys = {}
self.name_face()
def get_frame(self):
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
success, frame = self.video_capture.read()
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
rgb_small_frame = small_frame[:, :, ::-1]
flag = False
# Only process every other frame of video to save time
if process_this_frame:
# Find all the faces and face encodings in the current frame of video
face_locations = face_recognition.face_locations(rgb_small_frame,number_of_times_to_upsample=2)
#print(face_locations)
face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
#print(face_encodings)
if len(face_encodings) > 0:
face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)[0]
face_names = []
for face_encoding in face_encodings:
# See if the face is a match for the known face(s)
matches = face_recognition.compare_faces(self.known_encoding_faces, face_encodings, tolerance=0.6)
#print(matches)
name = "Unknown"
# If a match was found in known_face_encodings, just use the first one.
if True in matches:
first_match_index = matches.index(True)
name = self.faces[first_match_index]['name']
face_names.append(name)
#print(face_names)
process_this_frame = not process_this_frame
# Display the results
for (top, right, bottom, left), name in zip(face_locations, face_names):
#Scale back up face locations since the frame we detected in was scaled to 1/4 size
top *= 4
right *= 4
bottom *= 4
left *= 4
# Draw a box around the face
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
# Draw a label with a name below the face
cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
# description = ', '.join(name)
cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
# tts = gTTS(name, lang='en')
# tts.save('tts.mp3')
# tts = AudioSegment.from_mp3("tts.mp3")
# subprocess.call(["ffplay", "-nodisp", "-autoexit", "tts.mp3"])
if (val == 9):
speak.tts("your name"+name,lang)
break
ret, jpeg = cv2.imencode('.jpg', frame)
return jpeg.tobytes()
def __del__(self):
self.video_capture.release()
最好的方法似乎是在循環外調用get_frame()
。 如果在調用gen(camera)
函數時只想調用一次get_frame()
,則不應將調用置於循環中,因為循環將重復執行其指令。
def gen(camera):
frame = camera.get_frame()
while True:
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
聲明:本站的技術帖子網頁,遵循CC BY-SA 4.0協議,如果您需要轉載,請注明本站網址或者原文地址。任何問題請咨詢:yoyou2525@163.com.