[英]How to spawn child thread from main thread while main thread keeps executing in Python
我正在制作一个应用程序来将从麦克风录制的音频转换为文本。 录音的长度可能很长,比如 3 小时,所以我猜最好将其转换为持续时间短的波形文件,比如一分钟左右,然后生成一个子线程,在其中执行音频到文本的操作,而主线程可以开始录制下一分钟。 音频到文本的操作比录制部分快得多,因此时间不会成为问题。
这是我认为它应该如何工作的流程图。
我正在使用pyaudio
录制音频。 它的代码:
import pyaudio
import wave
import time
def read_audio(stream, seconds=10, filename='record.wav'):
    """Record `seconds` of audio from an open PyAudio input stream to a WAV file.

    Args:
        stream: an open PyAudio input stream (opened by the caller).
        seconds: length of the recording; defaults to 10 so existing
            `read_audio(stream)` calls keep working.
        filename: output WAV path, defaults to 'record.wav'.

    Side effects: stops and closes `stream`; relies on the module-level
    PyAudio instance `p` for the sample width.
    """
    chunk = 1024  # Record in chunks of 1024 samples
    sample_format = pyaudio.paInt16  # 16 bits per sample
    channels = 2
    fs = 44100  # Record at 44100 samples per second
    frames = []  # Initialize array to store frames
    # fs / chunk buffer reads per second, for `seconds` seconds
    # (original comment said "3 seconds" but recorded 10 — now parameterized)
    for _ in range(int(fs / chunk * seconds)):
        frames.append(stream.read(chunk))
    # Save the recorded data as a WAV file; `with` closes it even on error
    with wave.open(filename, 'wb') as wf:
        wf.setnchannels(channels)
        wf.setsampwidth(p.get_sample_size(sample_format))
        wf.setframerate(fs)
        wf.writeframes(b''.join(frames))
    # Stop and close the stream
    stream.stop_stream()
    stream.close()
p = pyaudio.PyAudio()  # Create an interface to PortAudio (module-level; read_audio uses it)
chunk = 1024  # Record in chunks of 1024 samples
sample_format = pyaudio.paInt16  # 16 bits per sample
channels = 2  # stereo input
fs = 44100  # sample rate in Hz
# Open an input (recording) stream with the parameters above
stream = p.open(format=sample_format, channels=channels, rate=fs,
                frames_per_buffer=chunk, input=True)
# Record one segment; read_audio closes the stream when done
read_audio(stream)
p.terminate()  # Terminate the PortAudio interface
对于语音识别,使用 Google 的 API speech_recognition
。 它的代码:
import speech_recognition as sr
def convert(sound="record.wav"):
    """Transcribe a WAV file with Google's speech API, appending the text to test.txt.

    Args:
        sound: path of the WAV file to transcribe. Defaults to 'record.wav'
            (the recorder's output), so existing `convert()` calls still work.

    Errors are handled in place: unrecognizable audio and API failures are
    printed, never raised.
    """
    r = sr.Recognizer()
    with sr.AudioFile(sound) as source:
        r.adjust_for_ambient_noise(source)  # calibrate against background noise
        print("Converting Audio To Text and saving to file..... ")
        audio = r.listen(source)  # read the whole file into an AudioData object
    try:
        value = r.recognize_google(audio)  ##### API call to google for speech recognition
        if str is bytes:  # Python 2: file writes need encoded bytes
            result = u"{}".format(value).encode("utf-8")
        else:  # Python 3: write text directly
            result = "{}".format(value)
        with open("test.txt", "a") as f:
            f.write(result)
        print("Done !\n\n")
    except sr.UnknownValueError:
        print("")  # nothing recognizable in this chunk; continue quietly
    except sr.RequestError as e:
        print("{0}".format(e))  # e.g. no internet connection or quota exceeded
    except KeyboardInterrupt:
        pass

convert()
由于 GIL,Python 从来都不是真正的多线程,但这在您的情况下可能并不重要,因为您正在使用 api 调用来为您进行语音识别。
所以你可以试试这个来启动一个线程来做转换
from threading import Thread
# Run convert() on a worker thread so the main thread is free to keep recording
t = Thread(target=convert)
t.start()
在您尝试转换下一分钟之前,您可能会尝试加入最后一个线程以确保它已完成
t.join()
您可能还可以使用 asyncio 库
虽然这可能有点矫枉过正,但我可能会使用多处理库。 在您的情况下,您可能有一个不断录制和保存新声音文件的侦听器工作进程,以及一个不断寻找新文件并转换它们的转换工作进程。
如果需要,这样的设计还能让你写出更健壮的系统。例如,如果互联网连接中断,导致几分钟内无法通过 Google API 转换声音文件,录音工作进程仍会继续保存声音文件,而不必关心它们何时被处理——这些文件会在网络恢复后由转换工作进程继续处理。
无论如何,这里有一个你可以使用的转换工作进程的小例子。
import multiprocessing as mp
import os
from pathlib import Path
from time import sleep
class ConversionWorker:
    """Long-running worker that converts queued WAV recordings to text."""

    def __init__(self, sound_file_directory_path: str, text_save_filepath: str):
        """Remember where recordings appear and where transcripts should go."""
        self.sound_directory_path = Path(sound_file_directory_path)
        self.text_filepath = Path(text_save_filepath)

    def run(self):
        """Poll the sound directory forever, converting then deleting each WAV."""
        while True:
            for wav_path in self.sound_directory_path.glob('*.wav'):
                # convert from wav_path
                # save to self.text_filepath
                convert()
                # recording is no longer needed once converted
                os.remove(wav_path)
            # recordings only arrive about once a minute, so polling can be lazy
            sleep(5)
def main():
    """Run the conversion worker in a separate process while this one records."""
    worker = ConversionWorker(sound_file_directory_path='path/to/sounds',
                              text_save_filepath='path/to/text')
    converter_process = mp.Process(target=worker.run)
    converter_process.start()
    # do the recording and saving for as long as you want
    converter_process.terminate()
我解决这个问题的方法是受到 Jeremy Bare 的启发。 我决定发布一个关于我最终是如何做到的答案,所以如果有人想做类似的事情,那么他们可以获得完整的代码。
import speech_recognition as sr
import pyaudio
import wave
import time
import threading
import os
def read_audio(stream, filename):
    """Record one fixed-length segment from `stream` and save it to `filename`.

    Args:
        stream: an open PyAudio input stream; it is stopped and closed here,
            so the caller must open a fresh stream for every segment.
        filename: output WAV path for this segment.

    Relies on the module-level PyAudio instance `p` for the sample width.
    """
    chunk = 1024  # Record in chunks of 1024 samples
    sample_format = pyaudio.paInt16  # 16 bits per sample
    channels = 2
    fs = 44100  # Record at 44100 samples per second
    seconds = 10  # Number of seconds to record at once
    frames = []  # Initialize array to store frames
    # fs / chunk buffer reads per second, for `seconds` seconds
    # (removed the original's no-op `filename = filename` line)
    for _ in range(int(fs / chunk * seconds)):
        frames.append(stream.read(chunk))
    # Save the recorded data as a WAV file; `with` closes it even on error
    with wave.open(filename, 'wb') as wf:
        wf.setnchannels(channels)
        wf.setsampwidth(p.get_sample_size(sample_format))
        wf.setframerate(fs)
        wf.writeframes(b''.join(frames))
    # Stop and close the stream
    stream.stop_stream()
    stream.close()
def convert(i):
    """Transcribe record<i>.wav to test.txt, then delete the WAV file.

    Args:
        i: segment number; a negative value means nothing has been recorded
            yet (the first loop iteration passes i-1 == -1), so do nothing.
    """
    if i < 0:
        return  # nothing recorded yet — guard clause instead of wrapping the body
    sound = 'record' + str(i) + '.wav'
    r = sr.Recognizer()
    with sr.AudioFile(sound) as source:
        r.adjust_for_ambient_noise(source)
        print("Converting Audio To Text and saving to file..... ")
        audio = r.listen(source)
    try:
        value = r.recognize_google(audio)  ##### API call to google for speech recognition
        if str is bytes:  # Python 2 needs explicit utf-8 encoding
            result = u"{}".format(value).encode("utf-8")
        else:
            result = "{}".format(value)
        with open("test.txt", "a") as f:  # `with` closes f; redundant f.close() removed
            f.write(result)
            f.write(" ")
        # Delete the audio only after the transcript is safely on disk
        # (the original removed it before writing, losing audio if the write failed)
        os.remove(sound)
    except sr.UnknownValueError:
        print("")  # nothing recognizable in this segment; skip quietly
    except sr.RequestError as e:
        print("{0}".format(e))  # network / API error
    except KeyboardInterrupt:
        pass
p = pyaudio.PyAudio()  # Create an interface to PortAudio (shared by read_audio/save_audios)
chunk = 1024  # Record in chunks of 1024 samples
sample_format = pyaudio.paInt16  # 16 bits per sample
channels = 2  # stereo input
fs = 44100  # sample rate in Hz
def save_audios(i):
    """Open a fresh input stream and record segment number `i` to record<i>.wav.

    A new stream is needed per segment because read_audio closes its stream.
    Uses the module-level PyAudio instance `p` and its recording parameters.
    """
    segment_stream = p.open(format=sample_format, channels=channels, rate=fs,
                            frames_per_buffer=chunk, input=True)
    read_audio(segment_stream, 'record' + str(i) + '.wav')
# Record 30 s of audio as three 10 s segments; while segment i is being
# recorded on t1, the previously recorded segment i-1 is transcribed on t2.
for i in range(30//10): # Number of total seconds to record/ Number of seconds per recording
    t1 = threading.Thread(target=save_audios, args=[i])
    x = i-1
    t2 = threading.Thread(target=convert, args=[x]) # send one earlier than being recorded
    t1.start()
    t2.start()
    # wait for both the recording and the conversion before starting the next segment
    t1.join()
    t2.join()
    if i==2:
        # last iteration (hard-coded to range(3)'s final index): the segment just
        # recorded still has no conversion pass, so remember to convert it below
        flag = True
# NOTE(review): indentation reconstructed — `if flag` is assumed to sit AFTER the
# loop (inside the loop it would NameError on i == 0); `flag` only exists if the
# loop reached i == 2. TODO confirm against the original answer.
if flag:
    convert(i)  # transcribe the final segment left over from the last iteration
p.terminate() # Terminate the PortAudio interface
read_audio()
和convert()
函数与问题中发布的版本几乎相同,不同之处在于它们现在接受一个整数作为参数。这个整数用于确定各自线程要处理的文件编号。
由于必须先完成录制,传给语音识别函数的整数比传给录制函数的整数小 1。这样可以保证它只处理已经录制完成的音频。
声明:本站的技术帖子网页,遵循CC BY-SA 4.0协议,如果您需要转载,请注明本站网址或者原文地址。任何问题请咨询:yoyou2525@163.com.