[英]How to change string in setTitle for a generated text from outside class
當我嘗試更新self.graphWidget.setTitle(phrase, ...)
中的字符串變量時,該變量不會更新。我正在使用 pyaudio 和 PyQt 從麥克風錄製時繪製實時波形圖;我想要做的不是在終端中打印,而是希望語音轉文本的短語在識別完成後顯示在 MainWindow 中。
'''GUI'''
import struct
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QApplication
import sys
'''Graph'''
import pyqtgraph as pg
from PyQt5 import QtCore
import numpy as np
'''Audio Processing'''
import pyaudio
import wave
import speech_recognition as sr
import multiprocessing as mlti
# Audio capture configuration for PyAudio.
FORMAT = pyaudio.paInt16   # 16-bit signed integer samples
CHANNELS = 1               # mono
RATE = 44100               # sample rate in Hz
CHUNK = 1024 * 2           # frames returned per stream.read() call

# Open a full-duplex stream on the default device; shared by the GUI
# plotter (MainWindow.update) and the recorder (Record).
p = pyaudio.PyAudio()
stream = p.open(
    format=FORMAT,
    channels=CHANNELS,
    rate=RATE,
    input=True,
    output=True,
    frames_per_buffer=CHUNK,
)

# Raw audio chunks accumulated by Record() and written out by Refine_Stream().
frames = []
# Recording duration in seconds.
seconds = 6
# Plot-title text; Speech_Recog() is meant to replace this placeholder.
phrase = "..."
class MainWindow(QtWidgets.QMainWindow):
    """Main window that live-plots the microphone waveform with pyqtgraph."""

    def __init__(self, *args, **kwargs):
        super(MainWindow, self).__init__(*args, **kwargs)
        pg.setConfigOptions(antialias=True)
        # name -> PlotDataItem, so each curve is created only once.
        self.traces = dict()
        '''Display'''
        self.graphWidget = pg.PlotWidget()
        self.setCentralWidget(self.graphWidget)
        self.setWindowTitle("Waveform")
        self.setGeometry(55, 115, 970, 449)
        '''Data'''
        # x axis: one point per sample (every other byte of the chunk).
        self.x = np.arange(0, 2 * CHUNK, 2)
        # Frequency axis (unused here; presumably prepared for a spectrum plot).
        self.f = np.linspace(0, RATE // 2, CHUNK // 2)
        '''Animate'''
        # Redraw the waveform every 50 ms.
        self.timer = QtCore.QTimer()
        self.timer.setInterval(50)
        self.timer.timeout.connect(self.update)
        self.timer.start()

    def set_plotdata(self, name, data_x, data_y):
        # Update an existing trace, or lazily create it on first use.
        if name in self.traces:
            self.traces[name].setData(data_x, data_y)
        else:
            if name == 'waveform':
                self.traces[name] = self.graphWidget.plot(pen='c', width=3)
                self.graphWidget.setYRange(0, 255, padding=0)
                self.graphWidget.setXRange(0, 2 * CHUNK, padding=0.005)

    def update(self):
        # NOTE(review): shadows QWidget.update(); it works because the timer
        # invokes it directly, but a distinct name would be safer.
        self.wf_data = stream.read(CHUNK)
        self.wf_data = struct.unpack(str(2 * CHUNK) + 'B', self.wf_data)
        self.wf_data = np.array(self.wf_data, dtype='b')[::2] + 128
        self.set_plotdata(name='waveform', data_x=self.x, data_y=self.wf_data)
        # Reads module-level `phrase` every tick; it never changes here
        # because Speech_Recog() runs in another process (the asked-about bug).
        self.graphWidget.setTitle(phrase, color="w", size="30pt")
def main():
    """Create the Qt application, show the main window, and block in the event loop."""
    application = QtWidgets.QApplication(sys.argv)
    window = MainWindow()
    window.show()
    exit_code = application.exec_()
    sys.exit(exit_code)
def Record():
    """Capture `seconds` worth of audio from the global stream into `frames`."""
    total_chunks = int(RATE / CHUNK * seconds)
    for chunk_index in range(total_chunks):
        data = stream.read(CHUNK)
        frames.append(data)
        print(chunk_index)
def Refine_Stream():
    """Stop and close the global PyAudio stream, then dump `frames` to output.wav."""
    stream.stop_stream()
    stream.close()
    # Query the sample width BEFORE terminating PyAudio, to be safe.
    sample_width = p.get_sample_size(FORMAT)
    p.terminate()
    # wave.open supports the context-manager protocol (Python 3.4+), so the
    # file is closed even if a write fails.
    with wave.open("output.wav", "wb") as obj:
        obj.setnchannels(CHANNELS)
        obj.setsampwidth(sample_width)
        obj.setframerate(RATE)
        obj.writeframes(b"".join(frames))
def Speech_Recog():
    """Transcribe output.wav with Google Speech Recognition.

    Bug fix: the original assigned to a LOCAL `phrase`, so the module-level
    `phrase` shown by MainWindow.setTitle never changed. Declare it global
    and also return the recognized text for convenience.

    NOTE(review): even with `global`, main() runs in a separate process via
    multiprocessing, so the GUI process will not see this update; a Qt
    signal/slot (as in the second listing) or an IPC channel is required.
    """
    global phrase
    print("Function Started")
    r = sr.Recognizer()
    # Use the saved recording as the audio source.
    with sr.AudioFile("output.wav") as source:
        r.adjust_for_ambient_noise(source, duration=1)
        # Store what was said in a variable.
        audio = r.listen(source)
    phrase = ""
    try:
        # Into recog
        phrase = r.recognize_google(audio, language='pt-BR')
        print(phrase)
    # Couldn't make it
    except sr.UnknownValueError:
        phrase = "Not understood"
        print(phrase)
    return phrase
if __name__ == '__main__':
    # Run the GUI in a separate process so the waveform animates while this
    # (parent) process records, saves, and transcribes the audio.
    p1 = mlti.Process(target=main)
    p1.start()
    Record()
    Refine_Stream()
    Speech_Recog()
希望這是足夠乾淨的代碼。
經過大量學習之後,我想出了如何做我想做的事:
我使用信號與插槽系統(my_signal = QtCore.pyqtSignal(str)),
把我的字符串變量作為信號發送給 setTitle,
並在一段時間後用 QTimer 觸發。
class MainWindow(QtWidgets.QMainWindow):
    """Waveform window whose plot title is updated through a Qt signal/slot."""

    def __init__(self, *args, **kwargs):
        super(MainWindow, self).__init__(*args, **kwargs)
        '''Display'''
        self.graphWidget = pg.PlotWidget()
        self.setCentralWidget(self.graphWidget)
        self.setWindowTitle("Waveform")
        self.setGeometry(55, 115, 970, 449)
        self.graphWidget.setTitle(phrase, color="w", size="30pt")
        '''Animate Title'''
        # Bug fix: QTimer is never imported bare in this file -- only the
        # QtCore module is -- so the original `QTimer()` raised NameError.
        self.picktimer = QtCore.QTimer()
        # Interval must exceed the recording length (`seconds`).
        self.picktimer.setInterval(7000)  # seconds needs to be more than seconds recording
        self.picktimer.timeout.connect(self.Transitioning)
        self.picktimer.start()

    def Transitioning(self):
        """Create the recognizer helper, wire its signal, and start recognition."""
        self.example_class = Speech_Recognition(self)
        self.example_class.my_signal.connect(self.Update_Title)
        self.example_class.Speech_Recog()  # init SpeechRecog

    def Update_Title(self, my_title_phrase):
        """Slot: display the recognized phrase as the plot title."""
        self.graphWidget.setTitle(my_title_phrase, color="w", size="30pt")
        QtWidgets.qApp.processEvents()
def main():
    """Start the Qt event loop with a single MainWindow instance."""
    qt_app = QtWidgets.QApplication(sys.argv)
    main_window = MainWindow()
    main_window.show()
    sys.exit(qt_app.exec_())
def Record():
    """Read `seconds` of microphone audio into the global `frames` list."""
    n_chunks = int(RATE / CHUNK * seconds)
    i = 0
    while i < n_chunks:
        frames.append(stream.read(CHUNK))
        print(i)
        i += 1
def Refine_Stream():
    """Close the global PyAudio stream and write the captured frames to output.wav."""
    stream.stop_stream()
    stream.close()
    # Ask for the sample width BEFORE terminating PyAudio, to be safe.
    sample_width = p.get_sample_size(FORMAT)
    p.terminate()
    # Context manager guarantees the wave file is closed on any exit path.
    with wave.open("output.wav", "wb") as obj:
        obj.setnchannels(CHANNELS)
        obj.setsampwidth(sample_width)
        obj.setframerate(RATE)
        obj.writeframes(b"".join(frames))
class Speech_Recognition(QtWidgets.QWidget):
    """Helper widget that transcribes output.wav and emits the text as a signal."""

    # Emitted with the recognized phrase (or the fallback message).
    my_signal = QtCore.pyqtSignal(str)

    def Speech_Recog(self):
        """Run Google Speech Recognition on output.wav and emit the result."""
        recognizer = sr.Recognizer()
        # Use the saved recording as the audio source.
        with sr.AudioFile("output.wav") as source:
            recognizer.adjust_for_ambient_noise(source, duration=1)
            # Capture what was said.
            audio = recognizer.listen(source)
        try:
            recorded_phrase = recognizer.recognize_google(audio, language='pt-BR')
        except sr.UnknownValueError:
            recorded_phrase = "Not understood"
        self.my_signal.emit(recorded_phrase)
if __name__ == '__main__':
    # GUI runs in its own process; its 7-second QTimer fires after this
    # parent process has finished recording and writing output.wav.
    p2 = mlti.Process(target=main)
    p2.start()
    Record()
    Refine_Stream()
代碼仍需進一步清理。
聲明:本站的技術帖子網頁,遵循CC BY-SA 4.0協議,如果您需要轉載,請注明本站網址或者原文地址。任何問題請咨詢:yoyou2525@163.com.