![](/img/trans.png)
[英]Get timestamps of censored words from Azure Speech to Text Services using Python
[英]How to get Word Level Timestamps using Azure Speech to Text and the Python SDK?
在我在 GitHub 上找到的示例的幫助下,我的代碼目前能夠讀取音頻文件並使用 Azure Speech to Text 進行轉錄。 但是,我需要在轉錄中包含所有單詞的時間戳。 根據文檔,此功能是在 1.5.0 版中添加的,可通過方法 request_word_level_timestamps() 訪問。 但即使我調用了它,我得到的回應也和以前一樣。 我無法從文檔中弄清楚如何使用它。 有誰知道它是如何工作的?
我使用的是 Python SDK 1.5.1 版。
import azure.cognitiveservices.speech as speechsdk
import time
from allennlp.predictors.predictor import Predictor
import json

inputPath = "(inputlocation)"
outputPath = "(outputlocation)"

# Creates an instance of a speech config with specified subscription key and service region.
# Replace with your own subscription key and service region (e.g., "westus").
speech_key, service_region = "apikey", "region"
speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)
# Ask the service for word-level timestamps and the detailed (NBest) JSON payload.
speech_config.request_word_level_timestamps()
speech_config.output_format = speechsdk.OutputFormat.Detailed

filename = input("Enter filename: ")
print(speech_config)

try:
    audio_config = speechsdk.audio.AudioConfig(filename=inputPath + filename)

    # Creates a recognizer with the given settings.
    speech_recognizer = speechsdk.SpeechRecognizer(
        speech_config=speech_config, audio_config=audio_config)

    def start():
        """Run continuous recognition on the file, appending each final result to a text file."""
        done = False

        # Output path: "<name>_MS_recognized.txt" next to outputPath; start empty,
        # results are appended one utterance at a time.
        out_name = outputPath + filename[0:len(filename) - 4] + "_MS_recognized.txt"
        with open(out_name, "w+") as f:
            f.truncate(0)

        def stop_callback(evt):
            # Fires on session_stopped/canceled: stop recognition and unblock the wait loop.
            print("Closing on {}".format(evt))
            speech_recognizer.stop_continuous_recognition()
            nonlocal done
            done = True

        def add_to_res(evt):
            # Append the recognized text of each final result to the output file.
            with open(out_name, "a") as f:
                f.write(evt.result.text + "\n")

        # Connect callbacks to the events fired by the speech recognizer.
        speech_recognizer.recognizing.connect(lambda evt: print('RECOGNIZING: {}'.format(evt)))
        speech_recognizer.recognized.connect(lambda evt: print('RECOGNIZED: {}'.format(evt)))
        speech_recognizer.recognized.connect(add_to_res)
        speech_recognizer.session_started.connect(lambda evt: print('SESSION STARTED: {}'.format(evt)))
        speech_recognizer.session_stopped.connect(lambda evt: print('SESSION STOPPED {}'.format(evt)))
        speech_recognizer.canceled.connect(lambda evt: print('CANCELED {}'.format(evt)))

        # Stop continuous recognition on either session stopped or canceled events.
        speech_recognizer.session_stopped.connect(stop_callback)
        speech_recognizer.canceled.connect(stop_callback)

        # Start continuous speech recognition and poll until a stop event arrives.
        # For single-utterance command/query recognition use recognize_once() instead.
        speech_recognizer.start_continuous_recognition()
        while not done:
            time.sleep(.5)

    start()
except Exception as e:
    print("File does not exist")
    # Log the real exception too — a broad except that only prints
    # "File does not exist" hides bad keys/regions and SDK errors.
    print(e)
結果只包含 session_id 和一個包含 result_id、text 和 reason 的結果對象。
我參考了您的代碼,並按照官方教程 Quickstart: Recognize speech with the Speech SDK for Python 編寫了下面的示例代碼,它可以打印每個單詞的 Offset 和 Duration 值。我使用了一個名為 whatstheweatherlike.wav 的音頻文件,它來自 GitHub Repo Azure-Samples/cognitive-services-speech-sdk 中的 samples/csharp/sharedcontent/console/whatstheweatherlike.wav。
這是我的示例代碼及其結果。
import azure.cognitiveservices.speech as speechsdk
import json

speech_key, service_region = "<your api key>", "<your region>"
speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)
# Ask the service to include per-word Offset/Duration in the JSON result.
speech_config.request_word_level_timestamps()

audio_config = speechsdk.audio.AudioConfig(filename='whatstheweatherlike.wav')
speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config, audio_config=audio_config)
result = speech_recognizer.recognize_once()

# print(result.json)
# Without `request_word_level_timestamps`, the result is:
# {"DisplayText":"What's the weather like?","Duration":13400000,"Offset":400000,"RecognitionStatus":"Success"}
# With `request_word_level_timestamps`, the result includes an "NBest" list whose
# entries carry a "Words" array, e.g.:
# {"Duration":13400000,"NBest":[{"Confidence":0.976...,"Display":"What's the weather like?",...,
#  "Words":[{"Duration":3800000,"Offset":600000,"Word":"what's"},...]},...],
#  "Offset":400000,"RecognitionStatus":"Success"}

stt = json.loads(result.json)
# Pick the NBest hypothesis with the highest confidence and take its word list.
best_hypothesis = max(stt['NBest'], key=lambda item: item['Confidence'])
words = best_hypothesis['Words']
print(words)
print("Word\tOffset\tDuration")
for word in words:
    print(f"{word['Word']}\t{word['Offset']}\t{word['Duration']}")
上面腳本的輸出是:
[{'Duration': 3800000, 'Offset': 600000, 'Word': "what's"}, {'Duration': 1200000, 'Offset': 4500000, 'Word': 'the'}, {'Duration': 2900000, 'Offset': 5800000, 'Word': 'weather'}, {'Duration': 4700000, 'Offset': 8800000, 'Word': 'like'}]
Word Offset Duration
what's 600000 3800000
the 4500000 1200000
weather 5800000 2900000
like 8800000 4700000
希望能幫助到你。
回應評論中關於連續識別的問題:如果您在 SpeechConfig 上調用了 request_word_level_timestamps(),連續識別同樣適用。您可以通過 evt.result.json 檢查 JSON 結果。
例如,
# Enable word-level timestamps on the speech config before creating the recognizer.
speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)
speech_config.request_word_level_timestamps()
那么你的語音識別器:
# Build the recognizer from the configured speech_config and the audio input.
speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config, audio_config=audio_config)
當您將回調連接到由語音識別器觸發的事件時,您可以看到單詞級時間戳:
# Each final result's evt.result.json now carries the detailed payload with per-word timestamps.
speech_recognizer.recognized.connect(lambda evt: print('JSON: {}'.format(evt.result.json)))
我的問題是 Translation 結果對象不包含 word-level 時間戳,因為翻譯識別器不接受上述 speech_config 設置。
設置
# Request per-word Offset/Duration in the recognition result.
speech_config.request_word_level_timestamps()
在 azure sdk 的語音配置中,您可以獲取每個單詞的文本和時間戳。
# OutputFormat(1) is OutputFormat.Detailed — use the named enum member instead of the magic value.
speech_config.output_format = speechsdk.OutputFormat.Detailed
此語句將允許您從 azure sdk 獲取詳細的 json 對象。
下面是一個示例代碼。 確保更換鑰匙。 在語音到文本可能失敗的地方可能需要一些錯誤處理。
def process(self):
logger.debug("Speech to text request received")
speechapi_settings = SpeechAPIConf()
audio_filepath = <PATH_TO_AUDIO_FILE>
locale = "en-US" # Change as per requirement
logger.debug(audio_filepath)
audio_config = speechsdk.audio.AudioConfig(filename=audio_filepath)
speech_config = speechsdk.SpeechConfig(subscription=<SUBSCRIPTION_KEY>, region=<SERVICE_REGION>)
speech_config.request_word_level_timestamps()
speech_config.speech_recognition_language = locale
speech_config.output_format = speechsdk.OutputFormat(1)
# Creates a recognizer with the given settings
speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config, audio_config=audio_config)
# Variable to monitor status
done = False
# Service callback for recognition text
transcript_display_list = []
transcript_ITN_list = []
confidence_list = []
words = []
def parse_azure_result(evt):
import json
response = json.loads(evt.result.json)
transcript_display_list.append(response['DisplayText'])
confidence_list_temp = [item.get('Confidence') for item in response['NBest']]
max_confidence_index = confidence_list_temp.index(max(confidence_list_temp))
confidence_list.append(response['NBest'][max_confidence_index]['Confidence'])
transcript_ITN_list.append(response['NBest'][max_confidence_index]['ITN'])
words.extend(response['NBest'][max_confidence_index]['Words'])
logger.debug(evt)
# Service callback that stops continuous recognition upon receiving an event `evt`
def stop_cb(evt):
print('CLOSING on {}'.format(evt))
speech_recognizer.stop_continuous_recognition()
nonlocal done
done = True
# Do something with the combined responses
print(transcript_display_list)
print(confidence_list)
print(words)
# Connect callbacks to the events fired by the speech recognizer
speech_recognizer.recognizing.connect(lambda evt: logger.debug('RECOGNIZING: {}'.format(evt)))
speech_recognizer.recognized.connect(parse_azure_result)
speech_recognizer.session_started.connect(lambda evt: logger.debug('SESSION STARTED: {}'.format(evt)))
speech_recognizer.session_stopped.connect(lambda evt: logger.debug('SESSION STOPPED {}'.format(evt)))
speech_recognizer.canceled.connect(lambda evt: logger.debug('CANCELED {}'.format(evt)))
# stop continuous recognition on either session stopped or canceled events
speech_recognizer.session_stopped.connect(stop_cb)
speech_recognizer.canceled.connect(stop_cb)
# Start continuous speech recognition
logger.debug("Initiating speech to text")
speech_recognizer.start_continuous_recognition()
while not done:
time.sleep(.5)
聲明:本站的技術帖子網頁,遵循CC BY-SA 4.0協議,如果您需要轉載,請注明本站網址或者原文地址。任何問題請咨詢:yoyou2525@163.com.