whisper_speech_recognition.py (forked from openvinotoolkit/openvino.genai)

#!/usr/bin/env python3
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
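#
# Usage: python whisper_speech_recognition.py <model_dir> <wav_file_path>
#
# <model_dir> is expected to hold a Whisper model in OpenVINO format together with
# its generation_config.json, which this script reads. One possible way to export
# such a model (an assumption, not part of this sample; openai/whisper-base is just
# an example model id, and optimum-intel must be installed -- verify the exact flags
# against the optimum-intel documentation):
#     optimum-cli export openvino --trust-remote-code --model openai/whisper-base whisper-base
#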
import argparse

import librosa
import openvino_genai


def read_wav(filepath):
    # Load the audio file and resample it to the 16 kHz input Whisper expects
    raw_speech, samplerate = librosa.load(filepath, sr=16000)
    return raw_speech.tolist()
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("model_dir")
    parser.add_argument("wav_file_path")
    args = parser.parse_args()

    device = "CPU"  # GPU can be used as well
    pipe = openvino_genai.WhisperPipeline(args.model_dir, device)

    config = openvino_genai.WhisperGenerationConfig(
        args.model_dir + "/generation_config.json"
    )
    config.max_new_tokens = 100  # increase this based on your speech length
    # 'task' and 'language' parameters are supported for multilingual models only
    config.language = "<|en|>"  # can switch to <|zh|> for Chinese language
    config.task = "transcribe"
    config.return_timestamps = True

    raw_speech = read_wav(args.wav_file_path)

    result = pipe.generate(raw_speech, config)
    print(result)

    for chunk in result.chunks:
        print(f"timestamps: [{chunk.start_ts}, {chunk.end_ts}] text: {chunk.text}")


if "__main__" == __name__:
    main()