Add new tests (SYSTRAN#1158)
MahmoudAshraf97 authored Nov 20, 2024
1 parent f830c6f commit 491852e
Showing 3 changed files with 171 additions and 101 deletions.
Binary file added tests/data/hotwords.mp3
tests/test_tokenizer.py: 120 additions & 0 deletions
@@ -0,0 +1,120 @@
from faster_whisper import WhisperModel
from faster_whisper.tokenizer import Tokenizer
from faster_whisper.transcribe import get_suppressed_tokens


def test_suppressed_tokens_minus_1():
    model = WhisperModel("tiny.en")

    tokenizer = Tokenizer(model.hf_tokenizer, False)
    tokens = get_suppressed_tokens(tokenizer, [-1])
    assert tokens == (
        1,
        2,
        7,
        8,
        9,
        10,
        14,
        25,
        26,
        27,
        28,
        29,
        31,
        58,
        59,
        60,
        61,
        62,
        63,
        90,
        91,
        92,
        93,
        357,
        366,
        438,
        532,
        685,
        705,
        796,
        930,
        1058,
        1220,
        1267,
        1279,
        1303,
        1343,
        1377,
        1391,
        1635,
        1782,
        1875,
        2162,
        2361,
        2488,
        3467,
        4008,
        4211,
        4600,
        4808,
        5299,
        5855,
        6329,
        7203,
        9609,
        9959,
        10563,
        10786,
        11420,
        11709,
        11907,
        13163,
        13697,
        13700,
        14808,
        15306,
        16410,
        16791,
        17992,
        19203,
        19510,
        20724,
        22305,
        22935,
        27007,
        30109,
        30420,
        33409,
        34949,
        40283,
        40493,
        40549,
        47282,
        49146,
        50257,
        50357,
        50358,
        50359,
        50360,
    )


def test_suppressed_tokens_minus_value():
    model = WhisperModel("tiny.en")

    tokenizer = Tokenizer(model.hf_tokenizer, False)
    tokens = get_suppressed_tokens(tokenizer, [13])
    assert tokens == (13, 50257, 50357, 50358, 50359, 50360)


def test_split_on_unicode():
    model = WhisperModel("tiny")
    tokenizer = Tokenizer(model.hf_tokenizer, False)

    tokens = [8404, 871, 287, 6, 246, 526, 3210, 20378]
    words, word_tokens = tokenizer.split_tokens_on_unicode(tokens)

    assert words == [" elle", " est", " l", "'", "\ufffd", "é", "rit", "oire"]
    assert word_tokens == [[8404], [871], [287], [6], [246], [526], [3210], [20378]]
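Read together, the two suppression tests pin down the behavior of get_suppressed_tokens: -1 expands to Whisper's default set of non-speech tokens, an explicit ID such as 13 is kept as given, and the model's special tokens (IDs 50257 and up in tiny.en) are appended in both cases. A minimal sketch of the same calls outside pytest, assuming faster-whisper is installed and the tiny.en model downloads on first use:

from faster_whisper import WhisperModel
from faster_whisper.tokenizer import Tokenizer
from faster_whisper.transcribe import get_suppressed_tokens

model = WhisperModel("tiny.en")
tokenizer = Tokenizer(model.hf_tokenizer, False)  # False: tiny.en is English-only

# [-1] expands to the default non-speech tokens plus the special tokens
defaults = get_suppressed_tokens(tokenizer, [-1])
assert len(defaults) == 89  # the tuple asserted in the test above

# An explicit ID passes through, again with the special tokens appended
explicit = get_suppressed_tokens(tokenizer, [13])
assert explicit == (13, 50257, 50357, 50358, 50359, 50360)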
tests/test_transcribe.py: 51 additions & 101 deletions
@@ -1,10 +1,9 @@
+import inspect
 import os

 import numpy as np

 from faster_whisper import BatchedInferencePipeline, WhisperModel, decode_audio
-from faster_whisper.tokenizer import Tokenizer
-from faster_whisper.transcribe import get_suppressed_tokens


 def test_supported_languages():
@@ -215,107 +214,58 @@ def test_multilingual_transcription(data_dir):
     )


-def test_suppressed_tokens_minus_1():
-    model = WhisperModel("tiny.en")
-
-    tokenizer = Tokenizer(model.hf_tokenizer, False)
-    tokens = get_suppressed_tokens(tokenizer, [-1])
-    assert tokens == (
-        1,
-        2,
-        7,
-        8,
-        9,
-        10,
-        14,
-        25,
-        26,
-        27,
-        28,
-        29,
-        31,
-        58,
-        59,
-        60,
-        61,
-        62,
-        63,
-        90,
-        91,
-        92,
-        93,
-        357,
-        366,
-        438,
-        532,
-        685,
-        705,
-        796,
-        930,
-        1058,
-        1220,
-        1267,
-        1279,
-        1303,
-        1343,
-        1377,
-        1391,
-        1635,
-        1782,
-        1875,
-        2162,
-        2361,
-        2488,
-        3467,
-        4008,
-        4211,
-        4600,
-        4808,
-        5299,
-        5855,
-        6329,
-        7203,
-        9609,
-        9959,
-        10563,
-        10786,
-        11420,
-        11709,
-        11907,
-        13163,
-        13697,
-        13700,
-        14808,
-        15306,
-        16410,
-        16791,
-        17992,
-        19203,
-        19510,
-        20724,
-        22305,
-        22935,
-        27007,
-        30109,
-        30420,
-        33409,
-        34949,
-        40283,
-        40493,
-        40549,
-        47282,
-        49146,
-        50257,
-        50357,
-        50358,
-        50359,
-        50360,
-    )
-
-
-def test_suppressed_tokens_minus_value():
-    model = WhisperModel("tiny.en")
-
-    tokenizer = Tokenizer(model.hf_tokenizer, False)
-    tokens = get_suppressed_tokens(tokenizer, [13])
-    assert tokens == (13, 50257, 50357, 50358, 50359, 50360)
+def test_hotwords(data_dir):
+    model = WhisperModel("tiny")
+    pipeline = BatchedInferencePipeline(model)
+
+    audio_path = os.path.join(data_dir, "hotwords.mp3")
+    audio = decode_audio(audio_path)
+
+    segments, info = model.transcribe(audio, hotwords="ComfyUI")
+    segments = list(segments)
+
+    assert "ComfyUI" in segments[0].text
+    assert info.transcription_options.hotwords == "ComfyUI"
+
+    segments, info = pipeline.transcribe(audio, hotwords="ComfyUI")
+    segments = list(segments)
+
+    assert "ComfyUI" in segments[0].text
+    assert info.transcription_options.hotwords == "ComfyUI"
+
+
+def test_transcribe_signature():
+    model_transcribe_args = set(inspect.getargs(WhisperModel.transcribe.__code__).args)
+    pipeline_transcribe_args = set(
+        inspect.getargs(BatchedInferencePipeline.transcribe.__code__).args
+    )
+    pipeline_transcribe_args.remove("batch_size")
+
+    assert model_transcribe_args == pipeline_transcribe_args
+
+
+def test_monotonic_timestamps(physcisworks_path):
+    model = WhisperModel("tiny")
+    pipeline = BatchedInferencePipeline(model=model)
+
+    segments, info = model.transcribe(physcisworks_path, word_timestamps=True)
+    segments = list(segments)
+
+    for i in range(len(segments) - 1):
+        assert segments[i].start <= segments[i].end
+        assert segments[i].end <= segments[i + 1].start
+        for word in segments[i].words:
+            assert word.start <= word.end
+            assert word.end <= segments[i].end
+    assert segments[-1].end <= info.duration
+
+    segments, info = pipeline.transcribe(physcisworks_path, word_timestamps=True)
+    segments = list(segments)
+
+    for i in range(len(segments) - 1):
+        assert segments[i].start <= segments[i].end
+        assert segments[i].end <= segments[i + 1].start
+        for word in segments[i].words:
+            assert word.start <= word.end
+            assert word.end <= segments[i].end
+    assert segments[-1].end <= info.duration
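The replacement tests double as documentation for the batched pipeline: test_transcribe_signature guarantees that BatchedInferencePipeline.transcribe accepts exactly the arguments of WhisperModel.transcribe plus batch_size, and test_hotwords shows that hotwords is one of them. A minimal usage sketch distilled from test_hotwords, assuming an audio file such as tests/data/hotwords.mp3 and on-demand download of the tiny model:

from faster_whisper import BatchedInferencePipeline, WhisperModel, decode_audio

model = WhisperModel("tiny")
pipeline = BatchedInferencePipeline(model)

# hotwords nudges decoding toward rare terms such as product names
audio = decode_audio("tests/data/hotwords.mp3")
segments, info = pipeline.transcribe(audio, hotwords="ComfyUI")

print(info.transcription_options.hotwords)  # "ComfyUI"
print("".join(segment.text for segment in segments))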
