diff --git a/scripts/whisper/export-onnx.py b/scripts/whisper/export-onnx.py
index 1b1dba8bb..fbb0b132b 100755
--- a/scripts/whisper/export-onnx.py
+++ b/scripts/whisper/export-onnx.py
@@ -11,6 +11,7 @@
 """
 import argparse
+import os
 from pathlib import Path
 from typing import Any, Dict, Optional
@@ -418,6 +419,9 @@ def main():
         },
     )
 
+    if 'large' in args.model:
+        # int8 quantization causes errors for large models, so skip it.
+        return
     # Generate int8 quantization models
     # See https://onnxruntime.ai/docs/performance/model-optimizations/quantization.html#data-type-selection