From 649547047a17b4f1a7af4eabe0a2afdb432c9145 Mon Sep 17 00:00:00 2001
From: leonardozcm
Date: Thu, 16 Jan 2025 12:14:05 +0800
Subject: [PATCH] woq int4 fwd

---
 python/llm/src/ipex_llm/transformers/low_bit_linear.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/python/llm/src/ipex_llm/transformers/low_bit_linear.py b/python/llm/src/ipex_llm/transformers/low_bit_linear.py
index f78a5168aec..f6cfa1f79d6 100644
--- a/python/llm/src/ipex_llm/transformers/low_bit_linear.py
+++ b/python/llm/src/ipex_llm/transformers/low_bit_linear.py
@@ -273,7 +273,7 @@ def use_batch_forward(x: torch.Tensor, qtype: int, output_len: int):
         and x.shape[1] % 128 == 0
         and (
             (
-                qtype in [SYM_INT4, ASYM_INT4, FP8E5, FP8E4]
+                qtype in [SYM_INT4, ASYM_INT4, FP8E5, FP8E4, WOQ_INT4]
                 and (
                     batch_size <= 48
                     or (
@@ -284,7 +284,7 @@ def use_batch_forward(x: torch.Tensor, qtype: int, output_len: int):
                 )
             )
             or (
-                qtype in [SYM_INT8, FP4, FP6, Q4_K, Q6_K]
+                qtype in [SYM_INT8, FP4, FP6, Q4_K, Q6_K, WOQ_INT4]
                 and batch_size <= 48
                 and device_name in ["arc", "pvc", "mtl", "arl"]
                 and x.shape[1] % 256 == 0
@@ -297,8 +297,8 @@ def use_batch_forward(x: torch.Tensor, qtype: int, output_len: int):
             batch_size > 1
             or (device_name in ["arc"] and qtype in [SYM_INT8, FP4])
             or (device_name in ["arc", "mtl"] and qtype in [FP8E4])
-            or (device_name in ["lnl"] and qtype in [SYM_INT4] and x.shape[1] % 512 == 0)
-            or (device_name in ["bmg"] and qtype in [SYM_INT4, FP8E5])
+            or (device_name in ["lnl"] and qtype in [SYM_INT4, WOQ_INT4] and x.shape[1] % 512 == 0)
+            or (device_name in ["bmg"] and qtype in [SYM_INT4, WOQ_INT4, FP8E5])
         )
     return False
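
Note (reviewer sketch, not part of the patch): a minimal way the widened gate
could be exercised after this change. The import path, the use_batch_forward
signature, and the WOQ_INT4 constant are taken from the hunk context above;
the XPU device, tensor shape, and output_len value are illustrative
assumptions rather than part of the change.

    # Sketch: check whether the batch-forward path is selected for a
    # WOQ_INT4 weight, assuming an Intel GPU (XPU) build of ipex-llm.
    import torch
    from ipex_llm.transformers.low_bit_linear import use_batch_forward, WOQ_INT4

    # fp16 input whose hidden size is a multiple of 256 (which also satisfies
    # the % 128 check) and whose batch size falls within the <= 48 bound.
    x = torch.randn(32, 4096, dtype=torch.half, device="xpu")

    # output_len feeds checks elsewhere in use_batch_forward that this hunk
    # does not touch; 4096 is only an assumed example value.
    print(use_batch_forward(x, qtype=WOQ_INT4, output_len=4096))

With this patch, WOQ_INT4 is treated like SYM_INT4/FP8 in the first
eligibility branch, added to the SYM_INT8/FP4/FP6/Q4_K/Q6_K branch for
arc/pvc/mtl/arl, and accepted by the lnl (with the % 512 alignment) and bmg
device checks.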