
Fix nit

xiaoyu-work committed Feb 3, 2025
1 parent f7eb27f commit 3bd8e18
Showing 2 changed files with 11 additions and 6 deletions.
olive/passes/pytorch/lora.py (8 changes: 5 additions & 3 deletions)
@@ -551,13 +551,14 @@ def get_peft_model(model: "PreTrainedModel", config: ConfigBase, config_kwargs:
     """Get the PEFT model for LoHa fine-tuning."""
     from peft import LoHaConfig, LoHaModel

+    target_modules = config.target_modules or "all-linear"
     config = LoHaConfig(
         r=config.r,
         alpha=config.alpha,
         rank_dropout=config.rank_dropout,
         module_dropout=config.module_dropout,
         use_effective_conv2d=config.use_effective_conv2d,
-        target_modules=config.target_modules,
+        target_modules=target_modules,
         exclude_modules=config.exclude_modules,
         init_weights=config.init_weights,
         layers_to_transform=config.layers_to_transform,
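This hunk (and the matching one below for LoKr) applies the same pattern: when the pass config does not specify target_modules, the code now falls back to PEFT's "all-linear" shorthand instead of passing None through. A minimal sketch of the fallback, using a hypothetical settings object in place of the pass's ConfigBase (the field name is taken from the diff; everything else is assumed):

    from dataclasses import dataclass
    from typing import List, Optional, Union

    @dataclass
    class AdapterSettings:
        # Hypothetical stand-in for the pass config; None means "not set by the user".
        target_modules: Optional[Union[str, List[str]]] = None

    settings = AdapterSettings()
    # Fall back to PEFT's "all-linear" shorthand, which recent PEFT releases expand
    # to every linear layer (excluding the output head) instead of requiring an
    # explicit module list.
    target_modules = settings.target_modules or "all-linear"
    print(target_modules)  # -> all-linear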
@@ -611,6 +612,7 @@ def get_peft_model(model: "PreTrainedModel", config: ConfigBase, config_kwargs:
     """Get the PEFT model for LoKr fine-tuning."""
     from peft import LoKrConfig, LoKrModel

+    target_modules = config.target_modules or "all-linear"
     config = LoKrConfig(
         r=config.r,
         alpha=config.alpha,
@@ -620,7 +622,7 @@ def get_peft_model(model: "PreTrainedModel", config: ConfigBase, config_kwargs:
         decompose_factor=config.decompose_factor,
         rank_dropout_scale=config.rank_dropout_scale,
         use_effective_conv2d=config.use_effective_conv2d,
-        target_modules=config.target_modules,
+        target_modules=target_modules,
         exclude_modules=config.exclude_modules,
         init_weights=config.init_weights,
         layers_to_transform=config.layers_to_transform,
@@ -864,7 +866,7 @@ def get_quant_model(

     # enable lora fine-tuning with the loftq initialized adapter weights
     pytorch_model = self.enable_lora(
-        pytorch_model, new_model_handler.task, config, adapter_path=loftq_init_adapter_path
+        pytorch_model, config, new_model_handler.task, adapter_path=loftq_init_adapter_path
     )

     return new_model_handler, pytorch_model, bnb_quant_config, quantized_modules
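The last hunk only swaps two positional arguments so the enable_lora call passes the config before the task, matching the order the method expects. A hedged sketch of the idea; the signature below is assumed for illustration and is not the repository's actual definition (keyword arguments would make this kind of call site immune to ordering mistakes):

    # Assumed, simplified signature; the real enable_lora is a method on the pass class.
    def enable_lora(pytorch_model, config, task, adapter_path=None):
        print(f"enabling LoRA for task={task!r}, adapter_path={adapter_path!r}")
        return pytorch_model

    # Calling with keywords documents intent and avoids swapped-argument bugs:
    model = object()
    model = enable_lora(
        model,
        config={"r": 64, "alpha": 16},          # hypothetical config values
        task="text-generation",
        adapter_path="/tmp/loftq_init_adapter",  # hypothetical path
    )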
test/unit_test/passes/pytorch/test_lora.py (9 changes: 6 additions & 3 deletions)
@@ -77,7 +77,8 @@ def run_finetuning(pass_class, tmp_path, **pass_config_kwargs):
     not torch.cuda.is_available() or True,
     reason="lora finetuning requires GPU.",
 )
-def test_lora(tmp_path):
+@patch("transformers.Trainer.train")
+def test_lora(mock_train, tmp_path):
     # execute
     # bfloat16 is not supported on all gpu
     out = run_finetuning(LoRA, tmp_path, torch_dtype="float32")
@@ -91,7 +92,8 @@ def test_lora(tmp_path):
     platform.system() == OS.WINDOWS or not torch.cuda.is_available() or True,
     reason="bitsandbytes requires Linux GPU.",
 )
-def test_qlora(tmp_path):
+@patch("transformers.Trainer.train")
+def test_qlora(mock_train, tmp_path):
     # execute
     # bfloat16 is not supported on all gpu
     out = run_finetuning(QLoRA, tmp_path, torch_dtype="float32")
@@ -105,7 +107,8 @@ def test_qlora(tmp_path):
     platform.system() == OS.WINDOWS or not torch.cuda.is_available() or True,
     reason="bitsandbytes requires Linux GPU.",
 )
-def test_loftq(tmp_path):
+@patch("transformers.Trainer.train")
+def test_loftq(mock_train, tmp_path):
     # execute
     # bfloat16 is not supported on all gpu
     out = run_finetuning(LoftQ, tmp_path, torch_dtype="float32")
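All three test changes follow the same unittest.mock pattern: @patch("transformers.Trainer.train") replaces the training loop with a MagicMock that is injected as the test's first argument, so the finetuning passes can be exercised without actually training. A self-contained sketch of that pattern, using a local stand-in class rather than the real transformers.Trainer:

    from unittest.mock import patch

    class Trainer:
        """Stand-in for transformers.Trainer so this sketch runs without the library."""

        def train(self):
            raise RuntimeError("real training should never run inside a unit test")

    # Mirrors @patch("transformers.Trainer.train") in the diff: the mock replaces the
    # method for the duration of the call and is passed in as the first argument.
    @patch.object(Trainer, "train")
    def test_pattern(mock_train):
        Trainer().train()  # dispatches to the mock, not the real method
        mock_train.assert_called_once()

    test_pattern()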
