From 15addc9ad4a0190985217a48191ad67792bf96ca Mon Sep 17 00:00:00 2001
From: Shikai Chen
Date: Fri, 7 Feb 2025 09:47:24 +0000
Subject: [PATCH] fix file name

---
 rewardbench/models/__init__.py                 | 6 +++---
 rewardbench/models/{lenovo.py => ldlreward.py} | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)
 rename rewardbench/models/{lenovo.py => ldlreward.py} (99%)

diff --git a/rewardbench/models/__init__.py b/rewardbench/models/__init__.py
index ffe186c..720398e 100644
--- a/rewardbench/models/__init__.py
+++ b/rewardbench/models/__init__.py
@@ -29,7 +29,7 @@
 from .grm import GRewardModel, GRMPipeline
 from .inform import INFORMForSequenceClassification
 from .internlm import InternLMPipeline
-from .lenovo import LDLRewardModel27B, LenovoPipeline
+from .ldlreward import LDLPipeline, LDLRewardModel27B
 from .openassistant import *  # noqa
 from .openbmb import LlamaRewardModel, OpenBMBPipeline
 from .pairrm import DebertaV2PairRM, PairRMPipeline
@@ -53,9 +53,9 @@
         "custom_dialogue": False,
         "model_type": "Seq. Classifier",
     },
-    "lenovo/LDL-Reward-Gemma-2-27B-v0.1": {
+    "ShikaiChen/LDL-Reward-Gemma-2-27B-v0.1": {
         "model_builder": LDLRewardModel27B.from_pretrained,
-        "pipeline_builder": LenovoPipeline,
+        "pipeline_builder": LDLPipeline,
         "quantized": False,
         "custom_dialogue": False,
         "model_type": "Seq. Classifier",
diff --git a/rewardbench/models/lenovo.py b/rewardbench/models/ldlreward.py
similarity index 99%
rename from rewardbench/models/lenovo.py
rename to rewardbench/models/ldlreward.py
index be14faf..b17c33d 100644
--- a/rewardbench/models/lenovo.py
+++ b/rewardbench/models/ldlreward.py
@@ -192,7 +192,7 @@ def from_pretrained(cls, load_directory, device_map=None, *model_args, **kwargs)
         return model
 
 
-class LenovoPipeline:
+class LDLPipeline:
     def __init__(self, task, model, tokenizer):
         self.task = task
         self.model = model.eval()