diff --git a/lm_eval/models/ibm_watsonx_ai.py b/lm_eval/models/ibm_watsonx_ai.py
index 7017184ffc..6f6a09c170 100644
--- a/lm_eval/models/ibm_watsonx_ai.py
+++ b/lm_eval/models/ibm_watsonx_ai.py
@@ -158,7 +158,7 @@ def __init__(
         project_id = watsonx_credentials.get("project_id", None)
         deployment_id = watsonx_credentials.get("deployment_id", None)
         client.set.default_project(project_id)
-        self.generate_params = generate_params
+        self.generate_params = generate_params or {}
         self.model = ModelInference(
             model_id=model_id,
             deployment_id=deployment_id,
@@ -167,12 +167,6 @@
         )
         self._model_id = model_id
 
-    def dump_parameters(self):
-        """
-        Dumps the model's parameters into a serializable format.
-        """
-        return self._parameters.model_dump()
-
     @staticmethod
     def _has_stop_token(response_tokens: List[str], context_tokens: List[str]) -> bool:
         """
diff --git a/pyproject.toml b/pyproject.toml
index 37db0b2450..c51852a7d3 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -62,6 +62,7 @@ dev = ["pytest", "pytest-cov", "pytest-xdist", "pre-commit", "mypy"]
 deepsparse = ["deepsparse-nightly[llm]>=1.8.0.20240404"]
 gptq = ["auto-gptq[triton]>=0.6.0"]
 hf_transfer = ["hf_transfer"]
+ibm_watsonx_ai = ["ibm_watsonx_ai"]
 ifeval = ["langdetect", "immutabledict", "nltk>=3.9.1"]
 neuronx = ["optimum[neuronx]"]
 mamba = ["mamba_ssm", "causal-conv1d==1.0.2"]
@@ -81,6 +82,7 @@ all = [
     "lm_eval[deepsparse]",
     "lm_eval[gptq]",
     "lm_eval[hf_transfer]",
+    "lm_eval[ibm_watsonx_ai]",
     "lm_eval[ifeval]",
     "lm_eval[mamba]",
     "lm_eval[math]",
@@ -93,7 +95,6 @@ all = [
     "lm_eval[vllm]",
     "lm_eval[zeno]",
     "lm_eval[wandb]",
-    "lm_eval[ibm_watsonx_ai]"
 ]
 
 [tool.ruff.lint]