diff --git a/README.md b/README.md
index b90edcfc..d82b5a6e 100644
--- a/README.md
+++ b/README.md
@@ -130,6 +130,10 @@ Specifically,
 and
 [here](https://github.com/vllm-project/vllm/blob/ee8217e5bee5860469204ee57077a91138c9af02/vllm/engine/arg_utils.py#L201).
 
+When using local model files, specify the path to the model in the `model` field.
+By default, a relative path is resolved against the working directory of the Triton server process.
+To resolve the path relative to the `model.json` file instead, set the `resolve_model_relative_to_config_file` field to `true`.
+
 For multi-GPU support, EngineArgs like tensor_parallel_size can be specified in
 [model.json](samples/model_repository/vllm_model/1/model.json).
diff --git a/ci/L0_backend_vllm/vllm_backend/test.sh b/ci/L0_backend_vllm/vllm_backend/test.sh
index 87e04b21..c7cc21a1 100755
--- a/ci/L0_backend_vllm/vllm_backend/test.sh
+++ b/ci/L0_backend_vllm/vllm_backend/test.sh
@@ -49,6 +49,8 @@ function assert_curl_success {
 }
 
 rm -rf models && mkdir -p models
+
+# operational vllm model
 cp -r ${SAMPLE_MODELS_REPO}/vllm_model models/vllm_opt
 # `vllm_opt` model will be loaded on server start and stay loaded throughout
 # unittesting. To test vllm model load/unload we use a dedicated
@@ -58,10 +60,22 @@ cp -r ${SAMPLE_MODELS_REPO}/vllm_model models/vllm_opt
 sed -i 's/"gpu_memory_utilization": 0.5/"gpu_memory_utilization": 0.4/' models/vllm_opt/1/model.json
 cp -r models/vllm_opt models/vllm_load_test
 
+# python model
 mkdir -p models/add_sub/1/
 wget -P models/add_sub/1/ https://raw.githubusercontent.com/triton-inference-server/python_backend/main/examples/add_sub/model.py
 wget -P models/add_sub https://raw.githubusercontent.com/triton-inference-server/python_backend/main/examples/add_sub/config.pbtxt
 
+# local vllm model
+cp -r ${SAMPLE_MODELS_REPO}/vllm_model models/vllm_local
+sed -i 's/"facebook\/opt-125m"/".\/local_model"/' models/vllm_local/1/model.json
+sed -i '/"model": /a "resolve_model_relative_to_config_file": true,' models/vllm_local/1/model.json
+wget -P models/vllm_local/1/local_model https://huggingface.co/facebook/opt-125m/resolve/main/config.json
+wget -P models/vllm_local/1/local_model https://huggingface.co/facebook/opt-125m/resolve/main/merges.txt
+wget -P models/vllm_local/1/local_model https://huggingface.co/facebook/opt-125m/resolve/main/pytorch_model.bin
+wget -P models/vllm_local/1/local_model https://huggingface.co/facebook/opt-125m/resolve/main/special_tokens_map.json
+wget -P models/vllm_local/1/local_model https://huggingface.co/facebook/opt-125m/resolve/main/tokenizer_config.json
+wget -P models/vllm_local/1/local_model https://huggingface.co/facebook/opt-125m/resolve/main/vocab.json
+
 # Invalid model attribute
 cp -r ${SAMPLE_MODELS_REPO}/vllm_model models/vllm_invalid_1/
 sed -i 's/"disable_log_requests"/"invalid_attribute"/' models/vllm_invalid_1/1/model.json
diff --git a/ci/L0_backend_vllm/vllm_backend/vllm_backend_test.py b/ci/L0_backend_vllm/vllm_backend/vllm_backend_test.py
index c53c391a..c1931ef0 100644
--- a/ci/L0_backend_vllm/vllm_backend/vllm_backend_test.py
+++ b/ci/L0_backend_vllm/vllm_backend/vllm_backend_test.py
@@ -50,6 +50,7 @@ def setUp(self):
         self.python_model_name = "add_sub"
         self.ensemble_model_name = "ensemble_model"
         self.vllm_load_test = "vllm_load_test"
+        self.local_vllm_model_name = "vllm_local"
 
     def test_vllm_triton_backend(self):
         # Load both vllm and add_sub models
@@ -93,6 +94,31 @@
         )
         self.triton_client.unload_model(self.vllm_load_test)
         self.assertFalse(self.triton_client.is_model_ready(self.vllm_load_test))
+
+    def test_local_vllm_model(self):
+        # Load local vllm model
+        self.triton_client.load_model(self.local_vllm_model_name)
+        self.assertTrue(self.triton_client.is_model_ready(self.local_vllm_model_name))
+
+        # Test local vllm model
+        self._test_vllm_model(
+            prompts=PROMPTS,
+            sampling_parameters=SAMPLING_PARAMETERS,
+            stream=False,
+            send_parameters_as_tensor=True,
+            model_name=self.local_vllm_model_name,
+        )
+        self._test_vllm_model(
+            prompts=PROMPTS,
+            sampling_parameters=SAMPLING_PARAMETERS,
+            stream=False,
+            send_parameters_as_tensor=False,
+            model_name=self.local_vllm_model_name,
+        )
+
+        # Unload local vllm model
+        self.triton_client.unload_model(self.local_vllm_model_name)
+        self.assertFalse(self.triton_client.is_model_ready(self.local_vllm_model_name))
 
     def test_model_with_invalid_attributes(self):
         model_name = "vllm_invalid_1"
diff --git a/src/model.py b/src/model.py
index c3d54479..48fe519f 100644
--- a/src/model.py
+++ b/src/model.py
@@ -219,6 +219,20 @@ def init_engine(self):
         # Check for LoRA config and set it up if enabled
         self.setup_lora()
 
+        # Resolve the model path relative to the config file
+        if self.vllm_engine_config.pop("resolve_model_relative_to_config_file", False):
+            new_path = os.path.abspath(
+                os.path.join(
+                    pb_utils.get_model_dir(), self.vllm_engine_config["model"]
+                )
+            )
+            # Check that the resolved path is a subdirectory of the model directory
+            if not new_path.startswith(pb_utils.get_model_dir()):
+                raise ValueError(
+                    f"Resolved model path '{new_path}' is not a subdirectory of the model directory '{pb_utils.get_model_dir()}'"
+                )
+            self.vllm_engine_config["model"] = new_path
+
         # Create an AsyncLLMEngine from the config from JSON
         aync_engine_args = AsyncEngineArgs(**self.vllm_engine_config)
         self.llm_engine = AsyncLLMEngine.from_engine_args(aync_engine_args)
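
For readers skimming the change, below is a minimal standalone sketch (not part of the diff) of the path-resolution rule that the README paragraph describes and that `init_engine` now applies. The `engine_config` dict stands in for the parsed `model.json`, `model_dir` stands in for `pb_utils.get_model_dir()`, and the absolute path in the usage example is illustrative only.

```python
import os


def resolve_model_path(engine_config: dict, model_dir: str) -> dict:
    """Sketch of the resolution step added to init_engine(): when
    `resolve_model_relative_to_config_file` is set, the `model` path is
    re-based onto the model directory and must stay inside it."""
    config = dict(engine_config)  # avoid mutating the caller's dict
    if config.pop("resolve_model_relative_to_config_file", False):
        new_path = os.path.abspath(os.path.join(model_dir, config["model"]))
        # Reject paths that escape the model directory (e.g. "../../somewhere"),
        # mirroring the prefix check used in the diff.
        if not new_path.startswith(os.path.abspath(model_dir)):
            raise ValueError(
                f"Resolved model path '{new_path}' is not a subdirectory "
                f"of the model directory '{model_dir}'"
            )
        config["model"] = new_path
    return config


# With the model.json produced by test.sh above ("model": "./local_model",
# "resolve_model_relative_to_config_file": true), the path resolves to
# <model version directory>/local_model. The directory below is illustrative.
cfg = {"model": "./local_model", "resolve_model_relative_to_config_file": True}
print(resolve_model_path(cfg, "/opt/tritonserver/models/vllm_local/1")["model"])
```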