diff --git a/vllm/worker/hpu_model_runner.py b/vllm/worker/hpu_model_runner.py
index 6b154c71dadd0..d1eb5ec7f1800 100755
--- a/vllm/worker/hpu_model_runner.py
+++ b/vllm/worker/hpu_model_runner.py
@@ -1224,9 +1224,6 @@ def _prepare_decode(
         assert len(block_list) == len(block_groups)
         assert len(block_list) == len(block_usage)
 
-        if self.bucketing_ctx.num_hpu_blocks is None:
-            self.bucketing_ctx.num_hpu_blocks = self.cache_config.num_gpu_blocks
-
         padding_fn = None
         if self.use_contiguous_pa:
             block_bucket_size = max(max(block_list) + 1, len(block_list))
diff --git a/vllm/worker/hpu_worker.py b/vllm/worker/hpu_worker.py
index 2b8f955265792..1004af0eca40a 100644
--- a/vllm/worker/hpu_worker.py
+++ b/vllm/worker/hpu_worker.py
@@ -166,7 +166,9 @@ def determine_num_available_blocks(self) -> Tuple[int, int]:
         if is_fake_hpu():
             cache_block_size = self.get_cache_block_size_bytes()
             fake_hpu_cache_alloc = 4 * 2**30  # take 4 GiB flat on fake hpu
-            return fake_hpu_cache_alloc // cache_block_size, 0
+            num_fake_hpu_blocks = fake_hpu_cache_alloc // cache_block_size
+            self.model_runner.bucketing_ctx.num_hpu_blocks = num_fake_hpu_blocks
+            return num_fake_hpu_blocks, 0
         with HabanaMemoryProfiler() as m:
             self.model_runner.profile_run()
             torch.hpu.synchronize()
@@ -203,6 +205,8 @@ def determine_num_available_blocks(self) -> Tuple[int, int]:
         num_hpu_blocks = max(num_hpu_blocks, 0)
         num_cpu_blocks = max(num_cpu_blocks, 0)
 
+        self.model_runner.bucketing_ctx.num_hpu_blocks = num_hpu_blocks
+
         if self.model_runner.lora_manager:
             self.model_runner.remove_all_loras()