Set it in hpu_worker
mfylcek committed Nov 22, 2024
1 parent b0c60b6 · commit 34f3c1f
Showing 2 changed files with 5 additions and 4 deletions.
3 changes: 0 additions & 3 deletions vllm/worker/hpu_model_runner.py
@@ -1224,9 +1224,6 @@ def _prepare_decode(
         assert len(block_list) == len(block_groups)
         assert len(block_list) == len(block_usage)
 
-        if self.bucketing_ctx.num_hpu_blocks is None:
-            self.bucketing_ctx.num_hpu_blocks = self.cache_config.num_gpu_blocks
-
         padding_fn = None
         if self.use_contiguous_pa:
             block_bucket_size = max(max(block_list) + 1, len(block_list))
6 changes: 5 additions & 1 deletion vllm/worker/hpu_worker.py
@@ -166,7 +166,9 @@ def determine_num_available_blocks(self) -> Tuple[int, int]:
         if is_fake_hpu():
             cache_block_size = self.get_cache_block_size_bytes()
             fake_hpu_cache_alloc = 4 * 2**30  # take 4 GiB flat on fake hpu
-            return fake_hpu_cache_alloc // cache_block_size, 0
+            num_fake_hpu_blocks = fake_hpu_cache_alloc // cache_block_size
+            self.model_runner.bucketing_ctx.num_hpu_blocks = num_fake_hpu_blocks
+            return num_fake_hpu_blocks, 0
         with HabanaMemoryProfiler() as m:
             self.model_runner.profile_run()
             torch.hpu.synchronize()
@@ -203,6 +205,8 @@ def determine_num_available_blocks(self) -> Tuple[int, int]:
         num_hpu_blocks = max(num_hpu_blocks, 0)
         num_cpu_blocks = max(num_cpu_blocks, 0)
 
+        self.model_runner.bucketing_ctx.num_hpu_blocks = num_hpu_blocks
+
         if self.model_runner.lora_manager:
             self.model_runner.remove_all_loras()
 
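Taken together, the hunks move the initialization of bucketing_ctx.num_hpu_blocks out of the per-decode-step path in hpu_model_runner.py and into the worker's one-time block accounting in determine_num_available_blocks. Below is a minimal runnable sketch of that pattern, not the actual vLLM classes: BucketingContext, ModelRunner, Worker, prepare_decode, and the byte sizes are simplified stand-ins; only the attribute bucketing_ctx.num_hpu_blocks and the method name determine_num_available_blocks come from the diff.

    from dataclasses import dataclass, field
    from typing import Optional


    @dataclass
    class BucketingContext:
        # Stand-in for the HPU bucketing context this commit touches.
        num_hpu_blocks: Optional[int] = None


    @dataclass
    class ModelRunner:
        # Stand-in for HPUModelRunner: after this commit, the decode path
        # assumes the worker has already populated num_hpu_blocks instead
        # of lazily falling back to cache_config.num_gpu_blocks.
        bucketing_ctx: BucketingContext = field(default_factory=BucketingContext)

        def prepare_decode(self) -> int:
            assert self.bucketing_ctx.num_hpu_blocks is not None, \
                "worker must set num_hpu_blocks before decode"
            return self.bucketing_ctx.num_hpu_blocks


    class Worker:
        # Stand-in for HPUWorker.
        def __init__(self, cache_bytes: int, block_bytes: int) -> None:
            self.model_runner = ModelRunner()
            self._cache_bytes = cache_bytes
            self._block_bytes = block_bytes

        def determine_num_available_blocks(self) -> int:
            num_hpu_blocks = max(self._cache_bytes // self._block_bytes, 0)
            # The commit's change: publish the block count to the bucketing
            # context once, here, for both the fake-HPU and real-HPU paths.
            self.model_runner.bucketing_ctx.num_hpu_blocks = num_hpu_blocks
            return num_hpu_blocks


    if __name__ == "__main__":
        worker = Worker(cache_bytes=4 * 2**30, block_bytes=2**20)
        worker.determine_num_available_blocks()      # sets num_hpu_blocks = 4096
        print(worker.model_runner.prepare_decode())  # -> 4096

Setting the value in determine_num_available_blocks also covers the fake-HPU path, which previously returned its block count before anything could populate the bucketing context.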
