[Bugfix] Fix gpt2 GGUF inference (vllm-project#12467)
Signed-off-by: Isotr0py <[email protected]>
Isotr0py authored and rasmith committed Jan 30, 2025
1 parent cc38d84 commit 99c977e
Showing 1 changed file with 8 additions and 11 deletions.
vllm/model_executor/models/gpt2.py (19 changes: 8 additions & 11 deletions)

@@ -258,13 +258,13 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
         self.transformer = GPT2Model(vllm_config=vllm_config,
                                      prefix=maybe_prefix(
                                          prefix, "transformer"))
+        self.lm_head = ParallelLMHead(self.config.vocab_size,
+                                      self.config.hidden_size,
+                                      quant_config=quant_config,
+                                      prefix=f"{prefix}.lm_head")
         if self.config.tie_word_embeddings:
-            self.lm_head = self.transformer.wte
-        else:
-            self.lm_head = ParallelLMHead(self.config.vocab_size,
-                                          self.config.hidden_size,
-                                          quant_config=quant_config,
-                                          prefix=f"{prefix}.lm_head")
+            self.lm_head = self.lm_head.tie_weights(self.transformer.wte)
+
         self.logits_processor = LogitsProcessor(config.vocab_size)
         self.sampler = get_sampler()
         self.make_empty_intermediate_tensors = (
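For context, the new path constructs the LM head unconditionally and only shares weights afterwards, instead of aliasing the embedding module. Below is a minimal plain-PyTorch sketch of that "construct, then tie" pattern; the names and shapes are illustrative assumptions, not vLLM's ParallelLMHead or tie_weights implementation.

# Illustrative sketch only (assumed names, not vLLM internals): build the
# output head unconditionally, then share the embedding's weight tensor
# when tie_word_embeddings is set.
import torch
import torch.nn as nn

vocab_size, hidden_size = 50257, 768
tie_word_embeddings = True

wte = nn.Embedding(vocab_size, hidden_size)               # token embedding
lm_head = nn.Linear(hidden_size, vocab_size, bias=False)  # always constructed

if tie_word_embeddings:
    # Share the parameter rather than replacing the module, so lm_head keeps
    # its own type and forward logic.
    lm_head.weight = wte.weight

hidden_states = torch.randn(1, hidden_size)
logits = lm_head(hidden_states)
assert logits.shape == (1, vocab_size)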
@@ -309,15 +309,12 @@ def load_weights(self, weights: Iterable[Tuple[str,
         params_dict = dict(self.named_parameters(remove_duplicate=False))
         loaded_params: Set[str] = set()
         for name, loaded_weight in weights:
-            if name.startswith("lm_head"):
-                # GPT-2 ties the weights of the embedding layer and the final
-                # linear layer.
-                continue
             if ".attn.bias" in name or ".attn.masked_bias" in name:
                 # Skip attention mask.
                 # NOTE: "c_attn.bias" should not be skipped.
                 continue
-            if not name.startswith("transformer."):
+            if not name.startswith("transformer.") and not name.startswith(
+                    "lm_head"):
                 name = "transformer." + name
 
             if is_pp_missing_parameter(name, self):
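With this fix in place, GGUF GPT-2 checkpoints should load through the usual vLLM entry point. A minimal sketch, assuming a locally converted GGUF file and the stock Hugging Face GPT-2 tokenizer (both placeholders, not artifacts of this commit):

# Minimal sketch of GGUF inference with vLLM; the .gguf path and tokenizer
# name are assumptions for illustration, not files referenced by this commit.
from vllm import LLM, SamplingParams

llm = LLM(model="./gpt2.Q4_K_M.gguf",          # local GGUF checkpoint (placeholder)
          tokenizer="openai-community/gpt2")   # HF tokenizer for GPT-2
outputs = llm.generate(["The capital of France is"],
                       SamplingParams(temperature=0.0, max_tokens=16))
print(outputs[0].outputs[0].text)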
