Fixed the issue that precision is always "32-true". (#1802)
Co-authored-by: rasbt <[email protected]>
jianpingw and rasbt authored Oct 24, 2024
1 parent d741d26 commit 47ce3bf
Showing 2 changed files with 18 additions and 1 deletion.
1 change: 0 additions & 1 deletion litgpt/api.py
@@ -330,7 +330,6 @@ def distribute(
if precision is None:
    precision = get_default_supported_precision(training=False)
precision = "32-true"
precision = "32-true"

plugins = None
if quantize is not None and quantize.startswith("bnb."):
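For context, the deleted assignment ran after the block above it and unconditionally overwrote whatever value had just been resolved, so distribute() always ended up in "32-true" no matter what the caller asked for. Below is a minimal sketch of the selection order after the fix; resolve_precision is a hypothetical helper used only for illustration (the real logic sits inline in LLM.distribute), and the import path for get_default_supported_precision is assumed to be litgpt.utils.

from typing import Optional

from litgpt.utils import get_default_supported_precision  # assumed import path


def resolve_precision(precision: Optional[str] = None) -> str:
    # Honor an explicit user choice; otherwise fall back to the best precision
    # the current hardware supports (e.g. "bf16-true" on recent GPUs).
    if precision is None:
        precision = get_default_supported_precision(training=False)
    # Before this commit, a hard-coded `precision = "32-true"` at this point
    # clobbered both paths, which is the bug being fixed.
    return precision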
18 changes: 18 additions & 0 deletions tests/test_api.py
@@ -21,6 +21,12 @@
from litgpt.scripts.download import download_from_hub


skip_in_ci_on_macos = pytest.mark.skipif(
    sys.platform == "darwin" and os.getenv("GITHUB_ACTIONS") == "true",
    reason="Skipped on macOS in CI environment because CI machine does not have enough memory to run this test."
)


if sys.platform == "darwin" and os.getenv("GITHUB_ACTIONS") == "true":
    USE_MPS = False
elif torch.backends.mps.is_available():
@@ -399,3 +405,15 @@ def test_forward_method(tmp_path):
    logits, loss = llm(inputs, target_ids=inputs)
    assert logits.shape == torch.Size([6, 128, 50304])
    assert isinstance(loss.item(), float)


@skip_in_ci_on_macos # The macOS CI machine segfaults here (it works fine locally though)
def test_precision_selection(tmp_path):
    llm = LLM.load(
        model="EleutherAI/pythia-14m",
        init="pretrained"
    )

    llm.distribute(precision="16-true")
    assert llm.model._forward_module.lm_head.weight.dtype == torch.float16, \
        f"Expected float16, but got {llm.model._forward_module.lm_head.weight.dtype}"

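With the override gone, a user-selected precision should now survive all the way to the model weights. A rough usage sketch mirroring the new test follows; it assumes LLM is importable from litgpt.api (the module this commit touches) and reuses the same model name and attribute access as the assertion above.

import torch

from litgpt.api import LLM  # assumed import, matching the class under test

llm = LLM.load(model="EleutherAI/pythia-14m", init="pretrained")
llm.distribute(precision="16-true")

# Before this fix, the dtype here came out as torch.float32 regardless of the
# requested precision; after it, the "16-true" setting should take effect.
print(llm.model._forward_module.lm_head.weight.dtype)  # expected: torch.float16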