diff --git a/litgpt/api.py b/litgpt/api.py
index e0f4dcadd4..68dfc073e7 100644
--- a/litgpt/api.py
+++ b/litgpt/api.py
@@ -330,7 +330,6 @@ def distribute(
         if precision is None:
             precision = get_default_supported_precision(training=False)
             precision = "32-true"
-        precision = "32-true"
 
         plugins = None
         if quantize is not None and quantize.startswith("bnb."):
diff --git a/tests/test_api.py b/tests/test_api.py
index 0064ae5400..cf1443dd31 100644
--- a/tests/test_api.py
+++ b/tests/test_api.py
@@ -21,6 +21,12 @@
 from litgpt.scripts.download import download_from_hub
 
 
+skip_in_ci_on_macos = pytest.mark.skipif(
+    sys.platform == "darwin" and os.getenv("GITHUB_ACTIONS") == "true",
+    reason="Skipped on macOS in CI environment because the CI machine does not have enough memory to run this test."
+)
+
+
 if sys.platform == "darwin" and os.getenv("GITHUB_ACTIONS") == "true":
     USE_MPS = False
 elif torch.backends.mps.is_available():
@@ -399,3 +405,15 @@ def test_forward_method(tmp_path):
     logits, loss = llm(inputs, target_ids=inputs)
     assert logits.shape == torch.Size([6, 128, 50304])
     assert isinstance(loss.item(), float)
+
+
+@skip_in_ci_on_macos  # The macOS CI machine segfaults here (it works fine locally, though)
+def test_precision_selection(tmp_path):
+    llm = LLM.load(
+        model="EleutherAI/pythia-14m",
+        init="pretrained"
+    )
+
+    llm.distribute(precision="16-true")
+    assert llm.model._forward_module.lm_head.weight.dtype == torch.float16, \
+        f"Expected float16, but got {llm.model._forward_module.lm_head.weight.dtype}"
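
Note (not part of the diff): the removed assignment sat after the `if precision is None:` block, so it unconditionally overwrote whatever precision the caller passed to `distribute()`. Below is a minimal sketch of the check the new test performs, usable for verifying the fix locally; it assumes `LLM` is defined in `litgpt/api.py` (as the hunk above suggests) and that the `EleutherAI/pythia-14m` checkpoint can be downloaded:

    import torch
    from litgpt.api import LLM

    # Load a small model; init="pretrained" fetches the checkpoint if needed.
    llm = LLM.load(model="EleutherAI/pythia-14m", init="pretrained")

    # Before the fix, distribute() silently reset this to "32-true".
    llm.distribute(precision="16-true")

    # With the fix, the requested precision is respected.
    assert llm.model._forward_module.lm_head.weight.dtype == torch.float16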