From 677f389b89752270fab4827efb3cf3eb3f9b56ca Mon Sep 17 00:00:00 2001
From: Dipika Sikka
Date: Tue, 21 May 2024 15:45:18 +0000
Subject: [PATCH] update concat txt

---
 .../finetune/finetune_oneshot_configs/config.yaml         | 3 ++-
 .../finetune/finetune_oneshot_configs/gpu/gpu_config.yaml | 3 ++-
 .../transformers/finetune/test_oneshot_and_finetune.py    | 8 ++++++--
 3 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/tests/sparseml/transformers/finetune/finetune_oneshot_configs/config.yaml b/tests/sparseml/transformers/finetune/finetune_oneshot_configs/config.yaml
index 6557069860e..4dd7fee114f 100644
--- a/tests/sparseml/transformers/finetune/finetune_oneshot_configs/config.yaml
+++ b/tests/sparseml/transformers/finetune/finetune_oneshot_configs/config.yaml
@@ -4,4 +4,5 @@ model: "Xenova/llama2.c-stories15M"
 dataset: wikitext
 dataset_config_name: "wikitext-2-raw-v1"
 recipe: "tests/sparseml/transformers/finetune/test_alternate_recipe.yaml"
-num_train_epochs: 2
\ No newline at end of file
+num_train_epochs: 2
+concat_txt: False
\ No newline at end of file
diff --git a/tests/sparseml/transformers/finetune/finetune_oneshot_configs/gpu/gpu_config.yaml b/tests/sparseml/transformers/finetune/finetune_oneshot_configs/gpu/gpu_config.yaml
index 7114f2a47a5..f3276976fef 100644
--- a/tests/sparseml/transformers/finetune/finetune_oneshot_configs/gpu/gpu_config.yaml
+++ b/tests/sparseml/transformers/finetune/finetune_oneshot_configs/gpu/gpu_config.yaml
@@ -3,4 +3,5 @@ test_type: "regression"
 model: "zoo:llama2-7b-ultrachat200k_llama2_pretrain-base"
 dataset: "ultrachat-200k"
 recipe: "tests/sparseml/transformers/finetune/test_alternate_recipe.yaml"
-num_train_epochs: 1
\ No newline at end of file
+num_train_epochs: 1
+concat_txt: False
\ No newline at end of file
diff --git a/tests/sparseml/transformers/finetune/test_oneshot_and_finetune.py b/tests/sparseml/transformers/finetune/test_oneshot_and_finetune.py
index 72917682981..7d6f194e1fb 100644
--- a/tests/sparseml/transformers/finetune/test_oneshot_and_finetune.py
+++ b/tests/sparseml/transformers/finetune/test_oneshot_and_finetune.py
@@ -33,7 +33,7 @@ def _test_oneshot_and_finetune(self):
         splits = {"train": "train[:50%]", "calibration": "train[50%:60%]"}
 
         if self.dataset == "ultrachat-200k":
-            splits = {"train": "train_sft[:50%]", "calibration": "train_sft[50%:60%]"}
+            splits = {"train": "train_gen[:50%]", "calibration": "train_gen[50%:60%]"}
 
         apply(
             model=self.model,
@@ -42,7 +42,7 @@ def _test_oneshot_and_finetune(self):
             output_dir=self.output,
             recipe=self.recipe,
             num_train_epochs=self.num_train_epochs,
-            concatenate_data=True,
+            concatenate_data=self.concat_txt,
             splits=splits,
             oneshot_device=self.device,
             precision="bfloat16",
@@ -51,6 +51,8 @@ def _test_oneshot_and_finetune(self):
         )
 
     def tearDown(self):
+        # TODO: we get really nice stats from finetune that we should log
+        # stored in results.json
         shutil.rmtree(self.output)
 
 
@@ -63,6 +65,7 @@ class TestOneshotAndFinetuneSmall(TestOneshotAndFinetune):
     recipe = None
     dataset_config_name = None
     num_train_epochs = None
+    concat_txt = None
 
     def setUp(self):
         import torch
@@ -84,6 +87,7 @@ class TestOneshotAndFinetuneGPU(TestOneshotAndFinetune):
     recipe = None
     dataset_config_name = None
     num_train_epochs = None
+    concat_txt = None
 
     def setUp(self):
         import torch
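
Reviewer note (not part of the patch): the sketch below illustrates the wiring this change introduces, where the new concat_txt key is read from a test config and forwarded to apply() as concatenate_data, replacing the previously hard-coded True. It is a minimal sketch under stated assumptions: the sparseml.transformers apply entrypoint, the dataset_config_name kwarg, and a standalone run outside the unittest harness are assumptions based on this patch; output_dir and oneshot_device are placeholders (the test uses a temp dir and self.device).

import yaml

from sparseml.transformers import apply  # assumed entrypoint for oneshot + finetune

# Load one of the test configs this patch extends with concat_txt.
config_path = (
    "tests/sparseml/transformers/finetune/finetune_oneshot_configs/config.yaml"
)
with open(config_path) as f:
    cfg = yaml.safe_load(f)

# Mirror the split selection in _test_oneshot_and_finetune: ultrachat-200k
# has no plain "train" split, so the patched test uses train_gen instead.
splits = {"train": "train[:50%]", "calibration": "train[50%:60%]"}
if cfg["dataset"] == "ultrachat-200k":
    splits = {"train": "train_gen[:50%]", "calibration": "train_gen[50%:60%]"}

apply(
    model=cfg["model"],
    dataset=cfg["dataset"],
    dataset_config_name=cfg.get("dataset_config_name"),  # assumed kwarg
    output_dir="./oneshot_finetune_output",  # placeholder; test uses a temp dir
    recipe=cfg["recipe"],
    num_train_epochs=cfg["num_train_epochs"],
    concatenate_data=cfg["concat_txt"],  # was hard-coded True before this patch
    splits=splits,
    oneshot_device="cpu",  # placeholder; test passes self.device
    precision="bfloat16",
)

Driving concatenate_data from the config keeps the shared test body generic: toggling sample concatenation for a given regression config becomes a one-line YAML change rather than an edit to the test code itself.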