diff --git a/.github/workflows/nv-transformers-v100.yml b/.github/workflows/nv-transformers-v100.yml
index 7753133f2886..f2323b5d638f 100644
--- a/.github/workflows/nv-transformers-v100.yml
+++ b/.github/workflows/nv-transformers-v100.yml
@@ -7,6 +7,7 @@ on:
       - 'blogs/**'
       - 'deepspeed/inference/v2/**'
       - "tests/unit/inference/v2/**"
+  workflow_dispatch:
   merge_group:
     branches: [ master ]
   schedule:
@@ -29,7 +30,7 @@ jobs:
       - name: Install pytorch
         run: |
           # use the same pytorch version as transformers CI
-          pip install -U --cache-dir $TORCH_CACHE torch==2.0.1+cu118 --index-url https://download.pytorch.org/whl/cu118
+          pip install -U --cache-dir $TORCH_CACHE torch==2.1.0 --index-url https://download.pytorch.org/whl/cu118
           python -c "import torch; print('torch:', torch.__version__, torch)"
           python -c "import torch; print('CUDA available:', torch.cuda.is_available())"

@@ -38,7 +39,7 @@ jobs:
           git clone https://github.com/huggingface/transformers
           cd transformers
           # if needed switch to the last known good SHA until transformers@master is fixed
-          git checkout e7e9261a2
+          #git checkout f370bebdc
           git rev-parse --short HEAD
           pip install .

diff --git a/deepspeed/launcher/runner.py b/deepspeed/launcher/runner.py
index a7fa2b5053e5..fa78497f850f 100755
--- a/deepspeed/launcher/runner.py
+++ b/deepspeed/launcher/runner.py
@@ -390,7 +390,7 @@ def main(args=None):
     args = parse_args(args)

     # For when argparse interprets remaining args as a single string
-    args.user_args = shlex.split(" ".join(list(map(lambda x: x if x.startswith("-") else f'"{x}"', args.user_args))))
+    args.user_args = shlex.split(" ".join(list(map(lambda x: x if x.startswith("-") else f"'{x}'", args.user_args))))

     if args.elastic_training:
         assert args.master_addr != "", "Master Addr is required when elastic training is enabled"
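
Note (illustrative, not part of the patch): the runner.py hunk swaps the quote
character used to wrap non-flag user args before they are re-split with
shlex.split. A minimal sketch of the difference, assuming a hypothetical user
argument that itself contains double quotes (e.g. an inline JSON value):

    import shlex

    arg = '{"train_batch_size": 8}'  # hypothetical arg containing double quotes

    # Old wrapping: the outer double quotes let shlex consume the inner
    # double quotes, mangling the argument.
    print(shlex.split(f'"{arg}"'))   # ['{train_batch_size: 8}']

    # New wrapping: outer single quotes preserve the inner double quotes
    # verbatim.
    print(shlex.split(f"'{arg}'"))   # ['{"train_batch_size": 8}']

The trade-off runs the other way: an argument containing a literal single
quote (an apostrophe) would be mangled by the new wrapping in the same manner.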