
Commit
[Build/CI] Extending the set of AMD tests with Regression, Basic Correctness, Distributed, Engine, Llava Tests (vllm-project#4797)
Alexei-V-Ivanov-AMD authored May 17, 2024
1 parent 0150a10 commit 2614812
Showing 5 changed files with 28 additions and 20 deletions.
11 changes: 6 additions & 5 deletions .buildkite/run-amd-test.sh
@@ -1,4 +1,4 @@
# This script build the ROCm docker image and runs test inside it.
# This script runs test inside the corresponding ROCm docker container.
set -ex

# Print ROCm version
@@ -19,15 +19,16 @@ done

echo "--- Building container"
sha=$(git rev-parse --short HEAD)
container_name=rocm_${sha}
image_name=rocm_${sha}
container_name=rocm_${sha}_$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 10; echo)
docker build \
-t ${container_name} \
-t ${image_name} \
-f Dockerfile.rocm \
--progress plain \
.

remove_docker_container() {
docker rm -f ${container_name} || docker image rm -f ${container_name} || true
docker rm -f ${container_name} || docker image rm -f ${image_name} || true
}
trap remove_docker_container EXIT

@@ -39,6 +40,6 @@ docker run \
--rm \
-e HF_TOKEN \
--name ${container_name} \
${container_name} \
${image_name} \
/bin/bash -c "${@}"
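
For context on the change above: the image tag is now derived once from the commit SHA (image_name=rocm_${sha}), while each run gets a container name with a random suffix, so concurrent runs of the same commit on one agent cannot collide and the EXIT trap always has a unique container to remove. A minimal Python sketch of that naming/cleanup pattern (a hypothetical helper, not part of this repo; assumes the Docker CLI is on PATH):

import secrets
import subprocess

def run_tests_in_rocm_container(sha: str, test_cmd: str) -> None:
    """Sketch of the run/cleanup flow used by run-amd-test.sh."""
    image_name = f"rocm_{sha}"                             # one image per commit
    container_name = f"rocm_{sha}_{secrets.token_hex(5)}"  # unique per run
    try:
        subprocess.run(
            ["docker", "run", "--rm", "-e", "HF_TOKEN",
             "--name", container_name, image_name,
             "/bin/bash", "-c", test_cmd],
            check=True,
        )
    finally:
        # Mirrors the EXIT trap: best-effort removal of the container.
        subprocess.run(["docker", "rm", "-f", container_name], check=False)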

18 changes: 15 additions & 3 deletions .buildkite/test-pipeline.yaml
@@ -5,13 +5,16 @@

steps:
- label: Regression Test
mirror_hardwares: [amd]
command: pytest -v -s test_regression.py
working_dir: "/vllm-workspace/tests" # optional

- label: AsyncEngine Test
#mirror_hardwares: [amd]
command: pytest -v -s async_engine

- label: Basic Correctness Test
mirror_hardwares: [amd]
commands:
- VLLM_ATTENTION_BACKEND=XFORMERS pytest -v -s basic_correctness/test_basic_correctness.py
- VLLM_ATTENTION_BACKEND=FLASH_ATTN pytest -v -s basic_correctness/test_basic_correctness.py
@@ -24,14 +27,15 @@ steps:
command: pytest -v -s core

- label: Distributed Comm Ops Test
#mirror_hardwares: [amd]
command: pytest -v -s distributed/test_comm_ops.py
working_dir: "/vllm-workspace/tests"
num_gpus: 2

- label: Distributed Tests
mirror_hardwares: [amd]
working_dir: "/vllm-workspace/tests"
num_gpus: 2
mirror_hardwares: [amd]
commands:
- pytest -v -s distributed/test_pynccl_library.py
- TEST_DIST_MODEL=facebook/opt-125m DISTRIBUTED_EXECUTOR_BACKEND=ray pytest -v -s distributed/test_basic_distributed_correctness.py
@@ -45,16 +49,18 @@ steps:
- pytest -v -s spec_decode/e2e/test_integration_dist.py

- label: Distributed Tests (Multiple Groups)
#mirror_hardwares: [amd]
working_dir: "/vllm-workspace/tests"
num_gpus: 4
commands:
- pytest -v -s distributed/test_pynccl.py

- label: Engine Test
#mirror_hardwares: [amd]
mirror_hardwares: [amd]
command: pytest -v -s engine tokenization test_sequence.py test_config.py test_logger.py

- label: Entrypoints Test
#mirror_hardwares: [amd]
commands:
# these tests have to be separated, because each one will allocate all possible GPU memory
- pytest -v -s entrypoints --ignore=entrypoints/test_server_oot_registration.py
@@ -74,6 +80,7 @@ steps:
- python3 tensorize_vllm_model.py --model facebook/opt-125m serialize --serialized-directory /tmp/ --suffix v1 && python3 tensorize_vllm_model.py --model facebook/opt-125m deserialize --path-to-tensors /tmp/vllm/facebook/opt-125m/v1/model.tensors

- label: Kernels Test %N
#mirror_hardwares: [amd]
command: pytest -v -s kernels --shard-id=$$BUILDKITE_PARALLEL_JOB --num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT
parallelism: 4

@@ -84,7 +91,7 @@ steps:
- pytest -v -s models --ignore=models/test_llava.py

- label: Llava Test
#mirror_hardwares: [amd]
mirror_hardwares: [amd]
commands:
- bash ../.buildkite/download-images.sh
- pytest -v -s models/test_llava.py
@@ -95,6 +102,7 @@ steps:
- pytest -v -s prefix_caching

- label: Samplers Test
#mirror_hardwares: [amd]
command: pytest -v -s samplers

- label: LogitsProcessor Test
@@ -110,16 +118,20 @@ steps:
command: pytest -v -s spec_decode

- label: LoRA Test %N
#mirror_hardwares: [amd]
command: pytest -v -s lora --shard-id=$$BUILDKITE_PARALLEL_JOB --num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT
parallelism: 4

- label: Tensorizer Test
#mirror_hardwares: [amd]
command: apt-get install curl libsodium23 && pytest -v -s tensorizer_loader

- label: Metrics Test
mirror_hardwares: [amd]
command: pytest -v -s metrics

- label: Quantization Test
#mirror_hardwares: [amd]
command: pytest -v -s quantization

- label: Benchmarks
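
The pipeline edits above toggle mirror_hardwares: [amd] per step: an active entry asks the Buildkite template to also schedule that step on AMD (ROCm) hardware, while a commented-out #mirror_hardwares: [amd] marks a step that is not yet enabled there. A small hypothetical checker for which steps are mirrored (assumes PyYAML; only the key name comes from the file above, the rest is illustrative):

import yaml  # PyYAML

def amd_mirrored_steps(path: str = ".buildkite/test-pipeline.yaml") -> list[str]:
    """Return the labels of pipeline steps that will also run on AMD hardware."""
    with open(path) as f:
        pipeline = yaml.safe_load(f)
    return [
        step["label"]
        for step in pipeline.get("steps", [])
        if "amd" in (step.get("mirror_hardwares") or [])
    ]

if __name__ == "__main__":
    for label in amd_mirrored_steps():
        print(label)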
3 changes: 1 addition & 2 deletions .buildkite/test-template.j2
@@ -3,9 +3,8 @@
{% set default_working_dir = "/vllm-workspace/tests" %}

steps:

- label: ":docker: build image"
commands:
commands:
- "docker build --build-arg max_jobs=16 --tag {{ docker_image }} --target test --progress plain ."
- "docker push {{ docker_image }}"
env:
6 changes: 5 additions & 1 deletion tests/engine/test_stop_reason.py
@@ -32,6 +32,7 @@ def test_stop_reason(vllm_model, example_prompts):
# test stop token
outputs = llm.generate(example_prompts,
sampling_params=SamplingParams(
ignore_eos=True,
seed=SEED,
max_tokens=MAX_TOKENS,
stop_token_ids=[stop_token_id]))
@@ -43,7 +44,10 @@ def test_stop_reason(vllm_model, example_prompts):
# test stop string
outputs = llm.generate(example_prompts,
sampling_params=SamplingParams(
seed=SEED, max_tokens=MAX_TOKENS, stop="."))
ignore_eos=True,
seed=SEED,
max_tokens=MAX_TOKENS,
stop="."))
for output in outputs:
output = output.outputs[0]
assert output.finish_reason == "stop"
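
The test now passes ignore_eos=True to both generate calls, so a sequence can no longer finish early on the model's EOS token and the assertions on finish_reason reliably exercise the stop_token_ids and stop paths. A minimal sketch of the sampling setup, using only parameter names that appear in the diff (the SEED, MAX_TOKENS, and stop token id values here are illustrative):

from vllm import SamplingParams

SEED = 42              # illustrative; the test defines its own SEED
MAX_TOKENS = 1024      # illustrative; the test defines its own MAX_TOKENS
STOP_TOKEN_ID = 50256  # illustrative stop token id

# Stop on an explicit token id, never on EOS.
params_token_stop = SamplingParams(ignore_eos=True,
                                   seed=SEED,
                                   max_tokens=MAX_TOKENS,
                                   stop_token_ids=[STOP_TOKEN_ID])

# Stop on a string, never on EOS.
params_string_stop = SamplingParams(ignore_eos=True,
                                    seed=SEED,
                                    max_tokens=MAX_TOKENS,
                                    stop=".")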
10 changes: 1 addition & 9 deletions vllm/config.py
@@ -1060,7 +1060,7 @@ def get_image_input_enum_type(
"bfloat16": torch.bfloat16,
}

_ROCM_NOT_SUPPORTED_DTYPE = ["float", "float32"]
_ROCM_NOT_SUPPORTED_DTYPE: List[str] = [] #


def _get_and_verify_dtype(
@@ -1092,14 +1092,6 @@ def _get_and_verify_dtype(
else:
raise ValueError(f"Unknown dtype: {dtype}")

if is_hip() and torch_dtype == torch.float32:
rocm_supported_dtypes = [
k for k, v in _STR_DTYPE_TO_TORCH_DTYPE.items()
if (k not in _ROCM_NOT_SUPPORTED_DTYPE)
]
raise ValueError(f"dtype '{dtype}' is not supported in ROCm. "
f"Supported dtypes are {rocm_supported_dtypes}")

# Verify the dtype.
if torch_dtype != config_dtype:
if torch_dtype == torch.float32:
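
The vllm/config.py change empties _ROCM_NOT_SUPPORTED_DTYPE and deletes the guard that rejected float32 on ROCm, so _get_and_verify_dtype no longer raises for float32 on HIP builds. A standalone, simplified sketch of the guard that was removed (not the vLLM function itself; the dtype table is abridged):

from typing import List
import torch

_STR_DTYPE_TO_TORCH_DTYPE = {
    "half": torch.float16,
    "float16": torch.float16,
    "float": torch.float32,
    "float32": torch.float32,
    "bfloat16": torch.bfloat16,
}
# Before this commit: ["float", "float32"]; now empty, so nothing is rejected.
_ROCM_NOT_SUPPORTED_DTYPE: List[str] = []

def verify_rocm_dtype(dtype: str, is_hip: bool) -> torch.dtype:
    """Raise if the requested dtype is on the ROCm block list."""
    torch_dtype = _STR_DTYPE_TO_TORCH_DTYPE[dtype]
    if is_hip and dtype in _ROCM_NOT_SUPPORTED_DTYPE:
        supported = [k for k in _STR_DTYPE_TO_TORCH_DTYPE
                     if k not in _ROCM_NOT_SUPPORTED_DTYPE]
        raise ValueError(f"dtype '{dtype}' is not supported in ROCm. "
                         f"Supported dtypes are {supported}")
    return torch_dtype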
