# vllm_with_openwebui_wizardlm2-8x22.yml
---
version: "2.0"

services:
  vllm:
    image: vllm/vllm-openai:v0.4.0.post1
    expose:
      - port: 8000
        as: 8000
        to:
          # expose the API to the Open WebUI service, which reaches it at http://vllm:8000
          - service: ui
    params:
      storage:
        # mount the ram-class "shm" volume (defined in the compute profile) as shared memory
        shm:
          mount: /dev/shm
    command:
      - bash
      - "-c"
    args:
      - >-
        huggingface-cli download alpindale/WizardLM-2-8x22B &&
        /usr/bin/python3 -m vllm.entrypoints.openai.api_server
        --model alpindale/WizardLM-2-8x22B
    env:
      # replace with a valid Hugging Face access token
      - HUGGING_FACE_HUB_TOKEN=hf_***************
      # API key enforced by vLLM; must match OPENAI_API_KEYS in the ui service
      - VLLM_API_KEY=MYPASSWORD
  ui:
    image: ghcr.io/open-webui/open-webui:main
    expose:
      - port: 8080
        as: 8080
        to:
          - global: true
    env:
      # vLLM's OpenAI-compatible endpoint, addressed by service name
      - OPENAI_API_BASE_URLS=http://vllm:8000/v1
      # must match VLLM_API_KEY above
      - OPENAI_API_KEYS=MYPASSWORD
profiles:
  compute:
    vllm:
      resources:
        cpu:
          units: 16
        memory:
          size: 100Gi
        storage:
          - size: 140Gi
          # ram-backed volume mounted as /dev/shm by the vllm service
          - name: shm
            size: 10Gi
            attributes:
              class: ram
        gpu:
          units: 1
          attributes:
            vendor:
              # any NVIDIA GPU; specific models could be listed under this key
              nvidia:
    ui:
      resources:
        cpu:
          units: 4
        memory:
          size: 20Gi
        storage:
          - size: 40Gi
  placement:
    dcloud:
      pricing:
        vllm:
          denom: uakt
          amount: 1000000
        ui:
          denom: uakt
          amount: 1000000

deployment:
  vllm:
    dcloud:
      profile: vllm
      count: 1
  ui:
    dcloud:
      profile: ui
      count: 1
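
# Optional sketch (not part of the deployment above, kept as a comment so the
# file stays valid SDL): if the vLLM OpenAI-compatible API should also be
# reachable from outside the provider, port 8000 could be exposed globally in
# the vllm service instead of only to the ui service, e.g.:
#
#   expose:
#     - port: 8000
#       as: 8000
#       to:
#         - global: true
#
# External requests would then need an "Authorization: Bearer MYPASSWORD"
# header matching VLLM_API_KEY.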