This repository has been archived by the owner on Jul 18, 2024. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 1
/
zarf.yaml
104 lines (101 loc) · 4.26 KB
/
zarf.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
---
# Zarf package definition for the LeapfrogAI UI.
# The ###ZARF_PKG_TMPL_*### tokens are substituted by Zarf at package-create
# time (from zarf-config or --set flags); they are not runtime values.
kind: ZarfPackageConfig
metadata:
  name: "###ZARF_PKG_TMPL_NAME###"
  version: "###ZARF_PKG_TMPL_IMAGE_VERSION###"
  description: >
    A UI for LeapfrogAI

# Build-time constants, frozen into the package when it is created.
constants:
  - name: IMAGE_VERSION
    value: "###ZARF_PKG_TMPL_IMAGE_VERSION###"
  - name: NAME
    value: "###ZARF_PKG_TMPL_NAME###"

# Deploy-time variables. Zarf variable defaults are strings per the package
# schema, so boolean-looking values are quoted ("true") to prevent the YAML
# parser from coercing them to booleans and failing schema validation.
variables:
  - name: AI4NS_BRANDING
    description: Toggles between AI4NS (true) branding and LeapfrogAI (false) branding
    default: "true"
    prompt: true
    sensitive: false
  - name: LEAPFROGAI_BASE_URL
    description: The base URL for the Leapfrog AI API
    default: http://api.leapfrogai.svc.cluster.local:8080/openai/v1
    prompt: true
    sensitive: true
  - name: LEAPFROGAI_API_KEY
    description: The api key for the Leapfrog AI API (can be left default for local development)
    default: my-test-key
    prompt: true
    sensitive: true
  - name: LEAPFROGAI_RAG_URL
    description: The URL for the Leapfrog AI rag api
    default: http://rag.leapfrogai.svc.cluster.local:8000
    prompt: true
    sensitive: true
  - name: DOMAIN
    description: The domain to use for the application, Istio-ingress configuration
    default: https://ai.uds.dev
    prompt: true
    sensitive: true
  - name: CONCURRENT_REQUESTS
    description: If false, disables concurrent requests to the LLM
    default: "true"
    prompt: true
    sensitive: false
  - name: MODEL
    description: The default LLM model to use for chat and summarization
    default: vllm
    prompt: true
    sensitive: false
  - name: TRANSCRIPTION_MODEL
    description: The default model to use for transcription
    default: whisper
    prompt: true
    sensitive: false
  - name: SYSTEM_PROMPT
    description: The default system prompt to use for the LLM
    default: "You are a helpful AI assistant."
    prompt: true
    sensitive: false
  - name: FINAL_SUMMARIZATION_PROMPT
    description: The default system summarization prompt to use for the LLM
    default: "You are a summarizer tasked with creating summaries. You will return an coherent and concise summary using 3 concise sections that are each separated by a newline character: 1) BOTTOM LINE UP FRONT: this section will be a concise paragraph containing an overarching, executive summary of all the notes. 2) NOTES: this section will be bullet points highlighting and summarizing key points, risks, issues, and opportunities. 3) ACTION ITEMS: this section will focus on listing any action items, unanswered questions, or issues present in the text; if there are none that can be identified from the notes, just return 'None' for ACTION ITEMS; if possible, also include the individual or team assigned to each item in ACTION ITEMS."
    prompt: true
    sensitive: false
  - name: INTERMEDIATE_SUMMARIZATION_PROMPT
    description: The default system summarization prompt to use for the LLM when summary batching activates
    default: "You are a summarizer tasked with creating summaries. Your key activities include identifying the main points and key details in the given text, and condensing the information into a concise summary that accurately reflects the original text. It is important to avoid any risks such as misinterpreting the text, omitting crucial information, or distorting the original meaning. Use clear and specific language, ensuring that the summary is coherent, well-organized, and effectively communicates the main ideas of the original text."
    prompt: true
    sensitive: false
  - name: TEMPERATURE
    description: The default temperature for the LLM
    default: "0.1"
    prompt: true
    sensitive: false
  - name: MAX_TOKENS
    description: The default max tokens for the LLM
    default: "8192"
    prompt: true
    sensitive: false
  - name: PREFIX
    description: Prefix for requests to the application
    default: ""
    prompt: true
    sensitive: false

# The single component: the UI Helm chart plus its container image.
components:
  - name: leapfrogai-ui
    required: true
    charts:
      - name: leapfrogai-ui
        namespace: leapfrogai
        localPath: chart
        version: "###ZARF_PKG_TMPL_IMAGE_VERSION###"
    images:
      - "###ZARF_PKG_TMPL_IMAGE_REPOSITORY###:###ZARF_PKG_TMPL_IMAGE_VERSION###"
    actions:
      onDeploy:
        after:
          # Block until the UI Deployment reports Available, so `zarf package
          # deploy` only succeeds once the workload is actually up.
          - wait:
              cluster:
                kind: Deployment
                name: leapfrogai-ui
                namespace: leapfrogai
                condition: Available