Commit

update docs
cyyeh committed Feb 1, 2025
1 parent 169f2ea commit b1595e2
Showing 4 changed files with 49 additions and 47 deletions.
31 changes: 16 additions & 15 deletions wren-ai-service/docs/config_examples/config.deepseek.yaml
@@ -30,12 +30,13 @@ models:

---
type: embedder
-provider: openai_embedder
+provider: litellm_embedder
models:
-# find EMBEDDER_OPENAI_API_KEY and fill in value of api key in ~/.wrenai/.env
-- model: text-embedding-3-large # put your openai compatible embedder model name here
-  url: https://api.openai.com/v1 # change this according to your openai compatible embedder model
-  timeout: 120
+# define OPENAI_API_KEY=<api_key> in ~/.wrenai/.env if you are using openai embedding model
+# please refer to LiteLLM documentation for more details: https://docs.litellm.ai/docs/providers
+- model: text-embedding-3-large # put your embedding model name here, if it is not openai embedding model, should be <provider>/<model_name>
+  api_base: https://api.openai.com/v1 # change this according to your embedding model
+  timeout: 120

---
type: engine
@@ -57,20 +58,20 @@ recreate_index: false
type: pipeline
pipes:
  - name: db_schema_indexing
-    embedder: openai_embedder.text-embedding-3-large
+    embedder: litellm_embedder.text-embedding-3-large
    document_store: qdrant
  - name: historical_question_indexing
-    embedder: openai_embedder.text-embedding-3-large
+    embedder: litellm_embedder.text-embedding-3-large
    document_store: qdrant
  - name: table_description_indexing
-    embedder: openai_embedder.text-embedding-3-large
+    embedder: litellm_embedder.text-embedding-3-large
    document_store: qdrant
  - name: db_schema_retrieval
    llm: litellm_llm.deepseek/deepseek-coder
-    embedder: openai_embedder.text-embedding-3-large
+    embedder: litellm_embedder.text-embedding-3-large
    document_store: qdrant
  - name: historical_question_retrieval
-    embedder: openai_embedder.text-embedding-3-large
+    embedder: litellm_embedder.text-embedding-3-large
    document_store: qdrant
  - name: sql_generation
    llm: litellm_llm.deepseek/deepseek-coder
@@ -106,7 +107,7 @@ pipes:
    llm: litellm_llm.deepseek/deepseek-coder
  - name: question_recommendation_db_schema_retrieval
    llm: litellm_llm.deepseek/deepseek-coder
-    embedder: openai_embedder.text-embedding-3-large
+    embedder: litellm_embedder.text-embedding-3-large
    document_store: qdrant
  - name: question_recommendation_sql_generation
    llm: litellm_llm.deepseek/deepseek-coder
@@ -117,19 +118,19 @@ pipes:
    llm: litellm_llm.deepseek/deepseek-coder
  - name: intent_classification
    llm: litellm_llm.deepseek/deepseek-coder
-    embedder: openai_embedder.text-embedding-3-large
+    embedder: litellm_embedder.text-embedding-3-large
    document_store: qdrant
  - name: data_assistance
    llm: litellm_llm.deepseek/deepseek-chat
  - name: sql_pairs_indexing
    document_store: qdrant
-    embedder: openai_embedder.text-embedding-3-large
+    embedder: litellm_embedder.text-embedding-3-large
  - name: sql_pairs_deletion
    document_store: qdrant
-    embedder: openai_embedder.text-embedding-3-large
+    embedder: litellm_embedder.text-embedding-3-large
  - name: sql_pairs_retrieval
    document_store: qdrant
-    embedder: openai_embedder.text-embedding-3-large
+    embedder: litellm_embedder.text-embedding-3-large
    llm: litellm_llm.deepseek/deepseek-coder
  - name: preprocess_sql_data
    llm: litellm_llm.deepseek/deepseek-coder
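
With this commit the DeepSeek example declares its embedder through LiteLLM instead of the dedicated openai_embedder provider. Assembled from the added lines above, the resulting embedder section reads roughly as follows; this is a sketch only, the indentation is assumed (the diff view strips it), and the API key lives in ~/.wrenai/.env rather than in the YAML:

type: embedder
provider: litellm_embedder
models:
# define OPENAI_API_KEY=<api_key> in ~/.wrenai/.env if you are using an openai embedding model
# non-openai models follow LiteLLM's <provider>/<model_name> naming
- model: text-embedding-3-large
  api_base: https://api.openai.com/v1
  timeout: 120
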
30 changes: 15 additions & 15 deletions wren-ai-service/docs/config_examples/config.google_ai_studio.yaml
@@ -13,12 +13,12 @@ models:

---
type: embedder
-provider: openai_embedder
+provider: litellm_embedder
models:
-# find EMBEDDER_OPENAI_API_KEY and fill in value of api key in ~/.wrenai/.env
-- model: text-embedding-004 # put your openai compatible embedder model name here
-  url: https://generativelanguage.googleapis.com/v1beta/openai # change this according to your openai compatible embedder model
-  timeout: 120
+# put GEMINI_API_KEY=<your_api_key> in ~/.wrenai/.env
+- model: gemini/text-embedding-004 # gemini/<gemini_model_name>
+  api_base: https://generativelanguage.googleapis.com/v1beta/openai # change this according to your embedding model
+  timeout: 120

---
type: engine
@@ -40,20 +40,20 @@ recreate_index: false
type: pipeline
pipes:
  - name: db_schema_indexing
-    embedder: openai_embedder.text-embedding-004
+    embedder: litellm_embedder.text-embedding-004
    document_store: qdrant
  - name: historical_question_indexing
-    embedder: openai_embedder.text-embedding-004
+    embedder: litellm_embedder.text-embedding-004
    document_store: qdrant
  - name: table_description_indexing
-    embedder: openai_embedder.text-embedding-004
+    embedder: litellm_embedder.text-embedding-004
    document_store: qdrant
  - name: db_schema_retrieval
    llm: litellm_llm.gemini/gemini-2.0-flash-exp
-    embedder: openai_embedder.text-embedding-004
+    embedder: litellm_embedder.text-embedding-004
    document_store: qdrant
  - name: historical_question_retrieval
-    embedder: openai_embedder.text-embedding-004
+    embedder: litellm_embedder.text-embedding-004
    document_store: qdrant
  - name: sql_generation
    llm: litellm_llm.gemini/gemini-2.0-flash-exp
@@ -89,7 +89,7 @@ pipes:
    llm: litellm_llm.gemini/gemini-2.0-flash-exp
  - name: question_recommendation_db_schema_retrieval
    llm: litellm_llm.gemini/gemini-2.0-flash-exp
-    embedder: openai_embedder.text-embedding-004
+    embedder: litellm_embedder.text-embedding-004
    document_store: qdrant
  - name: question_recommendation_sql_generation
    llm: litellm_llm.gemini/gemini-2.0-flash-exp
@@ -100,19 +100,19 @@ pipes:
    llm: litellm_llm.gemini/gemini-2.0-flash-exp
  - name: intent_classification
    llm: litellm_llm.gemini/gemini-2.0-flash-exp
-    embedder: openai_embedder.text-embedding-004
+    embedder: litellm_embedder.text-embedding-004
    document_store: qdrant
  - name: data_assistance
    llm: litellm_llm.gemini/gemini-2.0-flash-exp
  - name: sql_pairs_indexing
    document_store: qdrant
-    embedder: openai_embedder.text-embedding-004
+    embedder: litellm_embedder.text-embedding-004
  - name: sql_pairs_deletion
    document_store: qdrant
-    embedder: openai_embedder.text-embedding-004
+    embedder: litellm_embedder.text-embedding-004
  - name: sql_pairs_retrieval
    document_store: qdrant
-    embedder: openai_embedder.text-embedding-004
+    embedder: litellm_embedder.text-embedding-004
    llm: litellm_llm.gemini/gemini-2.0-flash-exp
  - name: preprocess_sql_data
    llm: litellm_llm.gemini/gemini-2.0-flash-exp
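
For Google AI Studio the embedder model itself is renamed to LiteLLM's gemini/<model_name> form and the key moves to GEMINI_API_KEY in ~/.wrenai/.env, while the pipe entries in the hunks above reference the bare model name without the gemini/ prefix. A rough sketch of how the two pieces pair up (indentation assumed, since the diff view strips it):

type: embedder
provider: litellm_embedder
models:
# put GEMINI_API_KEY=<your_api_key> in ~/.wrenai/.env
- model: gemini/text-embedding-004
  api_base: https://generativelanguage.googleapis.com/v1beta/openai
  timeout: 120

---
type: pipeline
pipes:
  - name: db_schema_indexing
    embedder: litellm_embedder.text-embedding-004  # referenced without the gemini/ prefix
    document_store: qdrant
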
31 changes: 16 additions & 15 deletions wren-ai-service/docs/config_examples/config.groq.yaml
@@ -14,12 +14,13 @@ models:

---
type: embedder
-provider: openai_embedder
+provider: litellm_embedder
models:
-# find EMBEDDER_OPENAI_API_KEY and fill in value of api key in ~/.wrenai/.env
-- model: text-embedding-3-large # put your openai compatible embedder model name here
-  url: https://api.openai.com/v1 # change this according to your openai compatible embedder model
-  timeout: 120
+# define OPENAI_API_KEY=<api_key> in ~/.wrenai/.env if you are using openai embedding model
+# please refer to LiteLLM documentation for more details: https://docs.litellm.ai/docs/providers
+- model: text-embedding-3-large # put your embedding model name here, if it is not openai embedding model, should be <provider>/<model_name>
+  api_base: https://api.openai.com/v1 # change this according to your embedding model
+  timeout: 120

---
type: engine
@@ -41,20 +42,20 @@ recreate_index: false
type: pipeline
pipes:
  - name: db_schema_indexing
-    embedder: openai_embedder.text-embedding-3-large
+    embedder: litellm_embedder.text-embedding-3-large
    document_store: qdrant
  - name: historical_question_indexing
-    embedder: openai_embedder.text-embedding-3-large
+    embedder: litellm_embedder.text-embedding-3-large
    document_store: qdrant
  - name: table_description_indexing
-    embedder: openai_embedder.text-embedding-3-large
+    embedder: litellm_embedder.text-embedding-3-large
    document_store: qdrant
  - name: db_schema_retrieval
    llm: litellm_llm.groq/llama-3.3-70b-specdec
-    embedder: openai_embedder.text-embedding-3-large
+    embedder: litellm_embedder.text-embedding-3-large
    document_store: qdrant
  - name: historical_question_retrieval
-    embedder: openai_embedder.text-embedding-3-large
+    embedder: litellm_embedder.text-embedding-3-large
    document_store: qdrant
  - name: sql_generation
    llm: litellm_llm.groq/llama-3.3-70b-specdec
@@ -90,7 +91,7 @@ pipes:
    llm: litellm_llm.groq/llama-3.3-70b-specdec
  - name: question_recommendation_db_schema_retrieval
    llm: litellm_llm.groq/llama-3.3-70b-specdec
-    embedder: openai_embedder.text-embedding-3-large
+    embedder: litellm_embedder.text-embedding-3-large
    document_store: qdrant
  - name: question_recommendation_sql_generation
    llm: litellm_llm.groq/llama-3.3-70b-specdec
@@ -101,19 +102,19 @@ pipes:
    llm: litellm_llm.groq/llama-3.3-70b-specdec
  - name: intent_classification
    llm: litellm_llm.groq/llama-3.3-70b-specdec
-    embedder: openai_embedder.text-embedding-3-large
+    embedder: litellm_embedder.text-embedding-3-large
    document_store: qdrant
  - name: data_assistance
    llm: litellm_llm.groq/llama-3.3-70b-specdec
  - name: sql_pairs_indexing
    document_store: qdrant
-    embedder: openai_embedder.text-embedding-3-large
+    embedder: litellm_embedder.text-embedding-3-large
  - name: sql_pairs_deletion
    document_store: qdrant
-    embedder: openai_embedder.text-embedding-3-large
+    embedder: litellm_embedder.text-embedding-3-large
  - name: sql_pairs_retrieval
    document_store: qdrant
-    embedder: openai_embedder.text-embedding-3-large
+    embedder: litellm_embedder.text-embedding-3-large
    llm: litellm_llm.groq/llama-3.3-70b-specdec
  - name: preprocess_sql_data
    llm: litellm_llm.groq/llama-3.3-70b-specdec
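
Pipe entries appear to refer to the embedder as <provider>.<model_name>, which is why every openai_embedder.text-embedding-3-large reference in this file becomes litellm_embedder.text-embedding-3-large once the provider is renamed. A minimal sketch of that pairing, with indentation assumed:

type: embedder
provider: litellm_embedder
models:
- model: text-embedding-3-large
  api_base: https://api.openai.com/v1
  timeout: 120

---
type: pipeline
pipes:
  - name: db_schema_indexing
    embedder: litellm_embedder.text-embedding-3-large  # <provider>.<model_name>, must match the block above
    document_store: qdrant
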
4 changes: 2 additions & 2 deletions wren-ai-service/docs/config_examples/config.ollama.yaml
@@ -16,8 +16,8 @@ models:
type: embedder
provider: litellm_embedder
models:
-- model: openai/nomic-embed-text # put your ollama embedder model name here
-  api_base: http://host.docker.internal:11434/v1 # change this to your ollama host, url should be <ollama_url>
+- model: openai/nomic-embed-text # put your ollama embedder model name here, openai/<ollama_model_name>
+  api_base: http://host.docker.internal:11434/v1 # change this to your ollama host, api_base should be <ollama_url>/v1
  timeout: 120

---
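
The Ollama example only tightens its comments: the model keeps the openai/<ollama_model_name> form (the openai/ prefix routes the call through LiteLLM's OpenAI-compatible client, which Ollama exposes), and api_base must point at <ollama_url>/v1. The resulting entry, sketched with assumed indentation:

type: embedder
provider: litellm_embedder
models:
- model: openai/nomic-embed-text                  # openai/<ollama_model_name>
  api_base: http://host.docker.internal:11434/v1  # <ollama_url>/v1
  timeout: 120
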
