We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
1160b58
v0.16.0
OS: Ubuntu 24.04.1 LTS; RAGFlow: v0.16.0; Ollama: v0.5.7; chat model: qwen2-7b-instruct-q6_k; embedding model: bge-large-zh-v1.5-q8_0
2025-02-13 17:48:40,479 INFO 18 set_progress(0fd90812e9ef11ef9bda0242ac120006), progress: -1, progress_msg: 17:48:40 Page(1~100000001): [ERROR]Generate embedding error:{} 2025-02-13 17:48:40,491 ERROR 18 Generate embedding error:{} Traceback (most recent call last): File "/ragflow/rag/svr/task_executor.py", line 571, in do_handle_task token_count, vector_size = embedding(chunks, embedding_model, task_parser_config, progress_callback) File "/ragflow/rag/svr/task_executor.py", line 368, in embedding vts, c = mdl.encode(cnts[i: i + batch_size]) File "<@beartype(api.db.services.llm_service.LLMBundle.encode) at 0x7098ec7abd00>", line 31, in encode File "/ragflow/api/db/services/llm_service.py", line 238, in encode embeddings, used_tokens = self.mdl.encode(texts) File "<@beartype(rag.llm.embedding_model.OllamaEmbed.encode) at 0x7098ed4b9fc0>", line 31, in encode File "/ragflow/rag/llm/embedding_model.py", line 262, in encode res = self.client.embeddings(prompt=txt, File "/ragflow/.venv/lib/python3.10/site-packages/ollama/_client.py", line 201, in embeddings return self._request( File "/ragflow/.venv/lib/python3.10/site-packages/ollama/_client.py", line 74, in _request raise ResponseError(e.response.text, e.response.status_code) from None ollama._types.ResponseError: {} 2025-02-13 17:48:40,503 INFO 18 set_progress(0fd90812e9ef11ef9bda0242ac120006), progress: -1, progress_msg: 17:48:40 [ERROR][Exception]: {} 2025-02-13 17:48:40,515 ERROR 18 handle_task got exception for task {"id": "0fd90812e9ef11ef9bda0242ac120006", "doc_id": "4ea198cee9e611ef90df0242ac120002", "from_page": 0, "to_page": 100000000, "retry_count": 0, "kb_id": "fd899bb0e9dd11efa2130242ac120002", "parser_id": "naive", "parser_config": {"auto_keywords": 0, "auto_questions": 0, "raptor": {"use_raptor": false}, "graphrag": {"use_graphrag": false}, "chunk_token_num": 214, "delimiter": "\n!?;\u3002\uff1b\uff01\uff1f", "layout_recognize": "llava:7b@Ollama", "html4excel": false}, "name": 
"\u4e2d\u533b\u65b9\u5242Excel\u6570\u636e\u8868_84295.xlsx", "type": "doc", "location": "\u4e2d\u533b\u65b9\u5242Excel\u6570\u636e\u8868_84295.xlsx", "size": 14393118, "tenant_id": "47d602e2e75611efa32f0242ac120006", "language": "Chinese", "embd_id": "bge-large-zh-v1.5-q8_0:latest@Ollama", "pagerank": 0, "kb_parser_config": {"auto_keywords": 0, "auto_questions": 0, "raptor": {"use_raptor": false}, "graphrag": {"use_graphrag": false}, "chunk_token_num": 214, "delimiter": "\n!?;\u3002\uff1b\uff01\uff1f", "layout_recognize": "llava:7b@Ollama", "html4excel": false}, "img2txt_id": "llava:7b@Ollama", "asr_id": "", "llm_id": "qwen2-7b-instruct-q6_k:latest@Ollama", "update_time": 1739439844426, "task_type": ""} Traceback (most recent call last): File "/ragflow/rag/svr/task_executor.py", line 626, in handle_task do_handle_task(task) File "/ragflow/rag/svr/task_executor.py", line 571, in do_handle_task token_count, vector_size = embedding(chunks, embedding_model, task_parser_config, progress_callback) File "/ragflow/rag/svr/task_executor.py", line 368, in embedding vts, c = mdl.encode(cnts[i: i + batch_size]) File "<@beartype(api.db.services.llm_service.LLMBundle.encode) at 0x7098ec7abd00>", line 31, in encode File "/ragflow/api/db/services/llm_service.py", line 238, in encode embeddings, used_tokens = self.mdl.encode(texts) File "<@beartype(rag.llm.embedding_model.OllamaEmbed.encode) at 0x7098ed4b9fc0>", line 31, in encode File "/ragflow/rag/llm/embedding_model.py", line 262, in encode res = self.client.embeddings(prompt=txt, File "/ragflow/.venv/lib/python3.10/site-packages/ollama/_client.py", line 201, in embeddings return self._request( File "/ragflow/.venv/lib/python3.10/site-packages/ollama/_client.py", line 74, in _request raise ResponseError(e.response.text, e.response.status_code) from None ollama._types.ResponseError: {}
No response
创建知识库 → 导入 Excel 文档到知识库 → 解析报错 (Create a knowledge base, import an Excel document into it; parsing then fails with an error.)
The text was updated successfully, but these errors were encountered:
No branches or pull requests
Is there an existing issue for the same bug?
RAGFlow workspace code commit ID
1160b58
RAGFlow image version
v0.16.0
Other environment information
Actual behavior
2025-02-13 17:48:40,479 INFO 18 set_progress(0fd90812e9ef11ef9bda0242ac120006), progress: -1, progress_msg: 17:48:40 Page(1~100000001): [ERROR]Generate embedding error:{}
2025-02-13 17:48:40,491 ERROR 18 Generate embedding error:{}
Traceback (most recent call last):
File "/ragflow/rag/svr/task_executor.py", line 571, in do_handle_task
token_count, vector_size = embedding(chunks, embedding_model, task_parser_config, progress_callback)
File "/ragflow/rag/svr/task_executor.py", line 368, in embedding
vts, c = mdl.encode(cnts[i: i + batch_size])
File "<@beartype(api.db.services.llm_service.LLMBundle.encode) at 0x7098ec7abd00>", line 31, in encode
File "/ragflow/api/db/services/llm_service.py", line 238, in encode
embeddings, used_tokens = self.mdl.encode(texts)
File "<@beartype(rag.llm.embedding_model.OllamaEmbed.encode) at 0x7098ed4b9fc0>", line 31, in encode
File "/ragflow/rag/llm/embedding_model.py", line 262, in encode
res = self.client.embeddings(prompt=txt,
File "/ragflow/.venv/lib/python3.10/site-packages/ollama/_client.py", line 201, in embeddings
return self._request(
File "/ragflow/.venv/lib/python3.10/site-packages/ollama/_client.py", line 74, in _request
raise ResponseError(e.response.text, e.response.status_code) from None
ollama._types.ResponseError: {}
2025-02-13 17:48:40,503 INFO 18 set_progress(0fd90812e9ef11ef9bda0242ac120006), progress: -1, progress_msg: 17:48:40 [ERROR][Exception]: {}
2025-02-13 17:48:40,515 ERROR 18 handle_task got exception for task {"id": "0fd90812e9ef11ef9bda0242ac120006", "doc_id": "4ea198cee9e611ef90df0242ac120002", "from_page": 0, "to_page": 100000000, "retry_count": 0, "kb_id": "fd899bb0e9dd11efa2130242ac120002", "parser_id": "naive", "parser_config": {"auto_keywords": 0, "auto_questions": 0, "raptor": {"use_raptor": false}, "graphrag": {"use_graphrag": false}, "chunk_token_num": 214, "delimiter": "\n!?;\u3002\uff1b\uff01\uff1f", "layout_recognize": "llava:7b@Ollama", "html4excel": false}, "name": "\u4e2d\u533b\u65b9\u5242Excel\u6570\u636e\u8868_84295.xlsx", "type": "doc", "location": "\u4e2d\u533b\u65b9\u5242Excel\u6570\u636e\u8868_84295.xlsx", "size": 14393118, "tenant_id": "47d602e2e75611efa32f0242ac120006", "language": "Chinese", "embd_id": "bge-large-zh-v1.5-q8_0:latest@Ollama", "pagerank": 0, "kb_parser_config": {"auto_keywords": 0, "auto_questions": 0, "raptor": {"use_raptor": false}, "graphrag": {"use_graphrag": false}, "chunk_token_num": 214, "delimiter": "\n!?;\u3002\uff1b\uff01\uff1f", "layout_recognize": "llava:7b@Ollama", "html4excel": false}, "img2txt_id": "llava:7b@Ollama", "asr_id": "", "llm_id": "qwen2-7b-instruct-q6_k:latest@Ollama", "update_time": 1739439844426, "task_type": ""}
Traceback (most recent call last):
File "/ragflow/rag/svr/task_executor.py", line 626, in handle_task
do_handle_task(task)
File "/ragflow/rag/svr/task_executor.py", line 571, in do_handle_task
token_count, vector_size = embedding(chunks, embedding_model, task_parser_config, progress_callback)
File "/ragflow/rag/svr/task_executor.py", line 368, in embedding
vts, c = mdl.encode(cnts[i: i + batch_size])
File "<@beartype(api.db.services.llm_service.LLMBundle.encode) at 0x7098ec7abd00>", line 31, in encode
File "/ragflow/api/db/services/llm_service.py", line 238, in encode
embeddings, used_tokens = self.mdl.encode(texts)
File "<@beartype(rag.llm.embedding_model.OllamaEmbed.encode) at 0x7098ed4b9fc0>", line 31, in encode
File "/ragflow/rag/llm/embedding_model.py", line 262, in encode
res = self.client.embeddings(prompt=txt,
File "/ragflow/.venv/lib/python3.10/site-packages/ollama/_client.py", line 201, in embeddings
return self._request(
File "/ragflow/.venv/lib/python3.10/site-packages/ollama/_client.py", line 74, in _request
raise ResponseError(e.response.text, e.response.status_code) from None
ollama._types.ResponseError: {}
Expected behavior
No response
Steps to reproduce
Additional information
The text was updated successfully, but these errors were encountered: