From 48deb0c5c3cb7abd7b398bc8cd72b7de639e6ec3 Mon Sep 17 00:00:00 2001
From: Wei Zhang
Date: Tue, 12 Nov 2024 00:51:14 +0800
Subject: [PATCH] fix(answer): return error when request to llm failed

---
 ee/tabby-webserver/src/service/answer.rs | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/ee/tabby-webserver/src/service/answer.rs b/ee/tabby-webserver/src/service/answer.rs
index 3813d7016283..ef190686d226 100644
--- a/ee/tabby-webserver/src/service/answer.rs
+++ b/ee/tabby-webserver/src/service/answer.rs
@@ -172,14 +172,14 @@ impl AnswerService {
             let chunk = match chunk {
                 Ok(chunk) => chunk,
                 Err(err) => {
-                    if let OpenAIError::StreamError(content) = err {
+                    if let OpenAIError::StreamError(content) = &err {
                         if content == "Stream ended" {
                             break;
                         }
-                    } else {
-                        error!("Failed to get chat completion chunk: {:?}", err);
                     }
-                    break;
+                    error!("Failed to get chat completion chunk: {:?}", err);
+                    yield Err(anyhow!("Failed to get chat completion chunk: {:?}", err).into());
+                    return;
                 }
             };
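
A minimal, self-contained sketch of the pattern this patch adopts: inside an async-stream generator, a normal "stream ended" marker closes the loop, while any other failure from the upstream LLM stream is logged and yielded to the consumer as an Err before the generator returns, instead of being swallowed by a silent break. The names answer_stream, ChunkError, and upstream below are hypothetical stand-ins, not the Tabby code (which matches on async-openai's OpenAIError inside AnswerService); the async-stream, futures, anyhow, and tracing crates are assumed as dependencies.

    use async_stream::stream;
    use futures::{Stream, StreamExt};
    use tracing::error;

    // Hypothetical stand-in for async-openai's OpenAIError in this sketch.
    #[derive(Debug)]
    enum ChunkError {
        StreamEnded,
        Upstream(String),
    }

    // Wraps an upstream chunk stream. A "stream ended" marker terminates the
    // stream normally; any other failure is logged and propagated to the
    // consumer as an Err instead of being dropped by a silent `break`.
    fn answer_stream(
        mut upstream: impl Stream<Item = Result<String, ChunkError>> + Unpin,
    ) -> impl Stream<Item = anyhow::Result<String>> {
        stream! {
            while let Some(chunk) = upstream.next().await {
                match chunk {
                    Ok(content) => yield Ok(content),
                    // Normal end of the completion stream.
                    Err(ChunkError::StreamEnded) => break,
                    // Real failure: surface it to the consumer, then stop.
                    Err(err) => {
                        error!("Failed to get chat completion chunk: {:?}", err);
                        yield Err(anyhow::anyhow!(
                            "Failed to get chat completion chunk: {:?}",
                            err
                        ));
                        return;
                    }
                }
            }
        }
    }

Yielding the error before returning lets the layer consuming the answer stream distinguish a request that failed mid-generation from one that simply finished, which is what the old unconditional `break` made impossible.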