Merge branch 'All-Hands-AI:main' into main
RainRat authored Feb 22, 2025
2 parents 3d3e01e + 70b21d1 commit cd127d1
Showing 3 changed files with 11 additions and 17 deletions.
8 changes: 5 additions & 3 deletions
@@ -75,9 +75,11 @@ export function ConversationPanel({ onClose }: ConversationPanelProps) {
       data-testid="conversation-panel"
       className="w-[350px] h-full border border-neutral-700 bg-base-secondary rounded-xl overflow-y-auto absolute"
     >
-      <div className="w-full h-full absolute flex justify-center items-center">
-        {isFetching && <LoadingSpinner size="small" />}
-      </div>
+      {isFetching && (
+        <div className="w-full h-full absolute flex justify-center items-center">
+          <LoadingSpinner size="small" />
+        </div>
+      )}
       {error && (
         <div className="flex flex-col items-center justify-center h-full">
           <p className="text-danger">{error.message}</p>
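With the `isFetching` check hoisted outside the wrapper, the full-size absolute overlay is only mounted while a fetch is in flight. Previously the empty overlay stayed rendered on top of the panel even when idle, where an absolutely positioned, full-size div can intercept pointer events meant for the list underneath; that is likely the motivation for this change.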
6 changes: 4 additions & 2 deletions openhands/core/logger.py
@@ -221,8 +221,10 @@ def filter(self, record):
         sensitive_values = []
         for key, value in os.environ.items():
             key_upper = key.upper()
-            if len(value) > 2 and any(
-                s in key_upper for s in ('SECRET', 'KEY', 'CODE', 'TOKEN')
+            if (
+                len(value) > 2
+                and value != 'default'
+                and any(s in key_upper for s in ('SECRET', 'KEY', 'CODE', 'TOKEN'))
             ):
                 sensitive_values.append(value)

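The added `value != 'default'` guard stops the literal placeholder 'default' from being collected as a secret and masked wherever it happens to appear in log output. For context, here is a minimal sketch of how a redaction filter along these lines can work; the collection logic mirrors the diff above, while the class name, the '******' mask, and the `getMessage()` handling are assumptions rather than the repository's actual code.

import logging
import os


class SensitiveDataFilter(logging.Filter):
    """Sketch: mask env values that look like secrets before records are emitted."""

    def filter(self, record):
        # Collect candidate secrets from the environment (mirrors the diff:
        # skip very short values and the placeholder 'default').
        sensitive_values = []
        for key, value in os.environ.items():
            key_upper = key.upper()
            if (
                len(value) > 2
                and value != 'default'
                and any(s in key_upper for s in ('SECRET', 'KEY', 'CODE', 'TOKEN'))
            ):
                sensitive_values.append(value)

        # Render the message once, then mask every collected value.
        # This rendering step and the mask string are assumptions.
        message = record.getMessage()
        for value in sensitive_values:
            message = message.replace(value, '******')
        record.msg = message
        record.args = None
        return True  # keep the record; only its text was rewritten

Attached with `logger.addFilter(SensitiveDataFilter())`, every record passing through that logger would have matching values masked.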
14 changes: 2 additions & 12 deletions openhands/llm/llm.py
@@ -20,7 +20,6 @@
 from litellm.exceptions import (
     RateLimitError,
 )
-from litellm.llms.custom_httpx.http_handler import HTTPHandler
 from litellm.types.utils import CostPerToken, ModelResponse, Usage
 from litellm.utils import create_pretrained_tokenizer

@@ -232,17 +231,8 @@ def wrapper(*args, **kwargs):
             # Record start time for latency measurement
             start_time = time.time()

-            # LiteLLM currently have an issue where HttpHandlers are being created but not
-            # closed. We have submitted a PR to them, (https://github.com/BerriAI/litellm/pull/8711)
-            # and their dev team say they are in the process of a refactor that will fix this.
-            # In the meantime, we manage the lifecycle of the HTTPHandler manually.
-            handler = HTTPHandler(timeout=self.config.timeout)
-            kwargs['client'] = handler
-            try:
-                # we don't support streaming here, thus we get a ModelResponse
-                resp: ModelResponse = self._completion_unwrapped(*args, **kwargs)
-            finally:
-                handler.close()
+            # we don't support streaming here, thus we get a ModelResponse
+            resp: ModelResponse = self._completion_unwrapped(*args, **kwargs)

             # Calculate and record latency
             latency = time.time() - start_time
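The deleted block was a stopgap for a LiteLLM resource leak (HTTPHandlers created but never closed, upstream PR linked in the removed comment); with that fix underway upstream, the wrapper goes back to calling the model directly. A minimal sketch of the surviving shape of the wrapper, where `_completion_unwrapped` and the comments come from the diff and the decorator packaging plus the `record_latency` callback are assumptions:

import time
from functools import wraps


def timed_completion(completion_unwrapped, record_latency):
    """Sketch: time a non-streaming completion call, with no manually managed HTTP client."""

    @wraps(completion_unwrapped)
    def wrapper(*args, **kwargs):
        # Record start time for latency measurement
        start_time = time.time()

        # we don't support streaming here, thus we get a ModelResponse
        resp = completion_unwrapped(*args, **kwargs)

        # Calculate and record latency
        latency = time.time() - start_time
        record_latency(latency)
        return resp

    return wrapper

A caller could wrap the raw completion once, e.g. `timed_completion(self._completion_unwrapped, latencies.append)`, where the metrics sink is again hypothetical.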
