diff --git a/backend/user_management.py b/backend/user_management.py index 9d21cd3..45a9550 100644 --- a/backend/user_management.py +++ b/backend/user_management.py @@ -2,9 +2,9 @@ from datetime import datetime, timedelta from typing import Optional +import argon2 from jose import jwt from pydantic import BaseModel -import argon2 from backend.database import Database @@ -56,10 +56,10 @@ def authenticate_user(username: str, password: bytes) -> bool | User: user = get_user(username) if not user: return False - + if argon2.verify_password(user.hashed_password, password.encode("utf-8")): return user - + return False def create_access_token(*, data: dict, expires_delta: Optional[timedelta] = None) -> str: diff --git a/catalog-info.yaml b/catalog-info.yaml index fb04429..3efcd13 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -18,4 +18,4 @@ metadata: spec: type: service owner: user:alexisVLRT - lifecycle: experimental \ No newline at end of file + lifecycle: experimental diff --git a/docs/cookbook.md b/docs/cookbook.md index ba61dac..be0783b 100644 --- a/docs/cookbook.md +++ b/docs/cookbook.md @@ -3,4 +3,4 @@ Here you will find a repository of configurations that have proved to work. - [LLM Configuration samples](recipe_llms_configs.md) - [Embedding model Configuration samples](recipe_embedding_models_configs.md) - [Vector Store Configuration samples](recipe_vector_stores_configs.md) -- [Database Configuration samples](recipe_databases_configs.md) \ No newline at end of file +- [Database Configuration samples](recipe_databases_configs.md) diff --git a/docs/recipe_embedding_models_configs.md b/docs/recipe_embedding_models_configs.md index 915f769..4932464 100644 --- a/docs/recipe_embedding_models_configs.md +++ b/docs/recipe_embedding_models_configs.md @@ -3,7 +3,7 @@ This will download the selected model from the HF hub and make embeddings on the machine the backend is running on. 
```shell pip install sentence_transformers -``` +``` ```yaml # backend/config.yaml @@ -43,4 +43,4 @@ EmbeddingModelConfig: &EmbeddingModelConfig source: BedrockEmbeddings source_config: model_id: 'amazon.titan-embed-text-v1' -``` \ No newline at end of file +``` diff --git a/docs/recipe_llms_configs.md b/docs/recipe_llms_configs.md index 597fc96..9e949af 100644 --- a/docs/recipe_llms_configs.md +++ b/docs/recipe_llms_configs.md @@ -34,10 +34,10 @@ LLMConfig: &LLMConfig ## Vertex AI gemini-pro -!!! warning +!!! warning Right now Gemini models' safety settings are **very** sensitive, and it is not possible to disable them. That makes this model pretty much useless for the time being as it blocks most requests and/or responses. - + Github issue to follow: https://github.com/langchain-ai/langchain/pull/15344#issuecomment-1888597151 !!! info "You will first need to login to GCP" diff --git a/requirements.in b/requirements.in index 2c7be94..5951271 100644 --- a/requirements.in +++ b/requirements.in @@ -42,4 +42,4 @@ mkdocs mkdocs-techdocs-core pymdown-extensions mkdocs_monorepo_plugin -argon2-cffi \ No newline at end of file +argon2-cffi