From 4a19c8e1ad1954e7c090e762b57a6736ef8d17b1 Mon Sep 17 00:00:00 2001 From: Gato <115658935+CollectiveUnicorn@users.noreply.github.com> Date: Fri, 20 Sep 2024 08:58:01 -0700 Subject: [PATCH 1/3] fix(api, tests): configmap reloading core dump (#1085) * Replaces watchfiles with watchdog to deal with the coredump issue being caused by a combination of awatch, asyncio and test code that is sensitive to async activity. --------- Co-authored-by: Justin Law --- src/leapfrogai_api/main.py | 24 ++-- src/leapfrogai_api/pyproject.toml | 2 +- src/leapfrogai_api/utils/config.py | 144 ++++++++++++++---------- tests/pytest/leapfrogai_api/test_api.py | 45 ++++++-- 4 files changed, 134 insertions(+), 81 deletions(-) diff --git a/src/leapfrogai_api/main.py b/src/leapfrogai_api/main.py index c3c806dbd..d7eac33e6 100644 --- a/src/leapfrogai_api/main.py +++ b/src/leapfrogai_api/main.py @@ -36,17 +36,25 @@ logger = logging.getLogger(__name__) -# handle startup & shutdown tasks +# Handle startup & shutdown tasks @asynccontextmanager async def lifespan(app: FastAPI): """Handle startup and shutdown tasks for the FastAPI app.""" - # startup - logger.info("Starting to watch for configs with this being an info") - asyncio.create_task(get_model_config().watch_and_load_configs()) - yield - # shutdown - logger.info("Clearing model configs") - asyncio.create_task(get_model_config().clear_all_models()) + # Startup + logger.info("Starting to watch for configs.") + config = get_model_config() + config_task = asyncio.create_task(config.watch_and_load_configs()) + try: + yield + finally: + # Shutdown + logger.info("Stopping config watcher and clearing model configs.") + config_task.cancel() + try: + await config_task + except asyncio.CancelledError: + pass # Task was cancelled, which is expected during shutdown + await config.clear_all_models() app = FastAPI(lifespan=lifespan) diff --git a/src/leapfrogai_api/pyproject.toml b/src/leapfrogai_api/pyproject.toml index fa7343f21..1085cd5b2 100644 --- 
a/src/leapfrogai_api/pyproject.toml +++ b/src/leapfrogai_api/pyproject.toml @@ -13,7 +13,7 @@ dependencies = [ "uvicorn == 0.23.2", "docx2txt == 0.8", "python-multipart == 0.0.7", #indirect dep of FastAPI to receive form data for file uploads - "watchfiles == 0.21.0", + "watchdog == 5.0.2", "leapfrogai_sdk", "supabase == 2.6.0", "langchain == 0.2.12", diff --git a/src/leapfrogai_api/utils/config.py b/src/leapfrogai_api/utils/config.py index 9ef327f51..07e8096d2 100644 --- a/src/leapfrogai_api/utils/config.py +++ b/src/leapfrogai_api/utils/config.py @@ -1,17 +1,52 @@ +import asyncio import fnmatch import glob import logging import os import toml import yaml -from watchfiles import Change, awatch +from watchdog.observers import Observer +from watchdog.events import FileSystemEventHandler from leapfrogai_api.typedef.models import Model - logger = logging.getLogger(__name__) +class ConfigHandler(FileSystemEventHandler): + def __init__(self, config): + self.config = config + super().__init__() + + def on_created(self, event): + self.process(event) + + def on_modified(self, event): + self.process(event) + + def on_deleted(self, event): + self.process(event) + + def process(self, event): + # Ignore directory events + if event.is_directory: + return + + filename = os.path.basename(event.src_path) + logger.debug(f"Processing event '{event.event_type}' for file '{filename}'") + + # Check if the file matches the config filename or pattern + if fnmatch.fnmatch(filename, self.config.filename): + if event.event_type == "deleted": + logger.info(f"Detected deletion of config file '{filename}'") + self.config.remove_model_by_config(filename) + else: + logger.info( + f"Detected modification/creation of config file '{filename}'" + ) + self.config.load_config_file(self.config.directory, filename) + + class Config: models: dict[str, Model] = {} config_sources: dict[str, list] = {} @@ -21,6 +56,8 @@ def __init__( ): self.models = models self.config_sources = config_sources + 
self.directory = "." + self.filename = "config.yaml" def __str__(self): return f"Models: {self.models}" @@ -28,82 +65,74 @@ def __str__(self): async def watch_and_load_configs(self, directory=".", filename="config.yaml"): # Get the config directory and filename from the environment variables if provided env_directory = os.environ.get("LFAI_CONFIG_PATH", directory) - if env_directory is not None and env_directory != "": + if env_directory: directory = env_directory env_filename = os.environ.get("LFAI_CONFIG_FILENAME", filename) - if env_filename is not None and env_filename != "": + if env_filename: filename = env_filename + self.directory = directory + self.filename = filename + # Process all the configs that were already in the directory self.load_all_configs(directory, filename) - # Watch the directory for changes until the end of time - while True: - async for changes in awatch(directory, recursive=False, step=50): - # get two unique lists of files that have been (updated files and deleted files) - # (awatch can return duplicates depending on the type of updates that happen) - logger.info("Config changes detected: {}".format(changes)) - unique_new_files = set() - unique_deleted_files = set() - for change in changes: - if change[0] == Change.deleted: - unique_deleted_files.add(os.path.basename(change[1])) - else: - unique_new_files.add(os.path.basename(change[1])) - - # filter the files to those that match the filename or glob pattern - filtered_new_matches = fnmatch.filter(unique_new_files, filename) - filtered_deleted_matches = fnmatch.filter( - unique_deleted_files, filename - ) + # Set up the event handler and observer + event_handler = ConfigHandler(self) + observer = Observer() + observer.schedule(event_handler, path=directory, recursive=False) - # load all the updated config files - for match in filtered_new_matches: - self.load_config_file(directory, match) + # Start the observer + observer.start() + logger.info(f"Started watching directory: {directory}") 
- # remove deleted models - for match in filtered_deleted_matches: - self.remove_model_by_config(match) + try: + while True: + await asyncio.sleep(1) + except (KeyboardInterrupt, asyncio.CancelledError): + # Stop the observer if the script is interrupted + observer.stop() + logger.info(f"Stopped watching directory: {directory}") + + # Wait for the observer to finish + observer.join() async def clear_all_models(self): - # reset the model config on shutdown (so old model configs don't get cached) + # Reset the model config on shutdown (so old model configs don't get cached) self.models = {} self.config_sources = {} logger.info("All models have been removed") def load_config_file(self, directory: str, config_file: str): - logger.info("Loading config file: {}/{}".format(directory, config_file)) + logger.info(f"Loading config file: {directory}/{config_file}") - # load the config file into the config object + # Load the config file into the config object config_path = os.path.join(directory, config_file) - with open(config_path) as c: - # Load the file into a python object - loaded_artifact = {} - if config_path.endswith(".toml"): - loaded_artifact = toml.load(c) - elif config_path.endswith(".yaml"): - loaded_artifact = yaml.safe_load(c) - else: - # TODO: Return an error ??? 
- logger.error(f"Unsupported file type: {config_path}") - return - - # parse the object into our config - self.parse_models(loaded_artifact, config_file) - - logger.info("loaded artifact at {}".format(config_path)) - - return + try: + with open(config_path) as c: + # Load the file into a python object + if config_path.endswith(".toml"): + loaded_artifact = toml.load(c) + elif config_path.endswith(".yaml"): + loaded_artifact = yaml.safe_load(c) + else: + logger.error(f"Unsupported file type: {config_path}") + return + + # Parse the object into our config + self.parse_models(loaded_artifact, config_file) + + logger.info(f"Loaded artifact at {config_path}") + except Exception as e: + logger.error(f"Failed to load config file {config_path}: {e}") def load_all_configs(self, directory="", filename="config.yaml"): logger.info( - "Loading all configs in {} that match the name '{}'".format( - directory, filename - ) + f"Loading all configs in {directory} that match the name '{filename}'" ) if not os.path.exists(directory): - logger.error("The config directory ({}) does not exist".format(directory)) + logger.error(f"The config directory ({directory}) does not exist") return "THE CONFIG DIRECTORY DOES NOT EXIST" # Get all config files and load them into the config object @@ -112,13 +141,8 @@ def load_all_configs(self, directory="", filename="config.yaml"): dir_path, file_path = os.path.split(config_path) self.load_config_file(directory=dir_path, config_file=file_path) - return - def get_model_backend(self, model: str) -> Model | None: - if model in self.models: - return self.models[model] - else: - return None + return self.models.get(model) def parse_models(self, loaded_artifact, config_file): for m in loaded_artifact["models"]: diff --git a/tests/pytest/leapfrogai_api/test_api.py b/tests/pytest/leapfrogai_api/test_api.py index 2199348d3..552a4eb19 100644 --- a/tests/pytest/leapfrogai_api/test_api.py +++ b/tests/pytest/leapfrogai_api/test_api.py @@ -24,6 +24,12 @@ ) 
LFAI_CONFIG_FILEPATH = os.path.join(LFAI_CONFIG_PATH, LFAI_CONFIG_FILENAME) +MODEL = "repeater" +TEXT_INPUT = ( + "This is the input content for completions, embeddings, and chat completions" +) +TEXT_INPUT_LEN = len(TEXT_INPUT) + ######################### ######################### @@ -74,39 +80,54 @@ def test_config_load(): response = client.get("/leapfrogai/v1/models") assert response.status_code == 200 - assert response.json() == { - "config_sources": {"repeater-test-config.yaml": ["repeater"]}, - "models": {"repeater": {"backend": "localhost:50051", "name": "repeater"}}, + expected_response = { + "config_sources": {"repeater-test-config.yaml": [MODEL]}, + "models": {MODEL: {"backend": "localhost:50051", "name": MODEL}}, + "directory": LFAI_CONFIG_PATH, + "filename": LFAI_CONFIG_FILENAME, } + assert response.json() == expected_response def test_config_delete(tmp_path): """Test that the config is deleted correctly.""" - # move repeater-test-config.yaml to temp dir so that we can remove it at a later step + # Move repeater-test-config.yaml to temp dir so that we can remove it at a later step tmp_config_filepath = shutil.copyfile( LFAI_CONFIG_FILEPATH, os.path.join(tmp_path, LFAI_CONFIG_FILENAME) ) os.environ["LFAI_CONFIG_PATH"] = str(tmp_path) with TestClient(app) as client: - # ensure the API loads the temp config + # Ensure the API loads the temp config response = client.get("/leapfrogai/v1/models") assert response.status_code == 200 - assert response.json() == { - "config_sources": {"repeater-test-config.yaml": ["repeater"]}, - "models": {"repeater": {"backend": "localhost:50051", "name": "repeater"}}, + expected_response = { + "config_sources": {"repeater-test-config.yaml": [MODEL]}, + "models": {MODEL: {"backend": "localhost:50051", "name": MODEL}}, + "directory": os.environ["LFAI_CONFIG_PATH"], + "filename": LFAI_CONFIG_FILENAME, } - # delete source config from temp dir + assert response.json() == expected_response + + # Delete source config from temp dir 
os.remove(tmp_config_filepath) - # wait for the api to be able to detect the change + # Wait for the API to detect the change time.sleep(0.5) - # assert response is now empty + + # Assert response is now empty response = client.get("/leapfrogai/v1/models") assert response.status_code == 200 - assert response.json() == {"config_sources": {}, "models": {}} + expected_empty_response = { + "config_sources": {}, + "models": {}, + "directory": os.environ["LFAI_CONFIG_PATH"], + "filename": LFAI_CONFIG_FILENAME, + } + assert response.json() == expected_empty_response + # Reset the environment variable os.environ["LFAI_CONFIG_PATH"] = os.path.join(os.path.dirname(__file__), "fixtures") From 480a35b88fc9170d6432601d022363d908742764 Mon Sep 17 00:00:00 2001 From: Gregory Horvath Date: Fri, 20 Sep 2024 13:35:11 -0400 Subject: [PATCH 2/3] fix(make): make clean changed to make clean-artifacts (#1073) --- Makefile | 4 ++-- tests/Makefile | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 8a7280f6e..bf8afb315 100644 --- a/Makefile +++ b/Makefile @@ -299,7 +299,7 @@ silent-deploy-gpu: silent-fresh-leapfrogai-gpu: @echo "Cleaning up previous artifacts..." - @$(MAKE) clean > /dev/null 2>&1 + @$(MAKE) clean-artifacts > /dev/null 2>&1 @echo "Logs at .logs/*.log" @mkdir -p .logs @echo "Creating a uds gpu enabled cluster..." @@ -316,7 +316,7 @@ silent-fresh-leapfrogai-gpu: silent-fresh-leapfrogai-cpu: @echo "Cleaning up previous artifacts..." - @$(MAKE) clean > /dev/null 2>&1 + @$(MAKE) clean-artifacts > /dev/null 2>&1 @echo "Logs at .logs/*.log" @mkdir -p .logs @echo "Creating a uds cpu-only cluster..." 
diff --git a/tests/Makefile b/tests/Makefile index b2bb66861..4a5179f4c 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -63,8 +63,8 @@ test-api-integration: fi @env $$(cat .env | xargs) LFAI_RUN_NIAH_TESTS=$(LFAI_RUN_NIAH_TESTS) PYTHONPATH=$$(pwd) pytest -vv -s tests/integration/api -test-api-unit: set-supabase - LFAI_RUN_REPEATER_TESTS=$(LFAI_RUN_REPEATER_TESTS) PYTHONPATH=$$(pwd) pytest -vv -s tests/unit +test-api-unit: + LFAI_RUN_REPEATER_TESTS=$(LFAI_RUN_REPEATER_TESTS) PYTHONPATH=$$(pwd) python -m pytest -vv -s tests/unit LFAI_RUN_REPEATER_TESTS=$(LFAI_RUN_REPEATER_TESTS) PYTHONPATH=$$(pwd) python -m pytest -vv -s tests/pytest test-load: From d954c33681128a3c0db851fdccef84d79129a0aa Mon Sep 17 00:00:00 2001 From: Andrew Risse <52644157+andrewrisse@users.noreply.github.com> Date: Fri, 20 Sep 2024 12:03:59 -0600 Subject: [PATCH 3/3] feat(ui): translation (#1046) --- .../src/lib/components/ChatFileUpload.svelte | 153 +++++++++++++++ .../lib/components/ChatFileUploadForm.svelte | 85 --------- .../src/lib/components/FileChatActions.svelte | 164 +++++++++++++++++ .../lib/components/FileChatActions.test.ts | 139 ++++++++++++++ .../components/LFSidebarDropdownItem.svelte | 2 +- .../src/lib/components/Message.svelte | 4 +- .../lib/components/UploadedFileCard.svelte | 18 +- .../lib/components/UploadedFileCards.svelte | 6 +- src/leapfrogai_ui/src/lib/constants/index.ts | 39 +++- .../src/lib/constants/toastMessages.ts | 22 ++- .../src/lib/helpers/apiHelpers.ts | 35 ++++ .../src/lib/helpers/fileHelpers.test.ts | 146 +++++++++++++++ .../src/lib/helpers/fileHelpers.ts | 54 +++++- src/leapfrogai_ui/src/lib/mocks/chat-mocks.ts | 28 +++ src/leapfrogai_ui/src/lib/mocks/openai.ts | 15 +- src/leapfrogai_ui/src/lib/schemas/chat.ts | 4 +- src/leapfrogai_ui/src/lib/schemas/files.ts | 40 +++- src/leapfrogai_ui/src/lib/stores/threads.ts | 58 ++++++ src/leapfrogai_ui/src/lib/types/files.d.ts | 4 + src/leapfrogai_ui/src/lib/types/messages.d.ts | 2 +- 
src/leapfrogai_ui/src/lib/types/toast.d.ts | 6 + .../routes/api/audio/translation/+server.ts | 36 ++++ .../api/audio/translation/server.test.ts | 127 +++++++++++++ .../routes/api/files/convert/server.test.ts | 18 +- .../routes/api/files/parse-text/+server.ts | 68 +++++++ .../api/files/parse-text/server.test.ts | 153 +++++++++++++++ .../src/routes/api/messages/new/+server.ts | 1 - .../(dashboard)/[[thread_id]]/+page.server.ts | 114 ------------ .../(dashboard)/[[thread_id]]/+page.svelte | 38 ++-- .../chat/(dashboard)/[[thread_id]]/+page.ts | 6 +- .../[[thread_id]]/form-action.test.ts | 174 ------------------ src/leapfrogai_ui/svelte.config.js | 1 + src/leapfrogai_ui/testUtils/fakeData/index.ts | 16 ++ src/leapfrogai_ui/tests/fixtures/spanish.m4a | Bin 0 -> 26614 bytes src/leapfrogai_ui/tests/translation.test.ts | 72 ++++++++ 35 files changed, 1409 insertions(+), 439 deletions(-) create mode 100644 src/leapfrogai_ui/src/lib/components/ChatFileUpload.svelte delete mode 100644 src/leapfrogai_ui/src/lib/components/ChatFileUploadForm.svelte create mode 100644 src/leapfrogai_ui/src/lib/components/FileChatActions.svelte create mode 100644 src/leapfrogai_ui/src/lib/components/FileChatActions.test.ts create mode 100644 src/leapfrogai_ui/src/lib/helpers/apiHelpers.ts create mode 100644 src/leapfrogai_ui/src/lib/helpers/fileHelpers.test.ts create mode 100644 src/leapfrogai_ui/src/routes/api/audio/translation/+server.ts create mode 100644 src/leapfrogai_ui/src/routes/api/audio/translation/server.test.ts create mode 100644 src/leapfrogai_ui/src/routes/api/files/parse-text/+server.ts create mode 100644 src/leapfrogai_ui/src/routes/api/files/parse-text/server.test.ts delete mode 100644 src/leapfrogai_ui/src/routes/chat/(dashboard)/[[thread_id]]/+page.server.ts delete mode 100644 src/leapfrogai_ui/src/routes/chat/(dashboard)/[[thread_id]]/form-action.test.ts create mode 100644 src/leapfrogai_ui/tests/fixtures/spanish.m4a create mode 100644 
src/leapfrogai_ui/tests/translation.test.ts diff --git a/src/leapfrogai_ui/src/lib/components/ChatFileUpload.svelte b/src/leapfrogai_ui/src/lib/components/ChatFileUpload.svelte new file mode 100644 index 000000000..e01575ce2 --- /dev/null +++ b/src/leapfrogai_ui/src/lib/components/ChatFileUpload.svelte @@ -0,0 +1,153 @@ + + + { + if (e.detail.length > MAX_NUM_FILES_UPLOAD) { + e.detail.pop(); + toastStore.addToast(MAX_NUM_FILES_UPLOAD_MSG_TOAST()); + return; + } + uploadingFiles = true; + for (const file of e.detail) { + // Metadata is limited to 512 characters, we use a short id to save space + const id = uuidv4().substring(0, 8); + file.id = id; + attachedFileMetadata = [ + ...attachedFileMetadata, + { + id, + name: file.name, + type: file.type, + status: file.type.startsWith('audio/') ? 'complete' : 'uploading' + } + ]; + } + + attachedFiles = [...attachedFiles, ...e.detail]; + convertFiles(e.detail); + fileUploadBtnRef.value = ''; + }} + accept={ACCEPTED_FILE_TYPES} + disabled={uploadingFiles} + class="remove-btn-style flex rounded-lg p-1.5 text-gray-500 hover:bg-inherit dark:hover:bg-inherit" +> + + Attach file + diff --git a/src/leapfrogai_ui/src/lib/components/ChatFileUploadForm.svelte b/src/leapfrogai_ui/src/lib/components/ChatFileUploadForm.svelte deleted file mode 100644 index 894d97eaf..000000000 --- a/src/leapfrogai_ui/src/lib/components/ChatFileUploadForm.svelte +++ /dev/null @@ -1,85 +0,0 @@ - - -
- { - if (e.detail.length > MAX_NUM_FILES_UPLOAD) { - toastStore.addToast(MAX_NUM_FILES_UPLOAD_MSG_TOAST()); - return; - } - uploadingFiles = true; - // Metadata is limited to 512 characters, we use a short id to save space - for (const file of e.detail) { - attachedFileMetadata = [ - ...attachedFileMetadata, - { id: uuidv4().substring(0, 8), name: file.name, type: file.type, status: 'uploading' } - ]; - } - - submit(e.detail); - }} - accept={ACCEPTED_FILE_TYPES} - disabled={uploadingFiles} - class="remove-btn-style flex rounded-lg p-1.5 text-gray-500 hover:bg-inherit dark:hover:bg-inherit" - > - - Attach file - -
diff --git a/src/leapfrogai_ui/src/lib/components/FileChatActions.svelte b/src/leapfrogai_ui/src/lib/components/FileChatActions.svelte new file mode 100644 index 000000000..f162407b6 --- /dev/null +++ b/src/leapfrogai_ui/src/lib/components/FileChatActions.svelte @@ -0,0 +1,164 @@ + + +
0 + ? 'ml-6 flex max-w-full gap-2 overflow-x-auto bg-gray-700' + : 'hidden'} +> + {#each audioFiles as file} +
+ +
+ {/each} +
diff --git a/src/leapfrogai_ui/src/lib/components/FileChatActions.test.ts b/src/leapfrogai_ui/src/lib/components/FileChatActions.test.ts new file mode 100644 index 000000000..03914e7a4 --- /dev/null +++ b/src/leapfrogai_ui/src/lib/components/FileChatActions.test.ts @@ -0,0 +1,139 @@ +import FileChatActions from '$components/FileChatActions.svelte'; +import { render, screen } from '@testing-library/svelte'; +import type { FileMetadata, LFFile } from '$lib/types/files'; +import userEvent from '@testing-library/user-event'; +import { mockConvertFileNoId } from '$lib/mocks/file-mocks'; +import { + mockNewMessage, + mockTranslation, + mockTranslationError, + mockTranslationFileSizeError +} from '$lib/mocks/chat-mocks'; +import { vi } from 'vitest'; +import { threadsStore, toastStore } from '$stores'; +import { AUDIO_FILE_SIZE_ERROR_TOAST, FILE_TRANSLATION_ERROR } from '$constants/toastMessages'; +import { getFakeThread } from '$testUtils/fakeData'; +import { NO_SELECTED_ASSISTANT_ID } from '$constants'; + +const thread = getFakeThread(); + +const mockFile1: LFFile = new File([], 'test1.mpeg', { type: 'audio/mpeg' }); +const mockFile2: LFFile = new File([], 'test1.mp4', { type: 'audio/mp4' }); + +mockFile1.id = '1'; +mockFile2.id = '2'; + +const mockMetadata1: FileMetadata = { + id: mockFile1.id, + name: mockFile1.name, + type: 'audio/mpeg', + status: 'complete', + text: '' +}; + +const mockMetadata2: FileMetadata = { + id: mockFile2.id, + name: mockFile2.name, + type: 'audio/mp4', + status: 'complete', + text: '' +}; + +describe('FileChatActions', () => { + beforeEach(() => { + threadsStore.set({ + threads: [thread], // uses date override starting in March + sendingBlocked: false, + selectedAssistantId: NO_SELECTED_ASSISTANT_ID, + lastVisitedThreadId: '', + streamingMessage: null + }); + }); + + it('should render a translate button for each audio file', () => { + render(FileChatActions, { + attachedFiles: [mockFile1, mockFile2], + attachedFileMetadata: [mockMetadata1, 
mockMetadata2], + threadId: thread.id, + originalMessages: [], + setMessages: vi.fn() + }); + + expect(screen.getByText(`Translate ${mockMetadata1.name}`)); + expect(screen.getByText(`Translate ${mockMetadata2.name}`)); + }); + + // Tests that correct endpoints are called when clicked + // This is testing implementation rather than behavior, but is the best we can do for this component without + // going up a level for a complicated integration test (behavior is tested in e2e) + it('creates a message and requests a translation for the user requesting translation', async () => { + const fetchSpy = vi.spyOn(global, 'fetch'); + + mockConvertFileNoId(''); + mockNewMessage(); + mockTranslation(); + mockNewMessage(); + render(FileChatActions, { + attachedFiles: [mockFile1, mockFile2], + attachedFileMetadata: [mockMetadata1, mockMetadata2], + threadId: thread.id, + originalMessages: [], + setMessages: vi.fn() + }); + + await userEvent.click(screen.getByRole('button', { name: `Translate ${mockMetadata2.name}` })); + expect(fetchSpy).toHaveBeenNthCalledWith( + 1, + expect.stringContaining('/api/messages/new'), + expect.any(Object) + ); + expect(fetchSpy).toHaveBeenNthCalledWith( + 2, + expect.stringContaining('/api/audio/translation'), + expect.any(Object) + ); + expect(fetchSpy).toHaveBeenNthCalledWith( + 3, + expect.stringContaining('/api/messages/new'), + expect.any(Object) + ); + }); + + it('dispatches a toast if there is an error translating a file', async () => { + mockConvertFileNoId(''); + mockNewMessage(); + mockTranslationError(); + + const toastSpy = vi.spyOn(toastStore, 'addToast'); + + render(FileChatActions, { + attachedFiles: [mockFile1, mockFile2], + attachedFileMetadata: [mockMetadata1, mockMetadata2], + threadId: thread.id, + originalMessages: [], + setMessages: vi.fn() + }); + + await userEvent.click(screen.getByRole('button', { name: `Translate ${mockMetadata2.name}` })); + expect(toastSpy).toHaveBeenCalledWith(FILE_TRANSLATION_ERROR()); + }); + + 
it('dispatches a toast if the file is too big', async () => { + mockConvertFileNoId(''); + mockNewMessage(); + mockTranslationFileSizeError(); + + const toastSpy = vi.spyOn(toastStore, 'addToast'); + + render(FileChatActions, { + attachedFiles: [mockFile1, mockFile2], + attachedFileMetadata: [mockMetadata1, mockMetadata2], + threadId: thread.id, + originalMessages: [], + setMessages: vi.fn() + }); + + await userEvent.click(screen.getByRole('button', { name: `Translate ${mockMetadata2.name}` })); + expect(toastSpy).toHaveBeenCalledWith(AUDIO_FILE_SIZE_ERROR_TOAST()); + }); +}); diff --git a/src/leapfrogai_ui/src/lib/components/LFSidebarDropdownItem.svelte b/src/leapfrogai_ui/src/lib/components/LFSidebarDropdownItem.svelte index 9282826d0..7f0206739 100644 --- a/src/leapfrogai_ui/src/lib/components/LFSidebarDropdownItem.svelte +++ b/src/leapfrogai_ui/src/lib/components/LFSidebarDropdownItem.svelte @@ -116,7 +116,7 @@ It adds a "three-dot" menu button with Popover, and delete confirmation Modal $$props.class )} > -

+

{label}

diff --git a/src/leapfrogai_ui/src/lib/components/Message.svelte b/src/leapfrogai_ui/src/lib/components/Message.svelte index 5496a2b0f..0af165b1f 100644 --- a/src/leapfrogai_ui/src/lib/components/Message.svelte +++ b/src/leapfrogai_ui/src/lib/components/Message.svelte @@ -197,7 +197,7 @@ {/if}
- {#if message.role === 'user' && !editMode} + {#if message.role === 'user' && !editMode && !message.metadata?.wasTranscriptionOrTranslation} (editMode = true)} @@ -219,7 +219,7 @@ {/if} - {#if message.role !== 'user' && isLastMessage && !$threadsStore.sendingBlocked} + {#if message.role !== 'user' && isLastMessage && !$threadsStore.sendingBlocked && !message.metadata?.wasTranscriptionOrTranslation} import { fade } from 'svelte/transition'; - import { CloseOutline, FileOutline } from 'flowbite-svelte-icons'; + import { CloseOutline, FileMusicOutline, FileOutline } from 'flowbite-svelte-icons'; import { getFileType } from '$lib/utils/files.js'; import { Card, Spinner, ToolbarButton } from 'flowbite-svelte'; import { createEventDispatcher } from 'svelte'; @@ -20,11 +20,11 @@ data-testid={`${fileMetadata.name}-file-uploaded-card`} horizontal padding="xs" - class="w-80 min-w-72" + class="w-80 min-w-72 border-none" on:mouseenter={() => (hovered = true)} on:mouseleave={() => (hovered = false)} > -
+
{#if fileMetadata.status === 'uploading'} @@ -33,16 +33,20 @@ + {:else if fileMetadata.type.startsWith('audio/')} + {:else} {/if}
-
+
{fileMetadata.name}
diff --git a/src/leapfrogai_ui/src/lib/components/UploadedFileCards.svelte b/src/leapfrogai_ui/src/lib/components/UploadedFileCards.svelte index 1c6fef92b..85ddfe7b3 100644 --- a/src/leapfrogai_ui/src/lib/components/UploadedFileCards.svelte +++ b/src/leapfrogai_ui/src/lib/components/UploadedFileCards.svelte @@ -1,18 +1,20 @@
0 - ? 'ml-6 flex max-w-full gap-2 overflow-x-auto bg-gray-700 ' + ? 'ml-6 flex max-w-full gap-2 overflow-x-auto bg-gray-700' : 'hidden'} > {#each attachedFileMetadata as fileMetadata} diff --git a/src/leapfrogai_ui/src/lib/constants/index.ts b/src/leapfrogai_ui/src/lib/constants/index.ts index 5379d858a..3736f4714 100644 --- a/src/leapfrogai_ui/src/lib/constants/index.ts +++ b/src/leapfrogai_ui/src/lib/constants/index.ts @@ -4,8 +4,9 @@ export const MAX_LABEL_SIZE = 100; export const DEFAULT_ASSISTANT_TEMP = 0.2; export const MAX_AVATAR_SIZE = 5000000; export const MAX_FILE_SIZE = 512000000; +export const MAX_AUDIO_FILE_SIZE = 25000000; export const MAX_FILE_NAME_SIZE = 27; -export const MAX_NUM_FILES_UPLOAD = 50; // for chat completion +export const MAX_NUM_FILES_UPLOAD = 10; // for chat completion // PER OPENAI SPEC export const ASSISTANTS_NAME_MAX_LENGTH = 256; @@ -37,6 +38,19 @@ export const assistantDefaults: Omit = { temperature: 0.2, response_format: 'auto' }; + +export const ACCEPTED_AUDIO_FILE_TYPES = [ + '.flac', + '.mp3', + '.mp4', + '.mpeg', + '.mpga', + '.m4a', + '.ogg', + '.wav', + '.webm' +]; + export const ACCEPTED_FILE_TYPES = [ '.pdf', '.txt', @@ -47,8 +61,21 @@ export const ACCEPTED_FILE_TYPES = [ '.pptx', '.doc', '.docx', - '.csv' + '.csv', + ...ACCEPTED_AUDIO_FILE_TYPES ]; + +export const ACCEPTED_AUDIO_FILE_MIME_TYPES = [ + 'audio/flac', + 'audio/mpeg', + 'audio/mp4', + 'audio/x-m4a', + 'audio/mpeg', + 'audio/ogg', + 'audio/wav', + 'audio/webm' +]; + export const ACCEPTED_MIME_TYPES = [ 'application/pdf', // .pdf 'text/plain', // .txt, .text @@ -58,7 +85,8 @@ export const ACCEPTED_MIME_TYPES = [ 'application/vnd.openxmlformats-officedocument.presentationml.presentation', // .pptx 'application/msword', // .doc 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', //.docx, - 'text/csv' + 'text/csv', + ...ACCEPTED_AUDIO_FILE_MIME_TYPES ]; export const FILE_TYPE_MAP = { @@ -71,13 +99,16 @@ export const FILE_TYPE_MAP = { 
'application/vnd.openxmlformats-officedocument.presentationml.presentation': 'PPTX', 'application/msword': 'DOC', 'application/vnd.openxmlformats-officedocument.wordprocessingml.document': 'DOCX', - 'text/csv': 'CSV' + 'text/csv': 'CSV', + ...ACCEPTED_AUDIO_FILE_MIME_TYPES.reduce((acc, type) => ({ ...acc, [type]: 'AUDIO' }), {}) }; export const NO_FILE_ERROR_TEXT = 'Please upload an image or select a pictogram'; export const AVATAR_FILE_SIZE_ERROR_TEXT = `File must be less than ${MAX_AVATAR_SIZE / 1000000} MB`; export const FILE_SIZE_ERROR_TEXT = `File must be less than ${MAX_FILE_SIZE / 1000000} MB`; +export const AUDIO_FILE_SIZE_ERROR_TEXT = `Audio file must be less than ${MAX_AUDIO_FILE_SIZE / 1000000} MB`; export const INVALID_FILE_TYPE_ERROR_TEXT = `Invalid file type, accepted types are: ${ACCEPTED_FILE_TYPES.join(', ')}`; +export const INVALID_AUDIO_FILE_TYPE_ERROR_TEXT = `Invalid file type, accepted types are: ${ACCEPTED_AUDIO_FILE_TYPES.join(', ')}`; export const NO_SELECTED_ASSISTANT_ID = 'noSelectedAssistantId'; export const FILE_UPLOAD_PROMPT = "The following are the user's files: "; diff --git a/src/leapfrogai_ui/src/lib/constants/toastMessages.ts b/src/leapfrogai_ui/src/lib/constants/toastMessages.ts index a04760c2d..5a356ab06 100644 --- a/src/leapfrogai_ui/src/lib/constants/toastMessages.ts +++ b/src/leapfrogai_ui/src/lib/constants/toastMessages.ts @@ -1,10 +1,4 @@ -import { MAX_NUM_FILES_UPLOAD } from '$constants/index'; - -type ToastData = { - kind: ToastKind; - title: string; - subtitle?: string; -}; +import { AUDIO_FILE_SIZE_ERROR_TEXT, MAX_NUM_FILES_UPLOAD } from '$constants/index'; export const ERROR_SAVING_MSG_TOAST = (override: Partial = {}): ToastData => ({ kind: 'error', @@ -71,3 +65,17 @@ export const FILE_VECTOR_TIMEOUT_MSG_TOAST = (override: Partial = {}) subtitle: 'There was an error processing assistant files', ...override }); + +export const FILE_TRANSLATION_ERROR = (override: Partial = {}): ToastData => ({ + kind: 'error', + title: 
'Translation Error', + subtitle: 'Error translating audio file', + ...override +}); + +export const AUDIO_FILE_SIZE_ERROR_TOAST = (override: Partial = {}): ToastData => ({ + kind: 'error', + title: 'File Too Large', + subtitle: AUDIO_FILE_SIZE_ERROR_TEXT, + ...override +}); diff --git a/src/leapfrogai_ui/src/lib/helpers/apiHelpers.ts b/src/leapfrogai_ui/src/lib/helpers/apiHelpers.ts new file mode 100644 index 000000000..923098af0 --- /dev/null +++ b/src/leapfrogai_ui/src/lib/helpers/apiHelpers.ts @@ -0,0 +1,35 @@ +import { error } from '@sveltejs/kit'; + +/* + * A generic error handler to log and structure error responses + * Try/catch catch blocks can pass their error to this function + * ex. + * catch (e) { + handleError(e, { id: file.id }); + } + */ +export const handleError = (e: unknown, additionalErrorInfo?: object) => { + console.error('Caught Error:', e); + + let status = 500; + let message = 'Internal Server Error'; + + if (e instanceof Error) { + message = e.message; + } else if (typeof e === 'object' && e !== null && 'status' in e) { + status = (e as { status: number }).status || 500; + message = + (e as unknown as { body: { message: string } }).body.message || 'Internal Server Error'; + } + error(status, { message, ...additionalErrorInfo }); +}; + +// In the test environment, formData.get('file') does not return a file of type File, so we mock it differently +// with this helper +export const requestWithFormData = (mockFile: unknown) => { + return { + formData: vi.fn().mockResolvedValue({ + get: vi.fn().mockReturnValue(mockFile) + }) + } as unknown as Request; +}; diff --git a/src/leapfrogai_ui/src/lib/helpers/fileHelpers.test.ts b/src/leapfrogai_ui/src/lib/helpers/fileHelpers.test.ts new file mode 100644 index 000000000..73f63f452 --- /dev/null +++ b/src/leapfrogai_ui/src/lib/helpers/fileHelpers.test.ts @@ -0,0 +1,146 @@ +import { faker } from '@faker-js/faker'; +import type { FileMetadata } from '$lib/types/files'; +import { 
removeFilesUntilUnderLimit, updateFileMetadata } from '$helpers/fileHelpers'; +import { FILE_CONTEXT_TOO_LARGE_ERROR_MSG } from '$constants/errors'; +import { getMockFileMetadata } from '$testUtils/fakeData'; + +describe('removeFilesUntilUnderLimit', () => { + test('removeFilesUntilUnderLimit should remove the largest file until total size is under the max limit', () => { + // Metadata stringified without text is 95 characters, so the text added below will increase the size from that + // baseline + const text1 = faker.word.words(50); + const text2 = faker.word.words(150); + const text3 = faker.word.words(200); + const files = [ + getMockFileMetadata({ text: text1 }), + getMockFileMetadata({ text: text2 }), + getMockFileMetadata({ text: text3 }) + ]; + + // Files 2 and 3 will have different length once their text is removed and they are set to error status + const file2WhenError = JSON.stringify({ + ...files[1], + text: '', + status: 'error', + errorText: FILE_CONTEXT_TOO_LARGE_ERROR_MSG + }); + const file3WhenError = JSON.stringify({ + ...files[2], + text: '', + status: 'error', + errorText: FILE_CONTEXT_TOO_LARGE_ERROR_MSG + }); + + const totalSize = files.reduce((total, file) => total + JSON.stringify(file).length, 0); + + // Expected length when it removes the text of the last two files, but replaces them with error status + const maxLimit = + totalSize - + JSON.stringify(files[1]).length - + JSON.stringify(files[2]).length + + file2WhenError.length + + file3WhenError.length; + + removeFilesUntilUnderLimit(files, maxLimit); + + // file 1 remains + expect(files[0].text).toBe(text1); + + // file 2 and 3 are removed and set to error status + expect(files[1].text).toBe(''); + expect(files[1].status).toBe('error'); + expect(files[1].errorText).toBe(FILE_CONTEXT_TOO_LARGE_ERROR_MSG); + expect(files[2].text).toBe(''); + expect(files[2].status).toBe('error'); + expect(files[2].errorText).toBe(FILE_CONTEXT_TOO_LARGE_ERROR_MSG); + + // Also check that the total size is 
under the limit + const totalSizeRecalculated = files.reduce( + (total, file) => total + JSON.stringify(file).length, + 0 + ); + + expect(totalSizeRecalculated).toBeLessThanOrEqual(maxLimit); + }); + + it('should not modify files if total size is already under the max limit', () => { + const text1 = faker.word.words(10); + const text2 = faker.word.words(20); + + const files = [getMockFileMetadata({ text: text1 }), getMockFileMetadata({ text: text2 })]; + + // Assume a limit of 50 characters + const maxLimit = 10000000000000; + + // Call the function to test + removeFilesUntilUnderLimit(files, maxLimit); + + // Expect no modifications to the files + expect(files[0].text).toBe(text1); + expect(files[1].text).toBe(text2); + }); + + it('can remove all files text and avoiding hanging (breaks out of while loop)', () => { + const text1 = faker.word.words(10); + const text2 = faker.word.words(20); + + const files = [getMockFileMetadata({ text: text1 }), getMockFileMetadata({ text: text2 })]; + + // Assume a limit of 50 characters + const maxLimit = 5; + + // Call the function to test + removeFilesUntilUnderLimit(files, maxLimit); + + // Expect no modifications to the files + expect(files[0].text).toBe(''); + expect(files[1].text).toBe(''); + expect(files[0].status).toBe('error'); + expect(files[1].status).toBe('error'); + }); +}); + +describe('updateFileMetadata with order preservation', () => { + it('should update existing files, add new files, and preserve the original order of old metadata', () => { + const file1 = getMockFileMetadata({ status: 'complete' }); + const file2 = getMockFileMetadata({ status: 'uploading' }); + const file3 = getMockFileMetadata({ status: 'complete' }); + const file4 = getMockFileMetadata({ status: 'complete' }); + + // 3 files + const oldMetadata: FileMetadata[] = [file1, file2, file3]; + + // updated file, and new file + const newMetadata: FileMetadata[] = [file1, { ...file2, status: 'complete' }, file3, file4]; + + const result = 
updateFileMetadata(oldMetadata, newMetadata); + + expect(result).toHaveLength(4); + + // Check if the order is preserved and one file was updated and new one added + + expect(result[0]).toEqual(file1); + expect(result[1]).toEqual({ + ...file2, + status: 'complete' + }); + + expect(result[2]).toEqual(file3); + expect(result[3]).toEqual(file4); + }); + + it('should return new metadata if old metadata is empty', () => { + const oldMetadata: FileMetadata[] = []; + const newMetadata: FileMetadata[] = [getMockFileMetadata()]; + const result = updateFileMetadata(oldMetadata, newMetadata); + expect(result).toHaveLength(1); + expect(result).toEqual(newMetadata); + }); + + it('should keep old metadata if no new metadata is provided', () => { + const oldMetadata: FileMetadata[] = [getMockFileMetadata()]; + const newMetadata: FileMetadata[] = []; + const result = updateFileMetadata(oldMetadata, newMetadata); + expect(result).toEqual(oldMetadata); + }); +}); diff --git a/src/leapfrogai_ui/src/lib/helpers/fileHelpers.ts b/src/leapfrogai_ui/src/lib/helpers/fileHelpers.ts index bc459624f..a0cd0fc5b 100644 --- a/src/leapfrogai_ui/src/lib/helpers/fileHelpers.ts +++ b/src/leapfrogai_ui/src/lib/helpers/fileHelpers.ts @@ -1,5 +1,6 @@ -import type { FileRow } from '$lib/types/files'; +import type { FileMetadata, FileRow } from '$lib/types/files'; import type { FileObject } from 'openai/resources/files'; +import { FILE_CONTEXT_TOO_LARGE_ERROR_MSG } from '$constants/errors'; export const convertFileObjectToFileRows = (files: FileObject[]): FileRow[] => files.map((file) => ({ @@ -8,3 +9,54 @@ export const convertFileObjectToFileRows = (files: FileObject[]): FileRow[] => created_at: file.created_at * 1000, status: 'hide' })); + +export const removeFilesUntilUnderLimit = (parsedFiles: FileMetadata[], max: number) => { + const numFiles = parsedFiles.length; + let numFilesReset = 0; + let totalTextLength = parsedFiles.reduce((total, file) => total + JSON.stringify(file).length, 0); + // Remove 
the largest files until the total size is within the allowed limit + while (totalTextLength > max) { + if (numFilesReset === numFiles) break; + let largestIndex = 0; + for (let i = 1; i < numFiles; i++) { + const item = JSON.stringify(parsedFiles[i]); + const largestItem = JSON.stringify(parsedFiles[largestIndex]); + if (item.length > largestItem.length) { + largestIndex = i; + } + } + + // remove the text and set to error status + parsedFiles[largestIndex] = { + ...parsedFiles[largestIndex], + text: '', + status: 'error', + errorText: FILE_CONTEXT_TOO_LARGE_ERROR_MSG + }; + numFilesReset += 1; + totalTextLength = parsedFiles.reduce((total, file) => total + JSON.stringify(file).length, 0); //recalculate total size + } +}; + +// Combines old and new file metadata, updating the old metadata with new metadata +export const updateFileMetadata = ( + oldMetadata: FileMetadata[], + newMetadata: FileMetadata[] +): FileMetadata[] => { + // Create a map of new metadata + const newMetadataMap = new Map(newMetadata.map((file) => [file.id, file])); + + // Update and keep the original order from old metadata + const updatedMetadata = oldMetadata.map((oldFile) => { + const newFile = newMetadataMap.get(oldFile.id); + return newFile ? 
{ ...oldFile, ...newFile } : oldFile; + }); + + // Filter out new files that aren't already in the old metadata + const newFilesToAdd = newMetadata.filter( + (newFile) => !oldMetadata.some((oldFile) => oldFile.id === newFile.id) + ); + + // Append new files at the end while keeping the original order of oldMetadata + return [...updatedMetadata, ...newFilesToAdd]; +}; diff --git a/src/leapfrogai_ui/src/lib/mocks/chat-mocks.ts b/src/leapfrogai_ui/src/lib/mocks/chat-mocks.ts index 714f56dc9..e820f32d4 100644 --- a/src/leapfrogai_ui/src/lib/mocks/chat-mocks.ts +++ b/src/leapfrogai_ui/src/lib/mocks/chat-mocks.ts @@ -5,6 +5,7 @@ import type { LFMessage, NewMessageInput } from '$lib/types/messages'; import type { LFAssistant } from '$lib/types/assistants'; import { createStreamDataTransformer, StreamingTextResponse } from 'ai'; import type { LFThread } from '$lib/types/threads'; +import { AUDIO_FILE_SIZE_ERROR_TEXT } from '$constants'; type MockChatCompletionOptions = { responseMsg?: string[]; @@ -98,3 +99,30 @@ export const mockGetThread = (thread: LFThread) => { }) ); }; + +export const mockTranslation = () => { + server.use( + http.post('/api/audio/translation', () => { + return HttpResponse.json({ text: 'fake translation' }); + }) + ); +}; + +export const mockTranslationError = () => { + server.use( + http.post('/api/audio/translation', () => { + return new HttpResponse(null, { status: 500 }); + }) + ); +}; + +export const mockTranslationFileSizeError = () => { + server.use( + http.post('/api/audio/translation', () => { + return HttpResponse.json( + { message: `ValidationError: ${AUDIO_FILE_SIZE_ERROR_TEXT}` }, + { status: 400 } + ); + }) + ); +}; diff --git a/src/leapfrogai_ui/src/lib/mocks/openai.ts b/src/leapfrogai_ui/src/lib/mocks/openai.ts index 25d397d72..704dfa6cd 100644 --- a/src/leapfrogai_ui/src/lib/mocks/openai.ts +++ b/src/leapfrogai_ui/src/lib/mocks/openai.ts @@ -42,6 +42,7 @@ class OpenAI { listAssistants: boolean; deleteFile: boolean; fileContent: 
boolean; + translation: boolean; }; constructor({ apiKey, baseURL }: { apiKey: string; baseURL: string }) { @@ -71,7 +72,8 @@ class OpenAI { updateAssistant: false, listAssistants: false, deleteFile: false, - fileContent: false + fileContent: false, + translation: false }; } @@ -117,6 +119,17 @@ class OpenAI { this.errors[key] = false; } + audio = { + translations: { + create: vi.fn().mockImplementation(() => { + if (this.errors.translation) this.throwError('translation'); + return Promise.resolve({ + text: 'Fake translation' + }); + }) + } + }; + files = { retrieve: vi.fn().mockImplementation((id) => { return Promise.resolve(this.uploadedFiles.find((file) => file.id === id)); diff --git a/src/leapfrogai_ui/src/lib/schemas/chat.ts b/src/leapfrogai_ui/src/lib/schemas/chat.ts index efa1db8f6..8835e44c9 100644 --- a/src/leapfrogai_ui/src/lib/schemas/chat.ts +++ b/src/leapfrogai_ui/src/lib/schemas/chat.ts @@ -15,8 +15,8 @@ export const stringIdArraySchema = object({ .noUnknown(true) .strict(); -const contentInputSchema = string().max(Number(env.PUBLIC_MESSAGE_LENGTH_LIMIT)).required(); -const contentInputSchemaNoLength = string().required(); +const contentInputSchema = string().max(Number(env.PUBLIC_MESSAGE_LENGTH_LIMIT)); +const contentInputSchemaNoLength = string(); export const messageInputSchema: ObjectSchema = object({ thread_id: string().required(), diff --git a/src/leapfrogai_ui/src/lib/schemas/files.ts b/src/leapfrogai_ui/src/lib/schemas/files.ts index 5c475e5e1..522f4972b 100644 --- a/src/leapfrogai_ui/src/lib/schemas/files.ts +++ b/src/leapfrogai_ui/src/lib/schemas/files.ts @@ -1,20 +1,29 @@ import { array, mixed, object, string, ValidationError } from 'yup'; import { + ACCEPTED_AUDIO_FILE_MIME_TYPES, ACCEPTED_MIME_TYPES, + AUDIO_FILE_SIZE_ERROR_TEXT, FILE_SIZE_ERROR_TEXT, + INVALID_AUDIO_FILE_TYPE_ERROR_TEXT, INVALID_FILE_TYPE_ERROR_TEXT, + MAX_AUDIO_FILE_SIZE, MAX_FILE_SIZE } from '$constants'; +import type { LFFile } from '$lib/types/files'; export const 
filesSchema = object({ files: array().of( - mixed() + mixed() .test('fileType', 'Please upload a file.', (value) => value == null || value instanceof File) .test('fileSize', FILE_SIZE_ERROR_TEXT, (value) => { if (value == null) { return true; } - if (value.size > MAX_FILE_SIZE) { + if (ACCEPTED_AUDIO_FILE_MIME_TYPES.includes(value.type)) { + if (value.size > MAX_AUDIO_FILE_SIZE) { + return new ValidationError(AUDIO_FILE_SIZE_ERROR_TEXT); + } + } else if (value.size > MAX_FILE_SIZE) { return new ValidationError(FILE_SIZE_ERROR_TEXT); } return true; @@ -40,7 +49,7 @@ export const filesCheckSchema = object({ .strict(); export const fileSchema = object({ - file: mixed() + file: mixed() .test('fileType', 'File is required.', (value) => value == null || value instanceof File) .test('fileSize', FILE_SIZE_ERROR_TEXT, (value) => { if (value == null) { @@ -63,3 +72,28 @@ export const fileSchema = object({ }) .noUnknown(true) .strict(); + +export const audioFileSchema = object({ + file: mixed() + .test('fileType', 'File is required.', (value) => value == null || value instanceof File) + .test('fileSize', AUDIO_FILE_SIZE_ERROR_TEXT, (value) => { + if (value == null) { + return true; + } + if (value.size > MAX_AUDIO_FILE_SIZE) { + return new ValidationError(AUDIO_FILE_SIZE_ERROR_TEXT); + } + return true; + }) + .test('type', INVALID_AUDIO_FILE_TYPE_ERROR_TEXT, (value) => { + if (value == null) { + return true; + } + if (!ACCEPTED_AUDIO_FILE_MIME_TYPES.includes(value.type)) { + return new ValidationError(INVALID_AUDIO_FILE_TYPE_ERROR_TEXT); + } + return true; + }) +}) + .noUnknown(true) + .strict(); diff --git a/src/leapfrogai_ui/src/lib/stores/threads.ts b/src/leapfrogai_ui/src/lib/stores/threads.ts index 354f5e14c..0b9738fbb 100644 --- a/src/leapfrogai_ui/src/lib/stores/threads.ts +++ b/src/leapfrogai_ui/src/lib/stores/threads.ts @@ -8,6 +8,7 @@ import type { LFThread, NewThreadInput } from '$lib/types/threads'; import type { LFMessage } from '$lib/types/messages'; import { 
getMessageText } from '$helpers/threads'; import { saveMessage } from '$helpers/chatHelpers'; +import type { Message } from 'ai'; type ThreadsStore = { threads: LFThread[]; @@ -208,6 +209,63 @@ const createThreadsStore = () => { }); } }, + // The following temp empty message methods are for showing a message skeleton in the messages window + // while waiting for a full response (e.g. waiting for translation response) + addTempEmptyMessage: (threadId: string, tempId: string) => { + update((old) => { + const updatedThreads = [...old.threads]; + const threadIndex = old.threads.findIndex((c) => c.id === threadId); + const oldThread = old.threads[threadIndex]; + updatedThreads[threadIndex] = { + ...oldThread, + messages: [ + ...(oldThread.messages || []), + { id: tempId, role: 'assistant', content: '', createdAt: new Date() } + ] + }; + return { + ...old, + threads: updatedThreads + }; + }); + }, + replaceTempMessageWithActual: (threadId: string, tempId: string, newMessage: LFMessage) => { + update((old) => { + const updatedThreads = [...old.threads]; + const threadIndex = old.threads.findIndex((c) => c.id === threadId); + const tempMessageIndex = old.threads[threadIndex].messages?.findIndex( + (m) => m.id === tempId + ); + if (!tempMessageIndex || tempMessageIndex === -1) return { ...old }; + const oldThread = old.threads[threadIndex]; + const messagesCopy = [...(oldThread.messages || [])]; + messagesCopy[tempMessageIndex] = newMessage; + updatedThreads[threadIndex] = { + ...oldThread, + messages: messagesCopy + }; + return { + ...old, + threads: updatedThreads + }; + }); + }, + removeMessageFromStore: (threadId: string, messageId: string) => { + update((old) => { + const updatedThreads = [...old.threads]; + const threadIndex = old.threads.findIndex((c) => c.id === threadId); + updatedThreads[threadIndex].messages = + old.threads[threadIndex].messages?.filter((m) => m.id !== messageId) || []; + return { ...old, thread: updatedThreads }; + }); + }, + 
updateMessagesState: ( + originalMessages: Message[], + setMessages: (messages: Message[]) => void, + newMessage: LFMessage + ) => { + setMessages([...originalMessages, { ...newMessage, content: getMessageText(newMessage) }]); + }, deleteThread: async (id: string) => { try { await deleteThread(id); diff --git a/src/leapfrogai_ui/src/lib/types/files.d.ts b/src/leapfrogai_ui/src/lib/types/files.d.ts index 37c8db5f0..599260041 100644 --- a/src/leapfrogai_ui/src/lib/types/files.d.ts +++ b/src/leapfrogai_ui/src/lib/types/files.d.ts @@ -27,3 +27,7 @@ export type FileMetadata = { text: string; errorText?: string; }; + +export type LFFile = File & { + id?: string; +}; diff --git a/src/leapfrogai_ui/src/lib/types/messages.d.ts b/src/leapfrogai_ui/src/lib/types/messages.d.ts index 96108edd6..47693ea4a 100644 --- a/src/leapfrogai_ui/src/lib/types/messages.d.ts +++ b/src/leapfrogai_ui/src/lib/types/messages.d.ts @@ -8,7 +8,7 @@ import type { ChatRequestOptions, CreateMessage } from 'ai'; export type NewMessageInput = { thread_id: string; - content: string; + content?: string; role: 'user' | 'assistant'; assistantId?: string; metadata?: unknown; diff --git a/src/leapfrogai_ui/src/lib/types/toast.d.ts b/src/leapfrogai_ui/src/lib/types/toast.d.ts index 924d59ae5..2d22d60b2 100644 --- a/src/leapfrogai_ui/src/lib/types/toast.d.ts +++ b/src/leapfrogai_ui/src/lib/types/toast.d.ts @@ -14,3 +14,9 @@ type ToastNotificationProps = { type ToastStore = { toasts: ToastNotificationProps[]; }; + +type ToastData = { + kind: ToastKind; + title: string; + subtitle?: string; +}; diff --git a/src/leapfrogai_ui/src/routes/api/audio/translation/+server.ts b/src/leapfrogai_ui/src/routes/api/audio/translation/+server.ts new file mode 100644 index 000000000..175b410e5 --- /dev/null +++ b/src/leapfrogai_ui/src/routes/api/audio/translation/+server.ts @@ -0,0 +1,36 @@ +import { error, json } from '@sveltejs/kit'; +import type { RequestHandler } from './$types'; +import { getOpenAiClient } from 
'$lib/server/constants'; +import { audioFileSchema } from '$schemas/files'; +import { env } from '$env/dynamic/private'; +import { handleError } from '$helpers/apiHelpers'; + +export const POST: RequestHandler = async ({ request, locals: { session } }) => { + if (!session) { + error(401, 'Unauthorized'); + } + + let file: File | null; + + // Validate request body + try { + const formData = await request.formData(); + file = formData.get('file') as File; + + await audioFileSchema.validate({ file }, { abortEarly: false }); + } catch (e) { + console.error(e); + error(400, { message: `${e}` }); + } + + try { + const openai = getOpenAiClient(session.access_token); + const translation = await openai.audio.translations.create({ + file: file, + model: env.OPENAI_API_KEY ? 'whisper-1' : 'whisper' + }); + return json({ text: translation.text }); + } catch (e) { + return handleError(e); + } +}; diff --git a/src/leapfrogai_ui/src/routes/api/audio/translation/server.test.ts b/src/leapfrogai_ui/src/routes/api/audio/translation/server.test.ts new file mode 100644 index 000000000..b0aa3733a --- /dev/null +++ b/src/leapfrogai_ui/src/routes/api/audio/translation/server.test.ts @@ -0,0 +1,127 @@ +import { getLocalsMock } from '$lib/mocks/misc'; +import type { RequestEvent } from '@sveltejs/kit'; +import type { RouteParams } from './$types'; +import { POST } from './+server'; +import { mockOpenAI } from '../../../../../vitest-setup'; +import { requestWithFormData } from '$helpers/apiHelpers'; +import * as constants from '$constants'; + +// Allows mocking important constants and only overriding values for specific tests +vi.mock('$constants', async () => { + const actualConstants = await vi.importActual('$constants'); + return { + ...actualConstants + }; +}); + +describe('/api/audio/translation', () => { + it('returns a 401 when there is no session', async () => { + const request = new Request('http://thisurlhasnoeffect', { + method: 'POST' + }); + + await expect( + POST({ + request, + 
params: {}, + locals: getLocalsMock({ nullSession: true }) + } as RequestEvent) + ).rejects.toMatchObject({ + status: 401 + }); + }); + + it('should return 400 if the form data is missing', async () => { + const request = new Request('http://thisurlhasnoeffect', { method: 'POST' }); + await expect( + POST({ request, params: {}, locals: getLocalsMock() } as RequestEvent< + RouteParams, + '/api/audio/translation' + >) + ).rejects.toMatchObject({ status: 400 }); + }); + + it('should return 400 if the file is missing from the form data', async () => { + const request = new Request('http://thisurlhasnoeffect', { + method: 'POST', + body: new FormData() + }); + + await expect( + POST({ request, params: {}, locals: getLocalsMock() } as RequestEvent< + RouteParams, + '/api/audio/translation' + >) + ).rejects.toMatchObject({ + status: 400 + }); + }); + + it('should return 400 if the file in the form data is not of type File', async () => { + const formData = new FormData(); + formData.append('file', '123'); + const request = new Request('http://thisurlhasnoeffect', { + method: 'POST', + body: formData + }); + + await expect( + POST({ request, params: {}, locals: getLocalsMock() } as RequestEvent< + RouteParams, + '/api/audio/translation' + >) + ).rejects.toMatchObject({ + status: 400 + }); + }); + + it('should return 400 if the file is too big', async () => { + // @ts-expect-error - intentionally overriding a constant for testing + vi.spyOn(constants, 'MAX_AUDIO_FILE_SIZE', 'get').mockReturnValueOnce(1); + + const fileContent = new Blob(['dummy content'], { type: 'audio/mp4' }); + const testFile = new File([fileContent], 'test.txt', { type: 'audio/mp4' }); + const request = requestWithFormData(testFile); + + await expect( + POST({ request, params: {}, locals: getLocalsMock() } as RequestEvent< + RouteParams, + '/api/audio/translation' + >) + ).rejects.toMatchObject({ + status: 400 + }); + // Reset the mock after this test + vi.resetModules(); + }); + + it('should return a 
500 if there is an error translating the file', async () => { + mockOpenAI.setError('translation'); + + const fileContent = new Blob(['dummy content'], { type: 'audio/mp4' }); + const testFile = new File([fileContent], 'test.txt', { type: 'audio/mp4' }); + const request = requestWithFormData(testFile); + + await expect( + POST({ request, params: {}, locals: getLocalsMock() } as RequestEvent< + RouteParams, + '/api/audio/translation' + >) + ).rejects.toMatchObject({ + status: 500 + }); + }); + + it('should return translated text', async () => { + const fileContent = new Blob(['dummy content'], { type: 'audio/mp4' }); + const testFile = new File([fileContent], 'test.txt', { type: 'audio/mp4' }); + const request = requestWithFormData(testFile); + + const res = await POST({ request, params: {}, locals: getLocalsMock() } as RequestEvent< + RouteParams, + '/api/audio/translation' + >); + const data = await res.json(); + expect(data).toEqual({ text: 'Fake translation' }); + }); +}); diff --git a/src/leapfrogai_ui/src/routes/api/files/convert/server.test.ts b/src/leapfrogai_ui/src/routes/api/files/convert/server.test.ts index 9a18626a6..3081925f6 100644 --- a/src/leapfrogai_ui/src/routes/api/files/convert/server.test.ts +++ b/src/leapfrogai_ui/src/routes/api/files/convert/server.test.ts @@ -3,6 +3,7 @@ import { getLocalsMock } from '$lib/mocks/misc'; import type { RequestEvent } from '@sveltejs/kit'; import type { RouteParams } from './$types'; import { afterAll } from 'vitest'; +import { requestWithFormData } from '$helpers/apiHelpers'; // Allows swapping out the mock per test const mocks = vi.hoisted(() => { @@ -93,14 +94,7 @@ describe('/api/files/convert', () => { const fileContent = new Blob(['dummy content'], { type: 'text/plain' }); const testFile = new File([fileContent], 'test.txt', { type: 'text/plain' }); - - // In the test environment, formData.get('file') does not return a file of type File, so we mock it differently - // here - const request = { - formData: 
vi.fn().mockResolvedValue({ - get: vi.fn().mockReturnValue(testFile) - }) - } as unknown as Request; + const request = requestWithFormData(testFile); await expect( POST({ request, params: {}, locals: getLocalsMock() } as RequestEvent< @@ -125,13 +119,7 @@ describe('/api/files/convert', () => { const testFile = new File([fileContent], 'test.txt', { type: 'text/plain' }); testFile.arrayBuffer = async () => fileContent.buffer; - // In the test environment, formData.get('file') does not return a file of type File, so we mock it differently - // here - const request = { - formData: vi.fn().mockResolvedValue({ - get: vi.fn().mockReturnValue(testFile) - }) - } as unknown as Request; + const request = requestWithFormData(testFile); const res = await POST({ request, params: {}, locals: getLocalsMock() } as RequestEvent< RouteParams, diff --git a/src/leapfrogai_ui/src/routes/api/files/parse-text/+server.ts b/src/leapfrogai_ui/src/routes/api/files/parse-text/+server.ts new file mode 100644 index 000000000..73bd12a7e --- /dev/null +++ b/src/leapfrogai_ui/src/routes/api/files/parse-text/+server.ts @@ -0,0 +1,68 @@ +import type { RequestHandler } from './$types'; +import * as mupdf from 'mupdf'; +import { error, json } from '@sveltejs/kit'; +import { fileSchema } from '$schemas/files'; +import type { LFFile } from '$lib/types/files'; +import { handleError } from '$helpers/apiHelpers'; + +/** + * Parses text from a file. If the file is not a PDF, it will first convert the file to a + * PDF before parsing the text. 
+ */ +export const POST: RequestHandler = async ({ request, fetch, locals: { session } }) => { + if (!session) { + error(401, 'Unauthorized'); + } + + let file: LFFile | null; + // Validate request body + try { + const formData = await request.formData(); + file = formData.get('file') as LFFile; + await fileSchema.validate({ file }, { abortEarly: false }); + } catch (e) { + console.error('Validation error:', e); + error(400, `Bad Request, File invalid: ${e}`); + } + + try { + let text = ''; + let buffer: ArrayBuffer; + const contentType = file.type; + if (contentType !== 'application/pdf') { + // Convert file to PDF + const formData = new FormData(); + formData.append('file', file); + const convertRes = await fetch('/api/files/convert', { + method: 'POST', + body: formData + }); + + if (!convertRes.ok) { + return error(500, { message: 'Error converting file' }); + } + + const convertedFileBlob = await convertRes.blob(); + buffer = await convertedFileBlob.arrayBuffer(); + } else buffer = await file.arrayBuffer(); + + const document = mupdf.Document.openDocument(buffer, 'application/pdf'); + let i = 0; + while (i < document.countPages()) { + const page = document.loadPage(i); + const json = page.toStructuredText('preserve-whitespace').asJSON(); + for (const block of JSON.parse(json).blocks) { + for (const line of block.lines) { + text += line.text; + } + } + i++; + } + + return json({ + text + }); + } catch (e) { + return handleError(e); + } +}; diff --git a/src/leapfrogai_ui/src/routes/api/files/parse-text/server.test.ts b/src/leapfrogai_ui/src/routes/api/files/parse-text/server.test.ts new file mode 100644 index 000000000..b23e233ea --- /dev/null +++ b/src/leapfrogai_ui/src/routes/api/files/parse-text/server.test.ts @@ -0,0 +1,153 @@ +import { POST } from './+server'; +import { getLocalsMock } from '$lib/mocks/misc'; +import type { RequestEvent } from '@sveltejs/kit'; +import type { RouteParams } from 
'../../../../../.svelte-kit/types/src/routes/api/files/[file_id]/$types'; +import { mockConvertFileErrorNoId, mockConvertFileNoId } from '$lib/mocks/file-mocks'; +import type { LFFile } from '$lib/types/files'; +import { requestWithFormData } from '$helpers/apiHelpers'; +import * as mupdf from 'mupdf'; + +vi.mock('mupdf', () => ({ + Document: { + openDocument: vi.fn() + } +})); + +describe('/api/files/parse-text', () => { + afterEach(() => { + vi.resetAllMocks(); + }); + + it('returns a 401 when there is no session', async () => { + const request = new Request('http://thisurlhasnoeffect', { + method: 'POST' + }); + + await expect( + POST({ request, params: {}, locals: getLocalsMock({ nullSession: true }) } as RequestEvent< + RouteParams, + '/api/files/parse-text' + >) + ).rejects.toMatchObject({ + status: 401 + }); + }); + + it('should return 400 if the form data is missing', async () => { + const request = new Request('http://thisurlhasnoeffect', { + method: 'POST' + }); + + await expect( + POST({ request, params: {}, locals: getLocalsMock() } as RequestEvent< + RouteParams, + '/api/files/parse-text' + >) + ).rejects.toMatchObject({ + status: 400 + }); + }); + + it('should return 400 if the file is missing from the form data', async () => { + const request = new Request('http://thisurlhasnoeffect', { + method: 'POST', + body: new FormData() + }); + + await expect( + POST({ request, params: {}, locals: getLocalsMock() } as RequestEvent< + RouteParams, + '/api/files/parse-text' + >) + ).rejects.toMatchObject({ + status: 400 + }); + }); + + it('should return 400 if the file in the form data is not of type File', async () => { + const formData = new FormData(); + formData.append('file', '123'); + const request = new Request('http://thisurlhasnoeffect', { + method: 'POST', + body: formData + }); + + await expect( + POST({ request, params: {}, locals: getLocalsMock() } as RequestEvent< + RouteParams, + '/api/files/parse-text' + >) + ).rejects.toMatchObject({ + status: 
400 + }); + }); + + it('returns 500 if there is an error converting a file to PDF', async () => { + mockConvertFileErrorNoId(); + const mockFile1: LFFile = new File(['content1'], 'test1.txt', { type: 'text/plain' }); + mockFile1.id = '1'; + const request = requestWithFormData(mockFile1); + + await expect( + POST({ request, fetch: global.fetch, params: {}, locals: getLocalsMock() } as RequestEvent< + RouteParams, + '/api/files/parse-text' + >) + ).rejects.toMatchObject({ + status: 500, + body: { message: 'Error converting file' } + }); + }); + + it("returns 500 if there is an error parsing a PDF's text", async () => { + mupdf.Document.openDocument.mockImplementationOnce(() => { + throw new Error('Mocked error'); + }); + + mockConvertFileNoId('this is ignored'); + const mockFile1: LFFile = new File(['content1'], 'test1.txt', { type: 'text/plain' }); + mockFile1.id = '1'; + const request = requestWithFormData(mockFile1); + + await expect( + POST({ request, fetch: global.fetch, params: {}, locals: getLocalsMock() } as RequestEvent< + RouteParams, + '/api/files/parse-text' + >) + ).rejects.toMatchObject({ + status: 500, + body: { message: 'Mocked error' } + }); + }); + + it('parses text for a PDF file', async () => { + mupdf.Document.openDocument.mockReturnValue({ + countPages: vi.fn().mockReturnValue(1), + loadPage: vi.fn().mockReturnValue({ + toStructuredText: vi.fn().mockReturnValue({ + asJSON: vi.fn().mockReturnValue( + JSON.stringify({ + blocks: [{ lines: [{ text: 'Mocked PDF content' }] }] + }) + ) + }) + }) + }); + + mockConvertFileNoId('this is ignored'); + const mockFile1: LFFile = new File(['this is ignored'], 'test1.txt', { type: 'text/plain' }); + mockFile1.id = '1'; + + const request = requestWithFormData(mockFile1); + const res = await POST({ + request, + fetch: global.fetch, + params: {}, + locals: getLocalsMock() + } as RequestEvent); + expect(res.status).toEqual(200); + const resJson = await res.json(); + + expect(resJson.text).toEqual('Mocked PDF 
content'); + }); +}); diff --git a/src/leapfrogai_ui/src/routes/api/messages/new/+server.ts b/src/leapfrogai_ui/src/routes/api/messages/new/+server.ts index f12a16123..01ec01a72 100644 --- a/src/leapfrogai_ui/src/routes/api/messages/new/+server.ts +++ b/src/leapfrogai_ui/src/routes/api/messages/new/+server.ts @@ -16,7 +16,6 @@ export const POST: RequestHandler = async ({ request, locals: { session } }) => try { requestData = (await request.json()) as NewMessageInput; if (requestData.lengthOverride) { - // TODO await messageInputSchema.validate(requestData, { abortEarly: false }); } else { await messageInputSchema.validate(requestData, { abortEarly: false }); diff --git a/src/leapfrogai_ui/src/routes/chat/(dashboard)/[[thread_id]]/+page.server.ts b/src/leapfrogai_ui/src/routes/chat/(dashboard)/[[thread_id]]/+page.server.ts deleted file mode 100644 index c5c3c35c7..000000000 --- a/src/leapfrogai_ui/src/routes/chat/(dashboard)/[[thread_id]]/+page.server.ts +++ /dev/null @@ -1,114 +0,0 @@ -import * as mupdf from 'mupdf'; -import { fail, superValidate, withFiles } from 'sveltekit-superforms'; -import { yup } from 'sveltekit-superforms/adapters'; -import type { Actions } from './$types'; -import { filesSchema } from '$schemas/files'; -import type { FileMetadata } from '$lib/types/files'; -import { env } from '$env/dynamic/public'; -import { shortenFileName } from '$helpers/stringHelpers'; -import { APPROX_MAX_CHARACTERS, FILE_UPLOAD_PROMPT } from '$constants'; -import { ERROR_UPLOADING_FILE_MSG, FILE_CONTEXT_TOO_LARGE_ERROR_MSG } from '$constants/errors'; - -// Ensure length of file context message does not exceed total context window when including the -// file upload prompt, user's message, and string quotes -const ADJUSTED_MAX = - APPROX_MAX_CHARACTERS - Number(env.PUBLIC_MESSAGE_LENGTH_LIMIT) - FILE_UPLOAD_PROMPT.length - 2; - -export const actions: Actions = { - // Handles parsing text from files, will convert file to pdf if is not already - default: async ({ 
request, fetch, locals: { session } }) => { - if (!session) { - return fail(401, { message: 'Unauthorized' }); - } - - const form = await superValidate(request, yup(filesSchema)); - - if (!form.valid) { - console.log( - 'Files form action: Invalid form submission.', - 'id:', - form.id, - 'errors:', - form.errors - ); - return fail(400, { form }); - } - - const extractedFilesText: FileMetadata[] = []; - - if (form.data.files && form.data.files.length > 0) { - for (const file of form.data.files) { - let text = ''; - if (file) { - try { - let buffer: ArrayBuffer; - const contentType = file.type; - if (contentType !== 'application/pdf') { - // Convert file to PDF - const formData = new FormData(); - formData.append('file', file); - const convertRes = await fetch('/api/files/convert', { - method: 'POST', - body: formData - }); - - if (!convertRes.ok) { - throw new Error('Error converting file'); //caught locally - } - - const convertedFileBlob = await convertRes.blob(); - buffer = await convertedFileBlob.arrayBuffer(); - } else buffer = await file.arrayBuffer(); - - const document = mupdf.Document.openDocument(buffer, 'application/pdf'); - let i = 0; - while (i < document.countPages()) { - const page = document.loadPage(i); - const json = page.toStructuredText('preserve-whitespace').asJSON(); - for (const block of JSON.parse(json).blocks) { - for (const line of block.lines) { - text += line.text; - } - } - i++; - } - - extractedFilesText.push({ - name: shortenFileName(file.name), - type: file.type, - text, - status: 'complete' - }); - - // If this file adds too much text (larger than allowed max), remove the text and set to error status - const totalTextLength = extractedFilesText.reduce( - (acc, fileMetadata) => acc + JSON.stringify(fileMetadata).length, - 0 - ); - if (totalTextLength > ADJUSTED_MAX) { - extractedFilesText[extractedFilesText.length - 1] = { - name: shortenFileName(file.name), - type: file.type, - text: '', - status: 'error', - errorText: 
FILE_CONTEXT_TOO_LARGE_ERROR_MSG - }; - } - } catch (e) { - console.error(`Error uploading file: ${file}: ${e}`); - extractedFilesText.push({ - name: shortenFileName(file.name), - type: file.type, - text: '', - status: 'error', - errorText: ERROR_UPLOADING_FILE_MSG - }); - } - } - } - - return withFiles({ extractedFilesText, form }); - } - return fail(400, { form }); - } -}; diff --git a/src/leapfrogai_ui/src/routes/chat/(dashboard)/[[thread_id]]/+page.svelte b/src/leapfrogai_ui/src/routes/chat/(dashboard)/[[thread_id]]/+page.svelte index 4613c9141..5c5179b99 100644 --- a/src/leapfrogai_ui/src/routes/chat/(dashboard)/[[thread_id]]/+page.svelte +++ b/src/leapfrogai_ui/src/routes/chat/(dashboard)/[[thread_id]]/+page.svelte @@ -24,9 +24,10 @@ } from '$constants/toastMessages'; import SelectAssistantDropdown from '$components/SelectAssistantDropdown.svelte'; import { PaperPlaneOutline, StopOutline } from 'flowbite-svelte-icons'; - import type { FileMetadata } from '$lib/types/files'; + import type { FileMetadata, LFFile } from '$lib/types/files'; import UploadedFileCards from '$components/UploadedFileCards.svelte'; - import ChatFileUploadForm from '$components/ChatFileUploadForm.svelte'; + import ChatFileUploadForm from '$components/ChatFileUpload.svelte'; + import FileChatActions from '$components/FileChatActions.svelte'; export let data; @@ -34,7 +35,8 @@ let lengthInvalid: boolean; // bound to child LFTextArea let assistantsList: Array<{ id: string; text: string }>; let uploadingFiles = false; - let attachedFileMetadata: FileMetadata[] = []; + let attachedFiles: LFFile[] = []; // the actual files uploaded + let attachedFileMetadata: FileMetadata[] = []; // metadata about the files uploaded, e.g. upload status, extracted text, etc... 
/** END LOCAL VARS **/ /** REACTIVE STATE **/ @@ -229,14 +231,10 @@ }, lengthOverride: true }); - setChatMessages([ - ...$chatMessages, - { ...contextMsg, content: getMessageText(contextMsg) } - ]); + threadsStore.updateMessagesState($chatMessages, setChatMessages, contextMsg); } // Save with API - const newMessage = await saveMessage({ thread_id: data.thread.id, content: $chatInput, @@ -331,7 +329,7 @@
-
+
{#each activeThreadMessages as message, index (message.id)} {#if message.metadata?.hideMessage !== 'true'}

-
+
-
- +
0 && 'py-4' + )} + > +
{#if !assistantMode} - + {/if} + /> {#if !$isLoading && $status !== 'in_progress'} {/if}
+
diff --git a/src/leapfrogai_ui/src/routes/chat/(dashboard)/[[thread_id]]/+page.ts b/src/leapfrogai_ui/src/routes/chat/(dashboard)/[[thread_id]]/+page.ts index 064331c1c..b3ab7fbec 100644 --- a/src/leapfrogai_ui/src/routes/chat/(dashboard)/[[thread_id]]/+page.ts +++ b/src/leapfrogai_ui/src/routes/chat/(dashboard)/[[thread_id]]/+page.ts @@ -2,13 +2,9 @@ import type { PageLoad } from './$types'; import { browser } from '$app/environment'; import type { LFThread } from '$lib/types/threads'; import { threadsStore } from '$stores'; -import { superValidate } from 'sveltekit-superforms'; -import { yup } from 'sveltekit-superforms/adapters'; -import { filesSchema } from '$schemas/files'; export const load: PageLoad = async ({ params, fetch }) => { const promises = [fetch('/api/assistants'), fetch('/api/files')]; - const form = await superValidate(yup(filesSchema)); if (params.thread_id) promises.push(fetch(`/api/threads/${params.thread_id}`)); @@ -29,5 +25,5 @@ export const load: PageLoad = async ({ params, fetch }) => { } } - return { thread, assistants, files, form }; + return { thread, assistants, files }; }; diff --git a/src/leapfrogai_ui/src/routes/chat/(dashboard)/[[thread_id]]/form-action.test.ts b/src/leapfrogai_ui/src/routes/chat/(dashboard)/[[thread_id]]/form-action.test.ts deleted file mode 100644 index 9f428553d..000000000 --- a/src/leapfrogai_ui/src/routes/chat/(dashboard)/[[thread_id]]/form-action.test.ts +++ /dev/null @@ -1,174 +0,0 @@ -import { getLocalsMock } from '$lib/mocks/misc'; -import type { ActionFailure, RequestEvent } from '@sveltejs/kit'; -import type { RouteParams } from './$types'; -import { actions } from './+page.server'; -import { mockConvertFileErrorNoId, mockConvertFileNoId } from '$lib/mocks/file-mocks'; -import * as superforms from 'sveltekit-superforms'; -import { afterAll } from 'vitest'; - -vi.mock('mupdf', () => ({ - Document: { - openDocument: vi.fn().mockReturnValue({ - countPages: vi.fn().mockReturnValue(1), - loadPage: 
vi.fn().mockReturnValue({ - toStructuredText: vi.fn().mockReturnValue({ - asJSON: vi.fn().mockReturnValue( - JSON.stringify({ - blocks: [{ lines: [{ text: 'Mocked PDF content' }] }] - }) - ) - }) - }) - }) - } -})); - -describe('chat page form action', () => { - afterAll(() => { - vi.restoreAllMocks(); - }); - it('returns a 401 if the request is unauthenticated', async () => { - const request = new Request('https://thisurldoesntmatter', { - method: 'POST' - }); - const res = await actions.default({ - request, - locals: getLocalsMock({ nullSession: true }) - } as RequestEvent); - - expect(res?.status).toEqual(401); - }); - - it('returns a 400 if the request data is invalid', async () => { - const request = new Request('https://thisurldoesntmatter', { - method: 'POST' - }); - - const res = await actions.default({ request, locals: getLocalsMock() } as RequestEvent< - RouteParams, - '/chat/(dashboard)/[[thread_id]]' - >); - expect(res?.status).toEqual(400); - }); - - it('returns a 400 if after validation there are no files', async () => { - vi.spyOn(superforms, 'superValidate').mockResolvedValue({ - valid: true, - data: { - files: [] - }, - id: '', - posted: false, - errors: {} - }); - - const request = new Request('https://thisurldoesntmatter', { - method: 'POST', - headers: { - 'Content-Type': 'multipart/form-data' - } - }); - - const res = (await actions.default({ - request, - fetch: global.fetch, - locals: getLocalsMock() - } as RequestEvent)) as ActionFailure; - - expect(res?.status).toEqual(400); - }); - - it('sets a file to error status if there is an error converting it', async () => { - mockConvertFileErrorNoId(); - - const mockFile1 = new File(['content1'], 'test1.txt', { type: 'text/plain' }); - - vi.spyOn(superforms, 'superValidate').mockResolvedValue({ - valid: true, - data: { - files: [mockFile1] - }, - id: '', - posted: false, - errors: {} - }); - - const request = new Request('https://thisurldoesntmatter', { - method: 'POST', - headers: { - 
'Content-Type': 'multipart/form-data' - } - }); - - const res = (await actions.default({ - request, - fetch: global.fetch, - locals: getLocalsMock() - } as RequestEvent)) as ActionFailure; - - expect(res.extractedFilesText[0].status).toEqual('error'); - }); - - it('sets a file to error status if there is an error converting it', async () => { - mockConvertFileErrorNoId(); - - const mockFile1 = new File(['content1'], 'test1.txt', { type: 'text/plain' }); - - vi.spyOn(superforms, 'superValidate').mockResolvedValue({ - valid: true, - data: { - files: [mockFile1] - }, - id: '', - posted: false, - errors: {} - }); - - const request = new Request('https://thisurldoesntmatter', { - method: 'POST', - headers: { - 'Content-Type': 'multipart/form-data' - } - }); - - const res = (await actions.default({ - request, - fetch: global.fetch, - locals: getLocalsMock() - } as RequestEvent)) as ActionFailure; - - expect(res.extractedFilesText[0].status).toEqual('error'); - }); - - it('returns files with their text content', async () => { - mockConvertFileNoId('this is ignored'); - - const mockFile1 = new File(['this is ignored'], 'test1.txt', { type: 'text/plain' }); - - vi.spyOn(superforms, 'superValidate').mockResolvedValue({ - valid: true, - data: { - files: [mockFile1] - }, - id: '', - posted: false, - errors: {} - }); - - const request = new Request('https://thisurldoesntmatter', { - method: 'POST', - headers: { - 'Content-Type': 'multipart/form-data' - } - }); - - const res = (await actions.default({ - request, - fetch: global.fetch, - locals: getLocalsMock() - } as RequestEvent)) as ActionFailure; - - expect(res.extractedFilesText[0].status).toEqual('complete'); - expect(res.extractedFilesText[0].text).toEqual('Mocked PDF content'); - }); -}); diff --git a/src/leapfrogai_ui/svelte.config.js b/src/leapfrogai_ui/svelte.config.js index 97f5b0c2f..f4934c1d1 100644 --- a/src/leapfrogai_ui/svelte.config.js +++ b/src/leapfrogai_ui/svelte.config.js @@ -13,6 +13,7 @@ const config = { 
// If your environment is not supported or you settled on a specific environment, switch out the adapter. // See https://kit.svelte.dev/docs/adapters for more information about adapters. adapter: adapter(), + csrf: { checkOrigin: process.env.NODE_ENV !== 'development' }, alias: { $components: 'src/lib/components', $webComponents: 'src/lib/web-components', diff --git a/src/leapfrogai_ui/testUtils/fakeData/index.ts b/src/leapfrogai_ui/testUtils/fakeData/index.ts index 98cc60645..7ead385af 100644 --- a/src/leapfrogai_ui/testUtils/fakeData/index.ts +++ b/src/leapfrogai_ui/testUtils/fakeData/index.ts @@ -16,6 +16,7 @@ import type { Assistant } from 'openai/resources/beta/assistants'; import type { VectorStore } from 'openai/resources/beta/vector-stores/index'; import type { VectorStoreFile } from 'openai/resources/beta/vector-stores/files'; import { type APIKeyRow, PERMISSIONS } from '../../src/lib/types/apiKeys'; +import type { FileMetadata, FileUploadStatus } from '$lib/types/files'; const todayOverride = new Date('2024-03-20T00:00'); @@ -346,3 +347,18 @@ export const getFakeApiKeys = (options: GetFakeApiKeysOptions = {}): APIKeyRow[] }; export const fakeKeys = getFakeApiKeys({ numKeys: 4 }); + +type GetMockFileMetadataOptions = { + text?: string; + status?: FileUploadStatus; +}; +export const getMockFileMetadata = (options: GetMockFileMetadataOptions = {}): FileMetadata => { + const { text = faker.word.noun(), status = 'complete' } = options; + return { + id: faker.string.uuid().substring(0, 8), + name: 'fake-file.pdf', + type: 'application/pdf', + text, + status + }; +}; diff --git a/src/leapfrogai_ui/tests/fixtures/spanish.m4a b/src/leapfrogai_ui/tests/fixtures/spanish.m4a new file mode 100644 index 0000000000000000000000000000000000000000..9f57e6baac9f9ffbb474b0dad83fac37992bca84 GIT binary patch literal 26614 zcmW(*Ra9GB5KSPsG`LgTi@UVJ-6`(H-6`7OF2%jL6n8By#oevAyG!BGcYm^ORz4D*ls3CUSvf^yYj|;>BC2t$os(e6~ao^zXNbs7Z#gr7rWRtqWKLS$SnUc7e_~t0DR!jW 
zwzj13D0s&eha;=frI=9DQjc*`_lNj?>s?3G)lnZB8Oh|ts{2kkSdNthiy@07P&o#3 zk@3j>bIx*geI#Z6rivn4+jQoL(5NQ*s2Pc(;uLw*c|x;_ey!%$y8^2@4SXz|4C}rA zYP_|Vo~utR!kR2+2RZH#kfc2)>uKW7E-Q94K0shIlo!Z^owFZ|i7FOgm<kbU zo6_oD7@cd@yPWrRBaaeE?8QLgB@fp<0VYJb)B_21^LP8>5%XpWbK~2>p;=WGop9H) z1=qx>2dC*l-V&8eLqiqfvSRi}eV@j>ZjAg1t#MCpWB+snT~wIUGhZ(=_{PvEe|XNj zg|xtr3T^sl+ETM`y3EqVnj68Rs4zD0?}0OKWwzs8)(1GtoN9-`W2ttno2P>sN)h#GYXx% zWe06&To}Gzo?NDQA2qc0{O9Di?}=HqOkokpgW2#n|GB32529v%;Y2o<9IDMu;*3is zFW1G@ZlJM*9Xz{{0ES2O)O!#WXs)3`os*A&i6tI`5i}Dy=zZ_j+|Gi_006p1YMTtU zxJE@iodX%Lvu(a&Vc<(+03zEPksqUd@d!<@{nPle?d%!SzDi9V3zE*~~7s7fbEhQW!wvbWW0j%tCN5(5(ZaT9V5zZSM5OG1x)d{B6D zU=G357-OI6<#{fW$SHGuu3`j1eh%pon%CgB46N6_ALHIEYeRB6L1WJcn?YK*G(kV+ z=8wpph08c_O#uX>g5a%R4iDRM@G2s;Tcu?e zhtXV>kVbmjU0VAlL41gIV593f8SBK&T?eat-d35iCu^F=!q3xg&e5bw9lmcS1bHo) zI-S2T&K@?^-b^EyV_UfN2!Qofl6^{`-gzpQUkmJUR#G#QMB1#$CSZ*|60>mF(h~Kcfd?EWHT$7k z>GK}7;=Ar8PRV<>)AirjcN~WVK6>II*NdQmKVizUuLd!D!xVyU;u;C|?+{t@3(r$P zIh~~?%88ZtMkV=e8P6THa{=`9ZtOV~w5t|_Io?OaKq~AKaG)a@Jhdzr`P6VMAP68k zwn?01h1t6+;nbIVXD5hRbg-vrM=r+=KLdzr^G#=9Zk@T;t??(u4&zZxYV$+rSD{ zNS1CcZl;$agD+i zQzH@_)0%AbT}Pgz=|)xYn|}s2+-|J!s$G!~Tt$f;ihCU{en#A5Q@ZNBK zj&T;cJ%#2H2HN2TsyZjqNzyfm9z~XoY%M#$A;>j$5`ZGI{B{U#G8L(DpGeLg6dyeV zzi7Z5tM#8xjL5$GsGok$6HjC40$0!*{VyThSOTYcTS`3wAPj6(R@wXg-x=+m zDiXK_b-DC%N!q*xUw)-Y!^;Tw&yfCAlW2c@U*iXOY(#?}-P{D^v{tqHxSuRFT-|i) z$#>Z+&(u=CzkAJEm-@iDT+EiWqIz>fA21=+{&xd)bO)DyM8&LKbkmcpX+vDZOKYmh zTV$k;Z+E%2O$ zMK)X}*YK}*$}vmPkUhzo(%PshIi3?DrmXvG@y##*AjPTTKy>Co2=8yMgkd{E{!6XC z&=l7%r{QTH7#`WYi%Fg;|Q6i!x{8ySo!aT42 z+86Q;9Sg_aQ9jwzOf9*iviK9jicUA^#S%LsmgRmTEf9J7uwkl<4fC6y4yhrT2+}M% zkU(#dT(%I1?T~Sj93dWz-|M1a3HFl`S)LUKN@lL-1;A*Lr=-Xwr+>N1=7ynM5iAl2 zH7!N`^N);C_C)U`tbO|^ zzEs}?UGmLbL#n69Q3=F?agN8mbrPCxR~e;jmo4w*^p7huXmRN7N>xISI*{=XBI6lr zXPKE{sY=GsZw!HIs8m;o0~^BS`XL1x?_qzV4O$}fG`7+ix`_=r9`-o^$*{{t5{&2& zsT;jKqTewBx{+WWea^?~9FQsK$;Iv95L^(WxK8d}pG=o&ihi#9jYY>7Dn_U5I|eBo zT-&m&WL-ZQLi{8>uhRrS=Bjq@lW2AyZ1o{gKHQT*`5ls6l9p`CT%{bWDj!vNuq%{y 
zCCH_35(eFEY^z18S^Q(GXs?kuy8a$NaDd6K{kW>|;FNgy3IC-jXGDw-UrZdBoPnN5 zi)PUeMV;u(^0s4`g-)h=o)CtC7cn6w@lqv;9K_cRLA=do4$p9*3JIlbW4&B?O%Z&C zLy7XNRVPH7kC2-^-L1p?tzkr`bOG(ZYntzINnyzszp>|a;Xlh)xxmm;^!V%G7zZbRvCn@CyT7rx`k0nsIK|lmFDeI9kqqysQ{?I z_-`amX%QW^ZDi1b%y_7T>TL12V=Km+c2Fy^5i??fKR}GSO%Hvg%3^% zTQjYMAY=8AfiZ6N%>dcBtQG9Q7ehn3sd88D^3RLro`*SuDfV&d&Adh5vS| z%P8aF(LMbVWB#fa2Em)_b|>Zgs(<)ZoWp_S6L-ER3sW2kq9RK^rTBHf#w5Yfk%DRY zu;^XS@7|l$e@RL2-sR0?PR^$>HIl2H9;Z7K+M9p)i1HPe6%jrGF$gEM z3?*fz<}wNLe$j4oUx}&~@MFAlG=2uZXAnuRiusVZ9hXBQwDf28WIfP9-^$z|?vxo( zvKy*~`NbjoI*F?!yN1MgW(2&=lgZs#tc z`1Ng8i*cdy`IsUi9!8Dp9~dXP@Kce#o9n~uuA=u09#S;%o&9>31zQuNY-z}36V_Ef zPuhLsidyYK6Q-_xw!AxpAHLckFu(Y1>6DL^6&N7#cb|XU*lfjP)v@6_xes5(=l&QT zQA0R>sk5w#Re@orV>3iK_v`R%02UyitkVI=j$K?fqZx&01jDkdITfN1+GaPMV44CT zC)Bz*$Y;R#LVNaOhKsE{`geTSm;8IfFTDm4tz#2i&gI4^)&xEPMnf8J*Eyj)qYg1^ z%1eQL%=$h>Ol{utUnPR&Ndb`T+`2?pA!K1PFvc1+}DFvop^jL(P6J{X_xJ&Nf z!d8NgH1P2^KRB*o4hGTe#a_tcO0z61p1WPZnDaK6H1j zN#()VNgLXwtXV`YUwJ-tk%+>wN_`^hW0Ua8&>;aet`e; zpBjj)dp`q@i5YM(!!_uo@`82dhn+A58wmPByjuwzs0eZKrdW#?D23XjpAJ*P&TOq*|gib*-(D-?YoIyZREOR$zR$ZP;=e z!lOIRrel1r%gafJjIP}OJp-#ixOo|#*SfIu+Au-d!bF&&$@P^a<6B6_s7t++WOVy} z0Kj(2b^I*xr9(oIAZpjsynbEqIi%GXq^0J)x3bG?evW(Vc122#8yG^D48!??5o zcR(bfWZ$!0C89~3HEFvevReY{D}eW(`im41go7a$PD!D_1*dw~4lQZ_cIjD<=qyw( z%kF96AF)Bf0|D^eP(!gh$zua|jB;!YH#YG!zl^`!N&62%17h}5vwq1ZAJL4DFK&0e zzvHiTfFVak=y)!?uXLi$QmYy)*I2;9X}5q@8M>NdiGB+f#q0a?TC?r{MaLRAvZcim zSW8hgl9fT+FK8$yN{zEL%7*(9{hs^X>dK$fH-88S!@({XXMDaj`%Q229XBuZt%R|& zNAd;Wki{Tj0P8fMWnXPj!*8f_VQ@*-xW0ko8HUV}DM(Ste?gV`A@@OfikX{TN_azw z3Ra>wuQ!nri(K3rrXuJsTC=HqR5zr9(2bgugu{cg`eE~e6Tez191Wo|y+{k)vZS_G zqei#k+^~+Z6i29}?K!`8Ck|Q#LTc$RIEyHJ`e38a#5;|GlLBj_{EO`s{Y{ho@2rFe zU;z3FsG#nH%53Lw0e@B5*3DbOaET8kjXbIf;z;mR?n2gqi$V9d{VBl#%v zA7{7mTA-;L;r!r6e)f@AnVYXT->drHS@+ncm_ z^gR9$^=T~S=pc5|LUoof=&D$i^xdhZzhf7FT@pEe;pFEHne%^Lw@tnlnH;ReM7-E^ zZYK|9heNIrZhp*-ibJUl!TJ`*qF)d|&nLlcdd z2gR)KR@>*hWp3z{wL>Bjm2@cqHK#>C;J2Urko37Jbw*((V`(`Cm~F_RAP;W#!?O*i 
zh#@gokkafVUcME~dBgi*0k9(EW=Mm_*ifWs^r+sbut3l$w@%o}J*3CGZGKf&PpY|a z)Jn+ZZ2U6wT39+SHMjZ)xP|$jP_$8l1c79M_2g31yxwT=$3T4Q&+;o@Jg0v6$GeDD z*MGeP8eIl!Tu_fby8lu$TCv|g*aU|#Zg6dOgo zgQ}~nG9?|4jjUs+6(Ng+Pn%UKVG@Ltg88NTZ~3+vp%JDIv-I?)q{bS9{lPdf)@?awO%iZga|rR7}K9GN)gJRa!0E zpEDJsplm}MX%9-46wJ5lU7^7%Km_Q)q|Ki_nFX0DX>w1nE^!#z;KwKW`UCWU>EK^K zBuSm;M~R*pO&V;>^tm-QJIdgT<4=kIHH}IQ?8HeR#@txmgGne9fnzTBjqS;L z!+LZ@!$n?S2(Rj8(X+GWgRlo+UYqx}!21lxkx@%XUWz3h)o3OMc1AU8IFNBoshu&C z#aE|hBERv0cXfYU!WxRWEg0Ep+>?{@CG}ph>8K{*r1D^&=)IYMg}I~t&Z1SDc4C@w z2A}NIHiO?$_Cbwt(Zg5(;VrBZR1-%sF<1!hOtRB_1Q28jbM^Ess5dKLon4}m${?6ieruC;JxeEnyMKQHErr#-GpSarU=N$NUug_Ti^ce(owsFA^%AR)le z-7ZPO%Z?}4WecqEk7`BSB$6MPj15Bu>j)*|RVoS|J=8;$SOrit)YxvCAEsfMNPADz z&Ixgd z-43Rm&S*U^TDlmX$_5CS_$=zPmD+`4_Wk3>3APHzY6j)}%wM9Lm2c&>DVg*-J~@8Uql z-9WN`Eo0eCx=b33AGAu$PDHqm_ugFH+zHFX$q6YQPm}WX zi9?4JvntLBVd&VXqU-!{BrkJiQ)Z?0+wRk&ELBRHd^pX*SE7@<_#%~}uouwApRgUr zz?}CCL$v;l?nMiyey%X0ooa%e+Ydy(zyMv)2N6w#)9t=b%6GgaUHD@tIb-?Lbo(#EB;!Oz^d4Z0LeM` z2hWL6Uf#N3tM)cnI9Ysa!ub5|+9Gu=kb zcwzUbTtCC0Fx3aBNdgVD_QFR@-Ne<&;3bY6rdajt=msfXCHj&`%XaD=qF|Y}5Lf=d zQUnkwm>Y#r74eO6B;Dny^~{)!J5-^|$CWR-x`FzPB1eaOD#=Zk87OOBe5uQ><9{s; z1qD~wIxAJ(nbIP4`IldGr+HbFw>_@C)dQU)2oOK#q~J_Za6MIE+-fSBk}Nb+BD&aO4l4( z#cXuCj7!l=!LAM#$89@aLd_?6F!F z|K`0dUV5FrNQ<0F3CJ&{8eu7R7-`Z*&kgy65kL|LQC{h5YW}zdn`JR$)V5Km!~ZeK zq9k=60NKRIoOdZ@**aw$=HHVtKtSkLFP*It#lO%#-3Pf!qMXw58mMQ$y|B6xNmQZ` zC2JEm@DmFnmCTGI=`*qQJb%@SK`z%_An$H+e9B1LlvgINV}nAWAQ3;C@8XtKT`DhR zz!p_eIpNq}(>Z=>ho)Y!)j6QJ-{^BdPj{WZPezxl!=i+E*$8j*hO&`R!tcQ_7`#e0 zU~&-r3uv785cjLD)eBd}mH{+XEB>9h5=4ujvYgtjL+-u3Q;y54ao`jBJ)xjCTVX>3^5{PBV=0INTvWX%rBt#QvRV;)YAFV?J z&v~SQz7_NO{_^+-9>mO{fCxFj8E~dh(O@Q;uiI8FdvzH7M=HaWH7s(Ynd&zlrt(-8 ztuLhkh99E<-)H?EybDJ4lZkAbJ$jh*-DRd*M<_T!Vt2u7huD?w3nT;?EfL|A|CAS8 zDc8bhz^bGAAfTQkgzBfQxbCL$gGi#SN9xzHv38(MPr-RKp9ltB{M&qs{VjP`MC35A z;HutEi5GXlm@gyO@O6I|S~JPSq9VjvsUHdn*<%bz_$^A^^8P{zJ$)ChZ zDxZ_t-=`b*r5uAfuYn{rod6fgD8gQRv*I0z1#Q z&?+@DHhi>)^9k2na&N}ykEe^@ycQr8xzRTDNlGZnF=lvNrv&Kz+uOE{0)N}V6>e%3 
zNUCJSLF@o6KEEmdq;9B$iVU|eJA_8;N8iZW!VvK5Jl4ezr>76&?~0zZzL~{+kATzr z>+*QDp&(&swgm&( zTh4Bt_?tYYUAL!Mw7-QEx!=ReT~8;p#5aV4d`s~wh$&tBJGN|-UI=1}UK_Y#)D4gR zRAhWk@&IeiQbFe}0riGVv#j_Yh5dfp7_FeYKN=Bo|R)m}gQ@cFBmW zSwZpNfik#8!3CdqQYQOb{9-$Qla~k4pKUyTWL+%a;auzVSM1!WTLtW{@`0f;haBa^ zjN=mm<{e#sn~oGJd%U*Obx+TRvH@hUU%H;sArkltMEC6E&w#R5P3m3QWO0d?-%dCT443W!srK*vjzcp zcI>$ig|WnDK1T1fEkaegFkt<9o1{P%fj!YVod^`z1Dy&fvB+3++QFuaa1uI4AAmqAkN?pv(CraC~B0WqC>CilRPZ?TyYk%_=4D``TKoDNB>CO!B#C(qqsuqOC7#D+lr z!F=hcxKyX`Wk>VdeQ=o7l}jIVmsPpzm^s8zLSHdY1`^#j3zX_PLf`A)+o{n`K@cGI zmu~Z)bRY&`l(pu|{o6d}41G)kn>%;^{oH=JwteyRALKF`1#;DF{i*X=-Gyk_4TcSK z3OFX65^p2Lw`@7o5rccXz|aIGTrjExtgm%RZ^sArX%qSPI%PjJpyW10+NYLN3VMt^ zIZOi(HZSmYEn(eulTy?0;>Vd&_iW5&w82Q^;unxta-McL#GfXDBax_YhR?${mBp>S z#kx!y7Lh^kZ*@p*Unm4m_hLOrL?(gu!;SQbB3ijzHtp^W35QuXvNCln7BTJ`88d`L zrhto4$5liTwW|}zk6re2fiMyj__qHd1|f>uS-qZUkO}+bYWo8oBsb}_ibZKy$W<^t z=Ppva{OB_F7fJ6)VwB-@?q}dqzTq7Olfm3pgRx!LJPIR`iXW3cu(+ zO9Wbu2vBdjWGU5fytrHP-AS(&D7(<8Do!l}WMIr(g9TfS=C-(H3M2$1J`VV1ts zr{+9EKJ9pa9C8RHbhBSgqx$?udB)N>uU;A5g&3Fs%5y*VwrB%(bJ-_}fYTb&WgWt=Q$ z8Q9zK0b}F}^KqIfY@D#1!{4}V43%KEpl`rbt|tBgEC%CO)oBc-ye<>5oi0Lg!Mh*x zi0L%8BfzZhl`nSr)%>$W-f59+AYS*h{*RuluESR)uY{R=6vX0ug}cXK-s5(}s2lg4 z^tD9kvnbtO2Nl}^IF2y|;x8`km0N|ycNI%gArCIHGqd@8Jl#8dc9#4dOpSDWre(*l zAZdIICE5^h*tN%^XZX6xoR7|K2_hV2ihnjLnuk3h0|p{FsF@+=!*3WhFrutt>zm3T z!!D1OB18s}0}(ZqaIiq&zaK}JrN&tw6;8)HbkE$gDZg7=-JDh8TH2g`ziWnKjQ(~p zQsaAe>dIuq_W@nB>t|%ReStthd;*P~E~cQW``6%eI-M_{Qpn!)^L&owZK}Mn^pL6) zk+0JAnGuo26iX(LBJAOgII45N2rVn_T!ZO6w&nXMz*%JqH=QXLueza%mPQFKPOXS2 z_|HGN@$)MtBFNY%)DNLzgoJ=Uo>bQ$J~aXDOQpj|twnRgSJKu~b0?>8o`>0Cx`tOn zeq;iaWD_Pt{2Ld%tC0iUj~s;SwfXG}y5?#y2|XXwZT&ybyM#7w=EM-kFRS5h&Kz+} zeWEm;$8|NQTmH~xnC}1cXPtV)8kgpd3w0tf82}Kfkmktj@Q-;;wXw)#x}i-Fx_``b zHa~PhY>r{B;kc*Byja=aSv<2pdNx3p0TZ~FsfG8^{xG6v#Y5t3)+udqqh%0T?A=l@ z=qr9f6QV&4Ok;v{FDP7GjH&^hl0hZklRYJ z?RYH|clC!w?|jXM-j(974TZ(YY{|7I122VIT;|Eu^pLo=)0%(Mtn84NB45hi2oWm6 zO7Mgk9qPeb*L*y;U@(E~;q)vBJ2R@UZH<7`R#=qTDN`d?9LB#yIxlY+AIz*s~HSnsQqo}kc6 
zy_WXyA^o;ar6S6O{)PC@#fm>PCM-3(8eHuWLwcXTLSQECi03kjChMiow+}>Gk7rLA z>@Ux?l&Sl^p#-OwXWUup(?_MYQ%<+6wzj~$Y6%hy?5bu>(UMSGY0{MX@=9N=D}5aO zjdt_+WTjHMbpK0Rnl$(u^(x9iS?1c^BZYmt0d|4EC#jW2&DcNOWHn>J4(=i+fX2Lw zkI$SLD)bpM?XE^i4gaP^0++RagW6@GL$! zMn@zv&vUu61RzMk?$6{A&3qT9iMmyiIvWg!fbZm6N2rvpI=5;zvSI_QW1)kUO_Wr^ ztsx0}NHe5--9)Vp(Ili^4{3iwkcT!aI` zw$|VzzbYbookk3R{;QbFTg2~SKz6_PS$EiuT{L*6m5V=_E)h7S?}8dXiV|I4 zNKnh07j^X(Rb8Htz)o>u1i5E5Dr=Ks-Hl3VeA*T2fukqa>64UQS2LMi@5Hxu6OvH) z`?^v!=tPEj8QBTV1!+yFnf)Y36?1}6o8p`dY~22BTFqkD&(*#5xQs}~*uaQWkphYV z&1}2`-MYEgUprqm007=74#n`Qem2=adAomPeQ8aU7)Yg*10o9;xbYTRbF~htk0$0# zR|l4=m6rFcHp$IoOK`?#5~8*@QZ(Fk@<@LKNGQ(_+Bgs8eBF0YGmi`&gWK%Ux%j0w zqUTkB0WB&gAZ=#utV0B{)21$N-Gi){!?KDId~T}Tnb)6)4Z*L^MU?ye0f zAbzVb;_?0Wwwl%6Z|<>y$7EX{OC0ysi`c`gn?B@WI|vafwYv#ty7oPh01YX08aYiI5l~qB0;K<_kZwlFowCEWxmD-M&!#w%N)>h|lGO^rF zO%5}_$d3-Pe}43fb1FoAr$0GrP7-X z$$MjSlMlV~pyu+aWg9WLvd6{4iGx42V=WnW`vpS=m~L}0uKLc^cl>F`fSB`bZpw^Z zZj;Xt7HSOJTA;h@!A7be$fDprj5UKy3sScl@MYc7)f}9&n?i(v#;HrYVzc}2Q=5b{ zm;g_mSzF!AReA!Tq1b$=K}UKbpjeg8k-Fy{zBQ?9wH?N1Xf2_RI#2O_p?B@ry}~x` z95$oB9sh?vRmXHz;<^0$K>_B=PVF}N>TsP+w?U%T%Ey^3?G^euiQnC2F77MXIz=*61{kDR~VuGJe(2Rp?ACBgGK*&etpO_e|=+Shz z(2|duVN4K-W5y163wW`t=a$;A9*p1Fn4K4mLz178pUh2BUA}5myr=qxa_R~H5tgNq z-35us%Z>5XZFJCO#?9=Oq?dp#)=^TDNGO?b3P$%Q zMUlRm-o$n2KLIgOgN26J1*!TC2|#a+fSOO^dF_mz;iG92MbKWve+kRR?PEDNnPQmm zUPFj+SLajVQuAk3B6p8*WdG5jEb13uIezrY9-@@nwc)5jHbgn#{&Ig@Xp+ihz{Ni( zgrcG>OyciL*~1s8jNl z5j)(p82eT1aCtT9t3i0TA@Lh6mdjZ&;7D~rLWL1p^%i?s4_!|0TT-`lxKY09^qF6) z!}=(E^v)nk;;sB0C;kpV-~Sp4X`5($T$nJ!)N5-W$g0$t(c=m1+U>IPKRL!X9%0CKSVALp!SoCMz-($(izF z^A4JBWnn-}A`R~a2{D(PHCqHM&>`;NviTLGUm-d@PXmT7^l@z3 z>n~Vc$Kh+r}bURucb;L=6=Be`&7zt@>(_2oBGUGLB;mMk9(@ek__PHY|T z;dc=Pwy2(2#=zt=n6`=z5GrD;joIP#R9XBx3xe)zPbY`WKSxA5u$e2h9m~VP$|lKc zX?B44d7=Y4GXA}qnJ{tG$pgneX9!=M$`IcBGh*S+e+Ke#3MMj06r8=FgpC-8?(@B` z?sAz4x3OWN#SaT5V4tZ6J;^yyc&a#2wd!7!HE`v8Z&&t-tL(e<0XY{)yl3kUpBm zpUexf$w;a^C-;tA&^r%v9iegrI~`0Ar%FfZ8rh05lD7=8QEaH*`*r@imgf@634u9p 
zJ~vtLt9|~=p)A41WG1bAdMrp9t=TzEB0Mqr=BKMbA1ZwKE#Neq%)rPJeaCVrROU{f z#9%j>O#7({SCask_2^-SGgt~}9lT5AQQ22V`yoC1xYk+Xf|DG0($O`mho$FQ%(yqLZpmj+*UO+(lJkjkcy*Zyy76lwJ9C4q zI#mHA>nUzx7E+fe?e2*!DiViT6nx6?QW7~S%UUH#BJW0vglxloZ5KT@(tagKhIN7Q zl@7!-h}d0TP{(coOufjD;83Z2+OuvM<8h|46u9g;fU@ntDvc(mtm6m% z{QFO%&}n;$Lt>&HvZ1jtk7?B&E4*U#V_s3;AT-+i#-yn_@jYF%;NCT^v1~s6DR-qR zq~EIF8inwJZ$PQ-G8_G01LtCm1Rv^;k*!Hb^LEG97k?fNF)nksX~$`gt-w-CZ+^4I zZM_hk5hW)^r^r~G5F)TVj)uueaeD7Qa=vDKlXNUqd*+%0{g#f zsO5|wl}hIb{KcXbagKo;zDf!R8QdY-WaU(4pqxWi1L+Q!W-DiTR%lIrl7NZ+fkDSw zGH55Dm@McSLK&&ebVLmR^#MHaW7DFs4{X1lXB}B(Sf2&LzY{XRGv0dD=Bx&oB!6M) zbzLA7tXyo_5xh+pBvs@1GT)Gm8IBgVGbeaMc z{D)%EBt!j+WgkO?{Uw-#i1b3#4*AS;{!VI>bDll@g(S6kJsM1E&o##=YXGv@2rI)o zUwbC`86PO`8)%ecN)XbC6<`1tAH3Xl7}a|ZS4ea!g!FbF;cL0A&n<=#vL# zte%VJuUO&xxIX0}#_qgemF&==}jOGzWl4Ab zp1P@YJN%4P{-=h@9Mq`LP`g9`N^;oX6!^Vk%UXH#(8E2AvMCm2VWdH3_6n^PR6i;g zVZ<7brZuq$kApGd%HH!&KcjQOdmMfPw9G??#jnyU5Zng}3dx-mQD|1ekaxm(6&AZ6feg9<;tZLuIXh`hej&{Vz%zg z7G8+i+QD^1p3IIo7UlP%x$Kz$n7yNB{r#Ie%2@aDLiT8BHo5MM+o3%lYJyN5+7XHn zdPUS*|7;;1bEQiXL^?DF_kM0x*i9mLU?cEAY5jaw#VUTq>dfKn;&C6Do z=YHE${b%m>5rL!qr~u=74=Q#o()QfLnU3Wkc{)@LsmI>XOk_e#TdTbe+}*M!ujc~3 zzRFN^a_nl{{3ZNE0dA8c1!A3bHtTS6juK}KdQ(lVsZSTYYZjl#`Ls@vC@*!4zx)KA zV_cm}w@)5YRv3-{M7t+!Z_D5&R>#hEZc1~Y@`|6A%(H=6a~p5O%UgW-=tshkzaBfOfEG8EGO51lDV^0RkeI?=jwr*+4 zM@ihudS`vI!0nw}VxtNJCi}##W7ta9XZ2nH(N^dQX3M;66&d7zl5;X_W4_kQ0bZK; zR_N(FW*~7v5Bra?$}y#gsKkOW{HCuhPAeQpwW*i$Ogjn8IPy)oO*e-nU)Q2PME`jJ z&8_}bOzk{$AwwJPYcZJl-sNaThmsAsdYcRfLNdU8+v@ALZc9gAn!f?ORrHj|?(1Rz z2#i#7%2!$5-$yY+<|)XEPqw^8gG2MMv+qeUUmPM22uu|jx-25gVunA>-+OVpa3Ak#Wt;QEcUSc6=d0oL+sWlx~^<6fLljs=X(yBM{M0>a0 zxvtcpzpxfxjgNI%KEn}~Q+d)Zk0_NH{kI%VZ5g^i+*!s_pGlk;COab3aMKk7;_Iud zusjGQ(E6;B`41^}68FsX!U2Uq8ql>415d{*gynP3p`ME{8Sfj^l-O``fI)ubtnvV! 
zPet&Sb{y~F#<@L4*L0_U!w8`Crjk}v{+Kr|$)jdl97$D!&!!x5TE9=2p6W1U1|6PL1@pQ>Ur zs$DvIRTG;7yix#I!3Pk)Z2fLlefLx{R*h=fTB zK4o9oKs=vkZ&9y(PdzJ<7QFj1)+(8cyf2+gq#p!`As$u1#)FtPsYeh?YE667GZY3v zP~S_y+@9C>w=ZIY(u@G+J^*-7`eZIvmdMElbv`0eN&mZ?#qg-MDqL~BMhL<`=y6Gs zLFxt?<|0CQxNO;3L^WoZ*Ls)wp@xwKY?vP8Ga*K`)<3q%?onpFgjba5VF9ST1h##GlF&WnV*mtcv#H`=@?Rac)REtif z5&l48RLQuBXy5YGlkG9P7=K}3`=4_N80)=idb)ROj+;aFn(dSX2oW76UpZWw^X7*! zu0q6dje1&l*sJq#D9s8Bn7f^*W~eOeEH$cS0zNAThcZO^+o*g|B!MHA<3(83K@pWU zFZhyh;FmH8=AL1%42+9Fh6x%KQaIJEs8momHm+MAio=aZV>Zc5*WGSeWsI z!?`6%jNpS z)agpiDI8v!{)#$kE7|>zS~~bo*mujH60%hz-V-kn`^4A&{l))-?j}P4H|z&`>cu_v zHm%sqWWIV&)An6Pu!K|BcQK0Zo(|%yZJ&Ry?|iP#xxGT)11bR&0E!Z>lqWAQ)<;z_ z4cf}6C$_TmtbXaMB3c8-OD9ENyO;KXGD$UnE`HMFuRTke-i&!ICqYs;6IQB9YwnV`v!zM1oZ z$)&$vNm*A9wRw?tDL|?i#Fy!eP8NhNlA@M@(U8zVN(dHE_&!;#k2EMNr>cYhwfEgo zQ8e4WGvu5EB>|yAA9$%UA6bFp4!vXU0tG8@`O3Ac`ObIo5^b6eth$#drJW; zL7qEI3ky$`KOJL(gN&Z~$z^-{>LdkRow+ z-{^Tdgfy1bbS#cXSE*62iaDq zZw%Tw*7wBje9C_;C~`x%R5W@>i`rTOo^OB z{t9q@>ZoY7yU9B}BGZ{TAOxoqLqcdL1&TwA$jecBwyU)5my5o4NLV{sZLL{w0o5 z?@twqY-h`GN`E|Ux)So+MX*^Bq5MZy{-Pk!Q42*`NUCX}B&biL z_6#Psp9?=knw;CsA9pbW9B9B`<06wjmo^I%4*`)rM3BG$sh;~x}IKc zRSk8xog8SSpxQLN8f8EV`N}gpR!O(fzAciD_QHS-pM8k_dS>B-iwe?cCSKS``NkXT z2*1yxgw=jUvnbFeZ1XX)AaDPN?Tf}2oD=q)yvYj?EUwm{+0Vt^Hr^ii*cQF|MoI$8 zBwqGnSKL8Ig-#w^9HmykNe=HIgLK1$-cgs^;EkU7Y}I-RCuE#=FOcpj^!`B z;xM$tjO%Y{=RJotr{(vY!T9<;sP#{uXDe4dKs3HG^$}V4xKi z=+6~k`Hy5;LZVKIDj5X^9Kt|-(Z249`r{w0D(vg0P$Q!<+PjQT3g2XV?~E_oGH`I! 
z#Yb$Af4#>Xm@E@oNG+i;4jMMJ!0t1-Hkp7WC*F{XVE;oYc`X~jw|keUWbV-kY>Ts$VUP_8cj(6dLRtvUW*rWayy|q#8vk>n4^&BvP!Fr2(n8BBIX9 z=tv{)b8oG@SM(eCF5RSth4C#voxvC@KRiLnz@;Wi+*`-MqOHtU>kXgBBM;6A>Kb7@ ztoJwIQxf-7w$PB5W_jt%k~oAZY|Xf9M_UaE2n%5z@~X50O12dEr>&e;-N*UvudzAC zI{2%e=(-Z&i_A-2S_z&l2lgwQOeiti!yA9SKjkz0c)Wg zFI!t(CHX!Dxio3umr>3BVd;?c-bjS z#HPD)a#6fvbBwzqvG@aqg0Hej#d2*|B72sMAa zehVk5`t+{fG@U>&{iIyXHCw+aBy3JocUDe5A(;EYl5bG&v@VbXfCq{0VJIpkbV~a= z&E@C(reZwQ95cXOl-ugp6+?sl~;M5DZ*U37+qE{auE5f3nT`a-CQ1|6nPl>rG4Ey=fy7Dfy z!%rDG-gf4Md*Gw*P0?HQWJ=I<`Xw5goqR`T3d|{NCgRYVO~y|5^-$~GlZC`lXOG0W zLYIl1XENnqlETa}y<`&zH$8aF#ki^lTQvD>x}!>n!WtGQ36J^i_}XuZ#6^jC+rx=s$e;z}bKixf7#`q{fWh z^)H9|*b{oS4xwJU=u6GEuhttn7iENv>XFa0HLTGz?VmXnYGpHu`Pvj~&lV9bK)n+; zSLIyGH5Ux5u-5U!YX)|H%ETp`cG(rjL20x~ohWI7 z5f{?%XHsWt*w3K`jbTg##cY_jxNHrGO;W}r>7Y~@sWhz0kR_BkpIpBJ)iS|7)qtbV zrg3R~CNsk;BF#C*QZIF#)_BkRPMXRR3$zew0bRNLID z(L%jH)$#EPf)d&b*~n6Ic*JhIvT1FQoV`miF?;PeapJKrZioiS_x=z}PCX(j7)M$s zsbX~qyl^;>RXWw}xB%x|phNbPjM4Ei@V+Ec+wc8vn2e(AGP!r59CcpUe`W{EWF%Xd0D`=9c-AatQ@m>{myhqr*YS5F1mX@_!00p zm~jGl8O(qk6+{}Cy59Mb;ko_gIC2%Dnhjl|*JceaM3DX}O;ENdqVF#tWIjITtlh1o zm5LTrAg;_)6`8R4qUmElaM2!1Xj4Q_@x_9*QOL?QUS2v^Xo;+Nmf3c{tn*qoWbK`A z?1mNrB|DROpY8j$%ym)9+k`q^VHPFRCc^XG#0k$PGTn4DuO7^w!R+HS+<8+eD_?dV zqLxMjuwr1p(lYYzvFk0@)*73_t;USyrVQ$39fr>#kd&hB)u=!)Wvdl4GN7cSs#G`` za7gRy)au$8Kl8|$oPQ=f6;4z05A1KSHr~ug)d(lUUfSL}WV)D?##1XTmb~ZnpbESDJklsOB zPL9>DF*j%FE>JqHl97-kl5TW2$79Kn9T@@=l@_eus$QMWJa&Zyu%%|UD2T`F5M!a9X)6? zZ*pwL=Ts6qC3%Vz!*qlE@ITCm`B+UqP<+kIAT!K%7AS@bY?lN>XzOaaJ+5Q+eTxPq zRX{_>7J{I6*zNQQScS;DKOK*n! z{@B!|kRxJ)#*cK)-4Ot!;KeUA z#RO=E_^6~pjOK|kro_u2V4bEyzDa%kcwP}Xj5$7-p&beXCfFn%by%=EgG{F(r#qJ? 
z-zyR8NX|iG6@mcEik-A4^3^QF^z6(9ej&MQ^HjTCI{ed}jS6h+rifEiaeL9F{`Ba+ z*2=fU5q^q7S&Z|YAG5;mRIN3>=D6?|fsH zrxZSqlXiZ07?>Mz+y4%y*Pk05`JCr?BvL4nb~Q%p8%jcBfyqk5M$Ftpj|Ytkn1V67 z%a7QfOey-K7m}bAhkm)}BmK#LZ0fM&m|356I&{C#Km($}?CC6MUUQ|Vs!ugyP--K> z(7uVB96%BWp?Boz9)|-lM^Rn)k6C7|VAxhq9kfZlY7M6hwMsVrnGTQLHv%B>r~S;IzmX6Jo0;`Ea7*`20w$n%I*3+G}Tl zi*H$_(9xG1QNQYK8>{Pl@n+7}MweFcK^$`~cg05>BopIM>5-Rlm34+nxUuvSeMDJ3 z%(A{Jij6Rj<9XViQED_#&Ay_fRLsbiQOHF0&H6I~es+cw*q_TiwFVAKj2LJl{AY>7 zJve48B9N2x=_Av+_nDEIzf?`Q9b^&-{qBgW6<|H(btw=ol)Qg6rlV1s-)r5QVubQ- zn9x#*q>*+ifcB_VYM)vg6@L8k(_dyiN%q`??8|pV`NDizn{TId1YACd&fK?8bHJZ_ zaW+ik!>fgN{KdB87NusD%}KBCSyzZ2e>bVfyGYEv^Xsg)o+A@@LyIqK*;=_!eNfn~ zgY9$DOIOvU)HC3crPOha9ftLi6Lk<36%>(I^ci~p!AFN_y#nvjj!*pw6#Mft>EP_pXg0Dk+0RLQO4{hkTHgryL)B7QYmPyw08z&qDZ!t8?y0==Hha1cC?_&y0eB+y7!F-WH!i0v36@;x%sA0)GPnlqNCi3mJmmt6f)yrPvI|6 zRKwo|lle^IZ2D5NbXDoj^*~20=jwc!A&(#4R26y!UOl^rF|;Gq-go=kfe)RDBZlsL z9rBRy`~Ez=v6a2Xm@(d-Tcg=qRmAqQ`_c{fwc72q&@vy*b!x+PDSES>u=Y7G#2dum zX&i=PkuFm%BRHB(1^=RTbuLEJ}O5$Fpj4jHkHF7KAi?9f&(DOH%G#U_t6 zo%bI-#YJ4a>PkP#rw<&_L!G%jStd9-9UGEOp-A1t#!Wh@F@9XZJmB`)+i)Y!BP3u} zAhiDUV_d#!v&TeLT*2Wnp^sKIJ_@~drng93-zN!vbCcG*LD8fV+Qm~Ts=ZHdgO-5v zvffsXDnyXMAGl~Jb+l54^`es-cd(YPOz^f#n}l0u3*&))$b&ziRI*^(yXW+6uOJt zVAcySmsiF~+wNgB0e5K&aB6rlBG3)m9hSS z2uvK?usE2wFzqvDm+85%Cmjr2H(ctdRT6^L%`(qzyE$Z{EnA9S9k}>h##W`GEX|4V zE^nO=oT4mJS=5~Ssog-#<@yqqr-j5gj?5)%o`O!1G4AFo@?XC_a~-`_q=+0_6G=l4 zyhJ@JugF5>A7~NX$d_lo#y7Flt0obj#@wvp^pJ3o(IKuNlcy%s3NfH8-IR!>&MFR= zv)w=fTLyFCM>O=s9C}$t?!CwCgDHoKe*e)UAq<6Sl@d&_bK*pW62*uoQ=7%BiHlvf zz1|;MCGg|xElihIF&VpLDSW`Rcd}RW(FiX9!B^*t&Y6CUTUoDqE_N}euq=lSEy}M&tJtfbu zruAfp``cWpdeO_%#_wWa_q`>tPG+XtFvF+>b}!>jU{#n#wZcbeR6oV@CrDf34kVSG z_S%>xukHjeFJIqTaIWU-%Xus`kLKZj)b5h>TjwXddm!X6kvY>uT9Wt0)9JeR_}e}h z_!}KN&SAVT!U*|~{km&{Z=+;M$Oepy>HRPeV4$m?PakXmhKLb~~=> z$2c3@QhH1_I?!&^@|r7`ibMiqbK7ImzI0>#dSi?`I&0D5`<}V5$^2AS2vOWTaKS-0 
zQGlUC#)2|8r#9~K2@{J=HS~{peY;jQbR!DcRFNZh;iKs;qL*`yCq?Bc_(Ng3+)6eCHbkz>bR|8v;jbs~(&WtQRmJ+~AFTn)kX#LI}-MnD%d*%*m)-~?Q=P4g(PL9`S}5F6MZZiNWoRboHs zoBmzWkEt&pm}V%__o~0zoM5nlnv_KyEl2Sys<)*v^l$679Q#K6=||{_nR0do9u1>yU!{G!bV|7r zq`TIVfO3)ZOud4Vljox(2Ga;kiS~0Nn)NvtUIaoQ2}gce+AIyr&nw(i`A3yeE}Jzr z7y}w_2||J2UzmNi&d#2|dId+Mr>zx0A**CO_yC9@kPm?nG>#hx6axPHXZbG&fcU>1 zOZ-#wf8x*}5DXG`S91rT(#YN6k4@-5ZT{UGX!l>r9~1x0&i~pjdO6a{&K&3hhP3*h z;^K#BcmFZ)(ALV)^&h6(oIRZW!Gl!V*%bj(MF2g)Bt5L`oBMuE+l`8_Q2Mg#jfrqOj>@Obuwwt?!Bf#6Zxx4)t?7!p?U0@S+&{?qjGfXrH z2`B(g16cSFZdPs}gAQz+zz9$bfRam)L#P1$;twzZ2^whx7C@lDI76u1+}-~)0eG<6 z7@!eAAZ&nnOOOEys0Y?@|4l&!^uKsC1whXi;Qvv8_J2Ak2WUNjQUGrNyaJF0AQeE$ z-!vVd82~^z$R`4b|N9t}gSucl08r3o4ggROkVA<8TK6{^0Sf9D0RY>BK7hwr0O9~d z07wP^+5v3?eLzV8dIVvT)008?3eF_2a z0st5jSPza-9)KYLV0$or&_7WA4ghctpujwZ^Z;lE0LBKM8_)-E+)x0j0f4c9ae(Kn z769m10RS-GDgdC5Ab{AQ)Bx=Q0O|pC&_@7m2haha4FGsvKpUVwIBsA(ps(OD*e@_9 zP%41-0szPKApn@0yCpbxz=8pQ69S>9g+N4rxD4(C`m`Qa?&g4;4)`ww5WGCR{9HUD zTn|OG9*T(a3yMAzF#zh$kcdAz01ILK*EtK!b5~IKU&?@;qnkS*apM9CKGna>lofRPA2OI(&gM!t3GJ;Y9 zaxy|(lEPB5T>L^pLR=z}QbJt9LUKa<4@IPe1o#91HF&AJGZ?Jgzqb9SSsl=bDqzD6 z#%8BxZUxgpxVqUnJHa0Ea|^)Oq&(~#tzf)-azY~P>_9JUNKb?}Kw<8#e<;BI@doe+ z9Lc}_0BO7n`R_FTpC5yNr}00Zi+`u_f2Z;Pn`!*X|2~aFY5$MX_}<@XoVgUp(LlBX kzaaiIA3ybQcXW1kfXTSRz`XnaeX38lc10lmU;7IB4=nn?mjD0& literal 0 HcmV?d00001 diff --git a/src/leapfrogai_ui/tests/translation.test.ts b/src/leapfrogai_ui/tests/translation.test.ts new file mode 100644 index 000000000..364f8491e --- /dev/null +++ b/src/leapfrogai_ui/tests/translation.test.ts @@ -0,0 +1,72 @@ +import { expect, test } from './fixtures'; +import { loadChatPage } from './helpers/navigationHelpers'; +import { createPDF, deleteFixtureFile, uploadFiles } from './helpers/fileHelpers'; +import { deleteActiveThread } from './helpers/threadHelpers'; +import { faker } from '@faker-js/faker'; + +test('it can translate an audio file', async ({ page, openAIClient }) => { + await loadChatPage(page); + + await uploadFiles({ + page, + filenames: 
['spanish.m4a'], + testId: 'upload-file-btn' + }); + + const chatTools = page.getByTestId('chat-tools'); + await chatTools.getByRole('button', { name: 'Translate spanish.m4a' }).click(); + + await expect(page.getByTestId('loading-msg')).toHaveCount(1); // loading skeleton + await expect(page.getByTestId('loading-msg')).not.toBeVisible(); + await expect(page.getByTestId('message')).toHaveCount(2); + // Edit and regen disabled for translated messages + await expect(page.getByTestId('edit-message')).not.toBeVisible(); + await expect(page.getByTestId('regenerate btn')).not.toBeVisible(); + const messages = await page.getByTestId('message').all(); + const responseText = await messages[1].innerText(); + expect(responseText).toContain('unicorn'); + + await deleteActiveThread(page, openAIClient); +}); + +test('it can removes the audio file but keeps other files after translating', async ({ + page, + openAIClient +}) => { + await loadChatPage(page); + const fakeContent = faker.word.words(3); + const pdfFilename = await createPDF({ content: fakeContent, filename: 'shortname.pdf' }); + + await uploadFiles({ + page, + filenames: ['spanish.m4a', pdfFilename], + testId: 'upload-file-btn' + }); + + await page.getByTestId('spanish.m4a-uploaded'); + await page.getByTestId(`${pdfFilename}-uploaded`); + + const messagesContainer = page.getByTestId('messages-container'); + const chatToolsContainer = page.getByTestId('chat-tools'); + + const chatToolsPDFFileCard = chatToolsContainer.getByTestId(`${pdfFilename}-file-uploaded-card`); + const chatToolsAudioCard = chatToolsContainer.getByTestId('spanish.m4a-file-uploaded-card'); + + await expect(chatToolsPDFFileCard).toBeVisible(); + await expect(chatToolsAudioCard).toBeVisible(); + + const translateBtn = chatToolsContainer.getByRole('button', { name: 'Translate spanish.m4a' }); + await translateBtn.click(); + + await expect(page.getByTestId('message')).toHaveCount(2); + + await expect(chatToolsAudioCard).not.toBeVisible(); + await 
expect(translateBtn).not.toBeVisible(); + + await expect(messagesContainer.getByTestId('spanish.m4a-file-uploaded-card')).toBeVisible(); + await expect(chatToolsPDFFileCard).toBeVisible(); + + // cleanup + deleteFixtureFile(pdfFilename); + await deleteActiveThread(page, openAIClient); +});