From 0500b99c3be272fcde14e5e604709afa39f603d8 Mon Sep 17 00:00:00 2001
From: Joshua Aresty
Date: Sun, 11 Aug 2024 00:43:23 -0700
Subject: [PATCH 1/2] Wrapped content to distinguish prompt and content

- Even though these end up as separate chunks, by clearly wrapping the
  text in delimiters we are able to distinguish the prompt from the
  content better
---
 GPT/gpt.py          | 10 +++++-----
 lib/modelHelpers.py | 10 ++++++++--
 2 files changed, 13 insertions(+), 7 deletions(-)

diff --git a/GPT/gpt.py b/GPT/gpt.py
index b01fbd48..bc5d1f23 100644
--- a/GPT/gpt.py
+++ b/GPT/gpt.py
@@ -267,26 +267,26 @@ def gpt_get_source_text(spoken_text: str) -> GPTMessageItem:
                 raise Exception(
                     "GPT Failure: User applied a prompt to the phrase context, but there was no context stored"
                 )
-            return format_message(messages_to_string(GPTState.context))
+            return format_message(messages_to_string(GPTState.context), True)
         case "thread":
             # TODO: Do we want to throw an exception here if the thread is empty?
-            return format_message(thread_to_string(GPTState.thread))
+            return format_message(thread_to_string(GPTState.thread), True)
         case "gptResponse":
             if GPTState.last_response == "":
                 raise Exception(
                     "GPT Failure: User applied a prompt to the phrase GPT response, but there was no GPT response stored"
                 )
-            return format_message(GPTState.last_response)
+            return format_message(GPTState.last_response, True)
         case "lastTalonDictation":
             last_output = actions.user.get_last_phrase()
             if last_output:
                 actions.user.clear_last_phrase()
-                return format_message(last_output)
+                return format_message(last_output, True)
             else:
                 notify("GPT Failure: No last dictation to reformat")
                 raise Exception(
                     "GPT Failure: User applied a prompt to the phrase last Talon Dictation, but there was no text to reformat"
                 )
         case "this" | _:
-            return format_message(actions.edit.selected_text())
+            return format_message(actions.edit.selected_text(), True)

diff --git a/lib/modelHelpers.py b/lib/modelHelpers.py
index 887b95ed..2a2cb845 100644
--- a/lib/modelHelpers.py
+++ b/lib/modelHelpers.py
@@ -64,7 +64,13 @@ def format_messages(
     }


-def format_message(content: str) -> GPTMessageItem:
+def format_message(content: str, wrapContent: bool = False) -> GPTMessageItem:
+    if wrapContent:
+        content = f"""
+        \"\"\"
+        {content}
+        \"\"\"
+        """
     return {"type": "text", "text": content}


@@ -87,7 +93,7 @@ def format_clipboard() -> GPTMessageItem:
             "User requested info from the clipboard but there is nothing in it"
         )

-    return format_message(clip.text())  # type: ignore Unclear why this is not narrowing the type
+    return format_message(clip.text(), True)  # type: ignore Unclear why this is not narrowing the type


 def send_request(

From 58f6e5e2670e6415d17ecd6e3b48058c926f7a73 Mon Sep 17 00:00:00 2001
From: Joshua Aresty
Date: Sun, 11 Aug 2024 00:53:48 -0700
Subject: [PATCH 2/2] Also wrapped custom command text with delimiters

---
 GPT/gpt.py | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/GPT/gpt.py b/GPT/gpt.py
index bc5d1f23..80638efb 100644
--- a/GPT/gpt.py
+++ b/GPT/gpt.py
@@ -50,7 +50,7 @@ def gpt_blend(source_text: str, destination_text: str):

         Please return only the final text. What follows is all of the source texts separated by '---'.
         """
-        result = gpt_query(format_message(prompt), format_message(source_text))
+        result = gpt_query(format_message(prompt), format_message(source_text, True))
         actions.user.gpt_insert_response(extract_message(result), "paste")

     def gpt_blend_list(source_text: list[str], destination_text: str):
@@ -70,7 +70,9 @@ def gpt_generate_shell(text_to_process: str) -> str:

         Condense the code into a single line such that it can be ran in the terminal.
         """
-        result = gpt_query(format_message(prompt), format_message(text_to_process))
+        result = gpt_query(
+            format_message(prompt), format_message(text_to_process, True)
+        )
         return result.get("text", "")

     def gpt_generate_sql(text_to_process: str) -> str:
@@ -82,9 +84,9 @@ def gpt_generate_sql(text_to_process: str) -> str:
         Do not output comments, backticks, or natural language explanations.
         Prioritize SQL queries that are database agnostic.
         """
-        return gpt_query(format_message(prompt), format_message(text_to_process)).get(
-            "text", ""
-        )
+        return gpt_query(
+            format_message(prompt), format_message(text_to_process, True)
+        ).get("text", "")

     def gpt_clear_context():
         """Reset the stored context"""
@@ -154,7 +156,7 @@ def gpt_run_prompt(destination: str, prompt: str, source: str) -> str:
     # Handle special cases in the prompt
     ### Ask is a special case, where the text to process is the prompted question, not selected text
     if prompt.startswith("ask"):
-        text_to_process = format_message(prompt.removeprefix("ask"))
+        text_to_process = format_message(prompt.removeprefix("ask"), True)
         prompt = "Generate text that satisfies the question or request given in the input."

     response = gpt_query(format_message(prompt), text_to_process, destination)
@@ -184,7 +186,7 @@ def gpt_reformat_last(how_to_reformat: str):
     last_output = actions.user.get_last_phrase()
     if last_output:
         actions.user.clear_last_phrase()
-        return gpt_query(format_message(PROMPT), format_message(last_output))
+        return gpt_query(format_message(PROMPT), format_message(last_output, True))
     else:
         notify("No text to reformat")
         raise Exception("No text to reformat")