From 9c7e7a665e6c655764144a62e886ff1a297db79f Mon Sep 17 00:00:00 2001
From: Joshua Aresty
Date: Wed, 14 Aug 2024 18:01:40 -0700
Subject: [PATCH] :sparkles: feat: Add 'explain prompt' functionality to AI tools

- Introduced a new prompt, 'explain prompt', which analyzes a prompt and its
  response and explains how the prompt was understood to generate that
  response.
- Renamed `thread_to_string` to `chats_to_string`, since it now formats any
  list of chat messages, not only the stored thread.
- Added the sources `gptRequest` and `gptExchange` so a prompt can be applied
  to the previous request, or to the previous request and response together.
- Updated `send_request` to store the last request and response on `GPTState`
  for better context management.
- Removed the now-redundant `GPTState.last_response` assignment in
  `gpt_query`.

Together, these changes make it possible to run 'explain prompt' and inspect
prompt-response mechanics.
---
 GPT/gpt.py                        | 11 ++++++++---
 GPT/lists/modelSource.talon-list  |  6 ++++++
 GPT/lists/staticPrompt.talon-list |  3 +++
 lib/modelHelpers.py               | 13 +++++++++----
 lib/modelState.py                 |  1 +
 5 files changed, 27 insertions(+), 7 deletions(-)

diff --git a/GPT/gpt.py b/GPT/gpt.py
index c32906c1..62ed5402 100644
--- a/GPT/gpt.py
+++ b/GPT/gpt.py
@@ -13,7 +13,7 @@
     messages_to_string,
     notify,
     send_request,
-    thread_to_string,
+    chats_to_string,
 )
 from ..lib.modelState import GPTState
 from ..lib.modelTypes import GPTMessageItem
@@ -36,7 +36,6 @@ def gpt_query(
     GPTState.last_was_pasted = False
 
     response = send_request(prompt, text_to_process, None, destination)
-    GPTState.last_response = extract_message(response)
     return response
 
 
@@ -313,13 +312,19 @@ def gpt_get_source_text(spoken_text: str) -> GPTMessageItem:
             return format_message(messages_to_string(GPTState.context))
         case "thread":
             # TODO: Do we want to throw an exception here if the thread is empty?
-            return format_message(thread_to_string(GPTState.thread))
+            return format_message(chats_to_string(GPTState.thread))
         case "gptResponse":
             if GPTState.last_response == "":
                 raise Exception(
                     "GPT Failure: User applied a prompt to the phrase GPT response, but there was no GPT response stored"
                 )
             return format_message(GPTState.last_response)
+
+        case "gptRequest":
+            return format_message(GPTState.last_request)
+        case "gptExchange":
+            return format_message(
+                GPTState.last_request + "\n\nassistant\n\n" + GPTState.last_response
+            )
 
         case "lastTalonDictation":
             last_output = actions.user.get_last_phrase()
diff --git a/GPT/lists/modelSource.talon-list b/GPT/lists/modelSource.talon-list
index f8a21279..b3939cd5 100644
--- a/GPT/lists/modelSource.talon-list
+++ b/GPT/lists/modelSource.talon-list
@@ -15,6 +15,12 @@ thread: thread
 # Apply the prompt to the currently selected text
 this: selection
 
+# Apply the prompt to the previous request
+request: gptRequest
+
+# Apply the prompt to the previous request and response
+exchange: gptExchange
+
 # Apply the prompt to the previously returned GPT response
 response: gptResponse
 
diff --git a/GPT/lists/staticPrompt.talon-list b/GPT/lists/staticPrompt.talon-list
index 08fde8e0..b067c83d 100644
--- a/GPT/lists/staticPrompt.talon-list
+++ b/GPT/lists/staticPrompt.talon-list
@@ -54,3 +54,6 @@ improve semantics: The following is an HTML document. Keep the same structure an
 add questions: Help me explore this question from multiple perspectives. For each perspective, ask follow-up questions and indicate what perspective is being taken.
 format outline: Create an outline that encapsulates the text below. Keep the number of sections between three and five to optimize for human working memory. Return just the outline.
 format prose: As an editor, format the following outline or summarization as prose. You can have headings and paragraphs. Avoid using bullet points. Reorder and add transitions as necessary to make the document flow. Return just the text.
+
+## AI TOOLS
+explain prompt: Analyze the provided prompt and response. Explain how the prompt was understood to generate the given response. Provide only the explanation.
diff --git a/lib/modelHelpers.py b/lib/modelHelpers.py
index b111ee20..47def25c 100644
--- a/lib/modelHelpers.py
+++ b/lib/modelHelpers.py
@@ -26,7 +26,7 @@ def messages_to_string(messages: list[GPTMessageItem]) -> str:
     return "\n\n".join(formatted_messages)
 
 
-def thread_to_string(chats: list[GPTMessage]) -> str:
+def chats_to_string(chats: list[GPTMessage]) -> str:
     """Format thread as a string"""
     formatted_messages = []
     for chat in chats:
@@ -163,12 +163,15 @@ def send_request(
         "content": content,
     }
 
-    data = {
-        "messages": [
+    messages = (
+        [
             format_messages("system", system_messages),
         ]
         + GPTState.thread
-        + [current_request],
+        + [current_request]
+    )
+    data = {
+        "messages": messages,
         "max_tokens": 2024,
         "temperature": settings.get("user.model_temperature"),
         "n": 1,
@@ -186,6 +189,8 @@
             notify("GPT Task Completed")
             resp = raw_response.json()["choices"][0]["message"]["content"].strip()
             formatted_resp = strip_markdown(resp)
+            GPTState.last_request = chats_to_string(messages)
+            GPTState.last_response = formatted_resp
             response = format_message(formatted_resp)
         case _:
             notify("GPT Failure: Check the Talon Log")
diff --git a/lib/modelState.py b/lib/modelState.py
index ec5fb2a7..da53198d 100644
--- a/lib/modelState.py
+++ b/lib/modelState.py
@@ -8,6 +8,7 @@ class GPTState:
     text_to_confirm: ClassVar[str] = ""
     last_response: ClassVar[str] = ""
+    last_request: ClassVar[str] = ""
     last_was_pasted: ClassVar[bool] = False
     context: ClassVar[list[GPTMessageItem]] = []
     thread: ClassVar[list[GPTMessage]] = []
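
For context, a minimal self-contained Python sketch (not part of the patch)
of the round-trip this change introduces: `send_request` records the outgoing
messages and the final reply on `GPTState`, and the new `gptExchange` source
replays both. The `fake_model_reply` stub is hypothetical and stands in for
the real HTTP call so the sketch runs offline; `GPTMessage` content items are
simplified to plain strings here.

    # Sketch only: mirrors the state handling added in lib/modelHelpers.py
    # and the new match arms in GPT/gpt.py. fake_model_reply is a made-up
    # stand-in for the real model endpoint call.

    class GPTState:
        last_request = ""
        last_response = ""

    def chats_to_string(chats: list[dict]) -> str:
        # Simplified: each chat is {"role": ..., "content": ...}; roles and
        # contents are joined with blank lines, as in the real helper.
        parts = []
        for chat in chats:
            parts.append(chat["role"])
            parts.append(chat["content"])
        return "\n\n".join(parts)

    def fake_model_reply(messages: list[dict]) -> str:
        return "A canned assistant reply."

    def send_request(prompt: str) -> str:
        messages = [{"role": "user", "content": prompt}]
        response = fake_model_reply(messages)
        # As in the patch: stash both sides of the exchange for later prompts.
        GPTState.last_request = chats_to_string(messages)
        GPTState.last_response = response
        return response

    def get_source_text(source: str) -> str:
        # Mirrors the new "gptRequest"/"gptExchange" cases in gpt_get_source_text.
        match source:
            case "gptRequest":
                return GPTState.last_request
            case "gptExchange":
                return (
                    GPTState.last_request
                    + "\n\nassistant\n\n"
                    + GPTState.last_response
                )
            case _:
                raise ValueError(f"unknown source: {source}")

    send_request("Summarize the release notes.")
    print(get_source_text("gptExchange"))  # role, prompt, then assistant reply

With real requests, applying 'explain prompt' to the exchange source feeds
exactly this request-plus-response concatenation back to the model.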