✨ feat: Add 'explain prompt' functionality to AI tools
- Introduced a new prompt, 'explain prompt', which analyzes and explains how a given prompt was understood to generate a response.
- Replaced `thread_to_string` with `chats_to_string` to improve clarity in handling chat threads.
- Added model sources `gptRequest` and `gptExchange` to support retrieving the previous request and the previous request-response exchange.
- Updated `send_request` to store the last request and response in `GPTState` for better context management.
- Removed redundant `GPTState.last_response` assignment in `gpt_query`.

Together, these changes make it possible to run 'explain prompt' against a stored request and response to understand prompt-response mechanics.
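
For orientation, here is a minimal sketch of the round trip these changes enable. The names mirror the diff below, but the model call is stubbed out and the signatures are simplified, so treat it as an illustration rather than the actual implementation:

```python
class GPTState:
    last_request: str = ""
    last_response: str = ""

def send_request(prompt: str) -> str:
    # Stand-in for the real API call in lib/modelHelpers.py.
    response = f"<model output for: {prompt}>"
    # This commit stores both sides of the exchange for later retrieval.
    GPTState.last_request = prompt
    GPTState.last_response = response
    return response

send_request("Summarize the text below. ...")

# 'explain prompt' can now target the stored exchange, rebuilt exactly as
# in the new "gptExchange" case of gpt_get_source_text:
exchange = GPTState.last_request + "\n\nassistant\n\n" + GPTState.last_response
print(exchange)
```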
jaresty committed Aug 15, 2024
1 parent afb96b2 commit 9c7e7a6
Showing 5 changed files with 27 additions and 7 deletions.
GPT/gpt.py (11 changes: 8 additions & 3 deletions)

@@ -13,7 +13,7 @@
     messages_to_string,
     notify,
     send_request,
-    thread_to_string,
+    chats_to_string,
 )
 from ..lib.modelState import GPTState
 from ..lib.modelTypes import GPTMessageItem
@@ -36,7 +36,6 @@ def gpt_query(
     GPTState.last_was_pasted = False
 
     response = send_request(prompt, text_to_process, None, destination)
-    GPTState.last_response = extract_message(response)
     return response


@@ -313,13 +312,19 @@ def gpt_get_source_text(spoken_text: str) -> GPTMessageItem:
             return format_message(messages_to_string(GPTState.context))
         case "thread":
             # TODO: Do we want to throw an exception here if the thread is empty?
-            return format_message(thread_to_string(GPTState.thread))
+            return format_message(chats_to_string(GPTState.thread))
         case "gptResponse":
             if GPTState.last_response == "":
                 raise Exception(
                     "GPT Failure: User applied a prompt to the phrase GPT response, but there was no GPT response stored"
                 )
             return format_message(GPTState.last_response)
+        case "gptRequest":
+            return format_message(GPTState.last_request)
+        case "gptExchange":
+            return format_message(
+                GPTState.last_request + "\n\nassistant\n\n" + GPTState.last_response
+            )
 
         case "lastTalonDictation":
             last_output = actions.user.get_last_phrase()
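
A quick usage sketch of the two new sources, with hypothetical stored values (`format_message` wraps a plain string into a `GPTMessageItem`):

```python
# Hypothetical state, for illustration only:
GPTState.last_request = "system\n\nSummarize the text below.\n\nuser\n\n<selected text>"
GPTState.last_response = "<the model's summary>"

gpt_get_source_text("gptRequest")   # message item holding the stored request
gpt_get_source_text("gptExchange")  # request + "\n\nassistant\n\n" + response
```

Note that, unlike `gptResponse`, the new cases do not raise when nothing has been stored yet; they simply return the empty default strings from `GPTState`.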
GPT/lists/modelSource.talon-list (6 changes: 6 additions & 0 deletions)

@@ -15,6 +15,12 @@ thread: thread
 # Apply the prompt to the currently selected text
 this: selection
 
+# Apply the prompt to the previous request
+request: gptRequest
+
+# Apply the prompt to the previous request and response
+exchange: gptExchange
+
 # Apply the prompt to the previously returned GPT response
 response: gptResponse
 
GPT/lists/staticPrompt.talon-list (3 changes: 3 additions & 0 deletions)

@@ -54,3 +54,6 @@ improve semantics: The following is an HTML document. Keep the same structure an
 add questions: Help me explore this question from multiple perspectives. For each perspective, ask follow-up questions and indicate what perspective is being taken.
 format outline: Create an outline that encapsulates the text below. Keep the number of sections between three and five to optimize for human working memory. Return just the outline.
 format prose: As an editor, format the following outline or summarization as prose. You can have headings and paragraphs. Avoid using bullet points. Reorder and add transitions as necessary to make the document flow. Return just the text.
+
+## AI TOOLS
+explain prompt: Analyze the provided prompt and response. Explain how the prompt was understood to generate the given response. Provide only the explanation.
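
Assuming the repo's usual `model <prompt> <source>` spoken grammar (which is not shown in this diff), these two list changes likely combine into utterances along the lines of "model explain prompt exchange", applying the new static prompt to the stored request-response pair.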
lib/modelHelpers.py (13 changes: 9 additions & 4 deletions)

@@ -26,7 +26,7 @@ def messages_to_string(messages: list[GPTMessageItem]) -> str:
     return "\n\n".join(formatted_messages)
 
 
-def thread_to_string(chats: list[GPTMessage]) -> str:
+def chats_to_string(chats: list[GPTMessage]) -> str:
     """Format thread as a string"""
     formatted_messages = []
     for chat in chats:
@@ -163,12 +163,15 @@ def send_request(
         "content": content,
     }
 
-    data = {
-        "messages": [
+    messages = (
+        [
             format_messages("system", system_messages),
         ]
         + GPTState.thread
-        + [current_request],
+        + [current_request]
+    )
+    data = {
+        "messages": messages,
         "max_tokens": 2024,
         "temperature": settings.get("user.model_temperature"),
         "n": 1,
@@ -186,6 +189,8 @@
             notify("GPT Task Completed")
             resp = raw_response.json()["choices"][0]["message"]["content"].strip()
             formatted_resp = strip_markdown(resp)
+            GPTState.last_request = chats_to_string(messages)
+            GPTState.last_response = formatted_resp
             response = format_message(formatted_resp)
         case _:
             notify("GPT Failure: Check the Talon Log")
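
Hoisting `messages` out of the `data` literal lets the same list be both sent to the API and serialized into `GPTState.last_request`. The body of `chats_to_string` is collapsed in this view; based on its visible signature, the loop opening, and the role-then-content layout implied by the `"\n\nassistant\n\n"` separator in `gpt.py`, a plausible reconstruction is the following (the exact field handling is an assumption, not shown in the diff):

```python
def chats_to_string(chats: list[GPTMessage]) -> str:
    """Format thread as a string"""
    formatted_messages = []
    for chat in chats:
        # Assumed GPTMessage shape: a role string plus a list of content items,
        # formatted by the module's existing messages_to_string helper.
        formatted_messages.append(chat.get("role", ""))
        formatted_messages.append(messages_to_string(chat.get("content", [])))
    return "\n\n".join(formatted_messages)
```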
lib/modelState.py (1 change: 1 addition & 0 deletions)

@@ -8,6 +8,7 @@
 class GPTState:
     text_to_confirm: ClassVar[str] = ""
     last_response: ClassVar[str] = ""
+    last_request: ClassVar[str] = ""
     last_was_pasted: ClassVar[bool] = False
     context: ClassVar[list[GPTMessageItem]] = []
     thread: ClassVar[list[GPTMessage]] = []
