diff --git a/GPT/beta-commands/beta-gpt.talon b/GPT/beta-commands/beta-gpt.talon index f12e0007..0c033530 100644 --- a/GPT/beta-commands/beta-gpt.talon +++ b/GPT/beta-commands/beta-gpt.talon @@ -11,3 +11,7 @@ model blend clip: destination_text = edit.selected_text() result = user.gpt_blend(clipboard_text, destination_text) user.gpt_insert_response(result, "") + +# Pass the raw text of a prompt to a destination without actually calling GPT with it +model pass [{user.modelDestination}]: + user.gpt_insert_response(modelPrompt, modelDestination or "") diff --git a/GPT/gpt.py b/GPT/gpt.py index 955a4009..0084af50 100644 --- a/GPT/gpt.py +++ b/GPT/gpt.py @@ -154,6 +154,11 @@ def gpt_apply_prompt( else text_to_process ) + # Apply modifiers to prompt before handling special cases + match modifier: + case "snip": + prompt += "\n\nPlease return the response as a textmate snippet for insertion into an editor with placeholders that the user should edit. Return just the snippet content - no XML and no heading." + # Ask is a special case, where the text to process is the prompted question, not the selected text if prompt.startswith("ask"): text_to_process = prompt.removeprefix("ask") @@ -162,10 +167,6 @@ def gpt_apply_prompt( elif prompt == "pass": return text_to_process - match modifier: - case "snip": - prompt += "\n\nPlease return the response as a textmate snippet for insertion into an editor with placeholders that the user should edit. Return just the snippet content - no XML and no heading." 
- return gpt_query(prompt, text_to_process) def gpt_help(): diff --git a/GPT/gpt.talon b/GPT/gpt.talon index c2b42a7d..00637ca2 100644 --- a/GPT/gpt.talon +++ b/GPT/gpt.talon @@ -10,21 +10,24 @@ model [{user.modelSource}] [{user.modelDestination}]: result = user.gpt_apply_prompt(modelPrompt, text) user.gpt_insert_response(result, modelDestination or "") -# Modifies a model command to be inserted as a snippet instead of a standard paste +# Select the last GPT response so you can edit it further +model take response: user.gpt_select_last() + +# Modifies a model command to be inserted as a snippet for VSCode instead of a standard paste # Otherwise same grammar as standard `model` command model snip [{user.modelSource}] [{user.modelDestination}]: text = user.gpt_get_source_text(modelSource or "") result = user.gpt_apply_prompt(modelPrompt, text, "snip") user.gpt_insert_response(result, modelDestination or "", "snip") +# Modifies a model command to always insert with the text selected +# Useful for chaining together prompts immediately after they return +# Otherwise same grammar as standard `model` command model chain [{user.modelSource}] [{user.modelDestination}]: text = user.gpt_get_source_text(modelSource or "") result = user.gpt_apply_prompt(modelPrompt, text) user.gpt_insert_response(result, modelDestination or "", "chain") -# Select the last GPT response so you can edit it further -model take response: user.gpt_select_last() - # Applies an arbitrary prompt from the clipboard to selected text and pastes the result. # Useful for applying complex/custom prompts that need to be drafted in a text editor. model apply [from] clip$: