From 1a0fa98796f847e5ab492cfca250d20538ec24f3 Mon Sep 17 00:00:00 2001
From: p1r473
Date: Fri, 3 May 2024 15:49:26 -0400
Subject: [PATCH 01/57] Create zsh-llm-suggestions-ollama.py

---
 zsh-llm-suggestions-ollama.py | 77 +++++++++++++++++++++++++++++++++++
 1 file changed, 77 insertions(+)
 create mode 100644 zsh-llm-suggestions-ollama.py

diff --git a/zsh-llm-suggestions-ollama.py b/zsh-llm-suggestions-ollama.py
new file mode 100644
index 0000000..be20182
--- /dev/null
+++ b/zsh-llm-suggestions-ollama.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python3
+import sys
+import subprocess
+import json
+import os
+
+MISSING_PREREQUISITES = "zsh-llm-suggestions missing prerequisites:"
+
+def highlight_explanation(explanation):
+    try:
+        import pygments
+        from pygments.lexers import MarkdownLexer
+        from pygments.formatters import TerminalFormatter
+        return pygments.highlight(explanation, MarkdownLexer(), TerminalFormatter(style='material'))
+    except ImportError:
+        print(f'echo "{MISSING_PREREQUISITES} Install pygments" && pip3 install pygments')
+        return explanation  # Return unhighlighted text if pygments is not installed
+
+def send_request(prompt, explain=False):
+    model = os.environ.get('ZSH_LLM_SUGGESTION_MODEL', 'tinyllama')
+    data = json.dumps({
+        "model": model,
+        "prompt": prompt,
+        "stream": False
+    })
+    try:
+        response = subprocess.run(
+            ["curl", "-XPOST", "localhost:11434/api/generate", "-d", data],
+            capture_output=True,
+            text=True,
+            timeout=60
+        )
+        if response.stdout:
+            # Parse the JSON response
+            json_response = json.loads(response.stdout)
+            # Extract the 'response' field from the JSON object
+            command_response = json_response.get('response', 'No response received.')
+            return command_response
+        else:
+            return "No response received."
+    except subprocess.TimeoutExpired:
+        return "Request timed out. Please try again."
+    except json.JSONDecodeError:
+        return "Failed to decode the response. Please check the API response format."
+    except Exception as e:
+        return f"Error: {str(e)}"
+
+def zsh_llm_suggestions_ollama(prompt):
+    try:
+        print("Processing your request...")
+        result = send_request(prompt)
+        return result
+    except Exception as e:
+        print(f"Error: {e}")
+        return ""
+
+def main():
+    mode = sys.argv[1]
+    if mode != 'generate' and mode != 'explain':
+        print("ERROR: something went wrong in zsh-llm-suggestions, please report a bug. Got unknown mode: " + mode)
+        return
+
+    buffer = sys.stdin.read()
+    if mode == 'generate':
+        message = f"You are a ZSH shell expert. Please write a ZSH command that solves my query. You should only output the completed command. Do not include any explanation at all. The query follows: '{buffer}'"
+    elif mode == 'explain':
+        message = f"You are a ZSH shell expert. Please briefly explain how the given command works. Be as concise as possible. Use Markdown syntax for formatting. The command follows: '{buffer}'"
+    result = zsh_llm_suggestions_ollama(message)
+
+    if mode == 'generate':
+        result = result.replace('```bash', '').replace('```zsh', '').replace('```', '').strip()
+        print(result)
+    elif mode == 'explain':
+        print(highlight_explanation(result))
+
+if __name__ == '__main__':
+    main()

From d3623748f4af028f09f115bf0d58e9355d16b939 Mon Sep 17 00:00:00 2001
From: p1r473
Date: Fri, 3 May 2024 15:51:03 -0400
Subject: [PATCH 02/57] Create zsh-llm-suggestions.plugin.zsh

---
 zsh-llm-suggestions.plugin.zsh | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 zsh-llm-suggestions.plugin.zsh

diff --git a/zsh-llm-suggestions.plugin.zsh b/zsh-llm-suggestions.plugin.zsh
new file mode 100644
index 0000000..a9975ae
--- /dev/null
+++ b/zsh-llm-suggestions.plugin.zsh
@@ -0,0 +1 @@
+source ${0:A:h}/zsh-llm-suggestions.zsh

From ce2fbe38ed94b679705ce748275eaec19a292b56 Mon Sep 17 00:00:00 2001
From: p1r473
Date: Fri, 3 May 2024 17:01:48 -0400
Subject: [PATCH 03/57] Update zsh-llm-suggestions-ollama.py

---
 zsh-llm-suggestions-ollama.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/zsh-llm-suggestions-ollama.py b/zsh-llm-suggestions-ollama.py
index be20182..56847a8 100644
--- a/zsh-llm-suggestions-ollama.py
+++ b/zsh-llm-suggestions-ollama.py
@@ -16,7 +16,7 @@ def highlight_explanation(explanation):
         print(f'echo "{MISSING_PREREQUISITES} Install pygments" && pip3 install pygments')
         return explanation  # Return unhighlighted text if pygments is not installed
 
-def send_request(prompt, explain=False):
+def send_request(prompt):
     model = os.environ.get('ZSH_LLM_SUGGESTION_MODEL', 'tinyllama')
     data = json.dumps({
         "model": model,

From 6baaa9995ac18dc5b53a61646d7235f0a2264070 Mon Sep 17 00:00:00 2001
From: p1r473
Date: Fri, 3 May 2024 17:04:49 -0400
Subject: [PATCH 04/57] Update zsh-llm-suggestions.zsh

---
 zsh-llm-suggestions.zsh | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/zsh-llm-suggestions.zsh b/zsh-llm-suggestions.zsh
index cfcb0dc..73975e8 100644
--- a/zsh-llm-suggestions.zsh
+++ b/zsh-llm-suggestions.zsh
@@ -94,7 +94,22 @@ zsh_llm_suggestions_github_copilot_explain() {
   zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-github-copilot.py" "explain"
 }
 
+zsh_llm_suggestions_ollama() {
+  zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-ollama.py" "generate"
+}
+
+zsh_llm_suggestions_ollama_explain() {
+  zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-ollama.py" "explain"
+}
+
+zsh_llm_suggestions_ollama_freestyle() {
+  zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-ollama.py" "freestyle"
+}
+
 zle -N zsh_llm_suggestions_openai
 zle -N zsh_llm_suggestions_openai_explain
 zle -N zsh_llm_suggestions_github_copilot
 zle -N zsh_llm_suggestions_github_copilot_explain
+zle -N zsh_llm_suggestions_ollama
+zle -N zsh_llm_suggestions_ollama_explain
+zle -N zsh_llm_suggestions_ollama_freestyle

From 6fc92ea66c6c6cd25fc454514a93127fcbdc1a2d Mon Sep 17 00:00:00 2001
From: p1r473
Date: Fri, 3 May 2024 17:06:20 -0400
Subject: [PATCH 05/57] Update zsh-llm-suggestions-ollama.py

---
 zsh-llm-suggestions-ollama.py | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/zsh-llm-suggestions-ollama.py b/zsh-llm-suggestions-ollama.py
index 56847a8..e555fd6 100644
--- a/zsh-llm-suggestions-ollama.py
+++ b/zsh-llm-suggestions-ollama.py
@@ -25,7 +25,7 @@ def send_request(prompt):
     })
     try:
         response = subprocess.run(
-            ["curl", "-XPOST", "localhost:11434/api/generate", "-d", data],
+            ["curl", "-XPOST", "localhost:11434/api/generate", "-H", "Content-Type: application/json", "-d", data],
             capture_output=True,
             text=True,
             timeout=60
@@ -56,22 +56,26 @@ def main():
     mode = sys.argv[1]
-    if mode != 'generate' and mode != 'explain':
+    if mode != 'generate' and mode != 'explain' and mode != 'freestyle':
         print("ERROR: something went wrong in zsh-llm-suggestions, please report a bug. Got unknown mode: " + mode)
         return
-
+    print("Mode " + mode)
     buffer = sys.stdin.read()
     if mode == 'generate':
         message = f"You are a ZSH shell expert. Please write a ZSH command that solves my query. You should only output the completed command. Do not include any explanation at all. The query follows: '{buffer}'"
     elif mode == 'explain':
         message = f"You are a ZSH shell expert. Please briefly explain how the given command works. Be as concise as possible. Use Markdown syntax for formatting. The command follows: '{buffer}'"
+    elif mode == 'freestyle':
+        message = f"{buffer}"
     result = zsh_llm_suggestions_ollama(message)
-
+
     if mode == 'generate':
         result = result.replace('```bash', '').replace('```zsh', '').replace('```', '').strip()
         print(result)
     elif mode == 'explain':
         print(highlight_explanation(result))
+    elif mode == 'freestyle':
+        print(result)
 
 if __name__ == '__main__':
     main()

From 8334f6abd1fba4892b5562ab39091e507cecb152 Mon Sep 17 00:00:00 2001
From: p1r473
Date: Fri, 3 May 2024 17:56:05 -0400
Subject: [PATCH 06/57] Update zsh-llm-suggestions-ollama.py

---
 zsh-llm-suggestions-ollama.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/zsh-llm-suggestions-ollama.py b/zsh-llm-suggestions-ollama.py
index e555fd6..dbd4c29 100644
--- a/zsh-llm-suggestions-ollama.py
+++ b/zsh-llm-suggestions-ollama.py
@@ -59,16 +59,14 @@ def main():
     if mode != 'generate' and mode != 'explain' and mode != 'freestyle':
         print("ERROR: something went wrong in zsh-llm-suggestions, please report a bug. Got unknown mode: " + mode)
         return
-    print("Mode " + mode)
     buffer = sys.stdin.read()
     if mode == 'generate':
         message = f"You are a ZSH shell expert. Please write a ZSH command that solves my query. You should only output the completed command. Do not include any explanation at all. The query follows: '{buffer}'"
     elif mode == 'explain':
         message = f"You are a ZSH shell expert. Please briefly explain how the given command works. Be as concise as possible. Use Markdown syntax for formatting. The command follows: '{buffer}'"
     elif mode == 'freestyle':
-        message = f"{buffer}"
+        message = buffer
     result = zsh_llm_suggestions_ollama(message)
-
     if mode == 'generate':
         result = result.replace('```bash', '').replace('```zsh', '').replace('```', '').strip()
         print(result)

From 50b12597528206ca06d8cb14bcb6525c3aac5fc4 Mon Sep 17 00:00:00 2001
From: p1r473
Date: Fri, 3 May 2024 17:56:53 -0400
Subject: [PATCH 07/57] Update zsh-llm-suggestions.zsh

---
 zsh-llm-suggestions.zsh | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/zsh-llm-suggestions.zsh b/zsh-llm-suggestions.zsh
index 73975e8..24539d1 100644
--- a/zsh-llm-suggestions.zsh
+++ b/zsh-llm-suggestions.zsh
@@ -74,6 +74,13 @@ zsh_llm_completion() {
     echo ""
     zle reset-prompt
   fi
+  if [[ "$mode" == "freestyle" ]]; then
+    echo ""
+    eval "cat $result_file"
+    echo ""
+    zle reset-prompt
+  fi
+
 }
 
 SCRIPT_DIR=$( cd -- "$( dirname -- "$0" )" &> /dev/null && pwd )

From 3b09a3a6de985d0518f2d31cbf791dfddc11ea3f Mon Sep 17 00:00:00 2001
From: p1r473
Date: Fri, 3 May 2024 18:04:37 -0400
Subject: [PATCH 08/57] Update README.md

---
 README.md | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 6d6dc59..336241a 100644
--- a/README.md
+++ b/README.md
@@ -29,6 +29,9 @@ bindkey '^o' zsh_llm_suggestions_openai # Ctrl + O to have OpenAI suggest a comm
 bindkey '^[^o' zsh_llm_suggestions_openai_explain # Ctrl + alt + O to have OpenAI explain a command
 bindkey '^p' zsh_llm_suggestions_github_copilot # Ctrl + P to have GitHub Copilot suggest a command given a English description
 bindkey '^[^p' zsh_llm_suggestions_github_copilot_explain # Ctrl + alt + P to have GitHub Copilot explain a command
+bindkey '^q' zsh_llm_suggestions_ollama # Ctrl + Q to have Ollama suggest a command given an English description
+bindkey '^[^q' zsh_llm_suggestions_ollama_explain # Ctrl + alt + Q to have Ollama explain a command
+bindkey '^r' zsh_llm_suggestions_ollama_freestyle # Ctrl + R to send a regular prompt to Ollama
 ```
 
 Make sure `python3` is installed.
@@ -83,6 +86,10 @@ If you typed a command (or maybe the LLM generated one) that you don't understan
 ctrl+alt+O to have OpenAI explain the command in English, or hit ctrl+alt+P to
 have GitHub Copilot explain it.
 
+### Freestyle
+
+Send a regular good old LLM prompt to Ollama.
+
 ## Warning
 
 There are some risks using `zsh-llm-suggestions`:
@@ -92,6 +99,7 @@ There are some risks using `zsh-llm-suggestions`:
 
 ## Supported LLMs
 
-Right now, two LLMs are supported:
+Right now, three LLMs are supported:
 1. GitHub Copilot (via GitHub CLI). Requires a GitHub Copilot subscription.
 2. OpenAI. Requires an OpenAI API key. Currently uses `gpt-4-1106-preview`.
+3. Ollama - can use any model.
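The README patch above binds the three new Ollama widgets to Ctrl+Q, Ctrl+Alt+Q, and Ctrl+R. Before wiring up the keys, it helps to confirm that the endpoint the Python backend shells out to is actually reachable. A minimal sanity check from zsh, assuming a local Ollama daemon on its default port, using the same request shape and `tinyllama` fallback model as patch 01:

```
# POST the same payload zsh-llm-suggestions-ollama.py builds: model, prompt,
# and streaming disabled. The model falls back to the script's default.
model="${ZSH_LLM_SUGGESTION_MODEL:-tinyllama}"
curl -s -X POST "http://localhost:11434/api/generate" \
  -H "Content-Type: application/json" \
  -d '{"model": "'"$model"'", "prompt": "say hello", "stream": false}'
```

If the daemon is up, the reply is a JSON object whose `response` field holds the text the plugin would print.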
From 038b71e87087176da2adf91a5a23767e5a7bb6fe Mon Sep 17 00:00:00 2001
From: p1r473
Date: Fri, 3 May 2024 18:28:27 -0400
Subject: [PATCH 09/57] Update zsh-llm-suggestions.zsh

---
 zsh-llm-suggestions.zsh | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/zsh-llm-suggestions.zsh b/zsh-llm-suggestions.zsh
index 24539d1..7fe7c4c 100644
--- a/zsh-llm-suggestions.zsh
+++ b/zsh-llm-suggestions.zsh
@@ -74,13 +74,17 @@ zsh_llm_completion() {
     echo ""
     zle reset-prompt
   fi
-  if [[ "$mode" == "freestyle" ]]; then
+  if [[ "$mode" == "freestyle" ]]; then
+    # Clear the current line
+    BUFFER=""
+    # Optionally, you might want to reset the cursor position
+    CURSOR=0
+    # Now, display the content from the result file and reset the prompt
     echo ""
-    eval "cat $result_file"
+    eval "cat '$result_file'"
     echo ""
-    zle reset-prompt
+    zle reset-prompt
   fi
-
 }

From eeeffdc72b2b7715c44b3641363dfc5b4ac10b27 Mon Sep 17 00:00:00 2001
From: p1r473
Date: Fri, 3 May 2024 19:36:18 -0400
Subject: [PATCH 10/57] Update zsh-llm-suggestions-ollama.py

---
 zsh-llm-suggestions-ollama.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/zsh-llm-suggestions-ollama.py b/zsh-llm-suggestions-ollama.py
index dbd4c29..c9290ff 100644
--- a/zsh-llm-suggestions-ollama.py
+++ b/zsh-llm-suggestions-ollama.py
@@ -17,6 +17,7 @@ def highlight_explanation(explanation):
         return explanation  # Return unhighlighted text if pygments is not installed
 
 def send_request(prompt):
+    server_address = os.environ.get('ZSH_LLM_SUGGESTION_SERVER', 'localhost:11434')
     model = os.environ.get('ZSH_LLM_SUGGESTION_MODEL', 'tinyllama')
     data = json.dumps({
         "model": model,
@@ -25,7 +26,7 @@ def send_request(prompt):
     })
     try:
         response = subprocess.run(
-            ["curl", "-XPOST", "localhost:11434/api/generate", "-H", "Content-Type: application/json", "-d", data],
+            ["curl", "-XPOST", f"http://{server_address}/api/generate", "-H", "Content-Type: application/json", "-d", data],
             capture_output=True,
             text=True,
             timeout=60
@@ -45,6 +46,7 @@ def send_request(prompt):
     except Exception as e:
         return f"Error: {str(e)}"
 
+
 def zsh_llm_suggestions_ollama(prompt):
     try:
         print("Processing your request...")

From fc521ffcf6ebd5ccbc37d97a07b824c3d370ac59 Mon Sep 17 00:00:00 2001
From: p1r473
Date: Fri, 3 May 2024 19:52:59 -0400
Subject: [PATCH 11/57] Update zsh-llm-suggestions-ollama.py

---
 zsh-llm-suggestions-ollama.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/zsh-llm-suggestions-ollama.py b/zsh-llm-suggestions-ollama.py
index c9290ff..8b8137b 100644
--- a/zsh-llm-suggestions-ollama.py
+++ b/zsh-llm-suggestions-ollama.py
@@ -49,7 +49,6 @@ def send_request(prompt):
 
 def zsh_llm_suggestions_ollama(prompt):
     try:
-        print("Processing your request...")
         result = send_request(prompt)
         return result
     except Exception as e:

From 413b299c02cb760df252f7b2d248e32f24a36f52 Mon Sep 17 00:00:00 2001
From: p1r473
Date: Fri, 3 May 2024 20:45:43 -0400
Subject: [PATCH 12/57] Update zsh-llm-suggestions-ollama.py

---
 zsh-llm-suggestions-ollama.py | 75 ++++++++++++++++++++++++-----------
 1 file changed, 52 insertions(+), 23 deletions(-)

diff --git a/zsh-llm-suggestions-ollama.py b/zsh-llm-suggestions-ollama.py
index 8b8137b..ca2bf27 100644
--- a/zsh-llm-suggestions-ollama.py
+++ b/zsh-llm-suggestions-ollama.py
@@ -16,58 +16,87 @@ def highlight_explanation(explanation):
         print(f'echo "{MISSING_PREREQUISITES} Install pygments" && pip3 install pygments')
         return explanation  # Return unhighlighted text if pygments is not installed
 
-def send_request(prompt):
+def send_request(prompt, system_message=None, context=None):
     server_address = os.environ.get('ZSH_LLM_SUGGESTION_SERVER', 'localhost:11434')
     model = os.environ.get('ZSH_LLM_SUGGESTION_MODEL', 'tinyllama')
-    data = json.dumps({
+    data = {
         "model": model,
         "prompt": prompt,
+        "keep_alive": "30m",
         "stream": False
-    })
+    }
+    if system_message:
+        data["system"] = system_message
+    if context:
+        data["context"] = context
+
     try:
         response = subprocess.run(
-            ["curl", "-XPOST", f"http://{server_address}/api/generate", "-H", "Content-Type: application/json", "-d", data],
+            ["curl", "-XPOST", f"http://{server_address}/api/generate", "-H", "Content-Type: application/json", "-d", json.dumps(data)],
             capture_output=True,
             text=True,
             timeout=60
         )
         if response.stdout:
-            # Parse the JSON response
             json_response = json.loads(response.stdout)
-            # Extract the 'response' field from the JSON object
-            command_response = json_response.get('response', 'No response received.')
-            return command_response
+            return json_response.get('response', 'No response received.'), json_response.get('context', None)
         else:
-            return "No response received."
+            return "No response received.", None
     except subprocess.TimeoutExpired:
-        return "Request timed out. Please try again."
+        return "Request timed out. Please try again.", None
     except json.JSONDecodeError:
-        return "Failed to decode the response. Please check the API response format."
+        return "Failed to decode the response. Please check the API response format.", None
     except Exception as e:
-        return f"Error: {str(e)}"
-
+        return f"Error: {str(e)}", None
 
-def zsh_llm_suggestions_ollama(prompt):
+def zsh_llm_suggestions_ollama(prompt, system_message=None, context=None):
     try:
-        result = send_request(prompt)
-        return result
+        result, new_context = send_request(prompt, system_message, context)
+        return result, new_context
     except Exception as e:
         print(f"Error: {e}")
-        return ""
+        return "", None
 
 def main():
     mode = sys.argv[1]
-    if mode != 'generate' and mode != 'explain' and mode != 'freestyle':
+    if mode not in ['generate', 'explain', 'freestyle']:
        print("ERROR: something went wrong in zsh-llm-suggestions, please report a bug. Got unknown mode: " + mode)
         return
+
     buffer = sys.stdin.read()
+    system_message = None
     if mode == 'generate':
-        message = f"You are a ZSH shell expert. Please write a ZSH command that solves my query. You should only output the completed command. Do not include any explanation at all. The query follows: '{buffer}'"
+        system_message = "You are a ZSH shell expert. Please write a ZSH command that solves my query. Do not include any explanation at all."
     elif mode == 'explain':
-        message = f"You are a ZSH shell expert. Please briefly explain how the given command works. Be as concise as possible. Use Markdown syntax for formatting. The command follows: '{buffer}'"
-    elif mode == 'freestyle':
-        message = buffer
-    result = zsh_llm_suggestions_ollama(message)
+        system_message = "You are a ZSH shell expert. Please briefly explain how the given command works. Be as concise as possible. Use Markdown syntax for formatting."
+
+    # Load the previous context
+    try:
+        with open(os.path.expanduser('~/.ollama_history'), 'r') as file:
+            file_contents = file.read().strip()  # Read and strip whitespace
+            if file_contents:  # Check if the file is not empty
+                context = json.loads(file_contents)
+            else:
+                context = None  # Set context to None if the file is empty
+    except FileNotFoundError:
+        context = None  # Handle the case where the file does not exist
+    except json.JSONDecodeError:
+        print("Failed to decode JSON from context file. It may be corrupt or empty.")
+        context = None  # Reset context if there's an error decoding it
+    except Exception as e:
+        print(f"Unexpected error when loading context: {e}")
+        context = None
+
+    result, new_context = zsh_llm_suggestions_ollama(buffer, system_message, context)
+
+    # Store the new context
+    try:
+        with open(os.path.expanduser('~/.ollama_history'), 'w') as file:
+            if new_context is not None:
+                file.write(json.dumps(new_context))  # Assuming new_context needs to be serialized
+    except Exception as e:
+        print(f"Error saving context: {e}")
+
     if mode == 'generate':
         result = result.replace('```bash', '').replace('```zsh', '').replace('```', '').strip()
         print(result)

From 2b44956139bad95ab4c830dfcd0fb0059f25d99a Mon Sep 17 00:00:00 2001
From: p1r473
Date: Fri, 3 May 2024 21:05:27 -0400
Subject: [PATCH 13/57] Update zsh-llm-suggestions-ollama.py

---
 zsh-llm-suggestions-ollama.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/zsh-llm-suggestions-ollama.py b/zsh-llm-suggestions-ollama.py
index ca2bf27..74543b1 100644
--- a/zsh-llm-suggestions-ollama.py
+++ b/zsh-llm-suggestions-ollama.py
@@ -99,11 +99,12 @@ def main():
 
     if mode == 'generate':
         result = result.replace('```bash', '').replace('```zsh', '').replace('```', '').strip()
-        print(result)
+        print(result)  # Print the result after stripping
     elif mode == 'explain':
-        print(highlight_explanation(result))
+        explanation = highlight_explanation(result)
+        print(explanation.strip())  # Strip after processing if necessary
     elif mode == 'freestyle':
-        print(result)
+        print(result.strip())  # Strip before printing
 
 if __name__ == '__main__':
     main()
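Patch 12 above is the substantial one: `send_request` now passes an optional `system` prompt and a `context` token to `/api/generate`, keeps the model warm with `keep_alive`, and persists the returned context in `~/.ollama_history`, so consecutive freestyle prompts behave like one conversation. A rough sketch of that round trip in plain zsh, assuming `jq` is installed, to illustrate what the script saves and replays:

```
# First turn: keep the context token Ollama returns alongside the response.
reply=$(curl -s -X POST "http://localhost:11434/api/generate" \
  -H "Content-Type: application/json" \
  -d '{"model": "tinyllama", "prompt": "My name is Pat.", "stream": false}')
print -r -- "$reply" | jq -r '.response'
print -r -- "$reply" | jq -c '.context' > ~/.ollama_history  # what the plugin stores

# Second turn: feed the saved context back so the model recalls the first turn.
curl -s -X POST "http://localhost:11434/api/generate" \
  -H "Content-Type: application/json" \
  -d '{"model": "tinyllama", "prompt": "What is my name?", "stream": false,
       "context": '"$(cat ~/.ollama_history)"'}' | jq -r '.response'
```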
From 21070caffd81eeb076dd1e81c94da220a3d3feed Mon Sep 17 00:00:00 2001
From: p1r473
Date: Fri, 3 May 2024 21:38:17 -0400
Subject: [PATCH 14/57] Update zsh-llm-suggestions-ollama.py

---
 zsh-llm-suggestions-ollama.py | 120 +++++++++++++++++++++++++---------
 1 file changed, 90 insertions(+), 30 deletions(-)

diff --git a/zsh-llm-suggestions-ollama.py b/zsh-llm-suggestions-ollama.py
index 74543b1..a350269 100644
--- a/zsh-llm-suggestions-ollama.py
+++ b/zsh-llm-suggestions-ollama.py
@@ -3,6 +3,57 @@ import sys
 import subprocess
 import json
 import os
+import platform
+import distro
+import subprocess
+import os
+import socket
+import psutil
+
+def get_system_load():
+    cpu_usage = psutil.cpu_percent(interval=1)
+    memory_usage = psutil.virtual_memory().percent
+    return cpu_usage, memory_usage
+
+def get_shell_version():
+    result = subprocess.run(["zsh", "--version"], capture_output=True, text=True)
+    return result.stdout.strip()
+
+def is_user_root():
+    return os.geteuid() == 0
+
+def get_cpu_architecture():
+    return platform.machine()
+
+def get_network_info():
+    hostname = socket.gethostname()
+    ip_address = socket.gethostbyname(hostname)
+    return hostname, ip_address
+
+def get_env_vars():
+    path = os.getenv('PATH')
+    home = os.getenv('HOME')
+    ld_library_path = os.getenv('LD_LIBRARY_PATH')
+    return path, home, ld_library_path
+
+def get_current_username():
+    return os.environ.get('USER', os.environ.get('USERNAME', 'Unknown User'))
+
+def get_os_info():
+    try:
+        # This will work on Linux distributions with the distro module installed
+        os_id = distro.id()
+        os_version = distro.version()
+        os_name = distro.name()
+        return f"{os_name} ({os_id} {os_version})".strip()
+    except ModuleNotFoundError:
+        # Fallback for non-Linux platforms
+        system = platform.system()
+        version = platform.version()
+        return f"{system} {version}".strip()
+
+def filter_non_ascii(text):
+    return ''.join(char for char in text if ord(char) < 128)
 
 MISSING_PREREQUISITES = "zsh-llm-suggestions missing prerequisites:"
 
@@ -65,38 +116,47 @@ def main():
 
     buffer = sys.stdin.read()
     system_message = None
+    context = None  # Initialize context to None
+
+    os_info = get_os_info()
+    shell_version = get_shell_version()
+    user_is_root = is_user_root()
+    cpu_arch = get_cpu_architecture()
+    hostname, ip_address = get_network_info()
+    path, home, ld_library_path = get_env_vars()
+    username = get_current_username()
+    cpu_usage, memory_usage = get_system_load()
     if mode == 'generate':
-        system_message = "You are a ZSH shell expert. Please write a ZSH command that solves my query. Do not include any explanation at all."
+        system_message = f"You are a ZSH shell expert using {os_info} on {cpu_arch}, shell version {shell_version}, running as {'root' if user_is_root else 'non-root'} as user {username}. Your system is on {hostname} ({ip_address}), with CPU usage at {cpu_usage}% and memory usage at {memory_usage}%. Please write a ZSH command that solves my query without any additional explanation."
     elif mode == 'explain':
-        system_message = "You are a ZSH shell expert. Please briefly explain how the given command works. Be as concise as possible. Use Markdown syntax for formatting."
-
-    # Load the previous context
-    try:
-        with open(os.path.expanduser('~/.ollama_history'), 'r') as file:
-            file_contents = file.read().strip()  # Read and strip whitespace
-            if file_contents:  # Check if the file is not empty
-                context = json.loads(file_contents)
-            else:
-                context = None  # Set context to None if the file is empty
-    except FileNotFoundError:
-        context = None  # Handle the case where the file does not exist
-    except json.JSONDecodeError:
-        print("Failed to decode JSON from context file. It may be corrupt or empty.")
-        context = None  # Reset context if there's an error decoding it
-    except Exception as e:
-        print(f"Unexpected error when loading context: {e}")
-        context = None
+        system_message = f"You are a ZSH shell expert using {os_info} on {cpu_arch}, shell version {shell_version}, running as {'root' if user_is_root else 'non-root'} as user {username}. Your system is on {hostname} ({ip_address}), with CPU usage at {cpu_usage}% and memory usage at {memory_usage}%. Please briefly explain how the given command works. Be as concise as possible using Markdown syntax."
+    elif mode == 'freestyle':
+        system_message = f"You are a ZSH shell expert using {os_info} on {cpu_arch}, shell version {shell_version}, running as {'root' if user_is_root else 'non-root'} as user {username}. Your system is on {hostname} ({ip_address}), with CPU usage at {cpu_usage}% and memory usage at {memory_usage}%."
+        print(system_message)
+        # Load the previous context only for freestyle mode
+        try:
+            with open(os.path.expanduser('~/.ollama_history'), 'r') as file:
+                file_contents = file.read().strip()
+                if file_contents:
+                    context = json.loads(file_contents)
+        except FileNotFoundError:
+            context = None  # Handle the case where the file does not exist
+        except json.JSONDecodeError:
+            print("Failed to decode JSON from context file. It may be corrupt or empty.")
+            context = None
+        except Exception as e:
+            print(f"Unexpected error when loading context: {e}")
 
     result, new_context = zsh_llm_suggestions_ollama(buffer, system_message, context)
 
-    # Store the new context
-    try:
-        with open(os.path.expanduser('~/.ollama_history'), 'w') as file:
-            if new_context is not None:
-                file.write(json.dumps(new_context))  # Assuming new_context needs to be serialized
-    except Exception as e:
-        print(f"Error saving context: {e}")
+    if mode == 'freestyle':
+        # Save the new context only for freestyle mode
+        try:
+            with open(os.path.expanduser('~/.ollama_history'), 'w') as file:
+                if new_context is not None:
+                    file.write(json.dumps(new_context))
+        except Exception as e:
+            print(f"Error saving context: {e}")
 
     if mode == 'generate':
         result = result.replace('```bash', '').replace('```zsh', '').replace('```', '').strip()
         print(result)

From 03d9c37e049596142732216580efe626c2d9df1a Mon Sep 17 00:00:00 2001
From: p1r473
Date: Fri, 3 May 2024 21:44:17 -0400
Subject: [PATCH 15/57] Update zsh-llm-suggestions-ollama.py

---
 zsh-llm-suggestions-ollama.py | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/zsh-llm-suggestions-ollama.py b/zsh-llm-suggestions-ollama.py
index a350269..14ef96f 100644
--- a/zsh-llm-suggestions-ollama.py
+++ b/zsh-llm-suggestions-ollama.py
@@ -127,12 +127,11 @@ def main():
     username = get_current_username()
     cpu_usage, memory_usage = get_system_load()
     if mode == 'generate':
-        system_message = f"You are a ZSH shell expert using {os_info} on {cpu_arch}, shell version {shell_version}, running as {'root' if user_is_root else 'non-root'} as user {username}. Your system is on {hostname} ({ip_address}), with CPU usage at {cpu_usage}% and memory usage at {memory_usage}%. Please write a ZSH command that solves my query without any additional explanation."
+        system_message = f"You are a ZSH shell expert using {os_info} on {cpu_arch}, shell version {shell_version}, running as {'root' if user_is_root else f'non-root as user {username}'}. Your system is on {hostname} ({ip_address}), with CPU usage at {cpu_usage}% and memory usage at {memory_usage}%. Please write a ZSH command that solves my query without any additional explanation."
     elif mode == 'explain':
-        system_message = f"You are a ZSH shell expert using {os_info} on {cpu_arch}, shell version {shell_version}, running as {'root' if user_is_root else 'non-root'} as user {username}. Your system is on {hostname} ({ip_address}), with CPU usage at {cpu_usage}% and memory usage at {memory_usage}%. Please briefly explain how the given command works. Be as concise as possible using Markdown syntax."
+        system_message = f"You are a ZSH shell expert using {os_info} on {cpu_arch}, shell version {shell_version}, running as {'root' if user_is_root else f'non-root as user {username}'}. Your system is on {hostname} ({ip_address}), with CPU usage at {cpu_usage}% and memory usage at {memory_usage}%. Please briefly explain how the given command works. Be as concise as possible using Markdown syntax."
     elif mode == 'freestyle':
-        system_message = f"You are a ZSH shell expert using {os_info} on {cpu_arch}, shell version {shell_version}, running as {'root' if user_is_root else 'non-root'} as user {username}. Your system is on {hostname} ({ip_address}), with CPU usage at {cpu_usage}% and memory usage at {memory_usage}%."
-        print(system_message)
+        system_message = f"You are a ZSH shell expert using {os_info} on {cpu_arch}, shell version {shell_version}, running as {'root' if user_is_root else f'non-root as user {username}'}. Your system is on {hostname} ({ip_address}), with CPU usage at {cpu_usage}% and memory usage at {memory_usage}%."
         # Load the previous context only for freestyle mode
         try:
             with open(os.path.expanduser('~/.ollama_history'), 'r') as file:

From f710601a18a1a59328cb79dd367b1b2207a562a6 Mon Sep 17 00:00:00 2001
From: p1r473
Date: Fri, 3 May 2024 21:48:28 -0400
Subject: [PATCH 16/57] Update zsh-llm-suggestions-ollama.py

---
 zsh-llm-suggestions-ollama.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/zsh-llm-suggestions-ollama.py b/zsh-llm-suggestions-ollama.py
index 14ef96f..1f8816d 100644
--- a/zsh-llm-suggestions-ollama.py
+++ b/zsh-llm-suggestions-ollama.py
@@ -131,7 +131,7 @@ def main():
     elif mode == 'explain':
         system_message = f"You are a ZSH shell expert using {os_info} on {cpu_arch}, shell version {shell_version}, running as {'root' if user_is_root else f'non-root as user {username}'}. Your system is on {hostname} ({ip_address}), with CPU usage at {cpu_usage}% and memory usage at {memory_usage}%. Please briefly explain how the given command works. Be as concise as possible using Markdown syntax."
     elif mode == 'freestyle':
-        system_message = f"You are a ZSH shell expert using {os_info} on {cpu_arch}, shell version {shell_version}, running as {'root' if user_is_root else f'non-root as user {username}'}. Your system is on {hostname} ({ip_address}), with CPU usage at {cpu_usage}% and memory usage at {memory_usage}%."
+        # system_message = f"You are a ZSH shell expert using {os_info} on {cpu_arch}, shell version {shell_version}, running as {'root' if user_is_root else f'non-root as user {username}'}. Your system is on {hostname} ({ip_address}), with CPU usage at {cpu_usage}% and memory usage at {memory_usage}%."
         # Load the previous context only for freestyle mode
         try:
             with open(os.path.expanduser('~/.ollama_history'), 'r') as file:

From bf65c6dc5c25d0c58d27560bfdd1ee1962aa120c Mon Sep 17 00:00:00 2001
From: p1r473
Date: Fri, 3 May 2024 21:52:30 -0400
Subject: [PATCH 17/57] Update zsh-llm-suggestions-ollama.py

---
 zsh-llm-suggestions-ollama.py | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/zsh-llm-suggestions-ollama.py b/zsh-llm-suggestions-ollama.py
index 1f8816d..c456082 100644
--- a/zsh-llm-suggestions-ollama.py
+++ b/zsh-llm-suggestions-ollama.py
@@ -122,16 +122,19 @@ def main():
     shell_version = get_shell_version()
     user_is_root = is_user_root()
     cpu_arch = get_cpu_architecture()
-    hostname, ip_address = get_network_info()
     path, home, ld_library_path = get_env_vars()
     username = get_current_username()
-    cpu_usage, memory_usage = get_system_load()
+
+    #Unused
+    #hostname, ip_address = get_network_info()
+    #cpu_usage, memory_usage = get_system_load()
+    #Your system is on {hostname} ({ip_address}), with CPU usage at {cpu_usage}% and memory usage at {memory_usage}%
+
     if mode == 'generate':
-        system_message = f"You are a ZSH shell expert using {os_info} on {cpu_arch}, shell version {shell_version}, running as {'root' if user_is_root else f'non-root as user {username}'}. Your system is on {hostname} ({ip_address}), with CPU usage at {cpu_usage}% and memory usage at {memory_usage}%. Please write a ZSH command that solves my query without any additional explanation."
+        system_message = f"You are a ZSH shell expert using {os_info} on {cpu_arch}, shell version {shell_version}, running as {'root' if user_is_root else f'non-root as user {username}'}. Please write a ZSH command that solves my query without any additional explanation."
     elif mode == 'explain':
-        system_message = f"You are a ZSH shell expert using {os_info} on {cpu_arch}, shell version {shell_version}, running as {'root' if user_is_root else f'non-root as user {username}'}. Your system is on {hostname} ({ip_address}), with CPU usage at {cpu_usage}% and memory usage at {memory_usage}%. Please briefly explain how the given command works. Be as concise as possible using Markdown syntax."
+        system_message = f"You are a ZSH shell expert using {os_info} on {cpu_arch}, shell version {shell_version}, running as {'root' if user_is_root else f'non-root as user {username}'}. Please briefly explain how the given command works. Be as concise as possible using Markdown syntax."
     elif mode == 'freestyle':
-        # system_message = f"You are a ZSH shell expert using {os_info} on {cpu_arch}, shell version {shell_version}, running as {'root' if user_is_root else f'non-root as user {username}'}. Your system is on {hostname} ({ip_address}), with CPU usage at {cpu_usage}% and memory usage at {memory_usage}%."
         # Load the previous context only for freestyle mode
         try:
             with open(os.path.expanduser('~/.ollama_history'), 'r') as file:
@@ -147,7 +150,7 @@ def main():
         except Exception as e:
             print(f"Unexpected error when loading context: {e}")
 
     result, new_context = zsh_llm_suggestions_ollama(buffer, system_message, context)
-
+    print(system_message)
     if mode == 'freestyle':
         # Save the new context only for freestyle mode
         try:

From a4b087cc67593b0d97d989de13bf3abaeb0fce74 Mon Sep 17 00:00:00 2001
From: p1r473
Date: Fri, 3 May 2024 21:55:14 -0400
Subject: [PATCH 18/57] Update zsh-llm-suggestions-ollama.py

---
 zsh-llm-suggestions-ollama.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/zsh-llm-suggestions-ollama.py b/zsh-llm-suggestions-ollama.py
index c456082..09963d5 100644
--- a/zsh-llm-suggestions-ollama.py
+++ b/zsh-llm-suggestions-ollama.py
@@ -150,7 +150,6 @@ def main():
             print(f"Unexpected error when loading context: {e}")
 
     result, new_context = zsh_llm_suggestions_ollama(buffer, system_message, context)
-    print(system_message)
     if mode == 'freestyle':
         # Save the new context only for freestyle mode
         try:
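Patches 14 through 18 above first embed a full host fingerprint in the system prompt and then prune it back: the network and load probes are commented out as unused, and the final prompt keeps only OS, CPU architecture, shell version, and user. For comparison, roughly the same facts the Python helpers collect are one-liners in zsh; a sketch, not the plugin's own code:

```
# Approximate zsh equivalents of the helpers kept after patch 17:
print -r -- "os:    $(uname -s) $(uname -r)"   # get_os_info() fallback path
print -r -- "arch:  $(uname -m)"               # get_cpu_architecture()
print -r -- "shell: $(zsh --version)"          # get_shell_version()
print -r -- "user:  $USER (uid $(id -u))"      # is_user_root(): uid 0 means root
```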
"{MISSING_PREREQUISITES} Install pygments" && pip3 install pygments') + return explanation # Return unhighlighted text if pygments is not installed + +def send_request(prompt, system_message=None, context=None): + server_address = os.environ.get('ZSH_LLM_SUGGESTION_SERVER', 'localhost:11434') + model = os.environ.get('ZSH_LLM_SUGGESTION_MODEL', 'tinyllama') + data = { + "model": model, + "prompt": prompt, + "keep_alive": "30m", + "stream": False } - trap cleanup SIGINT - - echo -ne "\e[?25l" - while [ "$(ps a | awk '{print $1}' | grep $pid)" ]; do - local temp=${spinstr#?} - printf " [%c]" "$spinstr" - local spinstr=$temp${spinstr%"$temp"} - sleep $delay - printf "\b\b\b\b" - done - printf " \b\b\b\b" - - echo -ne "\e[?25h" - trap - SIGINT -} - -zsh_llm_suggestions_run_query() { - local llm="$1" - local query="$2" - local result_file="$3" - local mode="$4" - echo -n "$query" | eval $llm $mode > $result_file -} - -zsh_llm_completion() { - local llm="$1" - local mode="$2" - local query=${BUFFER} - - # Empty prompt, nothing to do - if [[ "$query" == "" ]]; then - return - fi - - # If the prompt is the last suggestions, just get another suggestion for the same query - if [[ "$mode" == "generate" ]]; then - if [[ "$query" == "$ZSH_LLM_SUGGESTIONS_LAST_RESULT" ]]; then - query=$ZSH_LLM_SUGGESTIONS_LAST_QUERY - else - ZSH_LLM_SUGGESTIONS_LAST_QUERY="$query" - fi - fi - - # Temporary file to store the result of the background process - local result_file="/tmp/zsh-llm-suggestions-result" - # Run the actual query in the background (since it's long-running, and so that we can show a spinner) - read < <( zsh_llm_suggestions_run_query $llm $query $result_file $mode & echo $! ) - # Get the PID of the background process - local pid=$REPLY - # Call the spinner function and pass the PID - zsh_llm_suggestions_spinner $pid - - if [[ "$mode" == "generate" ]]; then - # Place the query in the history first - print -s $query - # Replace the current buffer with the result - ZSH_LLM_SUGGESTIONS_LAST_RESULT=$(cat $result_file) - BUFFER="${ZSH_LLM_SUGGESTIONS_LAST_RESULT}" - CURSOR=${#ZSH_LLM_SUGGESTIONS_LAST_RESULT} - fi - if [[ "$mode" == "explain" ]]; then - echo "" - eval "cat $result_file" - echo "" - zle reset-prompt - fi - if [[ "$mode" == "freestyle" ]]; then - # Clear the current line - BUFFER="" - # Optionally, you might want to reset the cursor position - CURSOR=0 - # Now, display the content from the result file and reset the prompt - echo "" - eval "cat '$result_file'" - echo "" - zle reset-prompt fi - fi -} - -SCRIPT_DIR=$( cd -- "$( dirname -- "$0" )" &> /dev/null && pwd ) - -zsh_llm_suggestions_openai() { - zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-openai.py" "generate" -} - -zsh_llm_suggestions_github_copilot() { - zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-github-copilot.py" "generate" -} - -zsh_llm_suggestions_openai_explain() { - zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-openai.py" "explain" -} - -zsh_llm_suggestions_github_copilot_explain() { - zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-github-copilot.py" "explain" -} - -zsh_llm_suggestions_ollama() { - zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-ollama.py" "generate" -} - -zsh_llm_suggestions_ollama_explain() { - zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-ollama.py" "explain" -} - -zsh_llm_suggestions_ollama_freestyle() { - zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-ollama.py" "freestyle" -} - -zle -N zsh_llm_suggestions_openai -zle -N zsh_llm_suggestions_openai_explain -zle -N 
zsh_llm_suggestions_github_copilot -zle -N zsh_llm_suggestions_github_copilot_explain -zle -N zsh_llm_suggestions_ollama -zle -N zsh_llm_suggestions_ollama_explain -zle -N zsh_llm_suggestions_ollama_freestyle + if system_message: + data["system"] = system_message + if context: + data["context"] = context + + try: + response = subprocess.run( + ["curl", "-XPOST", f"http://{server_address}/api/generate", "-H", "Content-Type: application/json", "-d", json.dumps(data)], + capture_output=True, + text=True, + timeout=60 + ) + if response.stdout: + json_response = json.loads(response.stdout) + return json_response.get('response', 'No response received.'), json_response.get('context', None) + else: + return "No response received.", None + except subprocess.TimeoutExpired: + return "Request timed out. Please try again.", None + except json.JSONDecodeError: + return "Failed to decode the response. Please check the API response format.", None + except Exception as e: + return f"Error: {str(e)}", None + +def zsh_llm_suggestions_ollama(prompt, system_message=None, context=None): + try: + result, new_context = send_request(prompt, system_message, context) + return result, new_context + except Exception as e: + print(f"Error: {e}") + return "", None + +def main(): + mode = sys.argv[1] + if mode not in ['generate', 'explain', 'freestyle']: + print("ERROR: something went wrong in zsh-llm-suggestions, please report a bug. Got unknown mode: " + mode) + return + + buffer = sys.stdin.read() + system_message = None + context = None # Initialize context to None + + os_info = get_os_info() + shell_version = get_shell_version() + user_is_root = is_user_root() + cpu_arch = get_cpu_architecture() + path, home, ld_library_path = get_env_vars() + username = get_current_username() + + #Unused + #hostname, ip_address = get_network_info() + #cpu_usage, memory_usage = get_system_load() + #Your system is on {hostname} ({ip_address}), with CPU usage at {cpu_usage}% and memory usage at {memory_usage}% + + if mode == 'generate': + system_message = f"You are a ZSH shell expert using {os_info} on {cpu_arch}, shell version {shell_version}, running as {'root' if user_is_root else f'non-root as user {username}'}. Please write a ZSH command that solves my query without any additional explanation." + elif mode == 'explain': + system_message = f"You are a ZSH shell expert using {os_info} on {cpu_arch}, shell version {shell_version}, running as {'root' if user_is_root else f'non-root as user {username}'}. Please briefly explain how the given command works. Be as concise as possible using Markdown syntax." + elif mode == 'freestyle': + # Load the previous context only for freestyle mode + try: + with open(os.path.expanduser('~/.ollama_history'), 'r') as file: + file_contents = file.read().strip() + if file_contents: + context = json.loads(file_contents) + except FileNotFoundError: + context = None # Handle the case where the file does not exist + except json.JSONDecodeError: + print("Failed to decode JSON from context file. 
It may be corrupt or empty.") + context = None + except Exception as e: + print(f"Unexpected error when loading context: {e}") + + result, new_context = zsh_llm_suggestions_ollama(buffer, system_message, context) + result=filter_non_ascii(result) + if mode == 'freestyle': + # Save the new context only for freestyle mode + try: + with open(os.path.expanduser('~/.ollama_history'), 'w') as file: + if new_context is not None: + file.write(json.dumps(new_context)) + except Exception as e: + print(f"Error saving context: {e}") + + if mode == 'generate': + result = result.replace('```bash', '').replace('```zsh', '').replace('```', '').strip() + print(result) + elif mode == 'explain': + print(highlight_explanation(result)) + elif mode == 'freestyle': + print(result) + +if __name__ == '__main__': + main() From 2ab16a0d5f9e14052bd608f3ac3c5fab67f044ac Mon Sep 17 00:00:00 2001 From: p1r473 Date: Sat, 4 May 2024 01:33:29 -0400 Subject: [PATCH 20/57] Update zsh-llm-suggestions.zsh --- zsh-llm-suggestions.zsh | 1 + 1 file changed, 1 insertion(+) diff --git a/zsh-llm-suggestions.zsh b/zsh-llm-suggestions.zsh index 21686dc..3b64611 100644 --- a/zsh-llm-suggestions.zsh +++ b/zsh-llm-suggestions.zsh @@ -151,6 +151,7 @@ def main(): result, new_context = zsh_llm_suggestions_ollama(buffer, system_message, context) result=filter_non_ascii(result) + new_context=filter_non_ascii(new_context) if mode == 'freestyle': # Save the new context only for freestyle mode try: From 86fb8d661165341af37bc75b1a929d075a6308bc Mon Sep 17 00:00:00 2001 From: p1r473 Date: Sat, 4 May 2024 01:34:58 -0400 Subject: [PATCH 21/57] Update zsh-llm-suggestions.zsh --- zsh-llm-suggestions.zsh | 1 - 1 file changed, 1 deletion(-) diff --git a/zsh-llm-suggestions.zsh b/zsh-llm-suggestions.zsh index 3b64611..21686dc 100644 --- a/zsh-llm-suggestions.zsh +++ b/zsh-llm-suggestions.zsh @@ -151,7 +151,6 @@ def main(): result, new_context = zsh_llm_suggestions_ollama(buffer, system_message, context) result=filter_non_ascii(result) - new_context=filter_non_ascii(new_context) if mode == 'freestyle': # Save the new context only for freestyle mode try: From 1e011576c0095dd5f94ff3f57a08c737c7811082 Mon Sep 17 00:00:00 2001 From: p1r473 Date: Sat, 4 May 2024 02:49:58 -0400 Subject: [PATCH 22/57] Update zsh-llm-suggestions.zsh --- zsh-llm-suggestions.zsh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/zsh-llm-suggestions.zsh b/zsh-llm-suggestions.zsh index 21686dc..951d0d0 100644 --- a/zsh-llm-suggestions.zsh +++ b/zsh-llm-suggestions.zsh @@ -143,12 +143,13 @@ def main(): context = json.loads(file_contents) except FileNotFoundError: context = None # Handle the case where the file does not exist + #system_message = "" except json.JSONDecodeError: print("Failed to decode JSON from context file. 
It may be corrupt or empty.") context = None except Exception as e: print(f"Unexpected error when loading context: {e}") - + print(system_message) result, new_context = zsh_llm_suggestions_ollama(buffer, system_message, context) result=filter_non_ascii(result) if mode == 'freestyle': From e43f89ca3d364280d45ceb4b89326bb6495564dd Mon Sep 17 00:00:00 2001 From: p1r473 Date: Sat, 4 May 2024 02:50:42 -0400 Subject: [PATCH 23/57] Update zsh-llm-suggestions.zsh --- zsh-llm-suggestions.zsh | 1 - 1 file changed, 1 deletion(-) diff --git a/zsh-llm-suggestions.zsh b/zsh-llm-suggestions.zsh index 951d0d0..cdce387 100644 --- a/zsh-llm-suggestions.zsh +++ b/zsh-llm-suggestions.zsh @@ -149,7 +149,6 @@ def main(): context = None except Exception as e: print(f"Unexpected error when loading context: {e}") - print(system_message) result, new_context = zsh_llm_suggestions_ollama(buffer, system_message, context) result=filter_non_ascii(result) if mode == 'freestyle': From d292c66777cf0e679e32be8a75fd272c0c73db56 Mon Sep 17 00:00:00 2001 From: p1r473 Date: Sat, 4 May 2024 03:02:40 -0400 Subject: [PATCH 24/57] Update zsh-llm-suggestions.zsh --- zsh-llm-suggestions.zsh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/zsh-llm-suggestions.zsh b/zsh-llm-suggestions.zsh index cdce387..a69a62b 100644 --- a/zsh-llm-suggestions.zsh +++ b/zsh-llm-suggestions.zsh @@ -124,6 +124,7 @@ def main(): cpu_arch = get_cpu_architecture() path, home, ld_library_path = get_env_vars() username = get_current_username() + freestyle_system_message = os.environ.get('OLLAMA_FREESTYLE_SYSTEM_MESSAGE') #Unused #hostname, ip_address = get_network_info() @@ -143,7 +144,8 @@ def main(): context = json.loads(file_contents) except FileNotFoundError: context = None # Handle the case where the file does not exist - #system_message = "" + if freestyle_system_message is not None: + system_message = freestyle_system_message except json.JSONDecodeError: print("Failed to decode JSON from context file. It may be corrupt or empty.") context = None From efd18efdad29c385205c3a19fe48d43731be9c80 Mon Sep 17 00:00:00 2001 From: p1r473 Date: Sat, 4 May 2024 03:15:03 -0400 Subject: [PATCH 25/57] Update zsh-llm-suggestions.zsh --- zsh-llm-suggestions.zsh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zsh-llm-suggestions.zsh b/zsh-llm-suggestions.zsh index a69a62b..f1618c2 100644 --- a/zsh-llm-suggestions.zsh +++ b/zsh-llm-suggestions.zsh @@ -144,7 +144,7 @@ def main(): context = json.loads(file_contents) except FileNotFoundError: context = None # Handle the case where the file does not exist - if freestyle_system_message is not None: + if freestyle_system_message: system_message = freestyle_system_message except json.JSONDecodeError: print("Failed to decode JSON from context file. 
It may be corrupt or empty.") From 8d65db68338aa20e5834e38ea8ad33dea69a6d92 Mon Sep 17 00:00:00 2001 From: p1r473 Date: Mon, 6 May 2024 01:58:43 -0400 Subject: [PATCH 26/57] Update zsh-llm-suggestions.zsh --- zsh-llm-suggestions.zsh | 298 +++++++++++++++++----------------------- 1 file changed, 125 insertions(+), 173 deletions(-) diff --git a/zsh-llm-suggestions.zsh b/zsh-llm-suggestions.zsh index f1618c2..55b87a0 100644 --- a/zsh-llm-suggestions.zsh +++ b/zsh-llm-suggestions.zsh @@ -1,174 +1,126 @@ -#!/usr/bin/env python3 -import sys -import subprocess -import json -import os -import platform -import distro -import subprocess -import os -import socket -import psutil - -def get_system_load(): - cpu_usage = psutil.cpu_percent(interval=1) - memory_usage = psutil.virtual_memory().percent - return cpu_usage, memory_usage - -def get_shell_version(): - result = subprocess.run(["zsh", "--version"], capture_output=True, text=True) - return result.stdout.strip() - -def is_user_root(): - return os.geteuid() == 0 - -def get_cpu_architecture(): - return platform.machine() - -def get_network_info(): - hostname = socket.gethostname() - ip_address = socket.gethostbyname(hostname) - return hostname, ip_address - -def get_env_vars(): - path = os.getenv('PATH') - home = os.getenv('HOME') - ld_library_path = os.getenv('LD_LIBRARY_PATH') - return path, home, ld_library_path - -def get_current_username(): - return os.environ.get('USER', os.environ.get('USERNAME', 'Unknown User')) - -def get_os_info(): - try: - # This will work on Linux distributions with the distro module installed - os_id = distro.id() - os_version = distro.version() - os_name = distro.name() - return f"{os_name} ({os_id} {os_version})".strip() - except ModuleNotFoundError: - # Fallback for non-Linux platforms - system = platform.system() - version = platform.version() - return f"{system} {version}".strip() - -def filter_non_ascii(text): - return ''.join(char for char in text if ord(char) < 128) - -MISSING_PREREQUISITES = "zsh-llm-suggestions missing prerequisites:" - -def highlight_explanation(explanation): - try: - import pygments - from pygments.lexers import MarkdownLexer - from pygments.formatters import TerminalFormatter - return pygments.highlight(explanation, MarkdownLexer(), TerminalFormatter(style='material')) - except ImportError: - print(f'echo "{MISSING_PREREQUISITES} Install pygments" && pip3 install pygments') - return explanation # Return unhighlighted text if pygments is not installed - -def send_request(prompt, system_message=None, context=None): - server_address = os.environ.get('ZSH_LLM_SUGGESTION_SERVER', 'localhost:11434') - model = os.environ.get('ZSH_LLM_SUGGESTION_MODEL', 'tinyllama') - data = { - "model": model, - "prompt": prompt, - "keep_alive": "30m", - "stream": False + +zsh_llm_suggestions_spinner() { + local pid=$1 + local delay=0.1 + local spinstr='|/-\' + + cleanup() { + kill $pid + echo -ne "\e[?25h" } - if system_message: - data["system"] = system_message - if context: - data["context"] = context - - try: - response = subprocess.run( - ["curl", "-XPOST", f"http://{server_address}/api/generate", "-H", "Content-Type: application/json", "-d", json.dumps(data)], - capture_output=True, - text=True, - timeout=60 - ) - if response.stdout: - json_response = json.loads(response.stdout) - return json_response.get('response', 'No response received.'), json_response.get('context', None) - else: - return "No response received.", None - except subprocess.TimeoutExpired: - return "Request timed out. 
Please try again.", None - except json.JSONDecodeError: - return "Failed to decode the response. Please check the API response format.", None - except Exception as e: - return f"Error: {str(e)}", None - -def zsh_llm_suggestions_ollama(prompt, system_message=None, context=None): - try: - result, new_context = send_request(prompt, system_message, context) - return result, new_context - except Exception as e: - print(f"Error: {e}") - return "", None - -def main(): - mode = sys.argv[1] - if mode not in ['generate', 'explain', 'freestyle']: - print("ERROR: something went wrong in zsh-llm-suggestions, please report a bug. Got unknown mode: " + mode) - return - - buffer = sys.stdin.read() - system_message = None - context = None # Initialize context to None - - os_info = get_os_info() - shell_version = get_shell_version() - user_is_root = is_user_root() - cpu_arch = get_cpu_architecture() - path, home, ld_library_path = get_env_vars() - username = get_current_username() - freestyle_system_message = os.environ.get('OLLAMA_FREESTYLE_SYSTEM_MESSAGE') - - #Unused - #hostname, ip_address = get_network_info() - #cpu_usage, memory_usage = get_system_load() - #Your system is on {hostname} ({ip_address}), with CPU usage at {cpu_usage}% and memory usage at {memory_usage}% - - if mode == 'generate': - system_message = f"You are a ZSH shell expert using {os_info} on {cpu_arch}, shell version {shell_version}, running as {'root' if user_is_root else f'non-root as user {username}'}. Please write a ZSH command that solves my query without any additional explanation." - elif mode == 'explain': - system_message = f"You are a ZSH shell expert using {os_info} on {cpu_arch}, shell version {shell_version}, running as {'root' if user_is_root else f'non-root as user {username}'}. Please briefly explain how the given command works. Be as concise as possible using Markdown syntax." - elif mode == 'freestyle': - # Load the previous context only for freestyle mode - try: - with open(os.path.expanduser('~/.ollama_history'), 'r') as file: - file_contents = file.read().strip() - if file_contents: - context = json.loads(file_contents) - except FileNotFoundError: - context = None # Handle the case where the file does not exist - if freestyle_system_message: - system_message = freestyle_system_message - except json.JSONDecodeError: - print("Failed to decode JSON from context file. 
It may be corrupt or empty.") - context = None - except Exception as e: - print(f"Unexpected error when loading context: {e}") - result, new_context = zsh_llm_suggestions_ollama(buffer, system_message, context) - result=filter_non_ascii(result) - if mode == 'freestyle': - # Save the new context only for freestyle mode - try: - with open(os.path.expanduser('~/.ollama_history'), 'w') as file: - if new_context is not None: - file.write(json.dumps(new_context)) - except Exception as e: - print(f"Error saving context: {e}") - - if mode == 'generate': - result = result.replace('```bash', '').replace('```zsh', '').replace('```', '').strip() - print(result) - elif mode == 'explain': - print(highlight_explanation(result)) - elif mode == 'freestyle': - print(result) - -if __name__ == '__main__': - main() + trap cleanup SIGINT + + echo -ne "\e[?25l" + while [ "$(ps a | awk '{print $1}' | grep $pid)" ]; do + local temp=${spinstr#?} + printf " [%c]" "$spinstr" + local spinstr=$temp${spinstr%"$temp"} + sleep $delay + printf "\b\b\b\b" + done + printf " \b\b\b\b" + + echo -ne "\e[?25h" + trap - SIGINT +} + +zsh_llm_suggestions_run_query() { + local llm="$1" + local query="$2" + local result_file="$3" + local mode="$4" + echo -n "$query" | eval $llm $mode > $result_file +} + +zsh_llm_completion() { + local llm="$1" + local mode="$2" + local query=${BUFFER} + + # Empty prompt, nothing to do + if [[ "$query" == "" ]]; then + return + fi + + # If the prompt is the last suggestions, just get another suggestion for the same query + if [[ "$mode" == "generate" ]]; then + if [[ "$query" == "$ZSH_LLM_SUGGESTIONS_LAST_RESULT" ]]; then + query=$ZSH_LLM_SUGGESTIONS_LAST_QUERY + else + ZSH_LLM_SUGGESTIONS_LAST_QUERY="$query" + fi + fi + + # Temporary file to store the result of the background process + local result_file="/tmp/zsh-llm-suggestions-result" + # Run the actual query in the background (since it's long-running, and so that we can show a spinner) + read < <( zsh_llm_suggestions_run_query $llm $query $result_file $mode & echo $! 
) + # Get the PID of the background process + local pid=$REPLY + # Call the spinner function and pass the PID + zsh_llm_suggestions_spinner $pid + + if [[ "$mode" == "generate" ]]; then + # Place the query in the history first + print -s $query + # Replace the current buffer with the result + ZSH_LLM_SUGGESTIONS_LAST_RESULT=$(cat $result_file) + BUFFER="${ZSH_LLM_SUGGESTIONS_LAST_RESULT}" + CURSOR=${#ZSH_LLM_SUGGESTIONS_LAST_RESULT} + fi + if [[ "$mode" == "explain" ]]; then + echo "" + eval "cat $result_file" + echo "" + zle reset-prompt + fi + if [[ "$mode" == "freestyle" ]]; then + # Clear the current line + BUFFER="" + # Optionally, you might want to reset the cursor position + CURSOR=0 + # Now, display the content from the result file and reset the prompt + echo "" + eval "cat '$result_file'" + echo "" + zle reset-prompt + fi +} + +SCRIPT_DIR=$( cd -- "$( dirname -- "$0" )" &> /dev/null && pwd ) + +zsh_llm_suggestions_openai() { + zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-openai.py" "generate" +} + +zsh_llm_suggestions_github_copilot() { + zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-github-copilot.py" "generate" +} + +zsh_llm_suggestions_openai_explain() { + zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-openai.py" "explain" +} + +zsh_llm_suggestions_github_copilot_explain() { + zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-github-copilot.py" "explain" +} + +zsh_llm_suggestions_ollama() { + zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-ollama.py" "generate" +} + +zsh_llm_suggestions_ollama_explain() { + zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-ollama.py" "explain" +} + +zsh_llm_suggestions_ollama_freestyle() { + zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-ollama.py" "freestyle" +} + +zle -N zsh_llm_suggestions_openai +zle -N zsh_llm_suggestions_openai_explain +zle -N zsh_llm_suggestions_github_copilot +zle -N zsh_llm_suggestions_github_copilot_explain +zle -N zsh_llm_suggestions_ollama +zle -N zsh_llm_suggestions_ollama_explain +zle -N zsh_llm_suggestions_ollama_freestyle From 8b3f688e859d4afaacfce186c6ad46f94b92b81c Mon Sep 17 00:00:00 2001 From: p1r473 Date: Sun, 12 May 2024 01:09:47 -0400 Subject: [PATCH 27/57] Update zsh-llm-suggestions-ollama.py --- zsh-llm-suggestions-ollama.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/zsh-llm-suggestions-ollama.py b/zsh-llm-suggestions-ollama.py index 09963d5..f1618c2 100644 --- a/zsh-llm-suggestions-ollama.py +++ b/zsh-llm-suggestions-ollama.py @@ -122,9 +122,10 @@ def main(): shell_version = get_shell_version() user_is_root = is_user_root() cpu_arch = get_cpu_architecture() - path, home, ld_library_path = get_env_vars() + path, home, ld_library_path = get_env_vars() username = get_current_username() - + freestyle_system_message = os.environ.get('OLLAMA_FREESTYLE_SYSTEM_MESSAGE') + #Unused #hostname, ip_address = get_network_info() #cpu_usage, memory_usage = get_system_load() @@ -143,13 +144,15 @@ def main(): context = json.loads(file_contents) except FileNotFoundError: context = None # Handle the case where the file does not exist + if freestyle_system_message: + system_message = freestyle_system_message except json.JSONDecodeError: print("Failed to decode JSON from context file. 
It may be corrupt or empty.") context = None except Exception as e: print(f"Unexpected error when loading context: {e}") - result, new_context = zsh_llm_suggestions_ollama(buffer, system_message, context) + result=filter_non_ascii(result) if mode == 'freestyle': # Save the new context only for freestyle mode try: From be1ce72424b30cf7100656e09b248f138d598d7f Mon Sep 17 00:00:00 2001 From: p1r473 Date: Sun, 12 May 2024 01:10:10 -0400 Subject: [PATCH 28/57] Update zsh-llm-suggestions.plugin.zsh --- zsh-llm-suggestions.plugin.zsh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/zsh-llm-suggestions.plugin.zsh b/zsh-llm-suggestions.plugin.zsh index a9975ae..947d6e3 100644 --- a/zsh-llm-suggestions.plugin.zsh +++ b/zsh-llm-suggestions.plugin.zsh @@ -1 +1,4 @@ source ${0:A:h}/zsh-llm-suggestions.zsh +bindkey '^o' zsh_llm_suggestions_ollama +bindkey '^[^o' zsh_llm_suggestions_ollama_explain +bindkey '^p' zsh_llm_suggestions_ollama_freestyle From fe60191173c13e11154b07450e7f39a98f5ac007 Mon Sep 17 00:00:00 2001 From: p1r473 Date: Sun, 12 May 2024 01:10:33 -0400 Subject: [PATCH 29/57] Update zsh-llm-suggestions.zsh --- zsh-llm-suggestions.zsh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/zsh-llm-suggestions.zsh b/zsh-llm-suggestions.zsh index 55b87a0..fb8f42b 100644 --- a/zsh-llm-suggestions.zsh +++ b/zsh-llm-suggestions.zsh @@ -9,7 +9,7 @@ zsh_llm_suggestions_spinner() { echo -ne "\e[?25h" } trap cleanup SIGINT - + echo -ne "\e[?25l" while [ "$(ps a | awk '{print $1}' | grep $pid)" ]; do local temp=${spinstr#?} @@ -59,7 +59,7 @@ zsh_llm_completion() { local pid=$REPLY # Call the spinner function and pass the PID zsh_llm_suggestions_spinner $pid - + if [[ "$mode" == "generate" ]]; then # Place the query in the history first print -s $query From 33662620ad37fc1e99789ea04a26065c28dcfe8b Mon Sep 17 00:00:00 2001 From: p1r473 Date: Mon, 13 May 2024 12:20:54 -0400 Subject: [PATCH 30/57] Update zsh-llm-suggestions.zsh --- zsh-llm-suggestions.zsh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zsh-llm-suggestions.zsh b/zsh-llm-suggestions.zsh index fb8f42b..14d76dd 100644 --- a/zsh-llm-suggestions.zsh +++ b/zsh-llm-suggestions.zsh @@ -87,7 +87,7 @@ zsh_llm_completion() { fi } -SCRIPT_DIR=$( cd -- "$( dirname -- "$0" )" &> /dev/null && pwd ) +local SCRIPT_DIR=$( cd -- "$( dirname -- "$0" )" &> /dev/null && pwd ) zsh_llm_suggestions_openai() { zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-openai.py" "generate" From cafd6686e46e3a311f17d36af21e83bdc57ab325 Mon Sep 17 00:00:00 2001 From: p1r473 Date: Mon, 13 May 2024 13:31:39 -0400 Subject: [PATCH 31/57] Update zsh-llm-suggestions.zsh --- zsh-llm-suggestions.zsh | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/zsh-llm-suggestions.zsh b/zsh-llm-suggestions.zsh index 14d76dd..0802d35 100644 --- a/zsh-llm-suggestions.zsh +++ b/zsh-llm-suggestions.zsh @@ -87,8 +87,6 @@ zsh_llm_completion() { fi } -local SCRIPT_DIR=$( cd -- "$( dirname -- "$0" )" &> /dev/null && pwd ) - zsh_llm_suggestions_openai() { zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-openai.py" "generate" } @@ -114,7 +112,7 @@ zsh_llm_suggestions_ollama_explain() { } zsh_llm_suggestions_ollama_freestyle() { - zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-ollama.py" "freestyle" + zsh_llm_completion "$(dirname "${(%):-%x}")/zsh-llm-suggestions-ollama.py" "freestyle" } zle -N zsh_llm_suggestions_openai From cfac04c18658e15d3ee2e9892cac87578c627df7 Mon Sep 17 00:00:00 2001 From: p1r473 Date: Tue, 14 May 2024 21:39:40 -0400 
Subject: [PATCH 32/57] Update zsh-llm-suggestions.zsh --- zsh-llm-suggestions.zsh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/zsh-llm-suggestions.zsh b/zsh-llm-suggestions.zsh index 0802d35..2f3e4ae 100644 --- a/zsh-llm-suggestions.zsh +++ b/zsh-llm-suggestions.zsh @@ -88,27 +88,27 @@ zsh_llm_completion() { } zsh_llm_suggestions_openai() { - zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-openai.py" "generate" + zsh_llm_completion "$(dirname "${(%):-%x}")/zsh-llm-suggestions-openai.py" "generate" } zsh_llm_suggestions_github_copilot() { - zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-github-copilot.py" "generate" + zsh_llm_completion "$(dirname "${(%):-%x}")/zsh-llm-suggestions-github-copilot.py" "generate" } zsh_llm_suggestions_openai_explain() { - zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-openai.py" "explain" + zsh_llm_completion "$(dirname "${(%):-%x}")/zsh-llm-suggestions-openai.py" "explain" } zsh_llm_suggestions_github_copilot_explain() { - zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-github-copilot.py" "explain" + zsh_llm_completion "$(dirname "${(%):-%x}")/zsh-llm-suggestions-github-copilot.py" "explain" } zsh_llm_suggestions_ollama() { - zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-ollama.py" "generate" + zsh_llm_completion "$(dirname "${(%):-%x}")/zsh-llm-suggestions-ollama.py" "generate" } zsh_llm_suggestions_ollama_explain() { - zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-ollama.py" "explain" + zsh_llm_completion "$(dirname "${(%):-%x}")/zsh-llm-suggestions-ollama.py" "explain" } zsh_llm_suggestions_ollama_freestyle() { From 2f8607fb5d0cab38ad55f8642bb8211a387b8c8b Mon Sep 17 00:00:00 2001 From: P1r473 Date: Tue, 14 May 2024 21:40:11 -0400 Subject: [PATCH 33/57] hi --- zsh-llm-suggestions-ollama.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 zsh-llm-suggestions-ollama.py diff --git a/zsh-llm-suggestions-ollama.py b/zsh-llm-suggestions-ollama.py old mode 100644 new mode 100755 From e4def8eb268fe2bbe51b2d5a1e6d9bc77d4d6aa1 Mon Sep 17 00:00:00 2001 From: p1r473 Date: Wed, 15 May 2024 04:08:15 -0400 Subject: [PATCH 34/57] Update zsh-llm-suggestions.zsh --- zsh-llm-suggestions.zsh | 307 ++++++++++++++++++++++++---------------- 1 file changed, 184 insertions(+), 123 deletions(-) diff --git a/zsh-llm-suggestions.zsh b/zsh-llm-suggestions.zsh index 2f3e4ae..0fabbf9 100644 --- a/zsh-llm-suggestions.zsh +++ b/zsh-llm-suggestions.zsh @@ -1,124 +1,185 @@ - -zsh_llm_suggestions_spinner() { - local pid=$1 - local delay=0.1 - local spinstr='|/-\' - - cleanup() { - kill $pid - echo -ne "\e[?25h" +#!/usr/bin/env python3 + +# User-configurable variables +context_window_size = 4096 +curl_max_time = 3 +curl_connect_timeout = 3 + + +import sys +import subprocess +import json +import os +import platform +import distro +import subprocess +import os +import socket +import psutil + +def get_system_load(): + cpu_usage = psutil.cpu_percent(interval=1) + memory_usage = psutil.virtual_memory().percent + return cpu_usage, memory_usage + +def get_shell_version(): + result = subprocess.run(["zsh", "--version"], capture_output=True, text=True) + return result.stdout.strip() + +def is_user_root(): + return os.geteuid() == 0 + +def get_cpu_architecture(): + return platform.machine() + +def get_network_info(): + hostname = socket.gethostname() + ip_address = socket.gethostbyname(hostname) + return hostname, ip_address + +def get_env_vars(): + path = os.getenv('PATH') + home = os.getenv('HOME') + 
ld_library_path = os.getenv('LD_LIBRARY_PATH') + return path, home, ld_library_path + +def get_current_username(): + return os.environ.get('USER', os.environ.get('USERNAME', 'Unknown User')) + +def get_os_info(): + try: + # This will work on Linux distributions with the distro module installed + os_id = distro.id() + os_version = distro.version() + os_name = distro.name() + return f"{os_name} ({os_id} {os_version})".strip() + except ModuleNotFoundError: + # Fallback for non-Linux platforms + system = platform.system() + version = platform.version() + return f"{system} {version}".strip() + +def filter_non_ascii(text): + return ''.join(char for char in text if ord(char) < 128) + +MISSING_PREREQUISITES = "zsh-llm-suggestions missing prerequisites:" + +def highlight_explanation(explanation): + try: + import pygments + from pygments.lexers import MarkdownLexer + from pygments.formatters import TerminalFormatter + return pygments.highlight(explanation, MarkdownLexer(), TerminalFormatter(style='material')) + except ImportError: + print(f'echo "{MISSING_PREREQUISITES} Install pygments" && pip3 install pygments') + return explanation # Return unhighlighted text if pygments is not installed + +def send_request(prompt, system_message=None, context=None): + server_address = os.environ.get('ZSH_LLM_SUGGESTION_SERVER', 'localhost:11434') + model = os.environ.get('ZSH_LLM_SUGGESTION_MODEL', 'tinyllama') + data = { + "model": model, + "prompt": prompt, + "keep_alive": "30m", + "stream": False, + "options": { + "num_ctx": context_window_size + } } - trap cleanup SIGINT - - echo -ne "\e[?25l" - while [ "$(ps a | awk '{print $1}' | grep $pid)" ]; do - local temp=${spinstr#?} - printf " [%c]" "$spinstr" - local spinstr=$temp${spinstr%"$temp"} - sleep $delay - printf "\b\b\b\b" - done - printf " \b\b\b\b" - - echo -ne "\e[?25h" - trap - SIGINT -} - -zsh_llm_suggestions_run_query() { - local llm="$1" - local query="$2" - local result_file="$3" - local mode="$4" - echo -n "$query" | eval $llm $mode > $result_file -} - -zsh_llm_completion() { - local llm="$1" - local mode="$2" - local query=${BUFFER} - - # Empty prompt, nothing to do - if [[ "$query" == "" ]]; then - return - fi - - # If the prompt is the last suggestions, just get another suggestion for the same query - if [[ "$mode" == "generate" ]]; then - if [[ "$query" == "$ZSH_LLM_SUGGESTIONS_LAST_RESULT" ]]; then - query=$ZSH_LLM_SUGGESTIONS_LAST_QUERY - else - ZSH_LLM_SUGGESTIONS_LAST_QUERY="$query" - fi - fi - - # Temporary file to store the result of the background process - local result_file="/tmp/zsh-llm-suggestions-result" - # Run the actual query in the background (since it's long-running, and so that we can show a spinner) - read < <( zsh_llm_suggestions_run_query $llm $query $result_file $mode & echo $! 
) - # Get the PID of the background process - local pid=$REPLY - # Call the spinner function and pass the PID - zsh_llm_suggestions_spinner $pid - - if [[ "$mode" == "generate" ]]; then - # Place the query in the history first - print -s $query - # Replace the current buffer with the result - ZSH_LLM_SUGGESTIONS_LAST_RESULT=$(cat $result_file) - BUFFER="${ZSH_LLM_SUGGESTIONS_LAST_RESULT}" - CURSOR=${#ZSH_LLM_SUGGESTIONS_LAST_RESULT} - fi - if [[ "$mode" == "explain" ]]; then - echo "" - eval "cat $result_file" - echo "" - zle reset-prompt - fi - if [[ "$mode" == "freestyle" ]]; then - # Clear the current line - BUFFER="" - # Optionally, you might want to reset the cursor position - CURSOR=0 - # Now, display the content from the result file and reset the prompt - echo "" - eval "cat '$result_file'" - echo "" - zle reset-prompt - fi -} - -zsh_llm_suggestions_openai() { - zsh_llm_completion "$(dirname "${(%):-%x}")/zsh-llm-suggestions-openai.py" "generate" -} - -zsh_llm_suggestions_github_copilot() { - zsh_llm_completion "$(dirname "${(%):-%x}")/zsh-llm-suggestions-github-copilot.py" "generate" -} - -zsh_llm_suggestions_openai_explain() { - zsh_llm_completion "$(dirname "${(%):-%x}")/zsh-llm-suggestions-openai.py" "explain" -} - -zsh_llm_suggestions_github_copilot_explain() { - zsh_llm_completion "$(dirname "${(%):-%x}")/zsh-llm-suggestions-github-copilot.py" "explain" -} - -zsh_llm_suggestions_ollama() { - zsh_llm_completion "$(dirname "${(%):-%x}")/zsh-llm-suggestions-ollama.py" "generate" -} - -zsh_llm_suggestions_ollama_explain() { - zsh_llm_completion "$(dirname "${(%):-%x}")/zsh-llm-suggestions-ollama.py" "explain" -} - -zsh_llm_suggestions_ollama_freestyle() { - zsh_llm_completion "$(dirname "${(%):-%x}")/zsh-llm-suggestions-ollama.py" "freestyle" -} - -zle -N zsh_llm_suggestions_openai -zle -N zsh_llm_suggestions_openai_explain -zle -N zsh_llm_suggestions_github_copilot -zle -N zsh_llm_suggestions_github_copilot_explain -zle -N zsh_llm_suggestions_ollama -zle -N zsh_llm_suggestions_ollama_explain -zle -N zsh_llm_suggestions_ollama_freestyle + if system_message: + data["system"] = system_message + if context: + data["context"] = context + + try: + response = subprocess.run( + ["curl", "-sSk", "--ipv4", "--max-time", str(curl_max_time), "--connect-timeout", str(curl_connect_timeout), + "-XPOST", f"http://{server_address}/api/generate", "-H", "Content-Type: application/json", "-d", json.dumps(data)], + capture_output=True, + text=True, + timeout=60 + ) + if response.stdout: + json_response = json.loads(response.stdout) + return json_response.get('response', 'No response received.'), json_response.get('context', None) + else: + return "No response received.", None + except subprocess.TimeoutExpired: + return "Request timed out. Please try again.", None + except json.JSONDecodeError: + return "Failed to decode the response. Please check the API response format.", None + except Exception as e: + return f"Error: {str(e)}", None + +def zsh_llm_suggestions_ollama(prompt, system_message=None, context=None): + try: + result, new_context = send_request(prompt, system_message, context) + return result, new_context + except Exception as e: + print(f"Error: {e}") + return "", None + +def main(): + mode = sys.argv[1] + if mode not in ['generate', 'explain', 'freestyle']: + print("ERROR: something went wrong in zsh-llm-suggestions, please report a bug. 
Got unknown mode: " + mode) + return + + buffer = sys.stdin.read() + system_message = None + context = None # Initialize context to None + + os_info = get_os_info() + shell_version = get_shell_version() + user_is_root = is_user_root() + cpu_arch = get_cpu_architecture() + path, home, ld_library_path = get_env_vars() + username = get_current_username() + freestyle_system_message = os.environ.get('OLLAMA_FREESTYLE_SYSTEM_MESSAGE') + + #Unused + #hostname, ip_address = get_network_info() + #cpu_usage, memory_usage = get_system_load() + #Your system is on {hostname} ({ip_address}), with CPU usage at {cpu_usage}% and memory usage at {memory_usage}% + + if mode == 'generate': + system_message = f"You are a ZSH shell expert using {os_info} on {cpu_arch}, shell version {shell_version}, running as {'root' if user_is_root else f'non-root as user {username}'}. Please write a ZSH command that solves my query without any additional explanation." + elif mode == 'explain': + system_message = f"You are a ZSH shell expert using {os_info} on {cpu_arch}, shell version {shell_version}, running as {'root' if user_is_root else f'non-root as user {username}'}. Please briefly explain how the given command works. Be as concise as possible using Markdown syntax." + elif mode == 'freestyle': + # Load the previous context only for freestyle mode + try: + with open(os.path.expanduser('~/.ollama_history'), 'r') as file: + file_contents = file.read().strip() + if file_contents: + context = json.loads(file_contents) + except FileNotFoundError: + context = None # Handle the case where the file does not exist + if freestyle_system_message: + system_message = freestyle_system_message + except json.JSONDecodeError: + print("Failed to decode JSON from context file. It may be corrupt or empty.") + context = None + except Exception as e: + print(f"Unexpected error when loading context: {e}") + result, new_context = zsh_llm_suggestions_ollama(buffer, system_message, context) + result=filter_non_ascii(result) + if mode == 'freestyle': + # Save the new context only for freestyle mode + try: + with open(os.path.expanduser('~/.ollama_history'), 'w') as file: + if new_context is not None: + file.write(json.dumps(new_context)) + except Exception as e: + print(f"Error saving context: {e}") + + if mode == 'generate': + result = result.replace('```bash', '').replace('```zsh', '').replace('```', '').strip() + print(result) + elif mode == 'explain': + print(highlight_explanation(result)) + elif mode == 'freestyle': + print(result) + +if __name__ == '__main__': + main() From 2e12c99c7a157b099123a029d0fbb2e4d35864a5 Mon Sep 17 00:00:00 2001 From: p1r473 Date: Wed, 15 May 2024 04:12:37 -0400 Subject: [PATCH 35/57] Update zsh-llm-suggestions.zsh --- zsh-llm-suggestions.zsh | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/zsh-llm-suggestions.zsh b/zsh-llm-suggestions.zsh index 0fabbf9..d8f25b0 100644 --- a/zsh-llm-suggestions.zsh +++ b/zsh-llm-suggestions.zsh @@ -91,14 +91,16 @@ def send_request(prompt, system_message=None, context=None): if context: data["context"] = context + curl_command = [ + "curl", "-sSk", "--ipv4", + "--max-time", str(curl_max_time), + "--connect-timeout", str(curl_connect_timeout), + "-XPOST", f"http://{server_address}/api/generate", + "-H", "Content-Type: application/json", + "-d", json.dumps(data) + ] try: - response = subprocess.run( - ["curl", "-sSk", "--ipv4", "--max-time", str(curl_max_time), "--connect-timeout", str(curl_connect_timeout), - "-XPOST", 
f"http://{server_address}/api/generate", "-H", "Content-Type: application/json", "-d", json.dumps(data)], - capture_output=True, - text=True, - timeout=60 - ) + response = subprocess.run(curl_command, capture_output=True, text=True, timeout=60) if response.stdout: json_response = json.loads(response.stdout) return json_response.get('response', 'No response received.'), json_response.get('context', None) From c9d45d0a24ae05f1b722cb1c3829919257231032 Mon Sep 17 00:00:00 2001 From: p1r473 Date: Wed, 15 May 2024 04:21:28 -0400 Subject: [PATCH 36/57] Update zsh-llm-suggestions.zsh --- zsh-llm-suggestions.zsh | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/zsh-llm-suggestions.zsh b/zsh-llm-suggestions.zsh index d8f25b0..3f9a874 100644 --- a/zsh-llm-suggestions.zsh +++ b/zsh-llm-suggestions.zsh @@ -5,7 +5,6 @@ context_window_size = 4096 curl_max_time = 3 curl_connect_timeout = 3 - import sys import subprocess import json @@ -99,8 +98,15 @@ def send_request(prompt, system_message=None, context=None): "-H", "Content-Type: application/json", "-d", json.dumps(data) ] + try: - response = subprocess.run(curl_command, capture_output=True, text=True, timeout=60) + response = subprocess.run( + curl_command, + capture_output=True, + text=True, + timeout=60 # Overall timeout for the subprocess.run call + ) + if response.stdout: json_response = json.loads(response.stdout) return json_response.get('response', 'No response received.'), json_response.get('context', None) @@ -113,14 +119,6 @@ def send_request(prompt, system_message=None, context=None): except Exception as e: return f"Error: {str(e)}", None -def zsh_llm_suggestions_ollama(prompt, system_message=None, context=None): - try: - result, new_context = send_request(prompt, system_message, context) - return result, new_context - except Exception as e: - print(f"Error: {e}") - return "", None - def main(): mode = sys.argv[1] if mode not in ['generate', 'explain', 'freestyle']: @@ -139,11 +137,6 @@ def main(): username = get_current_username() freestyle_system_message = os.environ.get('OLLAMA_FREESTYLE_SYSTEM_MESSAGE') - #Unused - #hostname, ip_address = get_network_info() - #cpu_usage, memory_usage = get_system_load() - #Your system is on {hostname} ({ip_address}), with CPU usage at {cpu_usage}% and memory usage at {memory_usage}% - if mode == 'generate': system_message = f"You are a ZSH shell expert using {os_info} on {cpu_arch}, shell version {shell_version}, running as {'root' if user_is_root else f'non-root as user {username}'}. Please write a ZSH command that solves my query without any additional explanation." 
elif mode == 'explain': @@ -164,8 +157,15 @@ def main(): context = None except Exception as e: print(f"Unexpected error when loading context: {e}") - result, new_context = zsh_llm_suggestions_ollama(buffer, system_message, context) - result=filter_non_ascii(result) + + try: + result, new_context = send_request(buffer, system_message, context) + except Exception as e: + print(f"Error: {e}") + result, new_context = "", None + + result = filter_non_ascii(result) + if mode == 'freestyle': # Save the new context only for freestyle mode try: From 55cef4cfff10b8649a07bc7f98317ac25d2cda09 Mon Sep 17 00:00:00 2001 From: p1r473 Date: Wed, 15 May 2024 04:24:47 -0400 Subject: [PATCH 37/57] Update zsh-llm-suggestions.zsh --- zsh-llm-suggestions.zsh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/zsh-llm-suggestions.zsh b/zsh-llm-suggestions.zsh index 3f9a874..cf436d6 100644 --- a/zsh-llm-suggestions.zsh +++ b/zsh-llm-suggestions.zsh @@ -2,8 +2,8 @@ # User-configurable variables context_window_size = 4096 -curl_max_time = 3 -curl_connect_timeout = 3 +curl_max_time = 60 +curl_connect_timeout = 60 import sys import subprocess From 6bdac91e1fa3345862d94f53cdba13a9e053ccc2 Mon Sep 17 00:00:00 2001 From: p1r473 Date: Wed, 15 May 2024 04:28:23 -0400 Subject: [PATCH 38/57] Update zsh-llm-suggestions.zsh --- zsh-llm-suggestions.zsh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zsh-llm-suggestions.zsh b/zsh-llm-suggestions.zsh index cf436d6..f5a604d 100644 --- a/zsh-llm-suggestions.zsh +++ b/zsh-llm-suggestions.zsh @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # User-configurable variables -context_window_size = 4096 +context_window_size = 8192 curl_max_time = 60 curl_connect_timeout = 60 From 4021da1efa19e392dcbcb5988c1cde466ad9c2a9 Mon Sep 17 00:00:00 2001 From: p1r473 Date: Fri, 17 May 2024 15:32:17 -0400 Subject: [PATCH 39/57] Update zsh-llm-suggestions.zsh --- zsh-llm-suggestions.zsh | 5 ----- 1 file changed, 5 deletions(-) diff --git a/zsh-llm-suggestions.zsh b/zsh-llm-suggestions.zsh index f5a604d..9c7538c 100644 --- a/zsh-llm-suggestions.zsh +++ b/zsh-llm-suggestions.zsh @@ -1,10 +1,5 @@ #!/usr/bin/env python3 -# User-configurable variables -context_window_size = 8192 -curl_max_time = 60 -curl_connect_timeout = 60 - import sys import subprocess import json From 3a4267fa72f9dc5df7de5ec47b65462a47503fbe Mon Sep 17 00:00:00 2001 From: p1r473 Date: Fri, 17 May 2024 15:38:12 -0400 Subject: [PATCH 40/57] Delete zsh-llm-suggestions-ollama.py --- zsh-llm-suggestions-ollama.py | 174 ---------------------------------- 1 file changed, 174 deletions(-) delete mode 100755 zsh-llm-suggestions-ollama.py diff --git a/zsh-llm-suggestions-ollama.py b/zsh-llm-suggestions-ollama.py deleted file mode 100755 index f1618c2..0000000 --- a/zsh-llm-suggestions-ollama.py +++ /dev/null @@ -1,174 +0,0 @@ -#!/usr/bin/env python3 -import sys -import subprocess -import json -import os -import platform -import distro -import subprocess -import os -import socket -import psutil - -def get_system_load(): - cpu_usage = psutil.cpu_percent(interval=1) - memory_usage = psutil.virtual_memory().percent - return cpu_usage, memory_usage - -def get_shell_version(): - result = subprocess.run(["zsh", "--version"], capture_output=True, text=True) - return result.stdout.strip() - -def is_user_root(): - return os.geteuid() == 0 - -def get_cpu_architecture(): - return platform.machine() - -def get_network_info(): - hostname = socket.gethostname() - ip_address = socket.gethostbyname(hostname) - return hostname, 
ip_address - -def get_env_vars(): - path = os.getenv('PATH') - home = os.getenv('HOME') - ld_library_path = os.getenv('LD_LIBRARY_PATH') - return path, home, ld_library_path - -def get_current_username(): - return os.environ.get('USER', os.environ.get('USERNAME', 'Unknown User')) - -def get_os_info(): - try: - # This will work on Linux distributions with the distro module installed - os_id = distro.id() - os_version = distro.version() - os_name = distro.name() - return f"{os_name} ({os_id} {os_version})".strip() - except ModuleNotFoundError: - # Fallback for non-Linux platforms - system = platform.system() - version = platform.version() - return f"{system} {version}".strip() - -def filter_non_ascii(text): - return ''.join(char for char in text if ord(char) < 128) - -MISSING_PREREQUISITES = "zsh-llm-suggestions missing prerequisites:" - -def highlight_explanation(explanation): - try: - import pygments - from pygments.lexers import MarkdownLexer - from pygments.formatters import TerminalFormatter - return pygments.highlight(explanation, MarkdownLexer(), TerminalFormatter(style='material')) - except ImportError: - print(f'echo "{MISSING_PREREQUISITES} Install pygments" && pip3 install pygments') - return explanation # Return unhighlighted text if pygments is not installed - -def send_request(prompt, system_message=None, context=None): - server_address = os.environ.get('ZSH_LLM_SUGGESTION_SERVER', 'localhost:11434') - model = os.environ.get('ZSH_LLM_SUGGESTION_MODEL', 'tinyllama') - data = { - "model": model, - "prompt": prompt, - "keep_alive": "30m", - "stream": False - } - if system_message: - data["system"] = system_message - if context: - data["context"] = context - - try: - response = subprocess.run( - ["curl", "-XPOST", f"http://{server_address}/api/generate", "-H", "Content-Type: application/json", "-d", json.dumps(data)], - capture_output=True, - text=True, - timeout=60 - ) - if response.stdout: - json_response = json.loads(response.stdout) - return json_response.get('response', 'No response received.'), json_response.get('context', None) - else: - return "No response received.", None - except subprocess.TimeoutExpired: - return "Request timed out. Please try again.", None - except json.JSONDecodeError: - return "Failed to decode the response. Please check the API response format.", None - except Exception as e: - return f"Error: {str(e)}", None - -def zsh_llm_suggestions_ollama(prompt, system_message=None, context=None): - try: - result, new_context = send_request(prompt, system_message, context) - return result, new_context - except Exception as e: - print(f"Error: {e}") - return "", None - -def main(): - mode = sys.argv[1] - if mode not in ['generate', 'explain', 'freestyle']: - print("ERROR: something went wrong in zsh-llm-suggestions, please report a bug. 
Got unknown mode: " + mode) - return - - buffer = sys.stdin.read() - system_message = None - context = None # Initialize context to None - - os_info = get_os_info() - shell_version = get_shell_version() - user_is_root = is_user_root() - cpu_arch = get_cpu_architecture() - path, home, ld_library_path = get_env_vars() - username = get_current_username() - freestyle_system_message = os.environ.get('OLLAMA_FREESTYLE_SYSTEM_MESSAGE') - - #Unused - #hostname, ip_address = get_network_info() - #cpu_usage, memory_usage = get_system_load() - #Your system is on {hostname} ({ip_address}), with CPU usage at {cpu_usage}% and memory usage at {memory_usage}% - - if mode == 'generate': - system_message = f"You are a ZSH shell expert using {os_info} on {cpu_arch}, shell version {shell_version}, running as {'root' if user_is_root else f'non-root as user {username}'}. Please write a ZSH command that solves my query without any additional explanation." - elif mode == 'explain': - system_message = f"You are a ZSH shell expert using {os_info} on {cpu_arch}, shell version {shell_version}, running as {'root' if user_is_root else f'non-root as user {username}'}. Please briefly explain how the given command works. Be as concise as possible using Markdown syntax." - elif mode == 'freestyle': - # Load the previous context only for freestyle mode - try: - with open(os.path.expanduser('~/.ollama_history'), 'r') as file: - file_contents = file.read().strip() - if file_contents: - context = json.loads(file_contents) - except FileNotFoundError: - context = None # Handle the case where the file does not exist - if freestyle_system_message: - system_message = freestyle_system_message - except json.JSONDecodeError: - print("Failed to decode JSON from context file. It may be corrupt or empty.") - context = None - except Exception as e: - print(f"Unexpected error when loading context: {e}") - result, new_context = zsh_llm_suggestions_ollama(buffer, system_message, context) - result=filter_non_ascii(result) - if mode == 'freestyle': - # Save the new context only for freestyle mode - try: - with open(os.path.expanduser('~/.ollama_history'), 'w') as file: - if new_context is not None: - file.write(json.dumps(new_context)) - except Exception as e: - print(f"Error saving context: {e}") - - if mode == 'generate': - result = result.replace('```bash', '').replace('```zsh', '').replace('```', '').strip() - print(result) - elif mode == 'explain': - print(highlight_explanation(result)) - elif mode == 'freestyle': - print(result) - -if __name__ == '__main__': - main() From 0710612e31630dfc92b491462266c803ef99c62e Mon Sep 17 00:00:00 2001 From: p1r473 Date: Fri, 17 May 2024 15:39:04 -0400 Subject: [PATCH 41/57] Create zsh-llm-suggestions-ollama.py --- zsh-llm-suggestions-ollama.py | 174 ++++++++++++++++++++++++++++++++++ 1 file changed, 174 insertions(+) create mode 100644 zsh-llm-suggestions-ollama.py diff --git a/zsh-llm-suggestions-ollama.py b/zsh-llm-suggestions-ollama.py new file mode 100644 index 0000000..f1618c2 --- /dev/null +++ b/zsh-llm-suggestions-ollama.py @@ -0,0 +1,174 @@ +#!/usr/bin/env python3 +import sys +import subprocess +import json +import os +import platform +import distro +import subprocess +import os +import socket +import psutil + +def get_system_load(): + cpu_usage = psutil.cpu_percent(interval=1) + memory_usage = psutil.virtual_memory().percent + return cpu_usage, memory_usage + +def get_shell_version(): + result = subprocess.run(["zsh", "--version"], capture_output=True, text=True) + return result.stdout.strip() 
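+# Each helper below collects one host fact; main() folds most of them into
+# the system message sent to Ollama (a few are gathered but marked unused).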
+ +def is_user_root(): + return os.geteuid() == 0 + +def get_cpu_architecture(): + return platform.machine() + +def get_network_info(): + hostname = socket.gethostname() + ip_address = socket.gethostbyname(hostname) + return hostname, ip_address + +def get_env_vars(): + path = os.getenv('PATH') + home = os.getenv('HOME') + ld_library_path = os.getenv('LD_LIBRARY_PATH') + return path, home, ld_library_path + +def get_current_username(): + return os.environ.get('USER', os.environ.get('USERNAME', 'Unknown User')) + +def get_os_info(): + try: + # This will work on Linux distributions with the distro module installed + os_id = distro.id() + os_version = distro.version() + os_name = distro.name() + return f"{os_name} ({os_id} {os_version})".strip() + except ModuleNotFoundError: + # Fallback for non-Linux platforms + system = platform.system() + version = platform.version() + return f"{system} {version}".strip() + +def filter_non_ascii(text): + return ''.join(char for char in text if ord(char) < 128) + +MISSING_PREREQUISITES = "zsh-llm-suggestions missing prerequisites:" + +def highlight_explanation(explanation): + try: + import pygments + from pygments.lexers import MarkdownLexer + from pygments.formatters import TerminalFormatter + return pygments.highlight(explanation, MarkdownLexer(), TerminalFormatter(style='material')) + except ImportError: + print(f'echo "{MISSING_PREREQUISITES} Install pygments" && pip3 install pygments') + return explanation # Return unhighlighted text if pygments is not installed + +def send_request(prompt, system_message=None, context=None): + server_address = os.environ.get('ZSH_LLM_SUGGESTION_SERVER', 'localhost:11434') + model = os.environ.get('ZSH_LLM_SUGGESTION_MODEL', 'tinyllama') + data = { + "model": model, + "prompt": prompt, + "keep_alive": "30m", + "stream": False + } + if system_message: + data["system"] = system_message + if context: + data["context"] = context + + try: + response = subprocess.run( + ["curl", "-XPOST", f"http://{server_address}/api/generate", "-H", "Content-Type: application/json", "-d", json.dumps(data)], + capture_output=True, + text=True, + timeout=60 + ) + if response.stdout: + json_response = json.loads(response.stdout) + return json_response.get('response', 'No response received.'), json_response.get('context', None) + else: + return "No response received.", None + except subprocess.TimeoutExpired: + return "Request timed out. Please try again.", None + except json.JSONDecodeError: + return "Failed to decode the response. Please check the API response format.", None + except Exception as e: + return f"Error: {str(e)}", None + +def zsh_llm_suggestions_ollama(prompt, system_message=None, context=None): + try: + result, new_context = send_request(prompt, system_message, context) + return result, new_context + except Exception as e: + print(f"Error: {e}") + return "", None + +def main(): + mode = sys.argv[1] + if mode not in ['generate', 'explain', 'freestyle']: + print("ERROR: something went wrong in zsh-llm-suggestions, please report a bug. 
Got unknown mode: " + mode) + return + + buffer = sys.stdin.read() + system_message = None + context = None # Initialize context to None + + os_info = get_os_info() + shell_version = get_shell_version() + user_is_root = is_user_root() + cpu_arch = get_cpu_architecture() + path, home, ld_library_path = get_env_vars() + username = get_current_username() + freestyle_system_message = os.environ.get('OLLAMA_FREESTYLE_SYSTEM_MESSAGE') + + #Unused + #hostname, ip_address = get_network_info() + #cpu_usage, memory_usage = get_system_load() + #Your system is on {hostname} ({ip_address}), with CPU usage at {cpu_usage}% and memory usage at {memory_usage}% + + if mode == 'generate': + system_message = f"You are a ZSH shell expert using {os_info} on {cpu_arch}, shell version {shell_version}, running as {'root' if user_is_root else f'non-root as user {username}'}. Please write a ZSH command that solves my query without any additional explanation." + elif mode == 'explain': + system_message = f"You are a ZSH shell expert using {os_info} on {cpu_arch}, shell version {shell_version}, running as {'root' if user_is_root else f'non-root as user {username}'}. Please briefly explain how the given command works. Be as concise as possible using Markdown syntax." + elif mode == 'freestyle': + # Load the previous context only for freestyle mode + try: + with open(os.path.expanduser('~/.ollama_history'), 'r') as file: + file_contents = file.read().strip() + if file_contents: + context = json.loads(file_contents) + except FileNotFoundError: + context = None # Handle the case where the file does not exist + if freestyle_system_message: + system_message = freestyle_system_message + except json.JSONDecodeError: + print("Failed to decode JSON from context file. It may be corrupt or empty.") + context = None + except Exception as e: + print(f"Unexpected error when loading context: {e}") + result, new_context = zsh_llm_suggestions_ollama(buffer, system_message, context) + result=filter_non_ascii(result) + if mode == 'freestyle': + # Save the new context only for freestyle mode + try: + with open(os.path.expanduser('~/.ollama_history'), 'w') as file: + if new_context is not None: + file.write(json.dumps(new_context)) + except Exception as e: + print(f"Error saving context: {e}") + + if mode == 'generate': + result = result.replace('```bash', '').replace('```zsh', '').replace('```', '').strip() + print(result) + elif mode == 'explain': + print(highlight_explanation(result)) + elif mode == 'freestyle': + print(result) + +if __name__ == '__main__': + main() From ca5e76c7d8c317c2295c6b1d3d0e34091636306d Mon Sep 17 00:00:00 2001 From: p1r473 Date: Fri, 17 May 2024 15:41:08 -0400 Subject: [PATCH 42/57] Create zsh-llm-suggestions.plugin.zsh --- zsh-llm-suggestions.plugin.zsh | 1 + 1 file changed, 1 insertion(+) create mode 100644 zsh-llm-suggestions.plugin.zsh diff --git a/zsh-llm-suggestions.plugin.zsh b/zsh-llm-suggestions.plugin.zsh new file mode 100644 index 0000000..a9975ae --- /dev/null +++ b/zsh-llm-suggestions.plugin.zsh @@ -0,0 +1 @@ +source ${0:A:h}/zsh-llm-suggestions.zsh From e0f015dd7eefdbf1a3bc90b0bf0a52ce5c7f32e5 Mon Sep 17 00:00:00 2001 From: p1r473 Date: Fri, 17 May 2024 15:42:42 -0400 Subject: [PATCH 43/57] Update zsh-llm-suggestions.zsh --- zsh-llm-suggestions.zsh | 32 ++++++++++++++++++++++++++++---- 1 file changed, 28 insertions(+), 4 deletions(-) diff --git a/zsh-llm-suggestions.zsh b/zsh-llm-suggestions.zsh index cfcb0dc..0802d35 100644 --- a/zsh-llm-suggestions.zsh +++ b/zsh-llm-suggestions.zsh @@ -9,7 +9,7 
@@ zsh_llm_suggestions_spinner() {
     echo -ne "\e[?25h"
   }
   trap cleanup SIGINT
-  
+
   echo -ne "\e[?25l"
   while [ "$(ps a | awk '{print $1}' | grep $pid)" ]; do
     local temp=${spinstr#?}
@@ -59,7 +59,7 @@ zsh_llm_completion() {
   local pid=$REPLY
   # Call the spinner function and pass the PID
   zsh_llm_suggestions_spinner $pid
-  
+
   if [[ "$mode" == "generate" ]]; then
     # Place the query in the history first
     print -s $query
@@ -74,10 +74,19 @@ zsh_llm_completion() {
     echo ""
     zle reset-prompt
   fi
+  if [[ "$mode" == "freestyle" ]]; then
+    # Clear the current line
+    BUFFER=""
+    # Optionally, you might want to reset the cursor position
+    CURSOR=0
+    # Now, display the content from the result file and reset the prompt
+    echo ""
+    eval "cat '$result_file'"
+    echo ""
+    zle reset-prompt
+  fi
 }
 
-SCRIPT_DIR=$( cd -- "$( dirname -- "$0" )" &> /dev/null && pwd )
-
 zsh_llm_suggestions_openai() {
   zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-openai.py" "generate"
 }
@@ -94,7 +103,22 @@ zsh_llm_suggestions_github_copilot_explain() {
   zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-github-copilot.py" "explain"
 }
 
+zsh_llm_suggestions_ollama() {
+  zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-ollama.py" "generate"
+}
+
+zsh_llm_suggestions_ollama_explain() {
+  zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-ollama.py" "explain"
+}
+
+zsh_llm_suggestions_ollama_freestyle() {
+  zsh_llm_completion "$(dirname "${(%):-%x}")/zsh-llm-suggestions-ollama.py" "freestyle"
+}
+
 zle -N zsh_llm_suggestions_openai
 zle -N zsh_llm_suggestions_openai_explain
 zle -N zsh_llm_suggestions_github_copilot
 zle -N zsh_llm_suggestions_github_copilot_explain
+zle -N zsh_llm_suggestions_ollama
+zle -N zsh_llm_suggestions_ollama_explain
+zle -N zsh_llm_suggestions_ollama_freestyle

From 3b3fc24f14238e18d6a25755692a8f46dbb86ecf Mon Sep 17 00:00:00 2001
From: p1r473
Date: Fri, 17 May 2024 15:43:18 -0400
Subject: [PATCH 44/57] Update README.md

---
 README.md | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 6d6dc59..336241a 100644
--- a/README.md
+++ b/README.md
@@ -29,6 +29,9 @@ bindkey '^o' zsh_llm_suggestions_openai # Ctrl + O to have OpenAI suggest a comm
 bindkey '^[^o' zsh_llm_suggestions_openai_explain # Ctrl + alt + O to have OpenAI explain a command
 bindkey '^p' zsh_llm_suggestions_github_copilot # Ctrl + P to have GitHub Copilot suggest a command given a English description
 bindkey '^[^p' zsh_llm_suggestions_github_copilot_explain # Ctrl + alt + P to have GitHub Copilot explain a command
+bindkey '^q' zsh_llm_suggestions_ollama # Ctrl + Q to have Ollama suggest a command given an English description
+bindkey '^[^q' zsh_llm_suggestions_ollama_explain # Ctrl + alt + Q to have Ollama explain a command
+bindkey '^r' zsh_llm_suggestions_ollama_freestyle # Ctrl + R to send a regular prompt to Ollama
 ```
 
 Make sure `python3` is installed.
@@ -83,6 +86,10 @@ If you typed a command (or maybe the LLM generated one) that you don't understan
 ctrl+alt+O to have OpenAI explain the command in English, or hit ctrl+alt+P to
 have GitHub Copilot explain it.
 
+### Freestyle
+
+Send a regular, free-form LLM prompt to Ollama.
+
 ## Warning
 
 There are some risks using `zsh-llm-suggestions`:
@@ -92,6 +99,7 @@ There are some risks using `zsh-llm-suggestions`:
 
 ## Supported LLMs
 
-Right now, two LLMs are supported:
+Right now, three LLMs are supported:
 1. GitHub Copilot (via GitHub CLI). Requires a GitHub Copilot subscription.
 2. OpenAI. Requires an OpenAI API key. Currently uses `gpt-4-1106-preview`.
+3. 
Ollama - can use any model From 31730d34af1d9ac411f27ca85b94279ba9141115 Mon Sep 17 00:00:00 2001 From: p1r473 Date: Fri, 17 May 2024 15:46:37 -0400 Subject: [PATCH 45/57] Update zsh-llm-suggestions.zsh --- zsh-llm-suggestions.zsh | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/zsh-llm-suggestions.zsh b/zsh-llm-suggestions.zsh index 2cfe55a..1bc72ba 100644 --- a/zsh-llm-suggestions.zsh +++ b/zsh-llm-suggestions.zsh @@ -87,27 +87,27 @@ zsh_llm_completion() { } zsh_llm_suggestions_openai() { - zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-openai.py" "generate" + zsh_llm_completion "$(dirname "${(%):-%x}")/zsh-llm-suggestions-openai.py" "generate" } zsh_llm_suggestions_github_copilot() { - zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-github-copilot.py" "generate" + zsh_llm_completion "$(dirname "${(%):-%x}")/zsh-llm-suggestions-github-copilot.py" "generate" } zsh_llm_suggestions_openai_explain() { - zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-openai.py" "explain" + zsh_llm_completion "$(dirname "${(%):-%x}")/zsh-llm-suggestions-openai.py" "explain" } zsh_llm_suggestions_github_copilot_explain() { - zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-github-copilot.py" "explain" + zsh_llm_completion "$(dirname "${(%):-%x}")/zsh-llm-suggestions-github-copilot.py" "explain" } zsh_llm_suggestions_ollama() { - zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-ollama.py" "generate" + zsh_llm_completion "$(dirname "${(%):-%x}")/zsh-llm-suggestions-ollama.py" "generate" } zsh_llm_suggestions_ollama_explain() { - zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-ollama.py" "explain" + zsh_llm_completion "$(dirname "${(%):-%x}")/zsh-llm-suggestions-ollama.py" "explain" } zsh_llm_suggestions_ollama_freestyle() { @@ -120,4 +120,4 @@ zle -N zsh_llm_suggestions_github_copilot zle -N zsh_llm_suggestions_github_copilot_explain zle -N zsh_llm_suggestions_ollama zle -N zsh_llm_suggestions_ollama_explain -zle -N zsh_llm_suggestions_ollama_freestyle \ No newline at end of file +zle -N zsh_llm_suggestions_ollama_freestyle From 632c2fedb362c218806210a423edc8783712d8a3 Mon Sep 17 00:00:00 2001 From: p1r473 Date: Fri, 17 May 2024 15:49:35 -0400 Subject: [PATCH 46/57] Add files via upload From 4d84bd8d95bf8501bfeb1a248d60bd75997e0e78 Mon Sep 17 00:00:00 2001 From: p1r473 Date: Wed, 29 May 2024 18:26:11 -0400 Subject: [PATCH 47/57] Update zsh-llm-suggestions-ollama.py --- zsh-llm-suggestions-ollama.py | 27 +++++++++++++++++++++++---- 1 file changed, 23 insertions(+), 4 deletions(-) diff --git a/zsh-llm-suggestions-ollama.py b/zsh-llm-suggestions-ollama.py index f1618c2..41aea7f 100644 --- a/zsh-llm-suggestions-ollama.py +++ b/zsh-llm-suggestions-ollama.py @@ -10,6 +10,16 @@ import socket import psutil +def highlight_explanation(explanation): + try: + import pygments + from pygments.lexers import MarkdownLexer + from pygments.formatters import TerminalFormatter + return pygments.highlight(explanation, MarkdownLexer(), TerminalFormatter(style='material')) + except ImportError: + print(f'echo "{MISSING_PREREQUISITES} Install pygments" && pip3 install pygments') + return explanation # Return unhighlighted text if pygments is not installed + def get_system_load(): cpu_usage = psutil.cpu_percent(interval=1) memory_usage = psutil.virtual_memory().percent @@ -109,14 +119,23 @@ def zsh_llm_suggestions_ollama(prompt, system_message=None, context=None): return "", None def main(): - mode = sys.argv[1] + mode = sys.argv[1] if len(sys.argv) > 1 else 'freestyle' if mode not 
in ['generate', 'explain', 'freestyle']: - print("ERROR: something went wrong in zsh-llm-suggestions, please report a bug. Got unknown mode: " + mode) + print("ERROR: something went wrong in zsh-llm-suggestions, please report a bug. Got unknown mode: " + str(mode)) return - buffer = sys.stdin.read() + if not sys.stdin.isatty(): + input_content = sys.stdin.read().strip() # Read piped input directly + if len(sys.argv) > 2: + prompt = sys.argv[2] + buffer = f"{prompt}\n{input_content}" + else: + buffer = input_content + else: + buffer = sys.stdin.read().strip() if not sys.stdin.isatty() else input("Enter your prompt: ").strip() + system_message = None - context = None # Initialize context to None + context = None os_info = get_os_info() shell_version = get_shell_version() From e8db1e3a965d55b5248072ad377d5d4b8132e3b7 Mon Sep 17 00:00:00 2001 From: p1r473 Date: Wed, 29 May 2024 18:36:36 -0400 Subject: [PATCH 48/57] Update zsh-llm-suggestions-ollama.py --- zsh-llm-suggestions-ollama.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/zsh-llm-suggestions-ollama.py b/zsh-llm-suggestions-ollama.py index 41aea7f..7be4a09 100644 --- a/zsh-llm-suggestions-ollama.py +++ b/zsh-llm-suggestions-ollama.py @@ -119,20 +119,22 @@ def zsh_llm_suggestions_ollama(prompt, system_message=None, context=None): return "", None def main(): - mode = sys.argv[1] if len(sys.argv) > 1 else 'freestyle' - if mode not in ['generate', 'explain', 'freestyle']: - print("ERROR: something went wrong in zsh-llm-suggestions, please report a bug. Got unknown mode: " + str(mode)) - return + if len(sys.argv) > 1 and sys.argv[1] in ['generate', 'explain', 'freestyle']: + mode = sys.argv[1] + prompt_index = 2 + else: + mode = 'freestyle' # Default mode + prompt_index = 1 if not sys.stdin.isatty(): input_content = sys.stdin.read().strip() # Read piped input directly - if len(sys.argv) > 2: - prompt = sys.argv[2] + if len(sys.argv) > prompt_index: + prompt = ' '.join(sys.argv[prompt_index:]) buffer = f"{prompt}\n{input_content}" else: buffer = input_content else: - buffer = sys.stdin.read().strip() if not sys.stdin.isatty() else input("Enter your prompt: ").strip() + buffer = ' '.join(sys.argv[prompt_index:]) if len(sys.argv) > prompt_index else input("Enter your prompt: ").strip() system_message = None context = None From 56ee2e63e29731be55a00af4f2d1179b8a7dfc31 Mon Sep 17 00:00:00 2001 From: p1r473 Date: Wed, 29 May 2024 18:48:41 -0400 Subject: [PATCH 49/57] Update zsh-llm-suggestions-ollama.py --- zsh-llm-suggestions-ollama.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/zsh-llm-suggestions-ollama.py b/zsh-llm-suggestions-ollama.py index 7be4a09..48adfdf 100644 --- a/zsh-llm-suggestions-ollama.py +++ b/zsh-llm-suggestions-ollama.py @@ -134,7 +134,10 @@ def main(): else: buffer = input_content else: - buffer = ' '.join(sys.argv[prompt_index:]) if len(sys.argv) > prompt_index else input("Enter your prompt: ").strip() + if len(sys.argv) > prompt_index: + buffer = ' '.join(sys.argv[prompt_index:]) + else: + buffer = input("Enter your prompt: ").strip() system_message = None context = None From d8da3d72eca5624dea25b0eafca169955d21c117 Mon Sep 17 00:00:00 2001 From: p1r473 Date: Wed, 29 May 2024 19:10:57 -0400 Subject: [PATCH 50/57] Update zsh-llm-suggestions-ollama.py --- zsh-llm-suggestions-ollama.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/zsh-llm-suggestions-ollama.py b/zsh-llm-suggestions-ollama.py index 48adfdf..9043568 100644 --- 
a/zsh-llm-suggestions-ollama.py +++ b/zsh-llm-suggestions-ollama.py @@ -9,6 +9,14 @@ import os import socket import psutil +import pygments + +def colorize_output(text): + try: + return highlight(text, BashLexer(), TerminalFormatter()) + except ImportError: + print(f'echo "{MISSING_PREREQUISITES} Install pygments" && pip3 install pygments') + return text # Return unhighlighted text if pygments is not installed def highlight_explanation(explanation): try: @@ -194,5 +202,6 @@ def main(): elif mode == 'freestyle': print(result) + if __name__ == '__main__': main() From 187618fad9d105199e27d4bd0834d1520eed91e1 Mon Sep 17 00:00:00 2001 From: p1r473 Date: Wed, 29 May 2024 19:42:16 -0400 Subject: [PATCH 51/57] Update zsh-llm-suggestions-ollama.py --- zsh-llm-suggestions-ollama.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/zsh-llm-suggestions-ollama.py b/zsh-llm-suggestions-ollama.py index 9043568..f6de942 100644 --- a/zsh-llm-suggestions-ollama.py +++ b/zsh-llm-suggestions-ollama.py @@ -10,6 +10,10 @@ import socket import psutil import pygments +from pygments import highlight +from pygments.lexers import BashLexer +from pygments.lexers import MarkdownLexer +from pygments.formatters import TerminalFormatter def colorize_output(text): try: @@ -20,13 +24,10 @@ def colorize_output(text): def highlight_explanation(explanation): try: - import pygments - from pygments.lexers import MarkdownLexer - from pygments.formatters import TerminalFormatter return pygments.highlight(explanation, MarkdownLexer(), TerminalFormatter(style='material')) except ImportError: print(f'echo "{MISSING_PREREQUISITES} Install pygments" && pip3 install pygments') - return explanation # Return unhighlighted text if pygments is not installed + return explanation def get_system_load(): cpu_usage = psutil.cpu_percent(interval=1) @@ -196,11 +197,11 @@ def main(): if mode == 'generate': result = result.replace('```bash', '').replace('```zsh', '').replace('```', '').strip() - print(result) + print(colorize_output(result)) elif mode == 'explain': print(highlight_explanation(result)) elif mode == 'freestyle': - print(result) + print(colorize_output(result)) if __name__ == '__main__': From 60adfdb4525312903e9dbb4cd1436978822950a8 Mon Sep 17 00:00:00 2001 From: p1r473 Date: Wed, 29 May 2024 19:46:56 -0400 Subject: [PATCH 52/57] Update zsh-llm-suggestions-ollama.py --- zsh-llm-suggestions-ollama.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/zsh-llm-suggestions-ollama.py b/zsh-llm-suggestions-ollama.py index f6de942..36930c0 100644 --- a/zsh-llm-suggestions-ollama.py +++ b/zsh-llm-suggestions-ollama.py @@ -10,13 +10,12 @@ import socket import psutil import pygments -from pygments import highlight -from pygments.lexers import BashLexer -from pygments.lexers import MarkdownLexer -from pygments.formatters import TerminalFormatter def colorize_output(text): try: + import pygments + from pygments.lexers import BashLexer + from pygments.formatters import TerminalFormatter return highlight(text, BashLexer(), TerminalFormatter()) except ImportError: print(f'echo "{MISSING_PREREQUISITES} Install pygments" && pip3 install pygments') @@ -24,10 +23,13 @@ def colorize_output(text): def highlight_explanation(explanation): try: + import pygments + from pygments.lexers import MarkdownLexer + from pygments.formatters import TerminalFormatter return pygments.highlight(explanation, MarkdownLexer(), TerminalFormatter(style='material')) except ImportError: print(f'echo 
"{MISSING_PREREQUISITES} Install pygments" && pip3 install pygments') - return explanation + return explanation # Return unhighlighted text if pygments is not installed def get_system_load(): cpu_usage = psutil.cpu_percent(interval=1) @@ -92,7 +94,7 @@ def send_request(prompt, system_message=None, context=None): data = { "model": model, "prompt": prompt, - "keep_alive": "30m", + "keep_alive": "30m", "stream": False } if system_message: @@ -197,12 +199,11 @@ def main(): if mode == 'generate': result = result.replace('```bash', '').replace('```zsh', '').replace('```', '').strip() - print(colorize_output(result)) + print(result) elif mode == 'explain': print(highlight_explanation(result)) elif mode == 'freestyle': - print(colorize_output(result)) - + print(result) if __name__ == '__main__': main() From ff55241786e4f4c9be9f60d15e2f2288f857ad5f Mon Sep 17 00:00:00 2001 From: p1r473 Date: Sun, 15 Dec 2024 19:10:20 -0500 Subject: [PATCH 53/57] Update zsh-llm-suggestions-ollama.py --- zsh-llm-suggestions-ollama.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/zsh-llm-suggestions-ollama.py b/zsh-llm-suggestions-ollama.py index 36930c0..a942aea 100644 --- a/zsh-llm-suggestions-ollama.py +++ b/zsh-llm-suggestions-ollama.py @@ -13,9 +13,6 @@ def colorize_output(text): try: - import pygments - from pygments.lexers import BashLexer - from pygments.formatters import TerminalFormatter return highlight(text, BashLexer(), TerminalFormatter()) except ImportError: print(f'echo "{MISSING_PREREQUISITES} Install pygments" && pip3 install pygments') @@ -91,12 +88,16 @@ def highlight_explanation(explanation): def send_request(prompt, system_message=None, context=None): server_address = os.environ.get('ZSH_LLM_SUGGESTION_SERVER', 'localhost:11434') model = os.environ.get('ZSH_LLM_SUGGESTION_MODEL', 'tinyllama') + num_ctx = os.environ.get('ZSH_LLM_SUGGESTION_NUM_CTX', '2056') + data = { "model": model, "prompt": prompt, - "keep_alive": "30m", + "num_ctx": int(num_ctx), + "keep_alive": "30m", "stream": False } + if system_message: data["system"] = system_message if context: @@ -205,5 +206,6 @@ def main(): elif mode == 'freestyle': print(result) + if __name__ == '__main__': main() From 33d301e4e840f3ca43d73de76f7ced0135aa5115 Mon Sep 17 00:00:00 2001 From: p1r473 Date: Sun, 15 Dec 2024 19:11:41 -0500 Subject: [PATCH 54/57] Update zsh-llm-suggestions-ollama.py --- zsh-llm-suggestions-ollama.py | 1 - 1 file changed, 1 deletion(-) diff --git a/zsh-llm-suggestions-ollama.py b/zsh-llm-suggestions-ollama.py index a942aea..9b3df1c 100644 --- a/zsh-llm-suggestions-ollama.py +++ b/zsh-llm-suggestions-ollama.py @@ -20,7 +20,6 @@ def colorize_output(text): def highlight_explanation(explanation): try: - import pygments from pygments.lexers import MarkdownLexer from pygments.formatters import TerminalFormatter return pygments.highlight(explanation, MarkdownLexer(), TerminalFormatter(style='material')) From 1f015992d668e0c7e0ef96fe43ab14b1dd512267 Mon Sep 17 00:00:00 2001 From: p1r473 Date: Sun, 15 Dec 2024 19:22:38 -0500 Subject: [PATCH 55/57] Update zsh-llm-suggestions-ollama.py --- zsh-llm-suggestions-ollama.py | 60 ++++++++++++++++++++++++++++------- 1 file changed, 49 insertions(+), 11 deletions(-) diff --git a/zsh-llm-suggestions-ollama.py b/zsh-llm-suggestions-ollama.py index 9b3df1c..5614ed8 100644 --- a/zsh-llm-suggestions-ollama.py +++ b/zsh-llm-suggestions-ollama.py @@ -86,20 +86,51 @@ def highlight_explanation(explanation): def send_request(prompt, system_message=None, context=None): 
---
 zsh-llm-suggestions-ollama.py | 60 ++++++++++++++++++++++++++++-------
 1 file changed, 49 insertions(+), 11 deletions(-)

diff --git a/zsh-llm-suggestions-ollama.py b/zsh-llm-suggestions-ollama.py
index 9b3df1c..5614ed8 100644
--- a/zsh-llm-suggestions-ollama.py
+++ b/zsh-llm-suggestions-ollama.py
@@ -86,20 +86,51 @@ def highlight_explanation(explanation):
 
 def send_request(prompt, system_message=None, context=None):
     server_address = os.environ.get('ZSH_LLM_SUGGESTION_SERVER', 'localhost:11434')
-    model = os.environ.get('ZSH_LLM_SUGGESTION_MODEL', 'tinyllama')
-    num_ctx = os.environ.get('ZSH_LLM_SUGGESTION_NUM_CTX', '2056')
+    model = os.environ.get('ZSH_LLM_SUGGESTION_MODEL', 'llama3.3') # Default model
 
+    # Base request data
     data = {
         "model": model,
         "prompt": prompt,
-        "num_ctx": int(num_ctx),
         "keep_alive": "30m",
         "stream": False
     }
 
-    if system_message:
-        data["system"] = system_message
-    if context:
+    # Optional parameters: Check and add if set
+    optional_params = [
+        "num_ctx",           # Context length
+        "temperature",       # Sampling randomness
+        "top_k",             # Top-K sampling
+        "top_p",             # Nucleus sampling
+        "repeat_penalty",    # Penalizes repetition
+        "frequency_penalty", # Penalizes frequent tokens
+        "presence_penalty",  # Penalizes new tokens based on presence
+        "mirostat",          # Mirostat sampling
+        "mirostat_tau",      # Mirostat parameter
+        "mirostat_eta",      # Mirostat parameter
+        "stop"               # Stop sequences
+    ]
+
+    for param in optional_params:
+        value = os.environ.get(f"ZSH_LLM_SUGGESTION_{param.upper()}")
+        if value is not None:
+            # Convert numeric values appropriately
+            if param in ["temperature", "top_p", "repeat_penalty", "frequency_penalty", "presence_penalty", "mirostat_tau", "mirostat_eta"]:
+                data[param] = float(value)
+            elif param in ["top_k", "mirostat", "num_ctx"]:
+                data[param] = int(value)
+            elif param == "stop":
+                # Handle stop sequences as a JSON array if provided
+                try:
+                    data[param] = json.loads(value) if value.startswith("[") else value
+                except json.JSONDecodeError:
+                    print(f"Invalid JSON format for {param}, skipping.")
+            else:
+                data[param] = value
+
+    # Handle context if enabled
+    use_context = os.environ.get('ZSH_LLM_SUGGESTION_USE_CONTEXT', 'true').lower() == 'true'
+    if context and use_context:
         data["context"] = context
 
     try:
@@ -109,17 +140,24 @@ ...
             text=True,
             timeout=60
         )
+        if response.returncode != 0:
+            return f"Curl error: {response.stderr.strip()}", None
+
         if response.stdout:
-            json_response = json.loads(response.stdout)
-            return json_response.get('response', 'No response received.'), json_response.get('context', None)
+            try:
+                json_response = json.loads(response.stdout)
+                if "error" in json_response:
+                    return f"Error from server: {json_response['error']}", None
+                return json_response.get('response', 'No response received.'), json_response.get('context', None)
+            except json.JSONDecodeError:
+                return f"Invalid JSON response: {response.stdout.strip()}", None
         else:
            return "No response received.", None
+
     except subprocess.TimeoutExpired:
        return "Request timed out. Please try again.", None
-    except json.JSONDecodeError:
-        return "Failed to decode the response. Please check the API response format.", None
     except Exception as e:
-        return f"Error: {str(e)}", None
+        return f"Unexpected error: {str(e)}", None
 
 def zsh_llm_suggestions_ollama(prompt, system_message=None, context=None):
     try:

From 71941d8f91693d5b02495b5ffb6bed1fc1dd17fe Mon Sep 17 00:00:00 2001
From: p1r473
Date: Sun, 15 Dec 2024 19:23:24 -0500
Subject: [PATCH 56/57] Update zsh-llm-suggestions-ollama.py
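
Restore tinyllama as the default model, matching the default in place
before PATCH 55. A different locally pulled model can still be selected
per shell, for example:

    export ZSH_LLM_SUGGESTION_MODEL=llama3.3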
---
 zsh-llm-suggestions-ollama.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/zsh-llm-suggestions-ollama.py b/zsh-llm-suggestions-ollama.py
index 5614ed8..d1486b2 100644
--- a/zsh-llm-suggestions-ollama.py
+++ b/zsh-llm-suggestions-ollama.py
@@ -86,7 +86,7 @@ def highlight_explanation(explanation):
 
 def send_request(prompt, system_message=None, context=None):
     server_address = os.environ.get('ZSH_LLM_SUGGESTION_SERVER', 'localhost:11434')
-    model = os.environ.get('ZSH_LLM_SUGGESTION_MODEL', 'llama3.3') # Default model
+    model = os.environ.get('ZSH_LLM_SUGGESTION_MODEL', 'tinyllama') # Default model

From 43e19ba883ce4b8e957bc56f56d33e2b3ff24f79 Mon Sep 17 00:00:00 2001
From: p1r473
Date: Sun, 15 Dec 2024 19:32:03 -0500
Subject: [PATCH 57/57] Update zsh-llm-suggestions-ollama.py
---
 zsh-llm-suggestions-ollama.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/zsh-llm-suggestions-ollama.py b/zsh-llm-suggestions-ollama.py
index d1486b2..cfe6847 100644
--- a/zsh-llm-suggestions-ollama.py
+++ b/zsh-llm-suggestions-ollama.py
@@ -76,7 +76,6 @@ def filter_non_ascii(text):
 
 def highlight_explanation(explanation):
     try:
-        import pygments
         from pygments.lexers import MarkdownLexer
 ...
@@ -133,6 +132,11 @@ def send_request(...):
     if context and use_context:
         data["context"] = context
 
+    debug = os.environ.get('ZSH_LLM_SUGGESTION_DEBUG', 'false').lower() == 'true'
+    if debug:
+        print("Final API Request Payload:")
+        print(json.dumps(data, indent=4)) # Pretty-print the data dictionary for easy debugging
+
     try:
         response = subprocess.run(
             ["curl", "-XPOST", f"http://{server_address}/api/generate", "-H", "Content-Type: application/json", "-d", json.dumps(data)],
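
With the series applied end to end, the exact payload can be inspected
before it leaves the machine. A minimal sketch of the intended check,
assuming the variable names introduced above and a reachable Ollama
server:

    export ZSH_LLM_SUGGESTION_DEBUG=true
    echo 'show disk usage per directory' | python3 zsh-llm-suggestions-ollama.py generate
    # prints "Final API Request Payload:" and the JSON body before curl runs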