From f851895e9a77d9e51bfecb962a1fe0902770f73b Mon Sep 17 00:00:00 2001
From: Bart Trojanowski <bart@jukie.net>
Date: Sun, 26 May 2024 10:29:06 -0400
Subject: [PATCH 1/2] add scripts to support ollama

---
 .gitignore                                    |  1 +
 zsh-llm-suggestions-ollama.explain.modelfile  |  7 ++
 zsh-llm-suggestions-ollama.generate.modelfile |  8 +++
 zsh-llm-suggestions-ollama.py                 | 65 +++++++++++++++++++
 zsh-llm-suggestions.zsh                       | 10 +++
 5 files changed, 91 insertions(+)
 create mode 100644 .gitignore
 create mode 100644 zsh-llm-suggestions-ollama.explain.modelfile
 create mode 100644 zsh-llm-suggestions-ollama.generate.modelfile
 create mode 100755 zsh-llm-suggestions-ollama.py

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..b25c15b
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+*~
diff --git a/zsh-llm-suggestions-ollama.explain.modelfile b/zsh-llm-suggestions-ollama.explain.modelfile
new file mode 100644
index 0000000..f1b1dfc
--- /dev/null
+++ b/zsh-llm-suggestions-ollama.explain.modelfile
@@ -0,0 +1,7 @@
+FROM llama3:latest
+
+PARAMETER temperature 1
+
+SYSTEM """
+You are a zsh shell expert, please briefly explain how the given command works. Be as concise as possible. Use Markdown syntax for formatting.
+"""
diff --git a/zsh-llm-suggestions-ollama.generate.modelfile b/zsh-llm-suggestions-ollama.generate.modelfile
new file mode 100644
index 0000000..8fa9668
--- /dev/null
+++ b/zsh-llm-suggestions-ollama.generate.modelfile
@@ -0,0 +1,8 @@
+FROM llama3:latest
+
+PARAMETER temperature 1
+
+SYSTEM """
+You are a zsh shell expert, please write a ZSH command that solves my problem. You should only output the completed command, no need to include any other explanation.
+Output must begin with ``` and end with ```.
+"""
diff --git a/zsh-llm-suggestions-ollama.py b/zsh-llm-suggestions-ollama.py
new file mode 100755
index 0000000..ee835d9
--- /dev/null
+++ b/zsh-llm-suggestions-ollama.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python3
+
+import sys
+import os
+import subprocess
+
+def highlight_explanation(explanation):
+  try:
+    import pygments
+    from pygments.lexers import MarkdownLexer
+    from pygments.formatters import TerminalFormatter
+    return pygments.highlight(explanation, MarkdownLexer(), TerminalFormatter(style='material'))
+  except ImportError:
+    return explanation
+
+def setup_ollama(name, file):
+  command = ["ollama", "create", name, "--file", file]
+  result = subprocess.run(command, capture_output=True, text=True)
+  if result.returncode == 0:
+    print(f"Successfully generated {name} ollama profile.")
+    return True
+
+  print(f"ERROR: Failed to setup {name} ollama profile.", file=sys.stderr)
+  print(result.stderr, file=sys.stderr)
+  sys.exit(1)
+
+def run_ollama(model, prompt):
+  command = ["ollama", "run", model, prompt]
+  result = subprocess.run(command, capture_output=True, text=True)
+  if result.returncode == 0:
+    return True, result.stdout
+  return False, result.stderr
+
+def main():
+  mode = sys.argv[1] if len(sys.argv)>1 else None
+  if mode == 'setup':
+    setup_ollama("zsh-llm-suggestions-generate", "zsh-llm-suggestions-ollama.generate.modelfile")
+    setup_ollama("zsh-llm-suggestions-explain", "zsh-llm-suggestions-ollama.explain.modelfile")
+    return
+
+  elif mode == 'generate':
+    model = os.environ.get('ZSH_OLLAMA_GENERATE_MODEL') or 'zsh-llm-suggestions-generate'
+
+  elif mode == 'explain':
+    model = os.environ.get('ZSH_OLLAMA_EXPLAIN_MODEL') or 'zsh-llm-suggestions-explain'
+
+  else:
+    print("ERROR: something went wrong in zsh-llm-suggestions, please report a bug. Got unknown mode: " + str(mode), file=sys.stderr)
+    sys.exit(1)
+
+  prompt = sys.stdin.read().strip()
+  ok, result = run_ollama(model, prompt)
+  if not ok:
+    print("ERROR: something went wrong:\n" + result.strip(), file=sys.stderr)
+    sys.exit(1)
+
+  elif mode == 'generate':
+    result = result.replace('```zsh', '').replace('```', '').strip()
+    print(result)
+
+  elif mode == 'explain':
+    print(highlight_explanation(result))
+
+if __name__ == '__main__':
+  main()
diff --git a/zsh-llm-suggestions.zsh b/zsh-llm-suggestions.zsh
index cfcb0dc..d107364 100644
--- a/zsh-llm-suggestions.zsh
+++ b/zsh-llm-suggestions.zsh
@@ -86,6 +86,10 @@ zsh_llm_suggestions_github_copilot() {
   zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-github-copilot.py" "generate"
 }
 
+zsh_llm_suggestions_ollama() {
+  zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-ollama.py" "generate"
+}
+
 zsh_llm_suggestions_openai_explain() {
   zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-openai.py" "explain"
 }
@@ -94,7 +98,13 @@ zsh_llm_suggestions_github_copilot_explain() {
   zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-github-copilot.py" "explain"
 }
 
+zsh_llm_suggestions_ollama_explain() {
+  zsh_llm_completion "$SCRIPT_DIR/zsh-llm-suggestions-ollama.py" "explain"
+}
+
 zle -N zsh_llm_suggestions_openai
 zle -N zsh_llm_suggestions_openai_explain
 zle -N zsh_llm_suggestions_github_copilot
 zle -N zsh_llm_suggestions_github_copilot_explain
+zle -N zsh_llm_suggestions_ollama
+zle -N zsh_llm_suggestions_ollama_explain

From 8056a9bc6525c650b311b275ead412a052f0eec9 Mon Sep 17 00:00:00 2001
From: Bart Trojanowski <bart@jukie.net>
Date: Sun, 26 May 2024 10:35:12 -0400
Subject: [PATCH 2/2] ollama instructions

---
 README.md | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/README.md b/README.md
index 6d6dc59..31715b3 100644
--- a/README.md
+++ b/README.md
@@ -66,6 +66,20 @@ For `zsh_llm_suggestions_github_copilot` (GitHub Copilot suggestions):
   /usr/bin/gh extension install github/gh-copilot
   ```
 
+For `zsh_llm_suggestions_ollama` (defaults to `llama3` model, on your local GPU):
+- Follow [ollama installation instructions](https://github.com/ollama/ollama?tab=readme-ov-file#ollama)
+- This script will not start ollama.  Make sure it is running:
+  ```
+  ollama serve
+  ```
+  (there are many installation options, like [systemd service](https://github.com/ollama/ollama/blob/main/docs/linux.md#adding-ollama-as-a-startup-service-recommended),
+  and [docker container](https://github.com/ollama/ollama/blob/main/docs/docker.md))
+- Run the setup script to download llama3 model and setup the system prompts:
+  ```
+  ./zsh-llm-suggestions-ollama.py setup
+  ```
+- setup desired keybindings for `zsh_llm_suggestions_ollama` and `zsh_llm_suggestions_ollama_explain`
+
 ## Usage
 
 ### LLM suggested commands
@@ -95,3 +109,4 @@ There are some risks using `zsh-llm-suggestions`:
 Right now, two LLMs are supported:
 1. GitHub Copilot (via GitHub CLI). Requires a GitHub Copilot subscription.
 2. OpenAI. Requires an OpenAI API key. Currently uses `gpt-4-1106-preview`.
+3. Ollama. Requires local GPU and [ollama](https://github.com/ollama/ollama) installed.  Uses llama3 model by default.