Skip to content

Commit

Permalink
Merge pull request #16 from edouard-sn/main
Browse files Browse the repository at this point in the history
Python implementation. Big thanks to @edouard-sn
  • Loading branch information
dave1010 authored Nov 29, 2023
2 parents 17a78b4 + 5ee941d commit 38d36f0
Show file tree
Hide file tree
Showing 21 changed files with 417 additions and 235 deletions.
7 changes: 7 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
__pycache__
.vscode
.ruff_cache
dist
clipea.egg-info
build
.DS_Store
27 changes: 15 additions & 12 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -157,21 +157,24 @@ Clipea doesn't have any context of what it said before, though this may be added

## 📦 Installation and setup

### Mac
### Manual installation

[![Install with Homebrew](https://img.shields.io/badge/Homebrew-Tap-blue.svg)](https://github.com/dave1010/homebrew-clipea)
Python >=3.10 is required.

brew tap dave1010/clipea
brew install clipea
clipea setup
You can use the provided `setup.py`. ([setuptools docs](https://setuptools.pypa.io/en/latest/deprecated/easy_install.html))

You can install it quickly like so:

python3 setup.py sdist
pip install dist/clipea-{version}.tar.gz

Or development mode:

pip install -e .

### Manual install
### With PyPI (soon)

pip install llm
git clone https://github.com/dave1010/clipea.git
cd clipea
./clipea setup
./clipea add current dir to my path on shell login
#pip install clipea

### Zsh Shell integration and Alias

Expand All @@ -182,7 +185,7 @@ Clipea doesn't have any context of what it said before, though this may be added

## Internals

Clipea is currently written in PHP but may switch to Python ([#3](https://github.com/dave1010/clipea/issues/3)).
Clipea is written in Python (3.10+).

Clipea uses [llm](https://github.com/simonw/llm) to interact with large language models.

Expand Down
94 changes: 0 additions & 94 deletions clipea

This file was deleted.

24 changes: 2 additions & 22 deletions clipea.zsh
100755 → 100644
Original file line number Diff line number Diff line change
@@ -1,24 +1,4 @@
#!/bin/zsh

if [[ $ZSH_EVAL_CONTEXT != 'toplevel:file' ]]; then
echo "Error: Source the script instead of executing it:"
echo
echo "source $0"
return 1 2>/dev/null || exit 1
fi

DIR="$(dirname -- "$0")"

TMP_FILE=$(mktemp)

# Execute the PHP script with an environment variable
COMMAND_OUTPUT_FILE="$TMP_FILE" "$DIR/clipea" "$@"

# Read the command to be placed on the Zsh command line
commandToPlace=$(< "$TMP_FILE")

# Place it on the Zsh command line
print -rz "$commandToPlace"

# Remove the temp file
rm "$TMP_FILE"
echo "Clipea has been updated."
echo "Please run 'clipea alias' or reinstall to upgrade."
19 changes: 19 additions & 0 deletions clipea/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
"""Clipea application
📎🟢 Like Clippy but for the CLI. A blazing fast AI helper for your command line
"""
import os
from clipea import utils, cli


# Directory this package lives in; used to locate bundled data files.
CLIPEA_DIR: str = os.path.dirname(os.path.realpath(__file__))
# Bundled usage/help text shipped alongside the package.
USAGE_FILE_PATH: str = CLIPEA_DIR + "/usage.txt"
# User's home directory, normalized to `str` by the project helper.
HOME_PATH: str = utils.anystr_force_str(os.path.expanduser("~"))
# System prompt file: a copy in the user's config dir wins, otherwise the
# bundled default next to the package is used.
SYSTEM_PROMPT_FILE: str = utils.get_config_file_with_fallback(
    home=HOME_PATH, fallback=CLIPEA_DIR, appname="clipea", filename="system-prompt.txt"
)
# Machine details appended to the system prompt so the model can tailor
# its shell commands to this environment.
ENV: dict[str, str] = {
    "shell": cli.get_shell(),
    "os": os.name,  # e.g. "posix" or "nt"
    "editor": os.getenv("EDITOR", "nano"),
}
SYSTEM_PROMPT: str = utils.read_file(SYSTEM_PROMPT_FILE) + str(ENV)
17 changes: 17 additions & 0 deletions clipea/__main__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
"""Clipea application entry point
"""
import sys
import shutil
from clipea import router


def clipea_main() -> None:
    """Application entry point: check dependencies and route the CLI prompt.

    Raises:
        SystemExit: if the external `llm` tool is not on PATH.
    """
    # `llm` is invoked as an external dependency; fail fast with a hint.
    if shutil.which("llm") is None:
        sys.exit('Error: dependency "llm" not found. Run "clipea setup" to install')

    # Everything after the program name is treated as a single prompt string.
    # (snake_case: this is a local variable, not a module constant.)
    user_prompt = " ".join(sys.argv[1:])
    router.commands_router(user_prompt)


if __name__ == "__main__":
    clipea_main()
48 changes: 48 additions & 0 deletions clipea/cli.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
"""CLI
Interactions with the terminal
"""
import os
import sys
import subprocess
import shutil


def get_input(max_len: int = 1 << 13) -> str:
    """Read piped user data from stdin and length-check it.

    Nothing is read when stdin is an interactive TTY (no data was piped).

    Args:
        max_len (int, optional): maximum number of characters accepted.
            Defaults to 8192 (1 << 13).
    Returns:
        str: user input ("" when stdin is a TTY)
    Raises:
        ValueError: if the input is longer than `max_len` characters
    """
    data: str = ""
    if not sys.stdin.isatty():
        data = input()
    if len(data) > max_len:
        # Derive the limit from `max_len` instead of a hard-coded figure
        # (the old message claimed 17,848 while the default is 8192).
        raise ValueError(
            f"Error: Input too long! Maximum {max_len:,} characters allowed. "
            f"Try limiting your input using 'head -c {max_len} file.txt'"
        )
    return data


def get_shell() -> str:
    """Return the name of the user's shell.

    Walks two parents up the process tree with `ps`
    (python -> caller -> shell) and reads that process's command name.
    POSIX-only: relies on `ps` being available.

    Returns:
        str: the shell's command name (e.g. "zsh", "bash")
    """
    ps_pipeline = "ps -o comm= -p $(ps -o ppid= -p $(ps -o ppid= -p $$))"
    shell_name = os.popen(ps_pipeline).read()
    return shell_name.strip()


def execute_with_prompt(cmd: str, shell: str = None) -> None:
    """Ask the user whether to execute a command; run it if they answer "y".

    Args:
        cmd (str): command to execute
        shell (str, optional): name of a particular shell to run the command
            with (resolved via PATH). Defaults to None (system default shell).
    """
    # Check for an interactive terminal BEFORE prompting: the old order
    # printed the prompt and consumed a line of piped stdin even though a
    # non-TTY answer could never trigger execution.
    if not sys.stdin.isatty():
        return
    answer = input("\033[0;36mExecute? [y/N] \033[0m").strip().lower()
    if answer == "y":
        # shutil.which(None) raises TypeError, so only resolve a real name;
        # executable=None lets subprocess use the default shell.
        executable = shutil.which(shell) if shell else None
        subprocess.run(cmd, shell=True, executable=executable, check=False)
34 changes: 34 additions & 0 deletions clipea/clipea.zsh
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
#!/bin/zsh

# Guard: this script must be *sourced*, not executed — `print -z` below
# places text on the calling interactive shell's command line, which only
# works inside that shell's own context.
if [[ $ZSH_EVAL_CONTEXT != 'toplevel:file' ]]; then
    echo "Error: Source the script instead of executing it:"
    echo
    echo "source $0"
    return 1 2>/dev/null || exit 1
fi

# Temp file through which the Python process hands back the command(s).
CLIPEA_TMP_FILE=$(mktemp)

# Directory containing this script ( %x expands to the sourced file path).
# https://stackoverflow.com/questions/9901210/bash-source0-equivalent-in-zsh
CLIPEA_SCRIPT_DIR=$(dirname $(readlink -f ${(%):-%x}))

CLIPEA_PYTHON=

# Default: an installed `clipea` executable found on PATH.
CLIPEA_PATH=$(which clipea)

# Run clipea from the current dir if possible (development checkout:
# run the package directory with the interpreter, like `python clipea/`).
if [[ -f $CLIPEA_SCRIPT_DIR/__main__.py ]]; then
    CLIPEA_PATH=$CLIPEA_SCRIPT_DIR
    CLIPEA_PYTHON="$(which python3 || which python)"
fi

# Execute clipea with an environment variable telling it where to write
# any command destined for the prompt.
CLIPEA_CMD_OUTPUT_FILE="$CLIPEA_TMP_FILE" $CLIPEA_PYTHON "$CLIPEA_PATH" "$@"

# Read the command to be placed on the Zsh command line
CLIPEA_COMMAND_TO_PLACE=$(< "$CLIPEA_TMP_FILE")

# Place it on the Zsh command line
print -rz "$CLIPEA_COMMAND_TO_PLACE"

rm "$CLIPEA_TMP_FILE"
91 changes: 91 additions & 0 deletions clipea/clipea_llm.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,91 @@
"""LLM
Interactions with `llm` python library
"""
import os
import llm.cli
import llm
import clipea.cli
from clipea import ENV, HOME_PATH, CLIPEA_DIR, utils


def init_llm(llm_model: str = "") -> llm.Model:
    """Initialize the `llm` library with the user's chosen model.

    Args:
        llm_model (str, optional): LLM model name (ex: "gpt-4").
            Defaults to the content of
            {clipea config}/clipea_default_model.txt.
    Returns:
        llm.Model
    """
    # User-level config wins; otherwise fall back to the bundled default.
    default_model_file = utils.get_config_file_with_fallback(
        home=HOME_PATH,
        fallback=CLIPEA_DIR,
        appname="clipea",
        filename="clipea_default_model.txt",
    )
    # Only consult the default-model file when no explicit name was given.
    if llm_model:
        chosen_model = llm_model
    else:
        chosen_model = llm.cli.get_default_model(filename=default_model_file)
    model = llm.get_model(chosen_model)

    # Resolve the API key (keychain/env var) when the backend requires one.
    if model.needs_key:
        model.key = llm.get_key("", model.needs_key, model.key_env_var)
    return model


def stream_commands(response: llm.Response, command_prefix: str = "") -> None:
    """Streams llm response which returns shell commands.

    If a valid shell command is returned, either prompt to execute it or
    put it in zsh's command buffer.
    The processing is done internally with a nested function `process_command`.
    A command is considered valid if it starts with '$ ' and is a full line
    of the answer.

    Args:
        response (llm.Response): LLM's answer to user's prompt
        command_prefix (str, optional): What to write before streaming the commands. Defaults to "".
    """
    command: str = ""  # accumulates streamed chunks until a full line is seen
    # Set by the zsh integration: when present, commands are buffered to this
    # file (for `print -z`) instead of being offered for execution.
    output_file: str = os.getenv("CLIPEA_CMD_OUTPUT_FILE")
    buffer: str = ""  # commands collected for `output_file`
    new_line_pos: int  # index of the newline terminating the current command

    def process_command():
        # Pop one "$ "-prefixed command off `command` and either buffer it
        # (shell integration) or offer to execute it interactively.
        nonlocal command, buffer, new_line_pos

        current_command: str
        if new_line_pos > 0:
            current_command = command[2:new_line_pos]
        else:
            # new_line_pos is -1 (tail call after the loop): take the rest.
            current_command = command[2:]
        # Keep anything after the newline for the next iteration.
        command = command[new_line_pos + 1 :]

        if output_file is not None:
            buffer += current_command + os.linesep
        else:
            clipea.cli.execute_with_prompt(current_command, shell=ENV["shell"])

    print(command_prefix, end="")
    for chunk in response:
        # Echo the model's output as it arrives.
        print(chunk, end="", flush=True)
        command += chunk

        # Wait until at least one complete line has accumulated.
        if (new_line_pos := command.find(os.linesep)) == -1:
            continue
        if command.startswith("$ "):
            process_command()
        else:
            # Completed line is prose, not a command; discard it.
            command = ""

    # llm CLI puts a line feed manually to its response, but not its library.
    # We have to do this to manage the case where the model returns a
    # non-linefeed terminated string.
    # It also explains why there is a capturing nested function `process_command`
    if command.startswith("$ "):
        print()
        process_command()

    if output_file:
        # NOTE(review): ';\ ' is a literal backslash+space (not an escape
        # sequence), so multiple commands are joined as `cmd;\ cmd` — confirm
        # this is the separator the zsh `print -z` buffer actually expects.
        utils.write_to_file(
            output_file,
            ';\ '.join(buffer.rstrip(os.linesep).split(os.linesep)) + os.linesep,
        )
Loading

0 comments on commit 38d36f0

Please sign in to comment.