From 3544d8a57f8da8c61f3f30f9e81a72f0fd651850 Mon Sep 17 00:00:00 2001
From: Henry Mao <1828968+calclavia@users.noreply.github.com>
Date: Thu, 6 Mar 2025 16:04:24 +0800
Subject: [PATCH 1/2] Add Dockerfile

---
 Dockerfile | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)
 create mode 100644 Dockerfile

diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..2d2c541
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,25 @@
+# Generated by https://smithery.ai. See: https://smithery.ai/docs/config#dockerfile
+FROM node:lts-alpine
+
+# Set default environment variables to allow the server to initialize
+ENV LLM_MODEL_NAME=dummy-model
+ENV LLM_MODEL_PROVIDER=ollama
+ENV LLM_BASE_URL=http://localhost:11434
+
+# Create app directory
+WORKDIR /usr/src/app
+
+# Copy package files
+COPY package.json package-lock.json ./
+
+# Install dependencies (skip lifecycle scripts if necessary)
+RUN npm install --ignore-scripts
+
+# Copy rest of the code
+COPY . .
+
+# Build the project
+RUN npm run build
+
+# Start the MCP server
+CMD [ "npm", "start" ]
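For readers trying out the Dockerfile above, here is a minimal sketch of building and running the image locally. The `mcp-llm` image tag is an illustrative assumption, the model name is taken from the config example this series removes from the README, and `host.docker.internal` (Docker Desktop's alias for the host) stands in for wherever Ollama is actually reachable:

```bash
# Build the image from the repository root; the "mcp-llm" tag is illustrative
docker build -t mcp-llm .

# MCP servers typically communicate over stdio, so run with -i to keep stdin open;
# the -e flags override the dummy defaults baked into the image
docker run --rm -i \
  -e LLM_MODEL_NAME=deepseek-r1:7b-qwen-distill-q6_k_l \
  -e LLM_MODEL_PROVIDER=ollama \
  -e LLM_BASE_URL=http://host.docker.internal:11434 \
  mcp-llm
```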
From 1c7b1a64ec962f68552022baedc0292d28695146 Mon Sep 17 00:00:00 2001
From: Henry Mao <1828968+calclavia@users.noreply.github.com>
Date: Thu, 6 Mar 2025 16:04:25 +0800
Subject: [PATCH 2/2] Update README

---
 README.md | 72 +++++++------------------------------------------------
 1 file changed, 8 insertions(+), 64 deletions(-)

diff --git a/README.md b/README.md
index 4061b69..445c58c 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,5 @@
 # MCP LLM
+[![smithery badge](https://smithery.ai/badge/@sammcj/mcp-llm)](https://smithery.ai/server/@sammcj/mcp-llm)
 
 An MCP server that provides access to LLMs using the LlamaIndexTS library.
 
@@ -18,72 +19,15 @@ This MCP server provides the following tools:
 
 ## Installation
 
-Update your MCP config to add the mcp-llm server:
+### Installing via Smithery
 
-```json
-{
-  "mcpServers": {
-    "llm": {
-      "command": "npx",
-      "args": [
-        "-y",
-        "mcp-llm"
-      ],
-      "env": {
-        "LLM_MODEL_NAME": "deepseek-r1:7b-qwen-distill-q6_k_l",
-        "LLM_MODEL_PROVIDER": "ollama",
-        "LLM_BASE_URL": "http://localhost:11434",
-        "LLM_ALLOW_FILE_WRITE": "true",
-        "LLM_TIMEOUT_S": "240"
-      },
-      "disabled": false,
-      "autoApprove": [
-        "generate_code",
-        "generate_documentation",
-        "ask_question",
-        "generate_code_to_file"
-      ],
-      "timeout": 300
-    },
-  }
-}
-```
-
-## Available Scripts
-
-- `npm run build` - Build the project
-- `npm run watch` - Watch for changes and rebuild
-- `npm start` - Start the MCP server
-- `npm run example` - Run the example script
-- `npm run inspector` - Run the MCP inspector
-
-## Configuration
-
-The MCP server is configurable using environment variables:
-
-### Required Environment Variables
-
-- `LLM_MODEL_NAME`: The name of the model to use (e.g., `qwen2-32b:q6_k`, `anthropic.claude-3-7-sonnet-20250219-v1:0`)
-- `LLM_MODEL_PROVIDER`: The model provider (e.g., `bedrock`, `ollama`, `openai`, `openai-compatible`)
-
-### Optional Environment Variables
-
-- `LLM_BASE_URL`: Base URL for the model provider (e.g., `https://ollama.internal`, `http://my-openai-compatible-server.com:3000/v1`)
-- `LLM_TEMPERATURE`: Temperature parameter for the model (e.g., `0.2`)
-- `LLM_NUM_CTX`: Context window size (e.g., `16384`)
-- `LLM_TOP_P`: Top-p parameter for the model (e.g., `0.85`)
-- `LLM_TOP_K`: Top-k parameter for the model (e.g., `40`)
-- `LLM_MIN_P`: Min-p parameter for the model (e.g., `0.05`)
-- `LLM_REPETITION_PENALTY`: Repetition penalty parameter for the model (e.g., `1.05`)
-- `LLM_SYSTEM_PROMPT_GENERATE_CODE`: System prompt for the generate_code tool
-- `LLM_SYSTEM_PROMPT_GENERATE_DOCUMENTATION`: System prompt for the generate_documentation tool
-- `LLM_SYSTEM_PROMPT_ASK_QUESTION`: System prompt for the ask_question tool
-- `LLM_TIMEOUT_S`: Timeout in seconds for LLM requests (e.g., `240` for 4 minutes)
-- `LLM_ALLOW_FILE_WRITE`: Set to `true` to allow the `generate_code_to_file` tool to write to files (default: `false`)
-- `OPENAI_API_KEY`: API key for OpenAI (required when using OpenAI provider)
+To install LLM Server for Claude Desktop automatically via [Smithery](https://smithery.ai/server/@sammcj/mcp-llm):
 
+```bash
+npx -y @smithery/cli install @sammcj/mcp-llm --client claude
+```
+
-## Manual Install From Source
+### Manual Install From Source
 
 1. Clone the repository
 2. Install dependencies:
@@ -156,4 +100,4 @@ The `generate_code_to_file` tool supports both relative and absolute file paths.
 
 ## License
 
-- [MIT LICENSE](LICENSE)
+- [MIT LICENSE](LICENSE)
\ No newline at end of file
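Taken together with the Dockerfile from the first patch, the container can also stand in for the `npx` invocation in the `mcpServers` config block that this patch removes from the README. A minimal sketch under the same assumptions as above: the `mcp-llm` image tag, the model name, and the `host.docker.internal` base URL are all illustrative placeholders to adjust for your environment (JSON permits no comments, so the hedging lives here):

```json
{
  "mcpServers": {
    "llm": {
      "command": "docker",
      "args": [
        "run", "--rm", "-i",
        "-e", "LLM_MODEL_NAME=deepseek-r1:7b-qwen-distill-q6_k_l",
        "-e", "LLM_MODEL_PROVIDER=ollama",
        "-e", "LLM_BASE_URL=http://host.docker.internal:11434",
        "mcp-llm"
      ]
    }
  }
}
```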