diff --git a/index.js b/index.js
index 8c2fea2..b26278b 100644
--- a/index.js
+++ b/index.js
@@ -86,9 +86,15 @@ class Dalai {
   }
   async install(...models) {
+    // create venv (quote the path: this.home may contain spaces)
+    const venv_path = path.join(this.home, "venv")
+    const platform = os.platform()
+    await this.exec(`python3 -m venv "${venv_path}"`)
+    // different venv paths for Windows (executables live under Scripts\, not bin/)
+    const pip_path = platform === "win32" ? path.join(venv_path, "Scripts", "pip.exe") : path.join(venv_path, "bin", "pip")
+    const python_path = platform === "win32" ? path.join(venv_path, "Scripts", "python.exe") : path.join(venv_path, "bin", "python")
     // install to ~/llama.cpp
-    await this.exec("pip3 install torch torchvision torchaudio sentencepiece numpy")
-    await this.exec("pip install torch torchvision torchaudio sentencepiece numpy")
+    await this.exec(`"${pip_path}" install torch torchvision torchaudio sentencepiece numpy`)
     await this.exec(`git clone https://github.com/ggerganov/llama.cpp.git ${this.home}`)
     await this.exec("make", this.home)
     for(let model of models) {
@@ -97,7 +103,7 @@ class Dalai {
       if (fs.existsSync(outputFile)) {
         console.log(`Skip conversion, file already exists: ${outputFile}`)
       } else {
-        await this.exec(`python3 convert-pth-to-ggml.py models/${model}/ 1`, this.home)
+        await this.exec(`"${python_path}" convert-pth-to-ggml.py models/${model}/ 1`, this.home)
       }
       await this.quantize(model)
     }