From d7383136dc434a35f7c7efa65343dd0bafea5256 Mon Sep 17 00:00:00 2001
From: Ben Sima
Date: Tue, 2 Apr 2024 00:08:24 -0400
Subject: Add llama-cpp from nixos-23.11

Removes my custom llama-cpp build and instead pulls in the upstream
build from nixos-23.11.
---
 Biz/Bild/Deps/llama-cpp.nix | 32 --------------------------------
 1 file changed, 32 deletions(-)
 delete mode 100644 Biz/Bild/Deps/llama-cpp.nix

diff --git a/Biz/Bild/Deps/llama-cpp.nix b/Biz/Bild/Deps/llama-cpp.nix
deleted file mode 100644
index 2e2aae7..0000000
--- a/Biz/Bild/Deps/llama-cpp.nix
+++ /dev/null
@@ -1,32 +0,0 @@
-{ stdenv, sources, python3, cmake, pkgconfig, openmpi, cudaPackages }:
-let llama-python = python3.withPackages (ps: with ps; [ numpy sentencepiece ]);
-in stdenv.mkDerivation {
-  name = "llama.cpp";
-  version = sources.llama-cpp.rev;
-
-  src = sources.llama-cpp;
-
-  postPatch = ''
-    substituteInPlace ./ggml-metal.m \
-      --replace '[bundle pathForResource:@"ggml-metal" ofType:@"metal"];' "@\"$out/bin/ggml-metal.metal\";"
-    substituteInPlace ./*.py --replace '/usr/bin/env python' '${llama-python}/bin/python'
-  '';
-
-  nativeBuildInputs = [ cmake pkgconfig ];
-  buildInputs = [ openmpi cudaPackages.cudatoolkit ];
-
-  cmakeFlags = [
-    "-DLLAMA_BUILD_SERVER=ON"
-    "-DLLAMA_MPI=ON"
-    "-DBUILD_SHARED_LIBS=ON"
-    "-DCMAKE_SKIP_BUILD_RPATH=ON"
-    "-DLLAMA_CUBLAS=ON"
-  ];
-
-  postInstall = ''
-    mv $out/bin/main $out/bin/llama
-    mv $out/bin/server $out/bin/llama-server
-  '';
-
-  meta.mainProgram = "llama";
-}
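
The replacement side of this change is not shown above; the diff covers
only the deleted file. As a rough sketch, consuming the upstream package
from a nixos-23.11 pin could look like the following. The
`sources.nixpkgs` pin name and the `cudaSupport` override flag are
assumptions for illustration, not taken from this commit:

    { sources }:
    let
      # Hypothetical niv-style pin pointing at the nixos-23.11 branch.
      pkgs = import sources.nixpkgs { };
    in
      # nixpkgs ships its own llama-cpp derivation; build features such
      # as CUDA are toggled through override arguments rather than the
      # hand-written cmakeFlags above (flag name assumed here).
      pkgs.llama-cpp.override { cudaSupport = true; }

This trades the pinned sources.llama-cpp revision and the custom
cmakeFlags for whatever version and defaults nixos-23.11 carries, the
usual cost of moving a package definition upstream.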