Diffstat (limited to 'Biz/Bild/Deps')
-rw-r--r--  Biz/Bild/Deps/exllama.nix | 54
1 file changed, 0 insertions(+), 54 deletions(-)
diff --git a/Biz/Bild/Deps/exllama.nix b/Biz/Bild/Deps/exllama.nix
deleted file mode 100644
index 434e9a9..0000000
--- a/Biz/Bild/Deps/exllama.nix
+++ /dev/null
@@ -1,54 +0,0 @@
-{ lib, sources, buildPythonPackage, pythonOlder
-, torch # tested on 2.0.1 and 2.1.0 (nightly) with cu118
-, safetensors, sentencepiece, ninja, cudaPackages, addOpenGLRunpath, which
-, libGL, gcc11 # cuda 11.7 requires g++ <12
-}:
-
-buildPythonPackage rec {
- pname = "exllama";
- version = sources.exllama.rev;
- format = "setuptools";
- disabled = pythonOlder "3.9";
-
- src = sources.exllama;
-
-  # I only care about compiling for the Ampere architecture, which is what
-  # my RTX 3090 Ti is, and for some reason (nix sandbox?) the torch
-  # extension builder cannot autodetect the arch, so the target compute
-  # capabilities are listed explicitly here.
- TORCH_CUDA_ARCH_LIST = "8.0;8.6+PTX";
-
- CUDA_HOME = "${cudaPackages.cuda_nvcc}";
-
- nativeBuildInputs = [
- gcc11
- which
- libGL
- addOpenGLRunpath
- cudaPackages.cuda_nvcc
- cudaPackages.cuda_cudart
- ];
-
- propagatedBuildInputs =
- [ torch safetensors sentencepiece ninja cudaPackages.cudatoolkit ];
-
- doCheck = false; # no tests currently
- pythonImportsCheck = [
- "exllama"
- "exllama.cuda_ext"
- "exllama.generator"
- "exllama.lora"
- "exllama.model"
- "exllama.tokenizer"
- ];
-
- meta = with lib; {
- description = ''
- A more memory-efficient rewrite of the HF transformers implementation of
- Llama for use with quantized weights.
- '';
- homepage = "https://github.com/jllllll/exllama";
- license = licenses.mit;
- maintainers = with maintainers; [ bsima ];
- };
-}
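Note: the deleted expression was a plain buildPythonPackage function taking a
niv-style `sources` attrset, so anyone who still needs it can re-instantiate it
with python3Packages.callPackage. A minimal sketch, assuming a Sources.nix pin
file (the pin path and its nixpkgs/exllama attributes are assumptions, not
shown in this diff):

    # Hypothetical consumer of the deleted expression. callPackage fills in
    # torch, safetensors, sentencepiece, ninja, etc. from the Python package
    # set, and cudaPackages, addOpenGLRunpath, gcc11, etc. from pkgs; only
    # `sources` must be passed explicitly.
    let
      sources = import ./Biz/Bild/Sources.nix; # assumed niv-style pins;
                                               # must provide sources.exllama
      pkgs = import sources.nixpkgs {
        config.allowUnfree = true; # the CUDA toolchain is unfree
      };
    in pkgs.python3Packages.callPackage ./Biz/Bild/Deps/exllama.nix {
      inherit sources;
    }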