summaryrefslogtreecommitdiff
path: root/Biz/Bild/Deps/bitsandbytes.nix
diff options
context:
space:
mode:
authorBen Sima <ben@bsima.me>2022-12-28 19:53:55 -0500
committerBen Sima <ben@bsima.me>2022-12-28 19:53:55 -0500
commitc3b955145998d39df39370671585a271ca6f80f0 (patch)
tree33614e03c966d205e2eadaf4dd183f52618afebc /Biz/Bild/Deps/bitsandbytes.nix
parent11e480c4b13808f12bc3f5db2765cebebf1aaf46 (diff)
Get ava GPT chatbot prototype working
Mostly this required packaging up some deps, but also had to recompile stuff with cuda support.
Diffstat (limited to 'Biz/Bild/Deps/bitsandbytes.nix')
-rw-r--r--Biz/Bild/Deps/bitsandbytes.nix94
1 file changed, 94 insertions, 0 deletions
diff --git a/Biz/Bild/Deps/bitsandbytes.nix b/Biz/Bild/Deps/bitsandbytes.nix
new file mode 100644
index 0000000..8d4fa53
--- /dev/null
+++ b/Biz/Bild/Deps/bitsandbytes.nix
@@ -0,0 +1,94 @@
+{ lib
+, fetchgit
+, buildPythonPackage
+, pytorch
+, setuptools
+, typer
+, linuxPackages
+, pytest
+
+# CUDA
+, cudaSupport ? true
+, cudaPackages ? {}
+, addOpenGLRunpath ? null
+, gcc
+}:
+
+let
+ inherit (linuxPackages) nvidia_x11;
+ inherit (cudaPackages) cudatoolkit;
+ cudaVersion =
+ lib.strings.removeSuffix "0"
+ (lib.strings.concatStrings
+ (lib.strings.splitString "." cudatoolkit.version));
+ libraryPath = lib.strings.makeLibraryPath [
+ cudatoolkit
+ cudatoolkit.lib
+ nvidia_x11
+ ];
+in buildPythonPackage rec {
+ pname = "bitsandbytes";
+ version = "unstable-2022-12-21";
+
+ src = fetchgit {
+ url = "https://simatime.com/git/ben/bitsandbytes.git";
+ rev = "31ef751bea48eeee2e0e95aca79df8e59b4c25c4";
+ sha256 = "sha256-/a2NFVuMSvSDELeXyfDdU9FZEJcDxCLa4VbMWBUCRI0=";
+ };
+
+ # any exe must be wrapped with this; it doesn't affect the build, but it does
+ # show up in the logs, so keep it here for documentation purposes
+ LD_LIBRARY_PATH = libraryPath;
+
+ # point the build system at the CUDA toolkit so it can locate nvcc
+ CUDA_HOME = "${cudatoolkit}";
+
+ preBuild = ''
+ make cuda11x CUDA_VERSION=${cudaVersion} GPP=${gcc}/bin/g++ -j3
+ ## the python loader cannot find libcuda on its own, so hardcode the
+ ## nvidia_x11 store path; this is brittle -- TODO find a cleaner fix
+ substituteInPlace bitsandbytes/cuda_setup/main.py \
+ --replace "libcuda.so" "${nvidia_x11}/lib/libcuda.so"
+ '';
+
+ propagatedBuildInputs = [
+ (pytorch.override({ inherit cudaSupport;}))
+ setuptools
+ ] ++ lib.optionals cudaSupport [
+ typer
+ cudatoolkit
+ cudatoolkit.lib
+ nvidia_x11
+ ];
+
+ nativeBuildInputs = lib.optionals cudaSupport [
+ gcc
+ addOpenGLRunpath
+ ];
+
+ preFixup = lib.optionalString cudaSupport ''
+ find $out -type f \( -name '*.so' -or -name '*.so.*' \) | while read lib
+ do
+ addOpenGLRunpath "$lib"
+ patchelf \
+ --set-rpath "${libraryPath}" \
+ "$lib"
+ done
+ '';
+
+ checkInputs = [
+ pytest
+ ];
+
+ # disabled because the test suite cannot find any GPUs in the nix sandbox
+ doCheck = false;
+
+ pythonImportsCheck = [
+ "torch"
+ # this tries to load and verify the cuda device on import; since this runs
+ # in the nix sandbox it will fail with a bunch of errors, but curiously the
+ # build still succeeds...
+ "bitsandbytes"
+ ];
+}
+