From c3b955145998d39df39370671585a271ca6f80f0 Mon Sep 17 00:00:00 2001
From: Ben Sima
Date: Wed, 28 Dec 2022 19:53:55 -0500
Subject: Get ava GPT chatbot prototype working

Mostly this required packaging up some deps, but also had to recompile
stuff with cuda support.
---
 Biz/Bild/Deps/accelerate.nix   | 24 +++++++++++
 Biz/Bild/Deps/bitsandbytes.nix | 94 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 118 insertions(+)
 create mode 100644 Biz/Bild/Deps/accelerate.nix
 create mode 100644 Biz/Bild/Deps/bitsandbytes.nix

(limited to 'Biz/Bild/Deps')

diff --git a/Biz/Bild/Deps/accelerate.nix b/Biz/Bild/Deps/accelerate.nix
new file mode 100644
index 0000000..5d00f14
--- /dev/null
+++ b/Biz/Bild/Deps/accelerate.nix
@@ -0,0 +1,24 @@
+{ fetchFromGitHub
+, buildPythonPackage
+, numpy
+, packaging
+, psutil
+, pyyaml
+, torch
+}:
+
+buildPythonPackage rec {
+  name = "accelerate";
+  version = "0.15.0";
+  propagatedBuildInputs = [
+    numpy packaging psutil pyyaml torch
+  ];
+  doCheck = false;
+  src = fetchFromGitHub {
+    owner = "huggingface";
+    repo = "accelerate";
+    rev = "v${version}";
+    sha256 = "sha256-agfbOaa+Nm10HZkd2Y7zR3R37n+vLNsxCyxZax6O3Lo=";
+  };
+}
+
diff --git a/Biz/Bild/Deps/bitsandbytes.nix b/Biz/Bild/Deps/bitsandbytes.nix
new file mode 100644
index 0000000..8d4fa53
--- /dev/null
+++ b/Biz/Bild/Deps/bitsandbytes.nix
@@ -0,0 +1,94 @@
+{ lib
+, fetchgit
+, buildPythonPackage
+, pytorch
+, setuptools
+, typer
+, linuxPackages
+, pytest
+
+# CUDA
+, cudaSupport ? true
+, cudaPackages ? {}
+, addOpenGLRunpath ? null
+, gcc
+}:
+
+let
+  inherit (linuxPackages) nvidia_x11;
+  inherit (cudaPackages) cudatoolkit;
+  cudaVersion =
+    lib.strings.removeSuffix "0"
+      (lib.strings.concatStrings
+        (lib.strings.splitString "." cudatoolkit.version));
+  libraryPath = lib.strings.makeLibraryPath [
+    cudatoolkit
+    cudatoolkit.lib
+    nvidia_x11
+  ];
+in buildPythonPackage rec {
+  pname = "bitsandbytes";
+  version = "unstable-2022-12-21";
+
+  src = fetchgit {
+    url = "https://simatime.com/git/ben/bitsandbytes.git";
+    rev = "31ef751bea48eeee2e0e95aca79df8e59b4c25c4";
+    sha256 = "sha256-/a2NFVuMSvSDELeXyfDdU9FZEJcDxCLa4VbMWBUCRI0=";
+  };
+
+  # any exe must be wrapped with this; it doesn't affect the build, but it
+  # does show up in the logs, so keep it here for documentation purposes
+  LD_LIBRARY_PATH = libraryPath;
+
+  # this allows the build system to find nvcc
+  CUDA_HOME = "${cudatoolkit}";
+
+  preBuild = ''
+    make cuda11x CUDA_VERSION=${cudaVersion} GPP=${gcc}/bin/g++ -j3
+    ## this python doesn't know where to get libcuda, so explicitly tell it
+    ## this is probably really bad practice, fix this
+    substituteInPlace bitsandbytes/cuda_setup/main.py \
+      --replace "libcuda.so" "${nvidia_x11}/lib/libcuda.so"
+  '';
+
+  propagatedBuildInputs = [
+    (pytorch.override { inherit cudaSupport; })
+    setuptools
+  ] ++ lib.optionals cudaSupport [
+    typer
+    cudatoolkit
+    cudatoolkit.lib
+    nvidia_x11
+  ];
+
+  nativeBuildInputs = lib.optionals cudaSupport [
+    gcc
+    addOpenGLRunpath
+  ];
+
+  preFixup = lib.optionalString cudaSupport ''
+    find $out -type f \( -name '*.so' -or -name '*.so.*' \) | while read lib
+    do
+      addOpenGLRunpath "$lib"
+      patchelf \
+        --set-rpath "${libraryPath}" \
+        "$lib"
+    done
+  '';
+
+  checkInputs = [
+    pytest
+  ];
+
+  # disabled because the test suite cannot find any GPUs in the nix sandbox
+  doCheck = false;
+
+  pythonImportsCheck = [
+    "torch"
+    # this tries to load and verify the cuda device on import; since this runs
+    # in the nix sandbox it will fail with a bunch of errors, but curiously the
+    # build still succeeds...
+    "bitsandbytes"
+  ];
+}
+
--
cgit v1.2.3
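
A minimal sketch of how these two expressions might be consumed: the diff is
limited to 'Biz/Bild/Deps', so the actual call sites are not part of this
patch. The overlay shape, the attribute names, and the <nixpkgs> import below
are assumptions for illustration, not something the commit specifies.

# sketch only: wire the two new files into a Python package set via
# packageOverrides, then build an environment that contains both
{ pkgs ? import <nixpkgs> {
    # cudatoolkit and nvidia_x11 are unfree, so CUDA builds need this
    config.allowUnfree = true;
  }
}:

let
  python = pkgs.python3.override {
    packageOverrides = pyFinal: pyPrev: {
      # accelerate.nix only needs python-level deps, so callPackage
      # resolves everything from the package set
      accelerate = pyFinal.callPackage ./Biz/Bild/Deps/accelerate.nix { };

      # bitsandbytes.nix also wants the CUDA toolchain and the kernel
      # driver libraries, passed here from the top-level package set
      bitsandbytes = pyFinal.callPackage ./Biz/Bild/Deps/bitsandbytes.nix {
        inherit (pkgs) linuxPackages cudaPackages addOpenGLRunpath gcc;
      };
    };
  };
in
# an environment with both new packages on PYTHONPATH for the chatbot code
python.withPackages (ps: [ ps.accelerate ps.bitsandbytes ])

Evaluating this environment pulls in the cudaSupport pytorch override from
bitsandbytes.nix, which lines up with the commit message's note about having
to recompile stuff with cuda support.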