path: root/bitsandbytes/cuda_setup
Diffstat (limited to 'bitsandbytes/cuda_setup')
-rw-r--r--  bitsandbytes/cuda_setup/__init__.py              0
-rw-r--r--  bitsandbytes/cuda_setup/compute_capability.py   79
-rw-r--r--  bitsandbytes/cuda_setup/env_vars.py             51
-rw-r--r--  bitsandbytes/cuda_setup/main.py                127
-rw-r--r--  bitsandbytes/cuda_setup/paths.py               126
5 files changed, 383 insertions, 0 deletions
diff --git a/bitsandbytes/cuda_setup/__init__.py b/bitsandbytes/cuda_setup/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/bitsandbytes/cuda_setup/__init__.py
diff --git a/bitsandbytes/cuda_setup/compute_capability.py b/bitsandbytes/cuda_setup/compute_capability.py
new file mode 100644
index 0000000..7a3f463
--- /dev/null
+++ b/bitsandbytes/cuda_setup/compute_capability.py
@@ -0,0 +1,79 @@
+import ctypes
+from dataclasses import dataclass, field
+from typing import List
+
+
+@dataclass
+class CudaLibVals:
+    # code bits taken from
+    # https://gist.github.com/f0k/63a664160d016a491b2cbea15913d549
+
+    nGpus: ctypes.c_int = field(default_factory=ctypes.c_int)
+    cc_major: ctypes.c_int = field(default_factory=ctypes.c_int)
+    cc_minor: ctypes.c_int = field(default_factory=ctypes.c_int)
+    device: ctypes.c_int = field(default_factory=ctypes.c_int)
+    error_str: ctypes.c_char_p = field(default_factory=ctypes.c_char_p)
+    cuda: ctypes.CDLL = field(init=False, repr=False)
+    ccs: List[str] = field(init=False)
+
+    def _initialize_driver_API(self):
+        self._error_handle(self.cuda.cuInit(0))
+
+    def _load_cuda_lib(self):
+        """
+        1. find the libcuda.so library (GPU driver) (/usr/lib)
+           init_device -> init variables -> call function by reference
+        """
+        libnames = ("libcuda.so",)
+        for libname in libnames:
+            try:
+                self.cuda = ctypes.CDLL(libname)
+            except OSError:
+                continue
+            else:
+                break
+        else:
+            raise OSError("could not load any of: " + " ".join(libnames))
+
+    def call_cuda_func(self, function_obj, **kwargs):
+        CUDA_SUCCESS = 0  # constant taken from cuda.h
+        pass
+        # if (CUDA_SUCCESS := function_obj(
+
+    def _error_handle(self, cuda_lib_call_return_value):
+        """
+        2. call extern C function to determine CC
+        (see https://docs.nvidia.com/cuda/cuda-driver-api/group__CUDA__DEVICE__DEPRECATED.html)
+        """
+        CUDA_SUCCESS = 0  # constant taken from cuda.h
+
+        if cuda_lib_call_return_value != CUDA_SUCCESS:
+            self.cuda.cuGetErrorString(
+                cuda_lib_call_return_value,
+                ctypes.byref(self.error_str),
+            )
+            print("Could not initialize CUDA - failure!")
+            raise Exception("CUDA exception!")
+        return cuda_lib_call_return_value
+
+    def __post_init__(self):
+        self._load_cuda_lib()
+        self._initialize_driver_API()
+        self._error_handle(
+            self.cuda.cuDeviceGetCount(ctypes.byref(self.nGpus))
+        )
+        tmp_ccs = []
+        for gpu_index in range(self.nGpus.value):
+            self._error_handle(
+                self.cuda.cuDeviceGet(ctypes.byref(self.device), gpu_index)
+            )
+            self._error_handle(
+                self.cuda.cuDeviceComputeCapability(
+                    ctypes.byref(self.cc_major),
+                    ctypes.byref(self.cc_minor),
+                    self.device,
+                )
+            )
+            tmp_ccs.append(f"{self.cc_major.value}.{self.cc_minor.value}")
+        self.ccs = sorted(tmp_ccs, reverse=True)
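
A minimal usage sketch for the dataclass above (illustrative only, not part of the diff): it assumes the NVIDIA driver's libcuda.so is installed and simply instantiates the class, which runs the detection in __post_init__.

    # Illustrative only; requires the NVIDIA driver (libcuda.so) to be present.
    from bitsandbytes.cuda_setup.compute_capability import CudaLibVals

    try:
        vals = CudaLibVals()
        print(f"{vals.nGpus.value} GPU(s) found, compute capabilities: {vals.ccs}")
    except OSError:
        print("libcuda.so could not be loaded - is an NVIDIA driver installed?")
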
diff --git a/bitsandbytes/cuda_setup/env_vars.py b/bitsandbytes/cuda_setup/env_vars.py
new file mode 100644
index 0000000..536a7d8
--- /dev/null
+++ b/bitsandbytes/cuda_setup/env_vars.py
@@ -0,0 +1,51 @@
+import os
+from typing import Dict
+
+
+def to_be_ignored(env_var: str, value: str) -> bool:
+    ignorable = {
+        "PWD",  # PWD: this is how the shell keeps track of the current working dir
+        "OLDPWD",
+        "SSH_AUTH_SOCK",  # SSH stuff, therefore unrelated
+        "SSH_TTY",
+        "HOME",  # user's home directory
+        "TMUX",  # Terminal Multiplexer
+        "XDG_DATA_DIRS",  # XDG: Desktop environment stuff
+        "XDG_RUNTIME_DIR",
+        "MAIL",  # path to the user's mail spool
+        "SHELL",  # binary of the currently invoked shell
+        "DBUS_SESSION_BUS_ADDRESS",  # D-Bus IPC, unrelated to libraries
+        "PATH",  # this is for finding binaries, not libraries
+        "LESSOPEN",  # related to the `less` command
+        "LESSCLOSE",
+        "_",  # current Python interpreter
+    }
+    return env_var in ignorable
+
+
+def might_contain_a_path(candidate: str) -> bool:
+    return "/" in candidate
+
+
+def is_active_conda_env(env_var: str) -> bool:
+    return "CONDA_PREFIX" == env_var
+
+
+def is_other_conda_env_var(env_var: str) -> bool:
+    return "CONDA" in env_var
+
+
+def is_relevant_candidate_env_var(env_var: str, value: str) -> bool:
+    return is_active_conda_env(env_var) or (
+        might_contain_a_path(value) and not
+        is_other_conda_env_var(env_var) and not
+        to_be_ignored(env_var, value)
+    )
+
+
+def get_potentially_lib_path_containing_env_vars() -> Dict[str, str]:
+    return {
+        env_var: value
+        for env_var, value in os.environ.items()
+        if is_relevant_candidate_env_var(env_var, value)
+    }
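
For orientation, a small sketch of what the relevance filter above keeps and drops (illustrative only, not part of the diff); all of the values below are made up.

    # Hypothetical environment values, for illustration only.
    from bitsandbytes.cuda_setup.env_vars import is_relevant_candidate_env_var

    examples = {
        "CONDA_PREFIX": "/home/user/miniconda3/envs/ml",  # kept: active conda env
        "LD_LIBRARY_PATH": "/usr/local/cuda/lib64",       # kept: looks like a path
        "CONDA_EXE": "/home/user/miniconda3/bin/conda",   # dropped: other CONDA_* var
        "PATH": "/usr/bin:/bin",                          # dropped: on the ignore list
        "EDITOR": "vim",                                  # dropped: no '/' in the value
    }
    for name, value in examples.items():
        print(name, is_relevant_candidate_env_var(name, value))
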
diff --git a/bitsandbytes/cuda_setup/main.py b/bitsandbytes/cuda_setup/main.py
new file mode 100644
index 0000000..e96ac70
--- /dev/null
+++ b/bitsandbytes/cuda_setup/main.py
@@ -0,0 +1,127 @@
+"""
+extract factors the build is dependent on:
+[X] compute capability
+ [ ] TODO: Q - What if we have multiple GPUs of different makes?
+- CUDA version
+- Software:
+ - CPU-only: only CPU quantization functions (no optimizer, no matrix multipl)
+ - CuBLAS-LT: full-build 8-bit optimizer
+ - no CuBLAS-LT: no 8-bit matrix multiplication (`nomatmul`)
+
+evaluation:
+ - if paths faulty, return meaningful error
+ - else:
+ - determine CUDA version
+ - determine capabilities
+ - based on that set the default path
+"""
+
+import ctypes
+from pathlib import Path
+
+from ..utils import execute_and_return
+from .paths import determine_cuda_runtime_lib_path
+
+
+def check_cuda_result(cuda, result_val):
+    # 3. Check for CUDA errors
+    if result_val != 0:
+        error_str = ctypes.c_char_p()
+        cuda.cuGetErrorString(result_val, ctypes.byref(error_str))
+        raise Exception(f"CUDA exception! ERROR: {error_str.value}")
+
+
+def get_compute_capabilities():
+    """
+    1. find libcuda.so library (GPU driver) (/usr/lib)
+       init_device -> init variables -> call function by reference
+    2. call extern C function to determine CC
+       (https://docs.nvidia.com/cuda/cuda-driver-api/group__CUDA__DEVICE__DEPRECATED.html)
+    3. Check for CUDA errors
+       https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
+    # bits taken from https://gist.github.com/f0k/63a664160d016a491b2cbea15913d549
+    """
+
+    # 1. find libcuda.so library (GPU driver) (/usr/lib)
+    try:
+        cuda = ctypes.CDLL("libcuda.so")
+    except OSError:
+        # TODO: shouldn't we error or at least warn here?
+        return None
+
+    nGpus = ctypes.c_int()
+    cc_major = ctypes.c_int()
+    cc_minor = ctypes.c_int()
+
+    result = ctypes.c_int()
+    device = ctypes.c_int()
+
+    check_cuda_result(cuda, cuda.cuInit(0))
+
+    check_cuda_result(cuda, cuda.cuDeviceGetCount(ctypes.byref(nGpus)))
+    ccs = []
+    for i in range(nGpus.value):
+        check_cuda_result(cuda, cuda.cuDeviceGet(ctypes.byref(device), i))
+        ref_major = ctypes.byref(cc_major)
+        ref_minor = ctypes.byref(cc_minor)
+        # 2. call extern C function to determine CC
+        check_cuda_result(
+            cuda, cuda.cuDeviceComputeCapability(ref_major, ref_minor, device)
+        )
+        ccs.append(f"{cc_major.value}.{cc_minor.value}")
+
+    return sorted(ccs)  # ascending, so the last element is the highest CC
+
+
+# def get_compute_capability() -> Union[List[str, ...], None]:  # FIXME: error
+def get_compute_capability():
+    """
+    Extracts the highest compute capability from all available GPUs, as compute
+    capabilities are downwards compatible. If no GPUs are detected, it returns
+    None.
+    """
+    if (ccs := get_compute_capabilities()) is not None:
+        # TODO: handle different compute capabilities; for now, take the max
+        return ccs[-1]
+    return None
+
+
+def evaluate_cuda_setup():
+    cuda_path = determine_cuda_runtime_lib_path()
+    print(f"CUDA SETUP: CUDA path found: {cuda_path}")
+    cc = get_compute_capability()
+    binary_name = "libbitsandbytes_cpu.so"
+
+    # FIXME: has_gpu is still unused
+    if not (has_gpu := bool(cc)):
+        print(
+            "WARNING: No GPU detected! Check your CUDA paths. Proceeding to load CPU-only library..."
+        )
+        return binary_name
+
+    # 7.5 is the minimum CC for cublaslt
+    has_cublaslt = cc in ["7.5", "8.0", "8.6"]
+
+    # TODO:
+    # (1) CUDA missing cases (no CUDA installed, but CUDA driver (nvidia-smi) accessible)
+    # (2) Multiple CUDA versions installed
+
+    # FIXME: cuda_home is still unused
+    cuda_home = str(Path(cuda_path).parent.parent)
+    # we use ls -l instead of nvcc to determine the cuda version
+    # since most installations will have the libcudart.so installed, but not the compiler
+    ls_output, err = execute_and_return(f"ls -l {cuda_path}")
+    major, minor, revision = (
+        ls_output.split(" ")[-1].replace("libcudart.so.", "").split(".")
+    )
+    cuda_version_string = f"{major}{minor}"
+
+    def get_binary_name():
+        "if not has_cublaslt (CC < 7.5), then we have to choose _nocublaslt.so"
+        bin_base_name = "libbitsandbytes_cuda"
+        if has_cublaslt:
+            return f"{bin_base_name}{cuda_version_string}.so"
+        else:
+            return f"{bin_base_name}_nocublaslt.so"
+
+    binary_name = get_binary_name()
+
+    return binary_name
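
A hedged sketch of how the pieces above fit together (illustrative only, not part of the diff); the GPU, CUDA version, and paths are assumptions.

    # Assume one Ampere GPU (CC 8.6) and a libcudart.so.11.3 found via LD_LIBRARY_PATH.
    from bitsandbytes.cuda_setup.main import evaluate_cuda_setup, get_compute_capability

    cc = get_compute_capability()        # e.g. "8.6", or None if no GPU is visible
    binary_name = evaluate_cuda_setup()  # e.g. "libbitsandbytes_cuda113.so";
                                         # falls back to "libbitsandbytes_cpu.so"
    # Version parsing: the last token of "ls -l <libcudart path>" looks like
    # "libcudart.so.11.3", which is split into major=11, minor=3 -> "113".
    print(cc, binary_name)
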
diff --git a/bitsandbytes/cuda_setup/paths.py b/bitsandbytes/cuda_setup/paths.py
new file mode 100644
index 0000000..c4a7465
--- /dev/null
+++ b/bitsandbytes/cuda_setup/paths.py
@@ -0,0 +1,126 @@
+from pathlib import Path
+from typing import Set, Union
+from warnings import warn
+
+from ..utils import print_stderr
+from .env_vars import get_potentially_lib_path_containing_env_vars
+
+
+CUDA_RUNTIME_LIB: str = "libcudart.so"
+
+
+def purge_unwanted_semicolon(tentative_path: Path) -> Path:
+    """
+    Special function to handle the following exception:
+    __LMOD_REF_COUNT_PATH=/sw/cuda/11.6.2/bin:2;/mmfs1/home/dettmers/git/sched/bin:1;/mmfs1/home/dettmers/data/anaconda3/bin:1;/mmfs1/home/dettmers/data/anaconda3/condabin:1;/mmfs1/home/dettmers/.local/bin:1;/mmfs1/home/dettmers/bin:1;/usr/local/bin:1;/usr/bin:1;/usr/local/sbin:1;/usr/sbin:1;/mmfs1/home/dettmers/.fzf/bin:1;/mmfs1/home/dettmers/data/local/cuda-11.4/bin:1
+    """
+    if ";" in str(tentative_path):
+        path_as_str, *_ = str(tentative_path).split(";")
+        return Path(path_as_str)
+    return tentative_path
+
+
+def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]:
+    return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path}
+
+
+def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:
+    non_existent_directories: Set[Path] = {
+        path for path in candidate_paths if not path.exists()
+    }
+
+    if non_existent_directories:
+        print_stderr(
+            "WARNING: The following directories listed in your path were found to "
+            f"be non-existent: {non_existent_directories}"
+        )
+
+    return candidate_paths - non_existent_directories
+
+
+def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]:
+    return {
+        path / CUDA_RUNTIME_LIB
+        for path in candidate_paths
+        if (path / CUDA_RUNTIME_LIB).is_file()
+    }
+
+
+def resolve_paths_list(paths_list_candidate: str) -> Set[Path]:
+    """
+    Searches a given environment variable for the CUDA runtime library,
+    i.e. `libcudart.so`.
+    """
+    return remove_non_existent_dirs(extract_candidate_paths(paths_list_candidate))
+
+
+def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]:
+    return get_cuda_runtime_lib_paths(
+        resolve_paths_list(paths_list_candidate)
+    )
+
+
+def warn_in_case_of_duplicates(results_paths: Set[Path]) -> None:
+    if len(results_paths) > 1:
+        warning_msg = (
+            f"Found duplicate {CUDA_RUNTIME_LIB} files: {results_paths}. "
+            "We'll flip a coin and try one of these, in order to fail forward.\n"
+            "Either way, this might cause trouble in the future:\n"
+            "If you get `CUDA error: invalid device function` errors, the above "
+            "might be the cause and the solution is to make sure that only one "
+            f"{CUDA_RUNTIME_LIB} is present in the paths that we search based on your env."
+        )
+        warn(warning_msg)
+
+
+def determine_cuda_runtime_lib_path() -> Union[Path, None]:
+    """
+    Searches for a CUDA installation, in the following order of priority:
+        1. active conda env
+        2. LD_LIBRARY_PATH
+        3. any other env vars, while ignoring those that
+            - are known to be unrelated (see `bnb.cuda_setup.env_vars.to_be_ignored`)
+            - don't contain the path separator `/`
+
+    If multiple libraries are found in part 3, we optimistically try one,
+    while giving a warning message.
+    """
+    candidate_env_vars = get_potentially_lib_path_containing_env_vars()
+
+    if "CONDA_PREFIX" in candidate_env_vars:
+        conda_libs_path = Path(candidate_env_vars["CONDA_PREFIX"]) / "lib"
+
+        conda_cuda_libs = find_cuda_lib_in(str(conda_libs_path))
+        warn_in_case_of_duplicates(conda_cuda_libs)
+
+        if conda_cuda_libs:
+            return next(iter(conda_cuda_libs))
+
+        warn(
+            f'{candidate_env_vars["CONDA_PREFIX"]} did not contain '
+            f'{CUDA_RUNTIME_LIB} as expected! Searching further paths...'
+        )
+
+    if "LD_LIBRARY_PATH" in candidate_env_vars:
+        lib_ld_cuda_libs = find_cuda_lib_in(candidate_env_vars["LD_LIBRARY_PATH"])
+        warn_in_case_of_duplicates(lib_ld_cuda_libs)
+
+        if lib_ld_cuda_libs:
+            return next(iter(lib_ld_cuda_libs))
+
+        warn(
+            f'{candidate_env_vars["LD_LIBRARY_PATH"]} did not contain '
+            f'{CUDA_RUNTIME_LIB} as expected! Searching further paths...'
+        )
+
+    remaining_candidate_env_vars = {
+        env_var: value for env_var, value in candidate_env_vars.items()
+        if env_var not in {"CONDA_PREFIX", "LD_LIBRARY_PATH"}
+    }
+
+    cuda_runtime_libs = set()
+    for env_var, value in remaining_candidate_env_vars.items():
+        cuda_runtime_libs.update(find_cuda_lib_in(value))
+
+    warn_in_case_of_duplicates(cuda_runtime_libs)
+
+    return next(iter(cuda_runtime_libs)) if cuda_runtime_libs else None
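
A minimal sketch of driving the search above (illustrative only, not part of the diff); the directories used here are assumptions.

    # Illustrative only; /usr/local/cuda/lib64 is just an example location.
    from bitsandbytes.cuda_setup.paths import (
        determine_cuda_runtime_lib_path,
        find_cuda_lib_in,
    )

    # Inspect one candidate value by hand ...
    print(find_cuda_lib_in("/usr/local/cuda/lib64:/opt/does/not/exist"))
    # ... or run the full search: conda env, then LD_LIBRARY_PATH, then other env vars.
    print(determine_cuda_runtime_lib_path())
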