author    | Tim Dettmers <tim.dettmers@gmail.com> | 2022-08-01 19:43:09 -0700
committer | Tim Dettmers <tim.dettmers@gmail.com> | 2022-08-01 19:43:09 -0700
commit    | 3479d02a769c1cbd6679caaf5695f382b558e36b (patch)
tree      | fcaa7aa4e7a11d3fa2e61390acacdd6627fa0e31
parent    | 8bf3e9faab6dfb04d676a5ea413530cdee09744c (diff)
Added some more docs and comments.
-rw-r--r-- | bitsandbytes/cuda_setup.py         | 59
-rw-r--r-- | tests/test_cuda_setup_evaluator.py |  3
2 files changed, 36 insertions, 26 deletions
diff --git a/bitsandbytes/cuda_setup.py b/bitsandbytes/cuda_setup.py
index 59e90e4..95f90d4 100644
--- a/bitsandbytes/cuda_setup.py
+++ b/bitsandbytes/cuda_setup.py
@@ -27,17 +27,24 @@ from .utils import print_err, warn_of_missing_prerequisite, execute_and_return
 
 
 def check_cuda_result(cuda, result_val):
+    # 3. Check for CUDA errors
     if result_val != 0:
-        # TODO: undefined name 'error_str'
+        error_str = ctypes.c_char_p()
         cuda.cuGetErrorString(result_val, ctypes.byref(error_str))
-        print("Count not initialize CUDA - failure!")
-        raise Exception("CUDA exception!")
-    return result_val
+        raise Exception(f"CUDA exception! ERROR: {error_str}")
 
 
 # taken from https://gist.github.com/f0k/63a664160d016a491b2cbea15913d549
 def get_compute_capability():
-    libnames = ("libcuda.so", "libcuda.dylib", "cuda.dll")
+    # 1. find libcuda.so library (GPU driver) (/usr/lib)
+    #    init_device -> init variables -> call function by reference
+    # 2. call extern C function to determine CC
+    #    (https://docs.nvidia.com/cuda/cuda-driver-api/group__CUDA__DEVICE__DEPRECATED.html)
+    # 3. Check for CUDA errors
+    #    https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
+
+    # 1. find libcuda.so library (GPU driver) (/usr/lib)
+    libnames = ("libcuda.so",)
     for libname in libnames:
         try:
             cuda = ctypes.CDLL(libname)
@@ -54,31 +61,23 @@ def get_compute_capability():
     result = ctypes.c_int()
     device = ctypes.c_int()
-    # TODO: local variable 'context' is assigned to but never used
-    context = ctypes.c_void_p()
-    # TODO: local variable 'error_str' is assigned to but never used
-    error_str = ctypes.c_char_p()
 
-    result = check_cuda_result(cuda, cuda.cuInit(0))
+    check_cuda_result(cuda, cuda.cuInit(0))
 
-    result = check_cuda_result(cuda, cuda.cuDeviceGetCount(ctypes.byref(nGpus)))
+    check_cuda_result(cuda, cuda.cuDeviceGetCount(ctypes.byref(nGpus)))
 
     ccs = []
     for i in range(nGpus.value):
-        result = check_cuda_result(
-            cuda, cuda.cuDeviceGet(ctypes.byref(device), i)
-        )
-        result = check_cuda_result(
-            cuda,
-            cuda.cuDeviceComputeCapability(
-                ctypes.byref(cc_major), ctypes.byref(cc_minor), device
-            ),
-        )
+        check_cuda_result(cuda, cuda.cuDeviceGet(ctypes.byref(device), i))
+        ref_major = ctypes(cc_major)
+        ref_minor = ctypes(cc_minor)
+        # 2. call extern C function to determine CC
+        check_cuda_result(cuda, cuda.cuDeviceComputeCapability(ref_major, ref_minor, device))
         ccs.append(f"{cc_major.value}.{cc_minor.value}")
 
     # TODO: handle different compute capabilities; for now, take the max
     ccs.sort()
-    # return ccs[-1]
-    return ccs
+    max_cc = ccs[-1]
+    return max_cc
 
 
 CUDA_RUNTIME_LIB: str = "libcudart.so"
@@ -89,6 +88,7 @@ def tokenize_paths(paths: str) -> Set[Path]:
 
 
 def resolve_env_variable(env_var):
+    '''Searches a given envirionmental library or path for the CUDA runtime library (libcudart.so)'''
     paths: Set[Path] = tokenize_paths(env_var)
 
     non_existent_directories: Set[Path] = {
@@ -112,13 +112,16 @@ def resolve_env_variable(env_var):
             f"Found duplicate {CUDA_RUNTIME_LIB} files: {cuda_runtime_libs}.."
         )
         raise FileNotFoundError(err_msg)
-    elif len(cuda_runtime_libs) == 0: return None
+    elif len(cuda_runtime_libs) == 0: return None # this is not en error, since other envs can contain CUDA
     else: return next(iter(cuda_runtime_libs)) # for now just return the first
 
 
 def get_cuda_runtime_lib_path() -> Union[Path, None]:
-    """# TODO: add doc-string"""
+    '''Searches conda installation and environmental paths for a cuda installations.'''
     cuda_runtime_libs = []
+    # CONDA_PREFIX/lib is the default location for a default conda
+    # install of pytorch. This location takes priortiy over all
+    # other defined variables
     if 'CONDA_PREFIX' in os.environ:
         lib_conda_path = f'{os.environ["CONDA_PREFIX"]}/lib/'
         print(lib_conda_path)
@@ -126,6 +129,8 @@ def get_cuda_runtime_lib_path() -> Union[Path, None]:
 
     if len(cuda_runtime_libs) == 1: return cuda_runtime_libs[0]
 
+    # if CONDA_PREFIX does not have the library, search the environment
+    # (in particualr LD_LIBRARY PATH)
     for var in os.environ:
         cuda_runtime_libs.append(resolve_env_variable(var))
 
@@ -146,17 +151,19 @@ def evaluate_cuda_setup():
     if not (has_gpu := bool(cc)):
         print(
-            "WARNING: No GPU detected! Check our CUDA paths. Processing to load CPU-only library..."
+            "WARNING: No GPU detected! Check your CUDA paths. Processing to load CPU-only library..."
         )
         return binary_name
 
     has_cublaslt = cc in ["7.5", "8.0", "8.6"]
 
     # TODO:
-    # (1) Model missing cases (no CUDA installed by CUDA driver (nvidia-smi accessible)
+    # (1) CUDA missing cases (no CUDA installed by CUDA driver (nvidia-smi accessible)
     # (2) Multiple CUDA versions installed
 
     cuda_home = str(Path(cuda_path).parent.parent)
+    # we use ls -l instead of nvcc to determine the cuda version
+    # since most installations will have the libcudart.so installed, but not the compiler
     ls_output, err = execute_and_return(f"ls -l {cuda_path}")
     major, minor, revision = ls_output.split(' ')[-1].replace('libcudart.so.', '').split('.')
     cuda_version_string = f"{major}{minor}"
 
diff --git a/tests/test_cuda_setup_evaluator.py b/tests/test_cuda_setup_evaluator.py
index 5da190d..119e21a 100644
--- a/tests/test_cuda_setup_evaluator.py
+++ b/tests/test_cuda_setup_evaluator.py
@@ -92,6 +92,9 @@ def test_get_cuda_runtime_lib_path__non_existent_dir(capsys, tmp_path):
 
 def test_full_system():
     ## this only tests the cuda version and not compute capability
+
+    # if CONDA_PREFIX exists, it has priority before all other env variables
+    # but it does not contain the library directly, so we need to look at the a sub-folder
     version = ''
     if 'CONDA_PREFIX' in os.environ:
         ls_output, err = bnb.utils.execute_and_return(f'ls -l {os.environ["CONDA_PREFIX"]}/lib/libcudart.so')
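One detail worth flagging in the rewritten loop body: `ref_major = ctypes(cc_major)` calls the `ctypes` module itself, which is not callable, so that line raises a `TypeError` at runtime; the code removed by this patch passed `ctypes.byref(cc_major)` instead. Below is a minimal, self-contained sketch of the same compute-capability lookup with the `byref` calls kept. The function name and structure are illustrative, not the exact upstream code, and it assumes `libcuda.so` (the GPU driver library) is loadable.

```python
import ctypes


def detect_max_compute_capability():
    # Sketch only: mirrors the patched get_compute_capability(), but keeps
    # ctypes.byref() where the committed hunk writes ctypes(cc_major).
    cuda = ctypes.CDLL("libcuda.so")  # 1. GPU driver library, usually under /usr/lib

    def check_cuda_result(result_val):
        # 3. check for CUDA driver errors
        if result_val != 0:
            error_str = ctypes.c_char_p()
            cuda.cuGetErrorString(result_val, ctypes.byref(error_str))
            raise Exception(f"CUDA exception! ERROR: {error_str.value}")

    n_gpus = ctypes.c_int()
    cc_major, cc_minor = ctypes.c_int(), ctypes.c_int()
    device = ctypes.c_int()

    check_cuda_result(cuda.cuInit(0))
    check_cuda_result(cuda.cuDeviceGetCount(ctypes.byref(n_gpus)))

    ccs = []
    for i in range(n_gpus.value):
        check_cuda_result(cuda.cuDeviceGet(ctypes.byref(device), i))
        # 2. call the driver API (a deprecated entry point, per the NVIDIA link
        #    in the patch) to read the compute capability by reference
        check_cuda_result(
            cuda.cuDeviceComputeCapability(
                ctypes.byref(cc_major), ctypes.byref(cc_minor), device
            )
        )
        ccs.append(f"{cc_major.value}.{cc_minor.value}")

    # like the patch, take the highest CC when several GPUs are present
    ccs.sort()
    return ccs[-1] if ccs else None
```

On an RTX 30-series machine this returns `"8.6"`, one of the values `evaluate_cuda_setup()` checks when deciding whether the cuBLASLt path can be used.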
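The comments added to `get_cuda_runtime_lib_path()` describe a two-stage search: `CONDA_PREFIX/lib` first (the default location for a conda install of PyTorch), then the remaining environment variables, most importantly `LD_LIBRARY_PATH`. The sketch below condenses that order into one function; it glosses over the duplicate- and missing-directory handling in `resolve_env_variable()`, and the helper name is made up for illustration.

```python
import os
from pathlib import Path

CUDA_RUNTIME_LIB = "libcudart.so"


def find_cuda_runtime_lib():
    # 1. CONDA_PREFIX/lib takes priority: a conda install of pytorch
    #    ships libcudart.so there
    if "CONDA_PREFIX" in os.environ:
        conda_lib = Path(os.environ["CONDA_PREFIX"]) / "lib" / CUDA_RUNTIME_LIB
        if conda_lib.exists():
            return conda_lib

    # 2. otherwise scan every environment variable as a candidate path list
    #    (LD_LIBRARY_PATH is the one that usually matters)
    for value in os.environ.values():
        for part in value.split(os.pathsep):
            candidate = Path(part) / CUDA_RUNTIME_LIB
            if candidate.exists():
                return candidate

    # None is not an error here; callers fall back to other detection paths
    return None
```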
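Finally, the `evaluate_cuda_setup()` hunk explains why the CUDA version is read by listing `libcudart.so` rather than running `nvcc`: most installs ship the runtime library but not the compiler. The parsing roughly works like the sketch below, assuming `cuda_path` points at a `libcudart.so` symlink whose `ls -l` target carries the version (e.g. `libcudart.so.11.7.60`); the default path is hypothetical, and `subprocess` stands in for the repo's `execute_and_return` helper.

```python
import subprocess


def cuda_version_from_libcudart(cuda_path="/usr/local/cuda/lib64/libcudart.so"):
    # `ls -l` on a symlink ends with "libcudart.so -> libcudart.so.11.7.60",
    # so the last whitespace-separated token is the versioned target
    ls_output = subprocess.check_output(["ls", "-l", cuda_path], text=True)
    target = ls_output.split(" ")[-1].strip()  # e.g. "libcudart.so.11.7.60"
    major, minor, *_ = target.replace("libcudart.so.", "").split(".")
    return f"{major}{minor}"  # e.g. "117", mirrors the patch's cuda_version_string
```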