author    | Titus von Koeller <titus@vonkoeller.com> | 2022-08-02 21:26:50 -0700
committer | Titus von Koeller <titus@vonkoeller.com> | 2022-08-02 21:26:50 -0700
commit    | 59a615b3869eb8488a748e2aa51224a5e3d366bb (patch)
tree      | 5f348d63ba837d08bbc5df703a748c0ae6e34ddd
parent    | 3809236428e704f9a7e22232701a651aafa5ca1b (diff)
factored cuda_setup.main out into smaller modules and functions
-rw-r--r-- | bitsandbytes/__init__.py            |   2
-rw-r--r-- | bitsandbytes/__main__.py            |  97
-rw-r--r-- | bitsandbytes/cextension.py          |  21
-rw-r--r-- | bitsandbytes/cuda_setup/env_vars.py |  51
-rw-r--r-- | bitsandbytes/cuda_setup/main.py     | 142
-rw-r--r-- | bitsandbytes/cuda_setup/paths.py    | 126
-rw-r--r-- | bitsandbytes/utils.py               |   8
-rw-r--r-- | quicktest.py                        | 112
-rw-r--r-- | tests/test_cuda_setup_evaluator.py  |  67
9 files changed, 385 insertions, 241 deletions
diff --git a/bitsandbytes/__init__.py b/bitsandbytes/__init__.py
index 76a5b48..7901f96 100644
--- a/bitsandbytes/__init__.py
+++ b/bitsandbytes/__init__.py
@@ -22,3 +22,5 @@ __pdoc__ = {
     "optim.optimizer.Optimizer8bit": False,
     "optim.optimizer.MockArgs": False,
 }
+
+PACKAGE_GITHUB_URL = "https://github.com/TimDettmers/bitsandbytes"
diff --git a/bitsandbytes/__main__.py b/bitsandbytes/__main__.py
index a91e942..7f3d24c 100644
--- a/bitsandbytes/__main__.py
+++ b/bitsandbytes/__main__.py
@@ -1,3 +1,96 @@
-from bitsandbytes.debug_cli import cli
+# from bitsandbytes.debug_cli import cli
 
-cli()
+# cli()
+import os
+import sys
+import torch
+
+
+HEADER_WIDTH = 60
+
+
+def print_header(
+    txt: str, width: int = HEADER_WIDTH, filler: str = "+"
+) -> None:
+    txt = f" {txt} " if txt else ""
+    print(txt.center(width, filler))
+
+
+def print_debug_info() -> None:
+    print(
+        "\nAbove we output some debug information. Please provide this info when "
+        f"creating an issue via {PACKAGE_GITHUB_URL}/issues/new/choose ...\n"
+    )
+
+
+print_header("")
+print_header("DEBUG INFORMATION")
+print_header("")
+print()
+
+
+from . import COMPILED_WITH_CUDA, PACKAGE_GITHUB_URL
+from .cuda_setup.main import get_compute_capabilities
+from .cuda_setup.env_vars import to_be_ignored
+from .utils import print_stderr
+
+
+print_header("POTENTIALLY LIBRARY-PATH-LIKE ENV VARS")
+for k, v in os.environ.items():
+    if "/" in v and not to_be_ignored(k, v):
+        print(f"'{k}': '{v}'")
+print_header("")
+
+print(
+    "\nWARNING: Please be sure to sanitize sensitive info from any such env vars!\n"
+)
+
+print_header("OTHER")
+print(f"{COMPILED_WITH_CUDA = }")
+print(f"COMPUTE_CAPABILITIES_PER_GPU = {get_compute_capabilities()}")
+print_header("")
+print_header("DEBUG INFO END")
+print_header("")
+print(
+    """
+Running a quick check that:
+    + library is importable
+    + CUDA function is callable
+"""
+)
+
+try:
+    from bitsandbytes.optim import Adam
+
+    p = torch.nn.Parameter(torch.rand(10, 10).cuda())
+    a = torch.rand(10, 10).cuda()
+
+    p1 = p.data.sum().item()
+
+    adam = Adam([p])
+
+    out = a * p
+    loss = out.sum()
+    loss.backward()
+    adam.step()
+
+    p2 = p.data.sum().item()
+
+    assert p1 != p2
+    print("SUCCESS!")
+    print("Installation was successful!")
+    sys.exit(0)
+
+except ImportError:
+    print()
+    print_stderr(
+        f"WARNING: {__package__} is currently running as CPU-only!\n"
+        "Therefore, 8-bit optimizers and GPU quantization are unavailable.\n\n"
+        "If you think this is a mistake, please open an issue!"
+    )
+    print_debug_info()
+    sys.exit(0)
+except Exception as e:
+    print(e)
+    print_debug_info()
+    sys.exit(1)
diff --git a/bitsandbytes/cextension.py b/bitsandbytes/cextension.py
index f5b97fd..66c79d8 100644
--- a/bitsandbytes/cextension.py
+++ b/bitsandbytes/cextension.py
@@ -1,8 +1,8 @@
 import ctypes as ct
-import os
+from pathlib import Path
 from warnings import warn
 
-from bitsandbytes.cuda_setup.main import evaluate_cuda_setup
+from .cuda_setup.main import evaluate_cuda_setup
 
 
 class CUDALibrary_Singleton(object):
@@ -12,18 +12,17 @@ class CUDALibrary_Singleton(object):
         raise RuntimeError("Call get_instance() instead")
 
     def initialize(self):
-        self.context = {}
         binary_name = evaluate_cuda_setup()
-        if not os.path.exists(os.path.dirname(__file__) + f"/{binary_name}"):
+        package_dir = Path(__file__).parent
+        binary_path = package_dir / binary_name
+
+        if not binary_path.exists():
             print(f"TODO: compile library for specific version: {binary_name}")
-            print("defaulting to libbitsandbytes.so")
-            self.lib = ct.cdll.LoadLibrary(
-                os.path.dirname(__file__) + "/libbitsandbytes.so"
-            )
+            legacy_binary_name = "libbitsandbytes.so"
+            print(f"Defaulting to {legacy_binary_name}...")
+            # ctypes expects a str, not a Path, hence the str() wrapper
+            self.lib = ct.cdll.LoadLibrary(str(package_dir / legacy_binary_name))
         else:
-            self.lib = ct.cdll.LoadLibrary(
-                os.path.dirname(__file__) + f"/{binary_name}"
-            )
+            self.lib = ct.cdll.LoadLibrary(str(binary_path))
 
     @classmethod
     def get_instance(cls):
diff --git a/bitsandbytes/cuda_setup/env_vars.py b/bitsandbytes/cuda_setup/env_vars.py
new file mode 100644
index 0000000..536a7d8
--- /dev/null
+++ b/bitsandbytes/cuda_setup/env_vars.py
@@ -0,0 +1,51 @@
+import os
+from typing import Dict
+
+
+def to_be_ignored(env_var: str, value: str) -> bool:
+    ignorable = {
+        "PWD",  # PWD: this is how the shell keeps track of the current working dir
+        "OLDPWD",
+        "SSH_AUTH_SOCK",  # SSH stuff, therefore unrelated
+        "SSH_TTY",
+        "HOME",  # Linux shell default
+        "TMUX",  # Terminal Multiplexer
+        "XDG_DATA_DIRS",  # XDG: Desktop environment stuff
+        "XDG_RUNTIME_DIR",
+        "MAIL",  # mail spool location, unrelated to libraries
+        "SHELL",  # binary for currently invoked shell
+        "DBUS_SESSION_BUS_ADDRESS",  # D-Bus IPC address, unrelated to libraries
+        "PATH",  # this is for finding binaries, not libraries
+        "LESSOPEN",  # related to the `less` command
+        "LESSCLOSE",
+        "_",  # current Python interpreter
+    }
+    return env_var in ignorable
+
+
+def might_contain_a_path(candidate: str) -> bool:
+    return "/" in candidate
+
+
+def is_active_conda_env(env_var: str) -> bool:
+    return "CONDA_PREFIX" == env_var
+
+
+def is_other_conda_env_var(env_var: str) -> bool:
+    return "CONDA" in env_var
+
+
+def is_relevant_candidate_env_var(env_var: str, value: str) -> bool:
+    return is_active_conda_env(env_var) or (
+        might_contain_a_path(value) and not
+        is_other_conda_env_var(env_var) and not
+        to_be_ignored(env_var, value)
+    )
+
+
+def get_potentially_lib_path_containing_env_vars() -> Dict[str, str]:
+    return {
+        env_var: value
+        for env_var, value in os.environ.items()
+        if is_relevant_candidate_env_var(env_var, value)
+    }
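The new `env_vars` module is a pure filter over `os.environ`: `CONDA_PREFIX` always qualifies, everything else must look path-like (contain `/`) and be neither another `CONDA*` variable nor on the ignore list. A quick sketch of how the pieces compose (function names are from the diff above; the sample environment values are invented for illustration):

```python
# Sketch: exercising bitsandbytes.cuda_setup.env_vars as introduced above.
# The environment values below are assumptions, not real machine output.
import os

from bitsandbytes.cuda_setup.env_vars import (
    get_potentially_lib_path_containing_env_vars,
    is_relevant_candidate_env_var,
)

os.environ["LD_LIBRARY_PATH"] = "/usr/local/cuda-11.1/lib64"    # kept: contains "/"
os.environ["CONDA_PREFIX"] = "/home/user/miniconda/envs/8-bit"  # kept: active conda env
os.environ["CONDA_EXE"] = "/home/user/miniconda/bin/conda"      # dropped: other CONDA var

assert is_relevant_candidate_env_var("CONDA_PREFIX", os.environ["CONDA_PREFIX"])
assert not is_relevant_candidate_env_var("CONDA_EXE", os.environ["CONDA_EXE"])

candidates = get_potentially_lib_path_containing_env_vars()
assert "LD_LIBRARY_PATH" in candidates and "CONDA_EXE" not in candidates
```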
diff --git a/bitsandbytes/cuda_setup/main.py b/bitsandbytes/cuda_setup/main.py
index 6d70c92..e96ac70 100644
--- a/bitsandbytes/cuda_setup/main.py
+++ b/bitsandbytes/cuda_setup/main.py
@@ -8,8 +8,6 @@ extract factors the build is dependent on:
   - CuBLAS-LT: full-build 8-bit optimizer
   - no CuBLAS-LT: no 8-bit matrix multiplication (`nomatmul`)
 
-alle Binaries packagen
-
 evaluation:
     - if paths faulty, return meaningful error
     - else:
@@ -19,11 +17,10 @@ evaluation:
 """
 
 import ctypes
-import os
 from pathlib import Path
-from typing import Set, Union
 
-from ..utils import print_err, warn_of_missing_prerequisite, execute_and_return
+from ..utils import execute_and_return
+from .paths import determine_cuda_runtime_lib_path
 
 
 def check_cuda_result(cuda, result_val):
@@ -34,26 +31,23 @@ def check_cuda_result(cuda, result_val):
         raise Exception(f"CUDA exception! ERROR: {error_str}")
 
 
-# taken from https://gist.github.com/f0k/63a664160d016a491b2cbea15913d549
-def get_compute_capability():
-    # 1. find libcuda.so library (GPU driver) (/usr/lib)
-    #    init_device -> init variables -> call function by reference
-    # 2. call extern C function to determine CC
-    #    (https://docs.nvidia.com/cuda/cuda-driver-api/group__CUDA__DEVICE__DEPRECATED.html)
-    # 3. Check for CUDA errors
-    #    https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
+def get_compute_capabilities():
+    """
+    1. find libcuda.so library (GPU driver) (/usr/lib)
+       init_device -> init variables -> call function by reference
+    2. call extern C function to determine CC
+       (https://docs.nvidia.com/cuda/cuda-driver-api/group__CUDA__DEVICE__DEPRECATED.html)
+    3. Check for CUDA errors
+       https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
+    # bits taken from https://gist.github.com/f0k/63a664160d016a491b2cbea15913d549
+    """
     # 1. find libcuda.so library (GPU driver) (/usr/lib)
-    libnames = ("libcuda.so",)
-    for libname in libnames:
-        try:
-            cuda = ctypes.CDLL(libname)
-        except OSError:
-            continue
-        else:
-            break
-    else:
-        raise OSError("could not load any of: " + " ".join(libnames))
+    try:
+        cuda = ctypes.CDLL("libcuda.so")
+    except OSError:
+        # TODO: shouldn't we error or at least warn here?
+        return None
 
     nGpus = ctypes.c_int()
     cc_major = ctypes.c_int()
@@ -70,104 +64,64 @@
         check_cuda_result(cuda, cuda.cuDeviceGet(ctypes.byref(device), i))
         ref_major = ctypes.byref(cc_major)
         ref_minor = ctypes.byref(cc_minor)
-        # 2. call extern C function to determine CC
-        check_cuda_result(cuda, cuda.cuDeviceComputeCapability(ref_major, ref_minor, device))
-        ccs.append(f"{cc_major.value}.{cc_minor.value}")
-
-    # TODO: handle different compute capabilities; for now, take the max
-    ccs.sort()
-    max_cc = ccs[-1]
-    return max_cc
-
-
-CUDA_RUNTIME_LIB: str = "libcudart.so"
-
-
-def tokenize_paths(paths: str) -> Set[Path]:
-    return {Path(ld_path) for ld_path in paths.split(":") if ld_path}
-
-
-def resolve_env_variable(env_var):
-    '''Searches a given envirionmental library or path for the CUDA runtime library (libcudart.so)'''
-    paths: Set[Path] = tokenize_paths(env_var)
-
-    non_existent_directories: Set[Path] = {
-        path for path in paths if not path.exists()
-    }
-
-    if non_existent_directories:
-        print_err(
-            "WARNING: The following directories listed your path were found to "
-            f"be non-existent: {non_existent_directories}"
+        # 2. call extern C function to determine CC
+        check_cuda_result(
+            cuda, cuda.cuDeviceComputeCapability(ref_major, ref_minor, device)
         )
-
-    cuda_runtime_libs: Set[Path] = {
-        path / CUDA_RUNTIME_LIB
-        for path in paths
-        if (path / CUDA_RUNTIME_LIB).is_file()
-    } - non_existent_directories
+        ccs.append(f"{cc_major.value}.{cc_minor.value}")
 
-    if len(cuda_runtime_libs) > 1:
-        err_msg = (
-            f"Found duplicate {CUDA_RUNTIME_LIB} files: {cuda_runtime_libs}.."
-        )
-        raise FileNotFoundError(err_msg)
-    elif len(cuda_runtime_libs) == 0: return None # this is not en error, since other envs can contain CUDA
-    else: return next(iter(cuda_runtime_libs)) # for now just return the first
-
-
-def get_cuda_runtime_lib_path() -> Union[Path, None]:
-    '''Searches conda installation and environmental paths for a cuda installations.'''
-
-    cuda_runtime_libs = []
-    # CONDA_PREFIX/lib is the default location for a default conda
-    # install of pytorch. This location takes priortiy over all
-    # other defined variables
-    if 'CONDA_PREFIX' in os.environ:
-        lib_conda_path = f'{os.environ["CONDA_PREFIX"]}/lib/'
-        print(lib_conda_path)
-        cuda_runtime_libs.append(resolve_env_variable(lib_conda_path))
-
-    if len(cuda_runtime_libs) == 1: return cuda_runtime_libs[0]
-
-    # if CONDA_PREFIX does not have the library, search the environment
-    # (in particualr LD_LIBRARY PATH)
-    for var in os.environ:
-        cuda_runtime_libs.append(resolve_env_variable(var))
-
-    if len(cuda_runtime_libs) < 1:
-        err_msg = (
-            f"Did not find {CUDA_RUNTIME_LIB} files: {cuda_runtime_libs}.."
-        )
-        raise FileNotFoundError(err_msg)
-
-    return cuda_runtime_libs.pop()
+    ccs.sort()
+    return ccs  # note: `return ccs.sort()` would return None
+
+
+# def get_compute_capability() -> Union[List[str, ...], None]:  # FIXME: error
+def get_compute_capability():
+    """
+    Extracts the highest compute capability from all available GPUs, as compute
+    capabilities are downwards compatible. If no GPUs are detected, it returns
+    None.
+    """
+    if (ccs := get_compute_capabilities()) is not None:
+        # TODO: handle different compute capabilities; for now, take the max
+        return ccs[-1]
+    return None
 
 
 def evaluate_cuda_setup():
-    cuda_path = get_cuda_runtime_lib_path()
-    print(f'CUDA SETUP: CUDA path found: {cuda_path}')
+    cuda_path = determine_cuda_runtime_lib_path()
+    print(f"CUDA SETUP: CUDA path found: {cuda_path}")
     cc = get_compute_capability()
     binary_name = "libbitsandbytes_cpu.so"
 
+    # FIXME: has_gpu is still unused
     if not (has_gpu := bool(cc)):
        print(
           "WARNING: No GPU detected! Check your CUDA paths. Proceeding to load CPU-only library..."
        )
        return binary_name
 
+    # 7.5 is the minimum CC for cublaslt
    has_cublaslt = cc in ["7.5", "8.0", "8.6"]
 
    # TODO:
    # (1) CUDA missing cases (no CUDA installed by CUDA driver (nvidia-smi accessible)
    # (2) Multiple CUDA versions installed
 
+    # FIXME: cuda_home is still unused
    cuda_home = str(Path(cuda_path).parent.parent)
 
    # we use ls -l instead of nvcc to determine the cuda version
    # since most installations will have the libcudart.so installed, but not the compiler
    ls_output, err = execute_and_return(f"ls -l {cuda_path}")
-    major, minor, revision = ls_output.split(' ')[-1].replace('libcudart.so.', '').split('.')
+    major, minor, revision = (
+        ls_output.split(" ")[-1].replace("libcudart.so.", "").split(".")
+    )
    cuda_version_string = f"{major}{minor}"
 
-    binary_name = f'libbitsandbytes_cuda{cuda_version_string}{("" if has_cublaslt else "_nocublaslt")}.so'
+    def get_binary_name():
+        "if not has_cublaslt (CC < 7.5), then we have to choose _nocublaslt.so"
+        bin_base_name = "libbitsandbytes_cuda"
+        if has_cublaslt:
+            return f"{bin_base_name}{cuda_version_string}.so"
+        else:
+            return f"{bin_base_name}{cuda_version_string}_nocublaslt.so"
+
+    # without this call, the CPU default from above would always be returned
+    binary_name = get_binary_name()
 
    return binary_name
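Taken together, `evaluate_cuda_setup` reduces to a small decision table: no compute capability means the CPU-only library, CC below 7.5 means the `_nocublaslt` build, and otherwise the full build, with the CUDA version string spliced into the file name. A standalone sketch of that selection (the `cc` and `cuda_version_string` inputs are assumed here; the real code probes them via `libcuda.so` and `ls -l` on the runtime path):

```python
from typing import Optional

# Standalone sketch of the binary-name decision in cuda_setup/main.py.
def pick_binary_name(cc: Optional[str], cuda_version_string: str) -> str:
    if not cc:  # no GPU detected -> fall back to the CPU-only library
        return "libbitsandbytes_cpu.so"
    has_cublaslt = cc in ["7.5", "8.0", "8.6"]  # 7.5 is the cublaslt minimum
    suffix = "" if has_cublaslt else "_nocublaslt"
    return f"libbitsandbytes_cuda{cuda_version_string}{suffix}.so"

assert pick_binary_name(None, "111") == "libbitsandbytes_cpu.so"
assert pick_binary_name("7.0", "102") == "libbitsandbytes_cuda102_nocublaslt.so"
assert pick_binary_name("8.0", "111") == "libbitsandbytes_cuda111.so"
```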
" + "We'll flip a coin and try one of these, in order to fail forward.\n" + "Either way, this might cause trouble in the future:\n" + "If you get `CUDA error: invalid device function` errors, the above " + "might be the cause and the solution is to make sure only one " + f"{CUDA_RUNTIME_LIB} in the paths that we search based on your env." + ) + warn(warning_msg) + + +def determine_cuda_runtime_lib_path() -> Union[Path, None]: + """ + Searches for a cuda installations, in the following order of priority: + 1. active conda env + 2. LD_LIBRARY_PATH + 3. any other env vars, while ignoring those that + - are known to be unrelated (see `bnb.cuda_setup.env_vars.to_be_ignored`) + - don't contain the path separator `/` + + If multiple libraries are found in part 3, we optimistically try one, + while giving a warning message. + """ + candidate_env_vars = get_potentially_lib_path_containing_env_vars() + + if "CONDA_PREFIX" in candidate_env_vars: + conda_libs_path = Path(candidate_env_vars["CONDA_PREFIX"]) / "lib" + + conda_cuda_libs = find_cuda_lib_in(str(conda_libs_path)) + warn_in_case_of_duplicates(conda_cuda_libs) + + if conda_cuda_libs: + return next(iter(conda_cuda_libs)) + + warn( + f'{candidate_env_vars["CONDA_PREFIX"]} did not contain ' + f'{CUDA_RUNTIME_LIB} as expected! Searching further paths...' + ) + + if "LD_LIBRARY_PATH" in candidate_env_vars: + lib_ld_cuda_libs = find_cuda_lib_in(candidate_env_vars["LD_LIBRARY_PATH"]) + + if lib_ld_cuda_libs: + return next(iter(lib_ld_cuda_libs)) + warn_in_case_of_duplicates(lib_ld_cuda_libs) + + warn( + f'{candidate_env_vars["LD_LIBRARY_PATH"]} did not contain ' + f'{CUDA_RUNTIME_LIB} as expected! Searching further paths...' + ) + + remaining_candidate_env_vars = { + env_var: value for env_var, value in candidate_env_vars.items() + if env_var not in {"CONDA_PREFIX", "LD_LIBRARY_PATH"} + } + + cuda_runtime_libs = set() + for env_var, value in remaining_candidate_env_vars: + cuda_runtime_libs.update(find_cuda_lib_in(value)) + + warn_in_case_of_duplicates(cuda_runtime_libs) + + return next(iter(cuda_runtime_libs)) if cuda_runtime_libs else set() diff --git a/bitsandbytes/utils.py b/bitsandbytes/utils.py index e1d9460..4256a87 100644 --- a/bitsandbytes/utils.py +++ b/bitsandbytes/utils.py @@ -1,9 +1,9 @@ -import sys import shlex import subprocess - +import sys from typing import Tuple + def execute_and_return(command_string: str) -> Tuple[str, str]: def _decode(subprocess_err_out_tuple): return tuple( @@ -24,9 +24,9 @@ def execute_and_return(command_string: str) -> Tuple[str, str]: return std_out, std_err -def print_err(s: str) -> None: +def print_stderr(s: str) -> None: print(s, file=sys.stderr) def warn_of_missing_prerequisite(s: str) -> None: - print_err("WARNING, missing pre-requisite: " + s) + print_stderr("WARNING, missing pre-requisite: " + s) diff --git a/quicktest.py b/quicktest.py deleted file mode 100644 index 0fcda64..0000000 --- a/quicktest.py +++ /dev/null @@ -1,112 +0,0 @@ -from itertools import product - -import torch - -import bitsandbytes as bnb -import bitsandbytes.functional as F - - -def test_igemmlt(dim1, dim2, dim3, dim4, dims, ldb): - k = 25 - for i in range(k): - if dims == 2: - A = torch.randint(-128, 127, size=(dim1, dim3), device="cuda").to( - torch.int8 - ) - elif dims == 3: - A = torch.randint( - -128, 127, size=(dim1, dim2, dim3), device="cuda" - ).to(torch.int8) - B = torch.randint(-128, 127, size=(dim4, dim3), device="cuda").to( - torch.int8 - ) - C1 = torch.matmul(A.float(), B.t().float()) - - A2, SA = F.transform(A, 
"col32") - B2, SB = F.transform(B, "colx") - if dims == 2: - C2, SC = F.transform( - torch.zeros( - A.shape[0], B.shape[0], dtype=torch.int32, device="cuda" - ), - "col32", - ) - else: - C2, SC = F.transform( - torch.zeros( - A.shape[0], - A.shape[1], - B.shape[0], - dtype=torch.int32, - device="cuda", - ), - "col32", - ) - F.igemmlt(A2, B2, C2, SA, SB, SC) - C3, S = F.transform(C2, "row", state=SC) - # torch.testing.assert_allclose(C1, C3.float()) - # print(C1) - # print(C2) - # print(C3) - allclose = torch.allclose(C1, C3.float()) - if allclose: - print(C1) - print(C2) - print(C3) - - ## transposed - # A = torch.randint(-128, 127, size=(dim4, dim3), device='cuda').to(torch.int8) - # if dims == 2: - # B = torch.randint(-128, 127, size=(dim1, dim3), device='cuda').to(torch.int8) - # C1 = torch.matmul(A.float(), B.float().t()) - # elif dims == 3: - # B = torch.randint(-128, 127, size=(dim1, dim2, dim3), device='cuda').to(torch.int8) - # C1 = torch.matmul(B.float(), A.t().float()) - # C1 = C1.permute([2, 0, 1]) - - # A2, SA = F.transform(A, 'col32') - # B2, SB = F.transform(B, 'colx') - # if dims == 2: - # C2, SC = F.transform(torch.zeros(A.shape[0], B.shape[0], dtype=torch.int32, device='cuda'), 'col32') - # else: - # C2 = torch.zeros(A.shape[0], B.shape[0], B.shape[1], dtype=torch.int32, device='cuda') - # state = (C2.shape, 'row', A.shape[0]) - # C2, SC = F.transform(C2, 'col32', state=state) - # F.igemmlt(A2, B2, C2, SA, SB, SC) - # C3, S = F.transform(C2, 'row', state=SC, ld=[0]) - # torch.testing.assert_allclose(C1, C3.float()) - - ## weight update - # if dims == 3: - # A = torch.randint(-128, 127, size=(dim1, dim2, dim3), device='cuda').to(torch.int8) - # B = torch.randint(-128, 127, size=(dim1, dim2, dim4), device='cuda').to(torch.int8) - # C1 = torch.matmul(B.view(-1, B.shape[-1]).t().float(), A.view(-1, A.shape[-1]).float()) - - # A2, SA = F.transform(A.view(-1, A.shape[-1]).t().contiguous(), 'colx') - # B2, SB = F.transform(B.view(-1, B.shape[-1]).t().contiguous(), 'col32') - # C2 = torch.zeros(B.shape[-1], A.shape[-1], dtype=torch.int32, device='cuda') - # C2, SC = F.transform(C2, 'col32') - # F.igemmlt(B2, A2, C2, SB, SA, SC) - # C3, S = F.transform(C2, 'row', state=SC) - # torch.testing.assert_allclose(C1, C3.float()) - - -dims = (2, 3) -ldb = [0] - -n = 2 -dim1 = torch.randint(1, 256, size=(n,)).tolist() -dim2 = torch.randint(32, 512, size=(n,)).tolist() -dim3 = torch.randint(32, 1024, size=(n,)).tolist() -dim4 = torch.randint(32, 1024, size=(n,)).tolist() -values = list(product(dim1, dim2, dim3, dim4, dims, ldb)) - -for ldb in range(32, 4096, 32): - # for ldb in [None]: - val = test_igemmlt(2, 2, 2, 2, 2, ldb) - if val: - print(val, ldb) - else: - print("nope", ldb) -# for val in values: -# test_igemmlt(*val) diff --git a/tests/test_cuda_setup_evaluator.py b/tests/test_cuda_setup_evaluator.py index 119e21a..3d34c29 100644 --- a/tests/test_cuda_setup_evaluator.py +++ b/tests/test_cuda_setup_evaluator.py @@ -7,10 +7,38 @@ from typing import List, NamedTuple from bitsandbytes.cuda_setup import ( CUDA_RUNTIME_LIB, evaluate_cuda_setup, - get_cuda_runtime_lib_path, - tokenize_paths, + determine_cuda_runtime_lib_path, + extract_candidate_paths, ) +""" +'LD_LIBRARY_PATH': ':/mnt/D/titus/local/cuda-11.1/lib64/' +'CONDA_EXE': '/mnt/D/titus/miniconda/bin/conda' +'LESSCLOSE': '/usr/bin/lesspipe %s %s' +'OLDPWD': '/mnt/D/titus/src' +'CONDA_PREFIX': '/mnt/D/titus/miniconda/envs/8-bit' +'SSH_AUTH_SOCK': '/mnt/D/titus/.ssh/ssh-agent.tim-uw.sock' +'CONDA_PREFIX_1': '/mnt/D/titus/miniconda' 
diff --git a/tests/test_cuda_setup_evaluator.py b/tests/test_cuda_setup_evaluator.py
index 119e21a..3d34c29 100644
--- a/tests/test_cuda_setup_evaluator.py
+++ b/tests/test_cuda_setup_evaluator.py
@@ -7,10 +7,38 @@ from typing import List, NamedTuple
 from bitsandbytes.cuda_setup import (
     CUDA_RUNTIME_LIB,
     evaluate_cuda_setup,
-    get_cuda_runtime_lib_path,
-    tokenize_paths,
+    determine_cuda_runtime_lib_path,
+    extract_candidate_paths,
 )
 
+"""
+'LD_LIBRARY_PATH': ':/mnt/D/titus/local/cuda-11.1/lib64/'
+'CONDA_EXE': '/mnt/D/titus/miniconda/bin/conda'
+'LESSCLOSE': '/usr/bin/lesspipe %s %s'
+'OLDPWD': '/mnt/D/titus/src'
+'CONDA_PREFIX': '/mnt/D/titus/miniconda/envs/8-bit'
+'SSH_AUTH_SOCK': '/mnt/D/titus/.ssh/ssh-agent.tim-uw.sock'
+'CONDA_PREFIX_1': '/mnt/D/titus/miniconda'
+'PWD': '/mnt/D/titus/src/8-bit'
+'HOME': '/mnt/D/titus'
+'CONDA_PYTHON_EXE': '/mnt/D/titus/miniconda/bin/python'
+'CUDA_HOME': '/mnt/D/titus/local/cuda-11.1/'
+'TMUX': '/tmp/tmux-1007/default,59286,1'
+'XDG_DATA_DIRS': '/usr/local/share:/usr/share:/var/lib/snapd/desktop'
+'SSH_TTY': '/dev/pts/0'
+'MAIL': '/var/mail/titus'
+'SHELL': '/bin/bash'
+'DBUS_SESSION_BUS_ADDRESS': 'unix:path=/run/user/1007/bus'
+'XDG_RUNTIME_DIR': '/run/user/1007'
+'PATH': '/mnt/D/titus/miniconda/envs/8-bit/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/mnt/D/titus/local/cuda-11.1/bin'
+'LESSOPEN': '| /usr/bin/lesspipe %s'
+'_': '/mnt/D/titus/miniconda/envs/8-bit/bin/python'
+# any that include 'CONDA' that are not 'CONDA_PREFIX'
+
+# we search for
+'CUDA_HOME': '/mnt/D/titus/local/cuda-11.1/'
+"""
+
 
 class InputAndExpectedOutput(NamedTuple):
     input: str
@@ -47,20 +75,20 @@ HAPPY_PATH__LD_LIB_TEST_PATHS: List[InputAndExpectedOutput] = [
 
 @pytest.fixture(params=HAPPY_PATH__LD_LIB_TEST_PATHS)
 def happy_path_path_string(tmpdir, request):
-    for path in tokenize_paths(request.param):
-        test_dir.mkdir()
-        if CUDA_RUNTIME_LIB in path:
-            (test_input / CUDA_RUNTIME_LIB).touch()
+    for path in extract_candidate_paths(request.param):
+        path.mkdir()
+        if CUDA_RUNTIME_LIB in str(path):
+            (path / CUDA_RUNTIME_LIB).touch()
 
 
 @pytest.mark.parametrize("test_input, expected", HAPPY_PATH__LD_LIB_TEST_PATHS)
-def test_get_cuda_runtime_lib_path__happy_path(
+def test_determine_cuda_runtime_lib_path__happy_path(
     tmp_path, test_input: str, expected: str
 ):
-    for path in tokenize_paths(test_input):
+    for path in extract_candidate_paths(test_input):
         path.mkdir()
         (path / CUDA_RUNTIME_LIB).touch()
-    assert get_cuda_runtime_lib_path(test_input) == expected
+    assert determine_cuda_runtime_lib_path(test_input) == expected
 
 
 UNHAPPY_PATH__LD_LIB_TEST_PATHS = [
@@ -70,21 +98,21 @@ UNHAPPY_PATH__LD_LIB_TEST_PATHS = [
 
 @pytest.mark.parametrize("test_input", UNHAPPY_PATH__LD_LIB_TEST_PATHS)
-def test_get_cuda_runtime_lib_path__unhappy_path(tmp_path, test_input: str):
+def test_determine_cuda_runtime_lib_path__unhappy_path(tmp_path, test_input: str):
     test_input = tmp_path / test_input
     (test_input / CUDA_RUNTIME_LIB).touch()
     with pytest.raises(FileNotFoundError) as err_info:
-        get_cuda_runtime_lib_path(test_input)
+        determine_cuda_runtime_lib_path(test_input)
     assert all(match in err_info for match in {"duplicate", CUDA_RUNTIME_LIB})
 
 
-def test_get_cuda_runtime_lib_path__non_existent_dir(capsys, tmp_path):
+def test_determine_cuda_runtime_lib_path__non_existent_dir(capsys, tmp_path):
     existent_dir = tmp_path / "a/b"
     existent_dir.mkdir()
     non_existent_dir = tmp_path / "c/d"  # non-existent dir
     test_input = ":".join([str(existent_dir), str(non_existent_dir)])
 
-    get_cuda_runtime_lib_path(test_input)
+    determine_cuda_runtime_lib_path(test_input)
     std_err = capsys.readouterr().err
 
     assert all(match in std_err for match in {"WARNING", "non-existent"})
@@ -95,14 +123,17 @@ def test_full_system():
     # if CONDA_PREFIX exists, it takes priority over all other env variables,
     # but it does not contain the library directly, so we need to look at a sub-folder
-    version = ''
-    if 'CONDA_PREFIX' in os.environ:
-        ls_output, err = bnb.utils.execute_and_return(f'ls -l {os.environ["CONDA_PREFIX"]}/lib/libcudart.so')
-        major, minor, revision = ls_output.split(' ')[-1].replace('libcudart.so.', '').split('.')
-        version = float(f'{major}.{minor}')
-
-
-    if version == '' and 'LD_LIBRARY_PATH':
+    version = ""
+    if "CONDA_PREFIX" in os.environ:
+        ls_output, err = bnb.utils.execute_and_return(
+            f'ls -l {os.environ["CONDA_PREFIX"]}/lib/libcudart.so'
+        )
+        major, minor, revision = (
+            ls_output.split(" ")[-1].replace("libcudart.so.", "").split(".")
+        )
+        version = float(f"{major}.{minor}")
+
+    if version == "" and "LD_LIBRARY_PATH" in os.environ:
         ld_path = os.environ["LD_LIBRARY_PATH"]
         paths = ld_path.split(":")
         version = ""
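The happy-path tests above boil down to: materialize each candidate directory, drop a `libcudart.so` into it, and check that candidate extraction plus filtering finds it. A minimal, pytest-free rehearsal of that idea (the temp-directory layout is an assumption for illustration):

```python
# Self-contained rehearsal of the happy-path test logic: build a fake
# PATH-style string, create one real lib dir with a fake libcudart.so,
# and verify extraction + filtering. Illustrative only.
import tempfile
from pathlib import Path

from bitsandbytes.cuda_setup.paths import extract_candidate_paths, find_cuda_lib_in

CUDA_RUNTIME_LIB = "libcudart.so"

with tempfile.TemporaryDirectory() as tmp:
    lib_dir = Path(tmp) / "cuda/lib64"
    lib_dir.mkdir(parents=True)
    (lib_dir / CUDA_RUNTIME_LIB).touch()  # fake CUDA runtime

    test_input = f"{lib_dir}:{Path(tmp) / 'does/not/exist'}"
    assert len(extract_candidate_paths(test_input)) == 2
    # the non-existent dir is dropped (with a stderr warning); one hit remains
    assert find_cuda_lib_in(test_input) == {lib_dir / CUDA_RUNTIME_LIB}
```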