summaryrefslogtreecommitdiff
path: root/bitsandbytes
diff options
context:
space:
mode:
Diffstat (limited to 'bitsandbytes')
-rw-r--r--bitsandbytes/__init__.py2
-rw-r--r--bitsandbytes/__main__.py97
-rw-r--r--bitsandbytes/cextension.py21
-rw-r--r--bitsandbytes/cuda_setup/env_vars.py51
-rw-r--r--bitsandbytes/cuda_setup/main.py142
-rw-r--r--bitsandbytes/cuda_setup/paths.py126
-rw-r--r--bitsandbytes/utils.py8
7 files changed, 336 insertions, 111 deletions
diff --git a/bitsandbytes/__init__.py b/bitsandbytes/__init__.py
index 76a5b48..7901f96 100644
--- a/bitsandbytes/__init__.py
+++ b/bitsandbytes/__init__.py
@@ -22,3 +22,5 @@ __pdoc__ = {
"optim.optimizer.Optimizer8bit": False,
"optim.optimizer.MockArgs": False,
}
+
+PACKAGE_GITHUB_URL = "https://github.com/TimDettmers/bitsandbytes"
diff --git a/bitsandbytes/__main__.py b/bitsandbytes/__main__.py
index a91e942..7f3d24c 100644
--- a/bitsandbytes/__main__.py
+++ b/bitsandbytes/__main__.py
@@ -1,3 +1,96 @@
-from bitsandbytes.debug_cli import cli
+# from bitsandbytes.debug_cli import cli
-cli()
+# cli()
+import os
+import sys
+import torch
+
+
HEADER_WIDTH = 60


def print_header(
    txt: str, width: int = HEADER_WIDTH, filler: str = "+"
) -> None:
    """Print *txt* centered in a ``width``-wide banner padded with *filler*.

    An empty *txt* yields an unbroken line of *filler* characters.
    """
    banner_text = f" {txt} " if txt else ""
    print(banner_text.center(width, filler))
+
+
def print_debug_info() -> None:
    """Ask the user to attach the preceding debug dump when filing an issue."""
    # NOTE(review): relies on PACKAGE_GITHUB_URL being imported later in this
    # module but before the first call — assumed intentional ordering; confirm.
    issue_url = f"{PACKAGE_GITHUB_URL}/issues/new/choose"
    print(
        "\nAbove we output some debug information. Please provide this info when "
        f"creating an issue via {issue_url} ...\n"
    )
+
+
+print_header("")
+print_header("DEBUG INFORMATION")
+print_header("")
+print()
+
+
+from . import COMPILED_WITH_CUDA, PACKAGE_GITHUB_URL
+from .cuda_setup.main import get_compute_capabilities
+from .cuda_setup.env_vars import to_be_ignored
+from .utils import print_stderr
+
+
+print_header("POTENTIALLY LIBRARY-PATH-LIKE ENV VARS")
+for k, v in os.environ.items():
+ if "/" in v and not to_be_ignored(k, v):
+ print(f"'{k}': '{v}'")
+print_header("")
+
+print(
+ "\nWARNING: Please be sure to sanitize sensible info from any such env vars!\n"
+)
+
+print_header("OTHER")
+print(f"{COMPILED_WITH_CUDA = }")
+print(f"COMPUTE_CAPABILITIES_PER_GPU = {get_compute_capabilities()}")
+print_header("")
+print_header("DEBUG INFO END")
+print_header("")
+print(
+ """
+Running a quick check that:
+ + library is importable
+ + CUDA function is callable
+"""
+)
+
+try:
+ from bitsandbytes.optim import Adam
+
+ p = torch.nn.Parameter(torch.rand(10, 10).cuda())
+ a = torch.rand(10, 10).cuda()
+
+ p1 = p.data.sum().item()
+
+ adam = Adam([p])
+
+ out = a * p
+ loss = out.sum()
+ loss.backward()
+ adam.step()
+
+ p2 = p.data.sum().item()
+
+ assert p1 != p2
+ print("SUCCESS!")
+ print("Installation was successful!")
+ sys.exit(0)
+
+except ImportError:
+ print()
+ print_stderr(
+ f"WARNING: {__package__} is currently running as CPU-only!\n"
+ "Therefore, 8-bit optimizers and GPU quantization are unavailable.\n\n"
+ f"If you think that this is so erroneously,\nplease report an issue!"
+ )
+ print_debug_info()
+ sys.exit(0)
+except Exception as e:
+ print(e)
+ print_debug_info()
+ sys.exit(1)
diff --git a/bitsandbytes/cextension.py b/bitsandbytes/cextension.py
index f5b97fd..66c79d8 100644
--- a/bitsandbytes/cextension.py
+++ b/bitsandbytes/cextension.py
@@ -1,8 +1,8 @@
import ctypes as ct
-import os
+from pathlib import Path
from warnings import warn
-from bitsandbytes.cuda_setup.main import evaluate_cuda_setup
+from .cuda_setup.main import evaluate_cuda_setup
class CUDALibrary_Singleton(object):
@@ -12,18 +12,17 @@ class CUDALibrary_Singleton(object):
raise RuntimeError("Call get_instance() instead")
def initialize(self):
    """Load the native bitsandbytes shared library for the detected CUDA setup.

    Falls back to the generic ``libbitsandbytes.so`` when no binary matching
    the detected CUDA version exists in the package directory. Sets
    ``self.lib`` to the loaded ``ctypes`` library handle.
    """
    binary_name = evaluate_cuda_setup()
    package_dir = Path(__file__).parent
    binary_path = package_dir / binary_name

    if not binary_path.exists():
        print(f"TODO: compile library for specific version: {binary_name}")
        legacy_binary_name = "libbitsandbytes.so"
        print(f"Defaulting to {legacy_binary_name}...")
        # BUG FIX: ctypes' LoadLibrary expects a str; passing a pathlib.Path
        # raises TypeError on several Python versions.
        self.lib = ct.cdll.LoadLibrary(str(package_dir / legacy_binary_name))
    else:
        self.lib = ct.cdll.LoadLibrary(str(binary_path))
@classmethod
def get_instance(cls):
diff --git a/bitsandbytes/cuda_setup/env_vars.py b/bitsandbytes/cuda_setup/env_vars.py
new file mode 100644
index 0000000..536a7d8
--- /dev/null
+++ b/bitsandbytes/cuda_setup/env_vars.py
@@ -0,0 +1,51 @@
+import os
+from typing import Dict
+
+
def to_be_ignored(env_var: str, value: str) -> bool:
    """Return True when *env_var* is known to be irrelevant for CUDA lib search.

    *value* is accepted for interface symmetry with sibling predicates but is
    not inspected.
    """
    ignorable = frozenset((
        "PWD",  # PWD: this is how the shell keeps track of the current working dir
        "OLDPWD",
        "SSH_AUTH_SOCK",  # SSH stuff, therefore unrelated
        "SSH_TTY",
        "HOME",  # Linux shell default
        "TMUX",  # Terminal Multiplexer
        "XDG_DATA_DIRS",  # XDG: Desktop environment stuff
        "XDG_RUNTIME_DIR",
        "MAIL",  # something related to emails
        "SHELL",  # binary for currently invoked shell
        "DBUS_SESSION_BUS_ADDRESS",  # hardware related
        "PATH",  # this is for finding binaries, not libraries
        "LESSOPEN",  # related to the `less` command
        "LESSCLOSE",
        "_",  # current Python interpreter
    ))
    return env_var in ignorable
+
+
def might_contain_a_path(candidate: str) -> bool:
    """Heuristic: a string containing a '/' might be a filesystem path."""
    return candidate.find("/") != -1
+
+
def is_active_conda_env(env_var: str) -> bool:
    """True iff *env_var* names the active conda environment prefix."""
    return env_var == "CONDA_PREFIX"
+
+
def is_other_conda_env_var(env_var: str) -> bool:
    """True for any conda-related variable (substring match on 'CONDA').

    Note: this also matches CONDA_PREFIX itself; callers are expected to
    check for the active-env case first.
    """
    return env_var.find("CONDA") != -1
+
+
def is_relevant_candidate_env_var(env_var: str, value: str) -> bool:
    """Decide whether an env var could point at CUDA libraries.

    The active conda prefix is always relevant; any other var qualifies only
    if its value looks path-like, it is not some other conda variable, and it
    is not on the known-irrelevant list.
    """
    if is_active_conda_env(env_var):
        return True
    return (
        might_contain_a_path(value)
        and not is_other_conda_env_var(env_var)
        and not to_be_ignored(env_var, value)
    )
+
+
def get_potentially_lib_path_containing_env_vars() -> Dict[str, str]:
    """Snapshot the os.environ entries that might hold library search paths."""
    relevant: Dict[str, str] = {}
    for env_var, value in os.environ.items():
        if is_relevant_candidate_env_var(env_var, value):
            relevant[env_var] = value
    return relevant
diff --git a/bitsandbytes/cuda_setup/main.py b/bitsandbytes/cuda_setup/main.py
index 6d70c92..e96ac70 100644
--- a/bitsandbytes/cuda_setup/main.py
+++ b/bitsandbytes/cuda_setup/main.py
@@ -8,8 +8,6 @@ extract factors the build is dependent on:
- CuBLAS-LT: full-build 8-bit optimizer
- no CuBLAS-LT: no 8-bit matrix multiplication (`nomatmul`)
-alle Binaries packagen
-
evaluation:
- if paths faulty, return meaningful error
- else:
@@ -19,11 +17,10 @@ evaluation:
"""
import ctypes
-import os
from pathlib import Path
-from typing import Set, Union
-from ..utils import print_err, warn_of_missing_prerequisite, execute_and_return
+from ..utils import execute_and_return
+from .paths import determine_cuda_runtime_lib_path
def check_cuda_result(cuda, result_val):
@@ -34,26 +31,23 @@ def check_cuda_result(cuda, result_val):
raise Exception(f"CUDA exception! ERROR: {error_str}")
-# taken from https://gist.github.com/f0k/63a664160d016a491b2cbea15913d549
-def get_compute_capability():
- # 1. find libcuda.so library (GPU driver) (/usr/lib)
- # init_device -> init variables -> call function by reference
- # 2. call extern C function to determine CC
- # (https://docs.nvidia.com/cuda/cuda-driver-api/group__CUDA__DEVICE__DEPRECATED.html)
- # 3. Check for CUDA errors
- # https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
+def get_compute_capabilities():
+ """
+ 1. find libcuda.so library (GPU driver) (/usr/lib)
+ init_device -> init variables -> call function by reference
+ 2. call extern C function to determine CC
+ (https://docs.nvidia.com/cuda/cuda-driver-api/group__CUDA__DEVICE__DEPRECATED.html)
+ 3. Check for CUDA errors
+ https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
+ # bits taken from https://gist.github.com/f0k/63a664160d016a491b2cbea15913d549
+ """
# 1. find libcuda.so library (GPU driver) (/usr/lib)
- libnames = ("libcuda.so",)
- for libname in libnames:
- try:
- cuda = ctypes.CDLL(libname)
- except OSError:
- continue
- else:
- break
- else:
- raise OSError("could not load any of: " + " ".join(libnames))
+ try:
+ cuda = ctypes.CDLL("libcuda.so")
+ except OSError:
+ # TODO: shouldn't we error or at least warn here?
+ return None
nGpus = ctypes.c_int()
cc_major = ctypes.c_int()
@@ -70,104 +64,64 @@ def get_compute_capability():
check_cuda_result(cuda, cuda.cuDeviceGet(ctypes.byref(device), i))
ref_major = ctypes.byref(cc_major)
ref_minor = ctypes.byref(cc_minor)
- # 2. call extern C function to determine CC
- check_cuda_result(cuda, cuda.cuDeviceComputeCapability(ref_major, ref_minor, device))
- ccs.append(f"{cc_major.value}.{cc_minor.value}")
-
- # TODO: handle different compute capabilities; for now, take the max
- ccs.sort()
- max_cc = ccs[-1]
- return max_cc
-
-
-CUDA_RUNTIME_LIB: str = "libcudart.so"
-
-
-def tokenize_paths(paths: str) -> Set[Path]:
- return {Path(ld_path) for ld_path in paths.split(":") if ld_path}
-
-
-def resolve_env_variable(env_var):
- '''Searches a given envirionmental library or path for the CUDA runtime library (libcudart.so)'''
- paths: Set[Path] = tokenize_paths(env_var)
-
- non_existent_directories: Set[Path] = {
- path for path in paths if not path.exists()
- }
-
- if non_existent_directories:
- print_err(
- "WARNING: The following directories listed your path were found to "
- f"be non-existent: {non_existent_directories}"
+ # 2. call extern C function to determine CC
+ check_cuda_result(
+ cuda, cuda.cuDeviceComputeCapability(ref_major, ref_minor, device)
)
+ ccs.append(f"{cc_major.value}.{cc_minor.value}")
- cuda_runtime_libs: Set[Path] = {
- path / CUDA_RUNTIME_LIB
- for path in paths
- if (path / CUDA_RUNTIME_LIB).is_file()
- } - non_existent_directories
+    # BUG FIX: list.sort() sorts in place and returns None; return the
+    # sorted list itself so callers get the capabilities.
+    ccs.sort()
+    return ccs
- if len(cuda_runtime_libs) > 1:
- err_msg = (
- f"Found duplicate {CUDA_RUNTIME_LIB} files: {cuda_runtime_libs}.."
- )
- raise FileNotFoundError(err_msg)
- elif len(cuda_runtime_libs) == 0: return None # this is not en error, since other envs can contain CUDA
- else: return next(iter(cuda_runtime_libs)) # for now just return the first
-
-def get_cuda_runtime_lib_path() -> Union[Path, None]:
- '''Searches conda installation and environmental paths for a cuda installations.'''
-
- cuda_runtime_libs = []
- # CONDA_PREFIX/lib is the default location for a default conda
- # install of pytorch. This location takes priortiy over all
- # other defined variables
- if 'CONDA_PREFIX' in os.environ:
- lib_conda_path = f'{os.environ["CONDA_PREFIX"]}/lib/'
- print(lib_conda_path)
- cuda_runtime_libs.append(resolve_env_variable(lib_conda_path))
-
- if len(cuda_runtime_libs) == 1: return cuda_runtime_libs[0]
-
- # if CONDA_PREFIX does not have the library, search the environment
- # (in particualr LD_LIBRARY PATH)
- for var in os.environ:
- cuda_runtime_libs.append(resolve_env_variable(var))
-
- if len(cuda_runtime_libs) < 1:
- err_msg = (
- f"Did not find {CUDA_RUNTIME_LIB} files: {cuda_runtime_libs}.."
- )
- raise FileNotFoundError(err_msg)
- return cuda_runtime_libs.pop()
def get_compute_capability():
    """
    Extracts the highest compute capability from all available GPUs, as
    compute capabilities are downwards compatible. If no GPUs are detected,
    it returns None.
    """
    # BUG FIX: the original `if ccs := get_compute_capabilities() is not None:`
    # bound the *comparison result* (a bool) to `ccs` because `:=` has lower
    # precedence than `is not` — so `ccs[-1]` could never return a capability.
    ccs = get_compute_capabilities()
    if ccs is not None:
        # TODO: handle different compute capabilities; for now, take the max
        return ccs[-1]
    return None
def evaluate_cuda_setup():
    """Decide which bitsandbytes native binary to load.

    Returns the shared-library filename matching the detected CUDA runtime
    version and GPU compute capability, or the CPU-only binary name when no
    GPU is detected.
    """
    cuda_path = determine_cuda_runtime_lib_path()
    print(f"CUDA SETUP: CUDA path found: {cuda_path}")
    cc = get_compute_capability()
    binary_name = "libbitsandbytes_cpu.so"

    # FIXME: has_gpu is still unused
    if not (has_gpu := bool(cc)):
        print(
            "WARNING: No GPU detected! Check your CUDA paths. Processing to load CPU-only library..."
        )
        return binary_name

    # 7.5 is the minimum CC for cublaslt
    has_cublaslt = cc in ["7.5", "8.0", "8.6"]

    # TODO:
    # (1) CUDA missing cases (no CUDA installed by CUDA driver (nvidia-smi accessible)
    # (2) Multiple CUDA versions installed

    # FIXME: cuda_home is still unused
    cuda_home = str(Path(cuda_path).parent.parent)

    # we use ls -l instead of nvcc to determine the cuda version
    # since most installations will have the libcudart.so installed, but not the compiler
    ls_output, err = execute_and_return(f"ls -l {cuda_path}")
    major, minor, revision = (
        ls_output.split(" ")[-1].replace("libcudart.so.", "").split(".")
    )
    cuda_version_string = f"{major}{minor}"

    def get_binary_name():
        "if not has_cublaslt (CC < 7.5), then we have to choose _nocublaslt.so"
        bin_base_name = "libbitsandbytes_cuda"
        if has_cublaslt:
            return f"{bin_base_name}{cuda_version_string}.so"
        else:
            # BUG FIX: keep the CUDA version in the nocublaslt filename, as the
            # pre-refactor code did (libbitsandbytes_cuda{ver}_nocublaslt.so).
            return f"{bin_base_name}{cuda_version_string}_nocublaslt.so"

    # BUG FIX: the helper above was defined but never called, so the function
    # always returned the CPU binary name even on GPU systems.
    return get_binary_name()
diff --git a/bitsandbytes/cuda_setup/paths.py b/bitsandbytes/cuda_setup/paths.py
new file mode 100644
index 0000000..c4a7465
--- /dev/null
+++ b/bitsandbytes/cuda_setup/paths.py
@@ -0,0 +1,126 @@
+from pathlib import Path
+from typing import Set, Union
+from warnings import warn
+
+from ..utils import print_stderr
+from .env_vars import get_potentially_lib_path_containing_env_vars
+
+
+CUDA_RUNTIME_LIB: str = "libcudart.so"
+
+
def purge_unwanted_semicolon(tentative_path: Path) -> Path:
    """Strip a semicolon-delimited suffix from a single path entry.

    Handles entries such as those in ``__LMOD_REF_COUNT_PATH`` where each
    path carries a ``;<refcount>``-style suffix, e.g.::

        /sw/cuda/11.6.2/bin:2;/mmfs1/home/dettmers/git/sched/bin:1;...

    Only the text before the first ';' is a real path. Paths without a ';'
    are returned unchanged.

    BUG FIX: the body was entirely commented out and returned None despite
    the declared ``-> Path``; this implements the documented behavior.
    """
    path_as_str = str(tentative_path)
    if ";" in path_as_str:
        path_as_str, _, _ = path_as_str.partition(";")
    return Path(path_as_str)
+
+
def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]:
    """Split a ':'-separated path list into a set of Paths, skipping empties."""
    candidates: Set[Path] = set()
    for entry in paths_list_candidate.split(":"):
        if entry:
            candidates.add(Path(entry))
    return candidates
+
+
def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:
    """Keep only paths that exist, reporting any removals on stderr."""
    existent = {path for path in candidate_paths if path.exists()}
    non_existent_directories = candidate_paths - existent

    if non_existent_directories:
        print_stderr(
            "WARNING: The following directories listed in your path were found to "
            f"be non-existent: {non_existent_directories}"
        )

    return existent
+
+
def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]:
    """Return <dir>/libcudart.so for each candidate dir that contains it."""
    found: Set[Path] = set()
    for path in candidate_paths:
        lib = path / CUDA_RUNTIME_LIB
        if lib.is_file():
            found.add(lib)
    return found
+
+
def resolve_paths_list(paths_list_candidate: str) -> Set[Path]:
    """
    Searches a given environmental var for the CUDA runtime library,
    i.e. `libcudart.so`.
    """
    candidate_dirs = extract_candidate_paths(paths_list_candidate)
    return remove_non_existent_dirs(candidate_dirs)
+
+
def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]:
    """Locate libcudart.so in every existing dir of a ':'-separated path list."""
    existing_dirs = resolve_paths_list(paths_list_candidate)
    return get_cuda_runtime_lib_paths(existing_dirs)
+
+
def warn_in_case_of_duplicates(results_paths: Set[Path]) -> None:
    """Emit a warning when more than one libcudart.so candidate was found."""
    if len(results_paths) <= 1:
        return
    warn(
        f"Found duplicate {CUDA_RUNTIME_LIB} files: {results_paths}.. "
        "We'll flip a coin and try one of these, in order to fail forward.\n"
        "Either way, this might cause trouble in the future:\n"
        "If you get `CUDA error: invalid device function` errors, the above "
        "might be the cause and the solution is to make sure only one "
        f"{CUDA_RUNTIME_LIB} in the paths that we search based on your env."
    )
+
+
def determine_cuda_runtime_lib_path() -> Union[Path, None]:
    """
    Searches for a cuda installations, in the following order of priority:
        1. active conda env
        2. LD_LIBRARY_PATH
        3. any other env vars, while ignoring those that
            - are known to be unrelated (see `bnb.cuda_setup.env_vars.to_be_ignored`)
            - don't contain the path separator `/`

    If multiple libraries are found in part 3, we optimistically try one,
    while giving a warning message.

    Returns the chosen library path, or None when none was found.
    """
    candidate_env_vars = get_potentially_lib_path_containing_env_vars()

    if "CONDA_PREFIX" in candidate_env_vars:
        conda_libs_path = Path(candidate_env_vars["CONDA_PREFIX"]) / "lib"

        conda_cuda_libs = find_cuda_lib_in(str(conda_libs_path))
        warn_in_case_of_duplicates(conda_cuda_libs)

        if conda_cuda_libs:
            return next(iter(conda_cuda_libs))

        warn(
            f'{candidate_env_vars["CONDA_PREFIX"]} did not contain '
            f'{CUDA_RUNTIME_LIB} as expected! Searching further paths...'
        )

    if "LD_LIBRARY_PATH" in candidate_env_vars:
        lib_ld_cuda_libs = find_cuda_lib_in(candidate_env_vars["LD_LIBRARY_PATH"])

        # BUG FIX: warn about duplicates *before* the early return, mirroring
        # the conda branch above — previously this call was unreachable
        # whenever any library was found.
        warn_in_case_of_duplicates(lib_ld_cuda_libs)

        if lib_ld_cuda_libs:
            return next(iter(lib_ld_cuda_libs))

        warn(
            f'{candidate_env_vars["LD_LIBRARY_PATH"]} did not contain '
            f'{CUDA_RUNTIME_LIB} as expected! Searching further paths...'
        )

    remaining_candidate_env_vars = {
        env_var: value for env_var, value in candidate_env_vars.items()
        if env_var not in {"CONDA_PREFIX", "LD_LIBRARY_PATH"}
    }

    cuda_runtime_libs = set()
    # BUG FIX: iterate .items() — iterating the dict directly yields only the
    # keys, so the 2-tuple unpacking raised ValueError at runtime.
    for env_var, value in remaining_candidate_env_vars.items():
        cuda_runtime_libs.update(find_cuda_lib_in(value))

    warn_in_case_of_duplicates(cuda_runtime_libs)

    # BUG FIX: return None (matching the annotated Union[Path, None]) instead
    # of an empty set when nothing was found.
    return next(iter(cuda_runtime_libs)) if cuda_runtime_libs else None
diff --git a/bitsandbytes/utils.py b/bitsandbytes/utils.py
index e1d9460..4256a87 100644
--- a/bitsandbytes/utils.py
+++ b/bitsandbytes/utils.py
@@ -1,9 +1,9 @@
-import sys
import shlex
import subprocess
-
+import sys
from typing import Tuple
+
def execute_and_return(command_string: str) -> Tuple[str, str]:
def _decode(subprocess_err_out_tuple):
return tuple(
@@ -24,9 +24,9 @@ def execute_and_return(command_string: str) -> Tuple[str, str]:
return std_out, std_err
def print_stderr(s: str) -> None:
    """Write *s* to standard error, newline-terminated (like print)."""
    sys.stderr.write(s + "\n")
def warn_of_missing_prerequisite(s: str) -> None:
    """Report a missing prerequisite *s* as a stderr warning."""
    print_stderr(f"WARNING, missing pre-requisite: {s}")