From a6664de0720c7d8572a475a9c59f7dd85b5f83b0 Mon Sep 17 00:00:00 2001
From: Tim Dettmers
Date: Tue, 16 Aug 2022 19:03:19 -0700
Subject: Enhanced error handling for CUDA SETUP failures.

---
 CHANGELOG.md                    | 23 +++++++++++++++++++++++
 bitsandbytes/cextension.py      | 13 +++++++++----
 bitsandbytes/cuda_setup/main.py | 11 ++++++-----
 csrc/ops.cu                     |  3 +++
 setup.py                        |  2 +-
 5 files changed, 42 insertions(+), 10 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 285984e..1017721 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -67,3 +67,26 @@ Features:
 
 Deprecated:
  - Pre-compiled release for CUDA 9.2, 10.0, 10.2 no longer available
+
+### 0.31.0
+
+#### 8-bit Inference and Packaging Update
+
+Features:
+ - Added direct outlier extraction. This enables outlier extraction without fp16 weights and without performance degradation.
+ - Added an automatic CUDA SETUP procedure and packaged all binaries into a single bitsandbytes package.
+
+### 0.32.0
+
+#### 8-bit Inference Performance Enhancements
+
+We added performance enhancements for small models. This makes small models about 2x faster for LLM.int8() inference.
+
+Features:
+ - Int32 dequantization now supports fused biases.
+ - Linear8bitLt now uses a fused bias implementation.
+ - Changed `.data.storage().data_ptr()` to `.data.data_ptr()` to enhance inference performance.
+
+Bug fixes:
+ - Now throws an error if LLM.int8() is used on a GPU that is not supported.
+ - Enhanced error messaging if CUDA SETUP fails.
diff --git a/bitsandbytes/cextension.py b/bitsandbytes/cextension.py
index e0f280a..af23c8f 100644
--- a/bitsandbytes/cextension.py
+++ b/bitsandbytes/cextension.py
@@ -17,12 +17,17 @@ class CUDALibrary_Singleton(object):
         binary_path = package_dir / binary_name
 
         if not binary_path.exists():
-            print(f"CUDA_SETUP: TODO: compile library for specific version: {binary_name}")
+            print(f"CUDA SETUP: TODO: compile library for specific version: {binary_name}")
             legacy_binary_name = "libbitsandbytes.so"
-            print(f"CUDA_SETUP: Defaulting to {legacy_binary_name}...")
-            self.lib = ct.cdll.LoadLibrary(package_dir / legacy_binary_name)
+            print(f"CUDA SETUP: Defaulting to {legacy_binary_name}...")
+            binary_path = package_dir / legacy_binary_name
+            if not binary_path.exists():
+                print('CUDA SETUP: CUDA detection failed. Either the CUDA driver is not installed, CUDA is not installed, or you have multiple conflicting CUDA libraries!')
+                print('CUDA SETUP: If you compiled from source, try again with `make CUDA_VERSION=DETECTED_CUDA_VERSION`, for example `make CUDA_VERSION=113`.')
+                raise Exception('CUDA SETUP: Setup Failed!')
+            self.lib = ct.cdll.LoadLibrary(binary_path)
         else:
-            print(f"CUDA_SETUP: Loading binary {binary_path}...")
+            print(f"CUDA SETUP: Loading binary {binary_path}...")
             self.lib = ct.cdll.LoadLibrary(binary_path)
 
     @classmethod
diff --git a/bitsandbytes/cuda_setup/main.py b/bitsandbytes/cuda_setup/main.py
index 1f2ceb4..975b772 100644
--- a/bitsandbytes/cuda_setup/main.py
+++ b/bitsandbytes/cuda_setup/main.py
@@ -46,7 +46,7 @@ def get_cuda_version(cuda, cudart_path):
     minor = (version-(major*1000))//10
 
     if major < 11:
-        print('CUDA SETUP: CUDA version lower than 11 are currenlty not supported!')
+        print('CUDA SETUP: CUDA versions lower than 11 are currently not supported for LLM.int8(). You will only be able to use 8-bit optimizers and quantization routines!')
 
     return f'{major}{minor}'
 
@@ -57,7 +57,7 @@ def get_cuda_lib_handle():
         cuda = ctypes.CDLL("libcuda.so")
     except OSError:
         # TODO: shouldn't we error or at least warn here?
-        print('ERROR: libcuda.so not found!')
+        raise Exception('CUDA SETUP: ERROR! libcuda.so not found! Do you have a CUDA driver installed? If you are on a cluster, make sure you are on a CUDA machine!')
         return None
 
     check_cuda_result(cuda, cuda.cuInit(0))
@@ -115,7 +115,8 @@ def get_compute_capability(cuda):
 def evaluate_cuda_setup():
     print('')
     print('='*35 + 'BUG REPORT' + '='*35)
-    print('Welcome to bitsandbytes. For bug reports, please use this form: https://docs.google.com/forms/d/e/1FAIpQLScPB8emS3Thkp66nvqwmjTEgxp8Y9ufuWTzFyr9kJ5AoI47dQ/viewform?usp=sf_link')
+    print('Welcome to bitsandbytes. For bug reports, please submit your error trace to: https://github.com/TimDettmers/bitsandbytes/issues')
+    print('For effortless bug reporting, copy-paste your error into this form: https://docs.google.com/forms/d/e/1FAIpQLScPB8emS3Thkp66nvqwmjTEgxp8Y9ufuWTzFyr9kJ5AoI47dQ/viewform?usp=sf_link')
     print('='*80)
     binary_name = "libbitsandbytes_cpu.so"
     cudart_path = determine_cuda_runtime_lib_path()
@@ -125,7 +126,7 @@ def evaluate_cuda_setup():
         )
         return binary_name
 
-    print(f"CUDA SETUP: CUDA path found: {cudart_path}")
+    print(f"CUDA SETUP: CUDA runtime path found: {cudart_path}")
     cuda = get_cuda_lib_handle()
     cc = get_compute_capability(cuda)
     print(f"CUDA SETUP: Highest compute capability among GPUs detected: {cc}")
@@ -147,7 +148,7 @@ def evaluate_cuda_setup():
 
     # we use ls -l instead of nvcc to determine the cuda version
     # since most installations will have the libcudart.so installed, but not the compiler
-    print(f'CUDA_SETUP: Detected CUDA version {cuda_version_string}')
+    print(f'CUDA SETUP: Detected CUDA version {cuda_version_string}')
 
     def get_binary_name():
         "if not has_cublaslt (CC < 7.5), then we have to choose _nocublaslt.so"
diff --git a/csrc/ops.cu b/csrc/ops.cu
index ed32828..c0ec3cb 100644
--- a/csrc/ops.cu
+++ b/csrc/ops.cu
@@ -371,6 +371,9 @@ template void transform(cublasLtHandl
 
 template <int FORMATB, int DTYPE_OUT, int SCALE_ROWS> int igemmlt(cublasLtHandle_t ltHandle, int m, int n, int k, const int8_t *A, const int8_t *B, void *C, float *row_scale, int lda, int ldb, int ldc)
 {
 #ifdef NO_CUBLASLT
+    printf("ERROR: Your GPU does not support Int8 Matmul!");
+    assert(false);
+    return 0;
 #else
     int has_error = 0;
diff --git a/setup.py b/setup.py
index 61a5d05..2b25720 100644
--- a/setup.py
+++ b/setup.py
@@ -18,7 +18,7 @@ def read(fname):
 
 setup(
     name=f"bitsandbytes",
-    version=f"0.31.8",
+    version=f"0.32.0",
     author="Tim Dettmers",
     author_email="dettmers@cs.washington.edu",
     description="8-bit optimizers and matrix multiplication routines.",
--
cgit v1.2.3
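Note: the driver probe that this patch turns into a hard failure can be reproduced in isolation. The following is a minimal sketch of the same flow, assuming a Linux machine with libcuda.so on the loader path; check_driver is a hypothetical name used for illustration and is not part of the bitsandbytes API.

import ctypes

def check_driver():
    # Probe the CUDA driver library; an OSError here means no driver is
    # installed or the process is running on a non-GPU node.
    try:
        cuda = ctypes.CDLL("libcuda.so")
    except OSError as exc:
        raise RuntimeError(
            "CUDA SETUP: libcuda.so not found! Do you have a CUDA driver installed?"
        ) from exc
    # cuInit(0) returns CUDA_SUCCESS (0) when the driver is usable.
    status = cuda.cuInit(0)
    if status != 0:
        raise RuntimeError(f"CUDA SETUP: cuInit failed with error code {status}")
    return cuda

if __name__ == "__main__":
    check_driver()
    print("CUDA SETUP: driver detected and initialized.")

Raising on a missing libcuda.so (instead of printing and returning None, as before the patch) surfaces the root cause at import time rather than as a later NoneType failure.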