diff options
Diffstat (limited to 'bitsandbytes')
-rw-r--r-- | bitsandbytes/cuda_setup/main.py | 9
-rw-r--r-- | bitsandbytes/functional.py | 5
-rw-r--r-- | bitsandbytes/optim/__init__.py | 15
3 files changed, 16 insertions, 13 deletions
diff --git a/bitsandbytes/cuda_setup/main.py b/bitsandbytes/cuda_setup/main.py index 975b772..d305c64 100644 --- a/bitsandbytes/cuda_setup/main.py +++ b/bitsandbytes/cuda_setup/main.py @@ -17,6 +17,7 @@ evaluation: """ import ctypes +import torch from pathlib import Path from ..utils import execute_and_return @@ -28,7 +29,7 @@ def check_cuda_result(cuda, result_val): if result_val != 0: error_str = ctypes.c_char_p() cuda.cuGetErrorString(result_val, ctypes.byref(error_str)) - raise Exception(f"CUDA exception! Error code: {error_str.value.decode()}") + print(f"CUDA exception! Error code: {error_str.value.decode()}") def get_cuda_version(cuda, cudart_path): # https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART____VERSION.html#group__CUDART____VERSION @@ -57,7 +58,7 @@ def get_cuda_lib_handle(): cuda = ctypes.CDLL("libcuda.so") except OSError: # TODO: shouldn't we error or at least warn here? - raise Exception('CUDA SETUP: ERROR! libcuda.so not found! Do you have a CUDA driver installed? If you are on a cluster, make sure you are on a CUDA machine!') + print('CUDA SETUP: WARNING! libcuda.so not found! Do you have a CUDA driver installed? If you are on a cluster, make sure you are on a CUDA machine!') return None check_cuda_result(cuda, cuda.cuInit(0)) @@ -119,6 +120,10 @@ def evaluate_cuda_setup(): print('For effortless bug reporting copy-paste your error into this form: https://docs.google.com/forms/d/e/1FAIpQLScPB8emS3Thkp66nvqwmjTEgxp8Y9ufuWTzFyr9kJ5AoI47dQ/viewform?usp=sf_link') print('='*80) binary_name = "libbitsandbytes_cpu.so" #if not torch.cuda.is_available(): #print('No GPU detected. Loading CPU library...') #return binary_name + cudart_path = determine_cuda_runtime_lib_path() if cudart_path is None: print( diff --git a/bitsandbytes/functional.py b/bitsandbytes/functional.py index 6637554..745e7e4 100644 --- a/bitsandbytes/functional.py +++ b/bitsandbytes/functional.py @@ -1686,11 +1686,10 @@ def double_quant( def get_special_format_str(): + if not torch.cuda.is_available(): return 'col_turning' major, minor = torch.cuda.get_device_capability() if major < 7: - print( - f"Device with CUDA capability of {major} not supported for 8-bit matmul. Device has no tensor cores!" - ) + print(f"Device with CUDA capability of {major} not supported for 8-bit matmul. Device has no tensor cores!") assert major >= 7 if major == 7: return 'col_turing' diff --git a/bitsandbytes/optim/__init__.py b/bitsandbytes/optim/__init__.py index a76d717..d18f1d1 100644 --- a/bitsandbytes/optim/__init__.py +++ b/bitsandbytes/optim/__init__.py @@ -5,13 +5,12 @@ from bitsandbytes.cextension import COMPILED_WITH_CUDA -if COMPILED_WITH_CUDA: - from .adam import Adam, Adam8bit, Adam32bit - from .adamw import AdamW, AdamW8bit, AdamW32bit - from .sgd import SGD, SGD8bit, SGD32bit - from .lars import LARS, LARS8bit, LARS32bit, PytorchLARS - from .lamb import LAMB, LAMB8bit, LAMB32bit - from .rmsprop import RMSprop, RMSprop8bit, RMSprop32bit - from .adagrad import Adagrad, Adagrad8bit, Adagrad32bit +from .adam import Adam, Adam8bit, Adam32bit +from .adamw import AdamW, AdamW8bit, AdamW32bit +from .sgd import SGD, SGD8bit, SGD32bit +from .lars import LARS, LARS8bit, LARS32bit, PytorchLARS +from .lamb import LAMB, LAMB8bit, LAMB32bit +from .rmsprop import RMSprop, RMSprop8bit, RMSprop32bit +from .adagrad import Adagrad, Adagrad8bit, Adagrad32bit from .optimizer import GlobalOptimManager