From 7e0fb655e1e040221054886fbee9d5682aa6e4e2 Mon Sep 17 00:00:00 2001
From: Tim Dettmers
Date: Tue, 23 Aug 2022 13:59:34 -0700
Subject: Some initial code. Needs to be tested.

---
 bitsandbytes/functional.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

(limited to 'bitsandbytes/functional.py')

diff --git a/bitsandbytes/functional.py b/bitsandbytes/functional.py
index 6637554..745e7e4 100644
--- a/bitsandbytes/functional.py
+++ b/bitsandbytes/functional.py
@@ -1686,11 +1686,10 @@ def double_quant(
 
 
 def get_special_format_str():
+    if not torch.cuda.is_available(): return 'col_turning'
     major, minor = torch.cuda.get_device_capability()
     if major < 7:
-        print(
-            f"Device with CUDA capability of {major} not supported for 8-bit matmul. Device has no tensor cores!"
-        )
+        print(f"Device with CUDA capability of {major} not supported for 8-bit matmul. Device has no tensor cores!")
         assert major >= 7
 
     if major == 7: return 'col_turing'
--
cgit v1.2.3


From ee5b947e63c2340405f25e4e83066f39292bc0ed Mon Sep 17 00:00:00 2001
From: Tim Dettmers
Date: Tue, 23 Aug 2022 16:00:26 -0700
Subject: Fixed issue where Pascal was not displaying proper error.

---
 bitsandbytes/functional.py | 23 ++---------------------
 1 file changed, 2 insertions(+), 21 deletions(-)

(limited to 'bitsandbytes/functional.py')

diff --git a/bitsandbytes/functional.py b/bitsandbytes/functional.py
index 745e7e4..75d083b 100644
--- a/bitsandbytes/functional.py
+++ b/bitsandbytes/functional.py
@@ -185,14 +185,9 @@ def create_dynamic_map(signed=True, n=7):
 
 
 def get_special_format_str():
+    if not torch.cuda.is_available(): return 'col_turing'
     major, minor = torch.cuda.get_device_capability()
-    if major < 7:
-        print(
-            f"Device with CUDA capability of {major} not supported for 8-bit matmul. Device has no tensor cores!"
-        )
-        assert major >= 7
-
-    if major == 7:
+    if major <= 7:
         return "col_turing"
     elif major == 8:
         return "col_ampere"
@@ -1685,20 +1680,6 @@ def double_quant(
     return out_row, out_col, row_stats, col_stats, coo_tensor
 
 
-def get_special_format_str():
-    if not torch.cuda.is_available(): return 'col_turning'
-    major, minor = torch.cuda.get_device_capability()
-    if major < 7:
-        print(f"Device with CUDA capability of {major} not supported for 8-bit matmul. Device has no tensor cores!")
-        assert major >= 7
-
-    if major == 7: return 'col_turing'
-    elif major == 8: return 'col_ampere'
-    else: return 'col_turing'
-
-
-
-
 def transform(A, to_order, from_order='row', out=None, transpose=False, state=None, ld=None):
     prev_device = pre_call(A.device)
     if state is None: state = (A.shape, from_order)
--
cgit v1.2.3
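
For reference, a minimal sketch of how get_special_format_str() reads once both patches apply: the copy near line 185 keeps the CUDA-availability guard and the major <= 7 branch, while the duplicate definition near line 1680 is deleted. The final else branch below is an assumption carried over from that removed duplicate, since the first hunk's context cuts off after the Ampere case.

import torch

def get_special_format_str():
    # No GPU to probe: fall back to the Turing tile layout.
    if not torch.cuda.is_available(): return 'col_turing'
    major, minor = torch.cuda.get_device_capability()
    if major <= 7:
        # Compute capability 7.x and below (Pascal included) map to the Turing format.
        return "col_turing"
    elif major == 8:
        # Ampere-class devices get their own layout.
        return "col_ampere"
    else:
        # Assumed default, mirroring the duplicate removed near line 1680.
        return "col_turing"

# Example: query the layout string for the current device.
# layout = get_special_format_str()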