summaryrefslogtreecommitdiff
path: root/bitsandbytes
diff options
context:
space:
mode:
Diffstat (limited to 'bitsandbytes')
-rw-r--r--bitsandbytes/functional.py23
1 file changed, 2 insertions, 21 deletions
diff --git a/bitsandbytes/functional.py b/bitsandbytes/functional.py
index 745e7e4..75d083b 100644
--- a/bitsandbytes/functional.py
+++ b/bitsandbytes/functional.py
@@ -185,14 +185,9 @@ def create_dynamic_map(signed=True, n=7):
def get_special_format_str():
+ if not torch.cuda.is_available(): return 'col_turing'
major, minor = torch.cuda.get_device_capability()
- if major < 7:
- print(
- f"Device with CUDA capability of {major} not supported for 8-bit matmul. Device has no tensor cores!"
- )
- assert major >= 7
-
- if major == 7:
+ if major <= 7:
return "col_turing"
elif major == 8:
return "col_ampere"
@@ -1685,20 +1680,6 @@ def double_quant(
return out_row, out_col, row_stats, col_stats, coo_tensor
-def get_special_format_str():
- if not torch.cuda.is_available(): return 'col_turning'
- major, minor = torch.cuda.get_device_capability()
- if major < 7:
- print(f"Device with CUDA capability of {major} not supported for 8-bit matmul. Device has no tensor cores!")
- assert major >= 7
-
- if major == 7: return 'col_turing'
- elif major == 8: return 'col_ampere'
- else: return 'col_turing'
-
-
-
-
def transform(A, to_order, from_order='row', out=None, transpose=False, state=None, ld=None):
prev_device = pre_call(A.device)
if state is None: state = (A.shape, from_order)