path: root/bitsandbytes/functional.py
author    Tim Dettmers <TimDettmers@users.noreply.github.com>    2022-09-05 16:29:25 -0700
committer GitHub <noreply@github.com>    2022-09-05 16:29:25 -0700
commit    aca55881b9815a462142f42f3ff0dc917830d85c (patch)
tree      75477acfbce2da2a753ee21d4cf0da64f3f50ea5 /bitsandbytes/functional.py
parent    92a3363096e10ad6a5c4e944af898bd1186d806a (diff)
parent    eab4d8232d558f2e6bd7f7cc3d00e2e6e94f4e80 (diff)
Merge branch 'main' into remove_unused_code
Diffstat (limited to 'bitsandbytes/functional.py')
-rw-r--r--  bitsandbytes/functional.py  24
1 file changed, 2 insertions(+), 22 deletions(-)
diff --git a/bitsandbytes/functional.py b/bitsandbytes/functional.py
index 236c8ce..22200f2 100644
--- a/bitsandbytes/functional.py
+++ b/bitsandbytes/functional.py
@@ -184,14 +184,9 @@ def create_dynamic_map(signed=True, n=7):
 
 
 def get_special_format_str():
+    if not torch.cuda.is_available(): return 'col_turing'
     major, minor = torch.cuda.get_device_capability()
-    if major < 7:
-        print(
-            f"Device with CUDA capability of {major} not supported for 8-bit matmul. Device has no tensor cores!"
-        )
-        assert major >= 7
-
-    if major == 7:
+    if major <= 7:
         return "col_turing"
     elif major == 8:
         return "col_ampere"
@@ -1667,21 +1662,6 @@ def double_quant(
     return out_row, out_col, row_stats, col_stats, coo_tensor
 
 
-def get_special_format_str():
-    major, minor = torch.cuda.get_device_capability()
-    if major < 7:
-        print(
-            f"Device with CUDA capability of {major} not supported for 8-bit matmul. Device has no tensor cores!"
-        )
-        assert major >= 7
-
-    if major == 7: return 'col_turing'
-    elif major == 8: return 'col_ampere'
-    else: return 'col_turing'
-
-
-
-
 def transform(A, to_order, from_order='row', out=None, transpose=False, state=None, ld=None):
     prev_device = pre_call(A.device)
     if state is None: state = (A.shape, from_order)
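
For reference, below is a minimal sketch of get_special_format_str() as it reads after this merge, reconstructed only from the hunks shown above: the CUDA-availability guard and the major <= 7 branch are taken from the added lines, while the trailing fallback return is an assumption mirroring the duplicate definition removed in the second hunk.

# Sketch reconstructed from the diff above; not a verbatim copy of the file.
import torch

def get_special_format_str():
    # No CUDA device to query, so fall back to the Turing layout string.
    if not torch.cuda.is_available():
        return 'col_turing'
    major, minor = torch.cuda.get_device_capability()
    if major <= 7:          # Turing and older architectures
        return 'col_turing'
    elif major == 8:        # Ampere
        return 'col_ampere'
    return 'col_turing'     # assumed fallback for newer compute capabilities

A typical call site might use the returned string to choose the layout for transform(), e.g. formatB = get_special_format_str() followed by transform(B, to_order=formatB); this usage is hypothetical and shown only to illustrate how the helper plugs into the transform() signature visible in the last hunk.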