author    justheuristic <justheuristic@gmail.com>    2022-09-17 23:24:26 +0300
committer justheuristic <justheuristic@gmail.com>    2022-09-17 23:24:26 +0300
commit    fc4a135ed1604d1f6190af725bea912e19e8a88a (patch)
tree      92269a32408d189e436e7ae618a56c1ad7d2449c /bitsandbytes/autograd
parent    e29c5f5c41627668c650a2849e29599cd4f0bf3a (diff)
clearer assertions
Diffstat (limited to 'bitsandbytes/autograd')
-rw-r--r-- bitsandbytes/autograd/_functions.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/bitsandbytes/autograd/_functions.py b/bitsandbytes/autograd/_functions.py
index 6d473e9..f4a6d57 100644
--- a/bitsandbytes/autograd/_functions.py
+++ b/bitsandbytes/autograd/_functions.py
@@ -232,8 +232,8 @@ class MatMul8bitLt(torch.autograd.Function):
         # Cast A to fp16
         A_dtype = A.dtype
         if A_dtype != torch.float16:
-            warnings.warn(f"MatMul8bitLt: temporarily casting input matrix from {A_dtype} to float16")
-            A = A.to(torch.float16)
+            warnings.warn(f"MatMul8bitLt: input matrix will be converted from {A_dtype} to float16")
+            A = A.to(torch.float16)
 
         # 1. Quantize A
         if len(A.shape) == 3:
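
For context, the patched lines implement a warn-and-cast step: any input that is not already float16 is converted before 8-bit quantization, with a warning so the caller knows a copy and dtype change happened. Below is a minimal, self-contained sketch of that pattern. The helper name cast_input_to_fp16 and the example tensor are illustrative, not part of the bitsandbytes API.

import warnings

import torch


def cast_input_to_fp16(A: torch.Tensor) -> torch.Tensor:
    # Hypothetical helper mirroring the patched lines: warn, then
    # convert any non-fp16 input to float16 before quantization.
    if A.dtype != torch.float16:
        warnings.warn(f"MatMul8bitLt: input matrix will be converted from {A.dtype} to float16")
        A = A.to(torch.float16)
    return A


# A float32 activation triggers the warning and comes back as fp16.
A = torch.randn(4, 8, dtype=torch.float32)
A = cast_input_to_fp16(A)
assert A.dtype == torch.float16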