Diffstat (limited to 'bitsandbytes/autograd')
 bitsandbytes/autograd/_functions.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/bitsandbytes/autograd/_functions.py b/bitsandbytes/autograd/_functions.py
index 5a83dfd..36c392b 100644
--- a/bitsandbytes/autograd/_functions.py
+++ b/bitsandbytes/autograd/_functions.py
@@ -231,7 +231,7 @@ class MatMul8bitLt(torch.autograd.Function):
         # Cast A to fp16
         if A.dtype != torch.float16:
-            warnings.warn(f"MatMul8bitLt: input matrix will be cast from {A.dtype} to float16")
+            warnings.warn(f"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization")
         # 1. Quantize A
         if len(A.shape) == 3:
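
Below is a minimal, hypothetical sketch (not the library's exact code) of the cast-and-warn pattern this change touches: MatMul8bitLt quantizes its input in fp16, so inputs of any other dtype are cast first and the caller is warned. The helper name cast_for_quantization is invented for illustration.

import warnings
import torch

def cast_for_quantization(A: torch.Tensor) -> torch.Tensor:
    # The 8-bit matmul path expects fp16; warn and cast anything else,
    # mirroring the message introduced by this diff.
    if A.dtype != torch.float16:
        warnings.warn(
            f"MatMul8bitLt: inputs will be cast from {A.dtype} "
            "to float16 during quantization"
        )
        A = A.to(torch.float16)
    return A

# Usage: an fp32 input triggers the warning; an fp16 input passes through.
x = torch.randn(4, 8, dtype=torch.float32)
x16 = cast_for_quantization(x)  # warns, returns an fp16 tensor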