author | justheuristic <justheuristic@gmail.com> | 2022-09-18 00:35:03 +0300
committer | justheuristic <justheuristic@gmail.com> | 2022-09-18 00:35:03 +0300
commit | e35e2c665a69647d829c48e22fba0230180c11e7 (patch)
tree | ae71bd4aa07956036bad4454bf3963b23623edf7 /bitsandbytes/autograd
parent | 577275bd8c1b4191284c4fb34799d252ae8667a1 (diff)
cast properly
Diffstat (limited to 'bitsandbytes/autograd')
-rw-r--r-- | bitsandbytes/autograd/_functions.py | 2
1 file changed, 1 insertion(+), 1 deletion(-)
```diff
diff --git a/bitsandbytes/autograd/_functions.py b/bitsandbytes/autograd/_functions.py
index 5a83dfd..36c392b 100644
--- a/bitsandbytes/autograd/_functions.py
+++ b/bitsandbytes/autograd/_functions.py
@@ -231,7 +231,7 @@ class MatMul8bitLt(torch.autograd.Function):
 
         # Cast A to fp16
         if A.dtype != torch.float16:
-            warnings.warn(f"MatMul8bitLt: input matrix will be cast from {A.dtype} to float16")
+            warnings.warn(f"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization")
 
         # 1. Quantize A
         if len(A.shape) == 3:
```
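For context, here is a minimal sketch of how the updated warning surfaces in practice. This is not part of the commit: it assumes bitsandbytes installed with CUDA support and a GPU, and the layer sizes are illustrative. `bnb.nn.Linear8bitLt` routes its matrix multiply through the `MatMul8bitLt` autograd function patched here, so passing it a non-fp16 input exercises the cast path that the warning describes.

```python
# Sketch only: assumes a CUDA GPU and bitsandbytes with CUDA support;
# sizes (64, 64) and batch shape are illustrative, not from the commit.
import torch
import bitsandbytes as bnb

# Linear8bitLt performs its matmul via MatMul8bitLt, the function
# whose warning message this commit rewords.
linear = bnb.nn.Linear8bitLt(64, 64, has_fp16_weights=False).cuda()

# A non-fp16 input (here float32) triggers the warning:
# "MatMul8bitLt: inputs will be cast from torch.float32 to float16
#  during quantization"
x = torch.randn(4, 64, dtype=torch.float32, device="cuda")
y = linear(x)
```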