From cc4858c2fd48ef17a888b9d45bb35bb00e373eb8 Mon Sep 17 00:00:00 2001
From: justheuristic
Date: Sat, 17 Sep 2022 20:46:04 +0300
Subject: warn on first execution that the input matrix is cast to fp16 and
 that the quantized operation is performed in fp16

---
 bitsandbytes/autograd/_functions.py | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'bitsandbytes/autograd/_functions.py')

diff --git a/bitsandbytes/autograd/_functions.py b/bitsandbytes/autograd/_functions.py
index bdcbec5..6d473e9 100644
--- a/bitsandbytes/autograd/_functions.py
+++ b/bitsandbytes/autograd/_functions.py
@@ -1,4 +1,6 @@
 import operator
+import warnings
+
 import torch
 
 import bitsandbytes.functional as F
@@ -229,6 +231,8 @@ class MatMul8bitLt(torch.autograd.Function):
 
         # Cast A to fp16
         A_dtype = A.dtype
+        if A_dtype != torch.float16:
+            warnings.warn(f"MatMul8bitLt: temporarily casting input matrix from {A_dtype} to float16")
         A = A.to(torch.float16)
 
         # 1. Quantize A
--
cgit v1.2.3
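
Note: the hunk above adds the warning inside MatMul8bitLt.forward. The following is a minimal, self-contained sketch of the same pattern under stated assumptions: the helper name cast_input_for_int8_matmul is hypothetical and exists only for illustration, not part of bitsandbytes. Under Python's default warning filter, an identical warnings.warn message raised from the same location is reported only once, which gives the "warn when this is first executed" behaviour the commit message asks for.

    import warnings

    import torch


    def cast_input_for_int8_matmul(A: torch.Tensor) -> torch.Tensor:
        # Hypothetical helper (not part of bitsandbytes) illustrating the
        # pattern added to MatMul8bitLt.forward: warn when the input is
        # silently cast to fp16 before the 8-bit quantized matmul.
        if A.dtype != torch.float16:
            # With Python's default warning filter, repeated identical
            # warnings from this location are shown only once.
            warnings.warn(f"MatMul8bitLt: temporarily casting input matrix from {A.dtype} to float16")
        return A.to(torch.float16)


    if __name__ == "__main__":
        x = torch.randn(2, 4, dtype=torch.float32)
        y = cast_input_for_int8_matmul(x)  # emits the UserWarning
        z = cast_input_for_int8_matmul(x)  # silent on the repeat call
        print(y.dtype, z.dtype)            # torch.float16 torch.float16

Passing stacklevel (e.g. warnings.warn(..., stacklevel=2)) would attribute the warning to the caller instead of this function; the patch itself does not do that.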