| author | justheuristic <justheuristic@gmail.com> | 2022-09-18 00:15:57 +0300 |
|---|---|---|
| committer | justheuristic <justheuristic@gmail.com> | 2022-09-18 00:15:57 +0300 |
| commit | 1145589f84d2ba4eb3b4a18fa33423298f5747c0 (patch) | |
| tree | 5d06ec1e54d2a4c26f9294155fdad65a04a942e0 /bitsandbytes/autograd | |
| parent | d6e25b5f5ea36c1565145da773fbf0f842b1c235 (diff) | |
change typecast behavior
Diffstat (limited to 'bitsandbytes/autograd')
| -rw-r--r-- | bitsandbytes/autograd/_functions.py | 3 |

1 file changed, 1 insertion, 2 deletions
```diff
diff --git a/bitsandbytes/autograd/_functions.py b/bitsandbytes/autograd/_functions.py
index 0e594a5..b54ac24 100644
--- a/bitsandbytes/autograd/_functions.py
+++ b/bitsandbytes/autograd/_functions.py
@@ -328,7 +328,6 @@ class MatMul8bitLt(torch.autograd.Function):
 
         ctx.formatB = formatB
         ctx.grad_shape = input_shape
-        ctx.req_grads = [requires_gradA, requires_gradB, requires_gradBias]
         ctx.dtype_A, ctx.dtype_B, ctx.dtype_bias = A.dtype, B.dtype, None if bias is None else bias.dtype
 
         if requires_gradA or requires_gradB:
@@ -357,7 +356,7 @@ class MatMul8bitLt(torch.autograd.Function):
 
         if req_gradBias:
             # compute grad_bias first before changing grad_output dtype
-            grad_bias = grad_output.sum(0).to(ctx.bias_dtype)
+            grad_bias = grad_output.sum(0).to(ctx.dtype_bias)
 
         # Cast grad_output to fp16
         if len(grad_output.shape) == 3:
```
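For context, the functional change is the `grad_bias` cast: the forward pass stores the bias dtype as `ctx.dtype_bias`, but the backward pass read `ctx.bias_dtype`, an attribute the forward never sets, so the commit points the cast at the attribute that actually exists (and drops the redundant `ctx.req_grads` list along the way). Below is a minimal, self-contained sketch of the same dtype-bookkeeping pattern in a custom `torch.autograd.Function`; the class and helper names are illustrative, not the actual bitsandbytes `MatMul8bitLt` implementation.

```python
import torch

class MatMulWithBias(torch.autograd.Function):
    """Illustrative stand-in for MatMul8bitLt's dtype bookkeeping (not the real kernel)."""

    @staticmethod
    def forward(ctx, A, B, bias=None):
        ctx.save_for_backward(A, B)
        # Same bookkeeping as the diff above: remember the original dtypes,
        # leaving ctx.dtype_bias as None when no bias is given.
        ctx.dtype_A, ctx.dtype_B = A.dtype, B.dtype
        ctx.dtype_bias = None if bias is None else bias.dtype
        out = A @ B
        if bias is not None:
            out = out + bias
        return out

    @staticmethod
    def backward(ctx, grad_output):
        A, B = ctx.saved_tensors
        grad_bias = None
        if ctx.dtype_bias is not None:
            # Reduce over the batch dimension first, then cast to the dtype
            # saved in forward. The one-line fix above is exactly this read:
            # ctx.dtype_bias (set in forward) instead of ctx.bias_dtype.
            grad_bias = grad_output.sum(0).to(ctx.dtype_bias)
        # Mirror the real backward's "compute grad_bias first" comment:
        # only after grad_bias do we change grad_output's dtype for the
        # matmuls (fp16 in bitsandbytes; the weights' dtype in this sketch).
        grad_output = grad_output.to(B.dtype)
        grad_A = (grad_output @ B.t()).to(ctx.dtype_A)
        grad_B = (A.t() @ grad_output).to(ctx.dtype_B)
        return grad_A, grad_B, grad_bias

# Quick check: a float64 bias gets its gradient back in float64.
A = torch.randn(4, 8, requires_grad=True)
B = torch.randn(8, 3, requires_grad=True)
bias = torch.zeros(3, dtype=torch.float64, requires_grad=True)
MatMulWithBias.apply(A, B, bias).sum().backward()
assert bias.grad.dtype == torch.float64
```

Computing `grad_bias` before the dtype change matters: once `grad_output` has been downcast for the matmuls, summing it would produce a lower-precision bias gradient that then needs another cast.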