author     justheuristic <justheuristic@gmail.com>    2022-09-20 06:40:36 +0300
committer  justheuristic <justheuristic@gmail.com>    2022-09-20 06:40:36 +0300
commit     a07825ac31eb5585bd75f9788880536d5fc77f3a (patch)
tree       6a9ea0b4ef915d342edad02f8f67a24eea6d3efd /tests
parent     9b7d307b8cc9d88310fe0c0548e4a0fb094f45d3 (diff)
review
Diffstat (limited to 'tests')
-rw-r--r--   tests/test_modules.py   8
1 file changed, 3 insertions, 5 deletions
diff --git a/tests/test_modules.py b/tests/test_modules.py
index faf91b8..235acde 100644
--- a/tests/test_modules.py
+++ b/tests/test_modules.py
@@ -569,12 +569,10 @@ def test_linear8bitlt_no_fp16_weights(threshold, memory_efficient_backward):
         (o1 * grad_proj).sum().backward()
         grad_ref = grad_proj.flatten(2) @ w2.half() @ w1.half()
         scale = grad_ref.abs().mean()
-        assert torch.allclose(b1.grad, grad_ref, rtol=0, atol=0.05 * scale)
-
-
-
-
+        torch.testing.assert_allclose(b1.grad, grad_ref, rtol=0, atol=0.05 * scale)
+        idx = torch.isclose(b1.grad, grad_ref, atol=0.01 * scale, rtol=0.1)
+        assert (idx == 0).sum().item() <= b1.numel() * 0.0


 def test_linear8bitlt_fp32_bias():
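
For reference, the change swaps a single all-or-nothing torch.allclose assertion for two checks: torch.testing.assert_allclose, which produces a diagnostic message on failure, and a mismatch-counting check that allows at most a fixed fraction of elements to fall outside a tighter torch.isclose tolerance. Below is a minimal standalone sketch of that mismatch-counting pattern; the helper name assert_mostly_close and the synthetic tensors are illustrative assumptions and are not part of the commit or of tests/test_modules.py.

import torch

# Illustrative helper (hypothetical name, not from the commit): pass if at most
# `max_mismatch_frac` of the elements fall outside the torch.isclose tolerance
# band, mirroring the pattern added in the diff above.
def assert_mostly_close(actual, expected, rtol=0.1, atol=0.01, max_mismatch_frac=0.0):
    close = torch.isclose(actual, expected, rtol=rtol, atol=atol)
    mismatched = (~close).sum().item()
    assert mismatched <= expected.numel() * max_mismatch_frac, (
        f"{mismatched}/{expected.numel()} elements differ beyond tolerance"
    )

# Synthetic usage: a tiny perturbation stays within atol, so no elements mismatch.
ref = torch.randn(16, 8, 32)
approx = ref + 1e-4 * torch.randn_like(ref)
assert_mostly_close(approx, ref)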