From 8b3c0f355c779170d55a1975df981df9e53b59fa Mon Sep 17 00:00:00 2001
From: Tim Dettmers
Date: Wed, 10 Nov 2021 15:10:02 -0800
Subject: Added adagrad with tests (no clipping).

---
 bitsandbytes/optim/adagrad.py | 57 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 57 insertions(+)
 create mode 100644 bitsandbytes/optim/adagrad.py

diff --git a/bitsandbytes/optim/adagrad.py b/bitsandbytes/optim/adagrad.py
new file mode 100644
index 0000000..84ade3c
--- /dev/null
+++ b/bitsandbytes/optim/adagrad.py
@@ -0,0 +1,57 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+#
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+from bitsandbytes.optim.optimizer import Optimizer1State
+
+# Adagrad with selectable optimizer-state precision (optim_bits: 32 or 8).
+class Adagrad(Optimizer1State):
+    def __init__(self, params, lr=1e-2, lr_decay=0, weight_decay=0, initial_accumulator_value=0, eps=1e-10,
+                 optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+        if not 0.0 <= lr:
+            raise ValueError("Invalid learning rate: {}".format(lr))
+        if not 0.0 <= weight_decay:
+            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
+        if not 0.0 <= eps:
+            raise ValueError("Invalid epsilon value: {}".format(eps))
+        if initial_accumulator_value != 0.0:
+            raise ValueError('initial_accumulator_value != 0.0 is not supported!')
+        if lr_decay != 0.0:
+            raise ValueError('lr_decay != 0.0 is not supported!')
+        super(Adagrad, self).__init__('adagrad', params, lr, (0.0, 0.0), eps,
+                weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise)
+
+# Convenience variant that always keeps the optimizer state in 8 bits.
+class Adagrad8bit(Optimizer1State):
+    def __init__(self, params, lr=1e-2, lr_decay=0, weight_decay=0, initial_accumulator_value=0, eps=1e-10,
+                 optim_bits=8, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+        if not 0.0 <= lr:
+            raise ValueError("Invalid learning rate: {}".format(lr))
+        if not 0.0 <= weight_decay:
+            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
+        if not 0.0 <= eps:
+            raise ValueError("Invalid epsilon value: {}".format(eps))
+        if initial_accumulator_value != 0.0:
+            raise ValueError('initial_accumulator_value != 0.0 is not supported!')
+        if lr_decay != 0.0:
+            raise ValueError('lr_decay != 0.0 is not supported!')
+        assert block_wise, 'Adagrad8bit supports only block-wise quantization (block_wise=True).'
+        super(Adagrad8bit, self).__init__('adagrad', params, lr, (0.0, 0.0), eps,
+                weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise)
+
+# Convenience variant that always keeps the optimizer state in 32 bits.
+class Adagrad32bit(Optimizer1State):
+    def __init__(self, params, lr=1e-2, lr_decay=0, weight_decay=0, initial_accumulator_value=0, eps=1e-10,
+                 optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+        if not 0.0 <= lr:
+            raise ValueError("Invalid learning rate: {}".format(lr))
+        if not 0.0 <= weight_decay:
+            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
+        if not 0.0 <= eps:
+            raise ValueError("Invalid epsilon value: {}".format(eps))
+        if initial_accumulator_value != 0.0:
+            raise ValueError('initial_accumulator_value != 0.0 is not supported!')
+        if lr_decay != 0.0:
+            raise ValueError('lr_decay != 0.0 is not supported!')
+        super(Adagrad32bit, self).__init__('adagrad', params, lr, (0.0, 0.0), eps,
+                weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise)
-- 
cgit v1.2.3
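
For context, a minimal usage sketch of the optimizers this patch adds. It is an illustration, not part of the patch: the model and tensors are placeholders, and it assumes a CUDA device plus the usual bitsandbytes.optim exports (bnb.optim.Adagrad8bit).

    import torch
    import bitsandbytes as bnb

    # Placeholder model and batch; any CUDA module works. The weight matrix
    # (512*256 elements) exceeds min_8bit_size=4096, so its state is quantized
    # to 8 bits; smaller tensors like the bias fall back to 32-bit state.
    model = torch.nn.Linear(512, 256).cuda()
    inputs = torch.randn(64, 512).cuda()
    targets = torch.randn(64, 256).cuda()

    # Drop-in replacement for torch.optim.Adagrad. Note the constraints the
    # patch enforces: lr_decay and initial_accumulator_value must stay 0,
    # and Adagrad8bit requires block_wise=True (the default).
    optimizer = bnb.optim.Adagrad8bit(model.parameters(), lr=1e-2, eps=1e-10)

    loss = torch.nn.functional.mse_loss(model(inputs), targets)
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()

Swapping in Adagrad32bit (or Adagrad with optim_bits=32) gives the same interface with full-precision state, which is useful for A/B-testing the quantized optimizer.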