NNFS
Neural network library from scratch
RMSProp.hpp
#pragma once

#include <memory>

#include <Eigen/Dense>

#include "Optimizer.hpp"

namespace NNFS
{
    // Root Mean Square Propagation (RMSProp) optimizer.
    //
    // Keeps a per-parameter cache of the exponentially weighted average of
    // squared gradients and scales each update by the inverse root of that
    // cache.
    class RMSProp : public Optimizer
    {
    public:
        // Construct a new RMSProp object.
        //
        // lr      - initial learning rate
        // decay   - learning rate decay (handled by the Optimizer base class)
        // epsilon - small constant that guards against division by zero
        // rho     - decay rate of the running average of squared gradients
        RMSProp(double lr = 1e-3, double decay = 1e-3, double epsilon = 1e-7, double rho = .9)
            : Optimizer(lr, decay),
              _epsilon(epsilon),
              _rho(rho)
        {
        }

        // Update the parameters of the layer.
        void update_params(std::shared_ptr<Dense> &layer)
        {
            Eigen::MatrixXd weights = layer->weights();
            Eigen::MatrixXd biases = layer->biases();
            Eigen::MatrixXd dweights = layer->dweights();
            Eigen::MatrixXd dbiases = layer->dbiases();

            // Running averages of the squared gradients, stored on the layer.
            Eigen::MatrixXd weights_cache = layer->weights_optimizer();
            Eigen::MatrixXd biases_cache = layer->biases_optimizer();

            weights_cache = _rho * weights_cache + (1 - _rho) * dweights.cwiseAbs2();
            biases_cache = _rho * biases_cache + (1 - _rho) * dbiases.cwiseAbs2();

            // Scale each gradient by the root of its cache entry; epsilon
            // keeps the denominator away from zero.
            weights -= (_current_lr * dweights.array() / (weights_cache.cwiseSqrt().array() + _epsilon)).matrix();
            biases -= (_current_lr * dbiases.array() / (biases_cache.cwiseSqrt().array() + _epsilon)).matrix();

            layer->weights_optimizer(weights_cache);
            layer->biases_optimizer(biases_cache);

            layer->weights(weights);
            layer->biases(biases);
        }

    private:
        double _epsilon; // Small constant added to the denominator to avoid division by zero
        double _rho;     // Decay rate of the exponentially weighted average of squared gradients
    };
} // namespace NNFS
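
For reference, update_params implements the standard RMSProp rule. With $g_t$ the current gradient, $c_t$ the cached average of squared gradients (stored via weights_optimizer / biases_optimizer), $\eta_t$ the current learning rate (_current_lr, decayed by the Optimizer base class), and $\rho$, $\epsilon$ the constructor parameters:

$$c_t = \rho\, c_{t-1} + (1 - \rho)\, g_t^2, \qquad \theta_t = \theta_{t-1} - \frac{\eta_t\, g_t}{\sqrt{c_t} + \epsilon}$$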
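
A minimal usage sketch follows. The Dense constructor signature (input size, output size) and the "Dense.hpp" header path are assumptions for illustration; only the RMSProp constructor and update_params come from this file.

#include <memory>

#include "Dense.hpp" // assumed header for NNFS::Dense
#include "RMSProp.hpp"

int main()
{
    // Hypothetical dense layer with 2 inputs and 4 neurons; this
    // constructor signature is an assumption, not shown on this page.
    auto layer = std::make_shared<NNFS::Dense>(2, 4);

    // Defaults written out explicitly: lr, decay, epsilon, rho.
    NNFS::RMSProp optimizer(1e-3, 1e-3, 1e-7, .9);

    // ... a forward/backward pass would fill layer->dweights() and
    // layer->dbiases() with the current gradients ...

    // Apply one RMSProp step to the layer's weights and biases.
    optimizer.update_params(layer);
}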