Struct rusty_machine::learning::optim::grad_desc::AdaGrad

pub struct AdaGrad {
    // some fields omitted
}

Adaptive Gradient Descent

The adaptive gradient descent algorithm (Duchi et al. 2010).

Methods

impl AdaGrad
[src]

fn new(alpha: f64, tau: f64, iters: usize) -> AdaGrad

Constructs a new AdaGrad algorithm.

Examples

use rusty_machine::learning::optim::grad_desc::AdaGrad;

// Create a new AdaGrad algorithm with step size 0.5,
// adaptive scaling constant 1.0, and 100 iterations
let gd = AdaGrad::new(0.5, 1.0, 100);

Trait Implementations

impl Debug for AdaGrad
[src]

fn fmt(&self, __arg_0: &mut Formatter) -> Result

Formats the value using the given formatter.

impl Default for AdaGrad
[src]

fn default() -> AdaGrad

Returns the "default value" for a type.

impl<M: Optimizable<Inputs=Matrix<f64>, Targets=Matrix<f64>>> OptimAlgorithm<M> for AdaGrad
[src]

fn optimize(&self, model: &M, start: &[f64], inputs: &M::Inputs, targets: &M::Targets) -> Vec<f64>

Returns the optimized parameters using gradient optimization.