Added Newton_Raphson and simple_gd optimizers.
KOLANICH committed Jun 11, 2019
1 parent 815a0b9 commit b97e55c
Showing 1 changed file with 23 additions and 0 deletions.
23 changes: 23 additions & 0 deletions autograd/misc/optimizers.py
@@ -29,6 +29,16 @@ def _optimize(grad, x0, callback=None, *args, **kwargs):

    return _optimize

@unflatten_optimizer
def simple_gd(grad, x, callback=None, num_iters=2000, step_size=0.01):
    """A simple gradient descent without momentum.
    grad() must have signature grad(x, i), where i is the iteration number."""
    for i in range(num_iters):
        g = -grad(x, i)
        if callback: callback(x, i, g)
        x = x + step_size * g
    return x

@unflatten_optimizer
def sgd(grad, x, callback=None, num_iters=200, step_size=0.1, mass=0.9):
"""Stochastic gradient descent with momentum.
Expand All @@ -41,6 +51,19 @@ def sgd(grad, x, callback=None, num_iters=200, step_size=0.1, mass=0.9):
x = x + step_size * velocity
return x

def Newton_Raphson(grad, hess, x, optimizerFunc=None, *args, **kwargs):
    """A second-order optimization method. `hess` is a function with the same
    signature as `grad` that returns the Hessian. `optimizerFunc` is an optimizer
    from this module; it is driven with the Newton-Raphson step instead of the
    raw gradient. Defaults to `simple_gd`."""
    if optimizerFunc is None:
        optimizerFunc = simple_gd

    def pseudograd(x, i):
        g = grad(x, i)
        h = hess(x, i)
        # Newton-Raphson step H^-1 g, assuming a flat parameter vector x.
        invH = np.linalg.inv(h)
        return np.einsum('ij,j->i', invH, g)
    return optimizerFunc(pseudograd, x, *args, **kwargs)


@unflatten_optimizer
def rmsprop(grad, x, callback=None, num_iters=100,
            step_size=0.1, gamma=0.9, eps=10**-8):
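A minimal usage sketch for the new simple_gd optimizer (the quadratic objective, iteration count, and step size below are illustrative assumptions, not part of the commit):

import autograd.numpy as np
from autograd import grad
from autograd.misc.optimizers import simple_gd

def objective(x, i):
    # The iteration index i is ignored for this deterministic objective.
    return np.sum((x - 3.0) ** 2)

x_opt = simple_gd(grad(objective), np.zeros(3), num_iters=500, step_size=0.1)
# x_opt is expected to approach [3., 3., 3.]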

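A sketch of driving the new Newton_Raphson wrapper with autograd's grad and hessian, assuming a flat parameter vector and a positive-definite Hessian (the objective and settings are illustrative):

import autograd.numpy as np
from autograd import grad, hessian
from autograd.misc.optimizers import Newton_Raphson

def objective(x, i):
    return np.sum(x ** 4) + np.sum((x - 1.0) ** 2)

x0 = 2.0 * np.ones(2)
# The Newton step H^-1 g is handed to simple_gd (the default optimizerFunc),
# so num_iters and step_size are forwarded to that inner optimizer.
x_opt = Newton_Raphson(grad(objective), hessian(objective), x0,
                       num_iters=50, step_size=1.0)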