Merge branch 'derived2'
adammoss committed Dec 11, 2019
2 parents cdd2df5 + 58bfd46 commit f76b7f6
Showing 21 changed files with 897 additions and 189 deletions.
10 changes: 8 additions & 2 deletions CHANGELOG.txt
@@ -1,8 +1,14 @@
Changelog
=========

0.2.0 (xx.xx.xxxx) - IN DEVELOPMENT
~~~~~~~~~~~~~~~~~~

* Added derived parameters
* Added rejection sampling

0.1.4 (xx.xx.xxxx)
~~~~~~~~~~~~~~~~~~

* Added patience parameter to stop training early when validation loss hasn't improved
* Fixed dynamical stepsize
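The "rejection sampling" entry carries no usage notes in this commit, but in a nested-sampling context the idea is to propose points and accept only those above the current likelihood threshold. A minimal sketch of that idea follows; the function name and signature are illustrative, not nnest's API:

import numpy as np

def rejection_sample(loglike, logl_min, n_points, x_dim, batch_size=1000):
    # Propose uniformly in [-1, 1]^x_dim and keep points whose
    # log-likelihood exceeds the current threshold logl_min
    accepted = []
    while len(accepted) < n_points:
        x = 2 * (np.random.uniform(size=(batch_size, x_dim)) - 0.5)
        keep = loglike(x) > logl_min
        accepted.extend(x[keep])
    return np.array(accepted[:n_points])

As the threshold rises the acceptance rate falls, which is why samplers like nnest eventually draw proposals from a trained flow rather than from the raw prior.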
68 changes: 68 additions & 0 deletions examples/flow/gauss.py
@@ -0,0 +1,68 @@
import os
import sys
import argparse

import numpy as np
from scipy.stats import multivariate_normal
import torch

sys.path.append(os.getcwd())


def main(args):

    from nnest.trainer import Trainer
    from nnest.distributions import GeneralisedNormal

    def loglike(x):
        # Correlated multivariate Gaussian: unit variances, off-diagonal
        # correlation args.corr
        return multivariate_normal.logpdf(x, mean=np.zeros(args.x_dim),
                                          cov=np.eye(args.x_dim) + args.corr * (1 - np.eye(args.x_dim)))

    def transform(x):
        return 3. * x

    n_samples = args.num_live_points
    fraction = args.fraction

    # Draw uniform points in [-1, 1]^x_dim and keep the top `fraction`
    # by likelihood as training samples
    x = 2 * (np.random.uniform(size=(int(n_samples / fraction), args.x_dim)) - 0.5)
    likes = loglike(transform(x))
    idx = np.argsort(-likes)
    samples = x[idx[0:n_samples]]

    if args.base_dist == 'gen_normal':
        base_dist = GeneralisedNormal(torch.zeros(args.x_dim), torch.ones(args.x_dim), torch.tensor(args.beta))
    else:
        base_dist = None

    t = Trainer(args.x_dim, args.hidden_dim, log_dir=args.log_dir, num_blocks=args.num_blocks,
                num_layers=args.num_layers, base_dist=base_dist, scale=args.scale)
    t.train(samples, max_iters=args.train_iters)


if __name__ == '__main__':

    parser = argparse.ArgumentParser()

    parser.add_argument('--x_dim', type=int, default=2,
                        help="Dimensionality")
    parser.add_argument('--train_iters', type=int, default=1000,
                        help="number of train iters")
    parser.add_argument("--num_live_points", type=int, default=1000)
    parser.add_argument('--hidden_dim', type=int, default=128)
    parser.add_argument('--num_layers', type=int, default=1)
    parser.add_argument('--batch_size', type=int, default=100)
    parser.add_argument('-use_gpu', action='store_true')
    parser.add_argument('--flow', type=str, default='nvp')
    parser.add_argument('--num_blocks', type=int, default=5)
    parser.add_argument('--noise', type=float, default=-1)
    parser.add_argument('--run_num', type=str, default='')
    parser.add_argument('--num_slow', type=int, default=0)
    parser.add_argument('--corr', type=float, default=0.99)
    parser.add_argument('--log_dir', type=str, default='logs/flow/gauss')
    parser.add_argument('--beta', type=float, default=8.0)
    parser.add_argument('--base_dist', type=str, default='')
    parser.add_argument('--scale', type=str, default='constant')
    parser.add_argument('--fraction', type=float, default=0.02)

    args = parser.parse_args()
    main(args)
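The new examples accept --base_dist gen_normal with a shape parameter --beta. Assuming GeneralisedNormal implements the standard generalised (exponential-power) normal distribution, its density is

p(x \mid \mu, \alpha, \beta) = \frac{\beta}{2 \alpha \, \Gamma(1/\beta)} \exp\!\left( - \left| \frac{x - \mu}{\alpha} \right|^{\beta} \right)

which recovers a Gaussian at \beta = 2 and flattens towards a uniform distribution on [\mu - \alpha, \mu + \alpha] as \beta grows; the flat-topped default of beta = 8.0 presumably gives the flow a base density closer to the bounded sampling cube.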
68 changes: 68 additions & 0 deletions examples/flow/himmelblau.py
@@ -0,0 +1,68 @@
import os
import sys
import argparse

import numpy as np
import torch

sys.path.append(os.getcwd())


def main(args):

    from nnest.trainer import Trainer
    from nnest.distributions import GeneralisedNormal

    def loglike(z):
        # Negative Himmelblau function: four identical global maxima,
        # each with loglike = 0
        z1 = z[:, 0]
        z2 = z[:, 1]
        return -(z1 ** 2 + z2 - 11.) ** 2 - (z1 + z2 ** 2 - 7.) ** 2

    def transform(x):
        return 5. * x

    n_samples = args.num_live_points
    fraction = args.fraction

    # Himmelblau's function is strictly two-dimensional, so sample in 2D
    # and keep the top `fraction` of points by likelihood
    x = 2 * (np.random.uniform(size=(int(n_samples / fraction), 2)) - 0.5)
    likes = loglike(transform(x))
    idx = np.argsort(-likes)
    samples = x[idx[0:n_samples]]

    if args.base_dist == 'gen_normal':
        base_dist = GeneralisedNormal(torch.zeros(args.x_dim), torch.ones(args.x_dim), torch.tensor(args.beta))
    else:
        base_dist = None

    t = Trainer(args.x_dim, args.hidden_dim, log_dir=args.log_dir, num_blocks=args.num_blocks,
                num_layers=args.num_layers, base_dist=base_dist, scale=args.scale)
    t.train(samples, max_iters=args.train_iters)


if __name__ == '__main__':

    parser = argparse.ArgumentParser()

    parser.add_argument('--x_dim', type=int, default=2,
                        help="Dimensionality")
    parser.add_argument('--train_iters', type=int, default=1000,
                        help="number of train iters")
    parser.add_argument("--num_live_points", type=int, default=1000)
    parser.add_argument('--hidden_dim', type=int, default=128)
    parser.add_argument('--num_layers', type=int, default=1)
    parser.add_argument('--batch_size', type=int, default=100)
    parser.add_argument('-use_gpu', action='store_true')
    parser.add_argument('--flow', type=str, default='nvp')
    parser.add_argument('--num_blocks', type=int, default=5)
    parser.add_argument('--noise', type=float, default=-1)
    parser.add_argument('--run_num', type=str, default='')
    parser.add_argument('--num_slow', type=int, default=0)
    parser.add_argument('--corr', type=float, default=0.99)
    parser.add_argument('--log_dir', type=str, default='logs/flow/himmelblau')
    parser.add_argument('--beta', type=float, default=8.0)
    parser.add_argument('--base_dist', type=str, default='')
    parser.add_argument('--scale', type=str, default='constant')
    parser.add_argument('--fraction', type=float, default=0.02)

    args = parser.parse_args()
    main(args)
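As a quick sanity check on the likelihood above: Himmelblau's function has four global minima of value 0, so loglike should evaluate to roughly 0 at each (mode locations from the standard optimisation literature):

import numpy as np

def loglike(z):
    z1, z2 = z[:, 0], z[:, 1]
    return -(z1 ** 2 + z2 - 11.) ** 2 - (z1 + z2 ** 2 - 7.) ** 2

# The four global maxima of the log-likelihood
modes = np.array([[3.0, 2.0],
                  [-2.805118, 3.131312],
                  [-3.779310, -3.283186],
                  [3.584428, -1.848126]])
print(loglike(modes))  # all four values are ~0

This multi-modality is what makes the example a useful stress test for a flow trained on only the top-likelihood samples.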
121 changes: 121 additions & 0 deletions examples/flow/mog4.py
@@ -0,0 +1,121 @@
import os
import sys
import argparse
import copy

import numpy as np
import scipy.special
import torch

sys.path.append(os.getcwd())


def log_gaussian_pdf(theta, sigma=1, mu=0, ndim=None):
    if ndim is None:
        try:
            ndim = len(theta)
        except TypeError:
            assert isinstance(theta, (float, int)), theta
            ndim = 1
    logl = -(np.sum((theta - mu) ** 2) / (2 * sigma ** 2))
    logl -= np.log(2 * np.pi * (sigma ** 2)) * ndim / 2.0
    return logl


class Gaussian(object):

    def __init__(self, sigma=1.0, nderived=0):
        self.sigma = sigma
        self.nderived = nderived

    def __call__(self, theta):
        logl = log_gaussian_pdf(theta, sigma=self.sigma, mu=0)
        return logl, [0.0] * self.nderived


class GaussianMix(object):
    """Mixture of 2-4 equal-width Gaussians spaced around the origin."""

    def __init__(self, sep=4, weights=(0.4, 0.3, 0.2, 0.1), sigma=1,
                 nderived=0):
        assert len(weights) in [2, 3, 4], (
            'Weights must have 2, 3 or 4 components. Weights=' + str(weights))
        assert np.isclose(sum(weights), 1), (
            'Weights must sum to 1! Weights=' + str(weights))
        self.nderived = nderived
        self.weights = weights
        self.sigmas = [sigma] * len(weights)
        positions = []
        positions.append(np.asarray([0, sep]))
        positions.append(np.asarray([0, -sep]))
        positions.append(np.asarray([sep, 0]))
        positions.append(np.asarray([-sep, 0]))
        self.positions = positions[:len(weights)]

    def __call__(self, theta):
        thetas = []
        for pos in self.positions:
            # Shift the first two coordinates onto each component centre
            thetas.append(copy.deepcopy(theta))
            thetas[-1][:2] -= pos
        logls = [(Gaussian(sigma=self.sigmas[i])(thetas[i])[0]
                  + np.log(self.weights[i])) for i in range(len(self.weights))]
        logl = scipy.special.logsumexp(logls)
        return logl, [0.0] * self.nderived



def main(args):

    from nnest.trainer import Trainer
    from nnest.distributions import GeneralisedNormal

    g = GaussianMix()

    def loglike(z):
        return np.array([g(x)[0] for x in z])

    def transform(x):
        return 10. * x

    n_samples = args.num_live_points
    fraction = args.fraction

    # Draw uniform points in [-1, 1]^x_dim and keep the top `fraction`
    # by likelihood as training samples
    x = 2 * (np.random.uniform(size=(int(n_samples / fraction), args.x_dim)) - 0.5)
    likes = loglike(transform(x))
    idx = np.argsort(-likes)
    samples = x[idx[0:n_samples]]

    if args.base_dist == 'gen_normal':
        base_dist = GeneralisedNormal(torch.zeros(args.x_dim), torch.ones(args.x_dim), torch.tensor(args.beta))
    else:
        base_dist = None

    t = Trainer(args.x_dim, args.hidden_dim, log_dir=args.log_dir, num_blocks=args.num_blocks,
                num_layers=args.num_layers, base_dist=base_dist, scale=args.scale)
    t.train(samples, max_iters=args.train_iters)


if __name__ == '__main__':

    parser = argparse.ArgumentParser()

    parser.add_argument('--x_dim', type=int, default=2,
                        help="Dimensionality")
    parser.add_argument('--train_iters', type=int, default=1000,
                        help="number of train iters")
    parser.add_argument("--num_live_points", type=int, default=1000)
    parser.add_argument('--hidden_dim', type=int, default=128)
    parser.add_argument('--num_layers', type=int, default=1)
    parser.add_argument('--batch_size', type=int, default=100)
    parser.add_argument('-use_gpu', action='store_true')
    parser.add_argument('--flow', type=str, default='nvp')
    parser.add_argument('--num_blocks', type=int, default=5)
    parser.add_argument('--noise', type=float, default=-1)
    parser.add_argument('--run_num', type=str, default='')
    parser.add_argument('--num_slow', type=int, default=0)
    parser.add_argument('--corr', type=float, default=0.99)
    parser.add_argument('--log_dir', type=str, default='logs/flow/mog4')
    parser.add_argument('--beta', type=float, default=8.0)
    parser.add_argument('--base_dist', type=str, default='')
    parser.add_argument('--scale', type=str, default='constant')
    parser.add_argument('--fraction', type=float, default=0.02)

    args = parser.parse_args()
    main(args)
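GaussianMix evaluates the mixture with scipy.special.logsumexp rather than summing probabilities directly; this matters because component log-densities can be so negative that exp() underflows to zero. A standalone illustration:

import numpy as np
from scipy.special import logsumexp

log_w = np.log([0.4, 0.3, 0.2, 0.1])            # mixture weights
log_p = np.array([-800., -802., -810., -820.])  # component log-densities

# Naive evaluation underflows: exp(-800) is 0 in double precision
print(np.log(np.sum(np.exp(log_w + log_p))))    # -inf
# logsumexp factors out the maximum term first and stays finite
print(logsumexp(log_w + log_p))                 # approx -800.8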
66 changes: 66 additions & 0 deletions examples/flow/rosenbrock.py
@@ -0,0 +1,66 @@
import os
import sys
import argparse

import numpy as np
import torch

sys.path.append(os.getcwd())


def main(args):

    from nnest.trainer import Trainer
    from nnest.distributions import GeneralisedNormal

    def loglike(z):
        # Negative Rosenbrock function summed over consecutive coordinate
        # pairs; global maximum loglike = 0 at (1, ..., 1)
        return np.array([-sum(100.0 * (x[1:] - x[:-1] ** 2.0) ** 2.0
                              + (1 - x[:-1]) ** 2.0) for x in z])

    def transform(x):
        return 5. * x

    n_samples = args.num_live_points
    fraction = args.fraction

    # Draw uniform points in [-1, 1]^x_dim and keep the top `fraction`
    # by likelihood as training samples
    x = 2 * (np.random.uniform(size=(int(n_samples / fraction), args.x_dim)) - 0.5)
    likes = loglike(transform(x))
    idx = np.argsort(-likes)
    samples = x[idx[0:n_samples]]

    if args.base_dist == 'gen_normal':
        base_dist = GeneralisedNormal(torch.zeros(args.x_dim), torch.ones(args.x_dim), torch.tensor(args.beta))
    else:
        base_dist = None

    t = Trainer(args.x_dim, args.hidden_dim, log_dir=args.log_dir, num_blocks=args.num_blocks,
                num_layers=args.num_layers, base_dist=base_dist, scale=args.scale)
    t.train(samples, max_iters=args.train_iters)


if __name__ == '__main__':

    parser = argparse.ArgumentParser()

    parser.add_argument('--x_dim', type=int, default=2,
                        help="Dimensionality")
    parser.add_argument('--train_iters', type=int, default=1000,
                        help="number of train iters")
    parser.add_argument("--num_live_points", type=int, default=1000)
    parser.add_argument('--hidden_dim', type=int, default=128)
    parser.add_argument('--num_layers', type=int, default=1)
    parser.add_argument('--batch_size', type=int, default=100)
    parser.add_argument('-use_gpu', action='store_true')
    parser.add_argument('--flow', type=str, default='nvp')
    parser.add_argument('--num_blocks', type=int, default=5)
    parser.add_argument('--noise', type=float, default=-1)
    parser.add_argument('--run_num', type=str, default='')
    parser.add_argument('--num_slow', type=int, default=0)
    parser.add_argument('--corr', type=float, default=0.99)
    parser.add_argument('--log_dir', type=str, default='logs/flow/rosenbrock')
    parser.add_argument('--beta', type=float, default=8.0)
    parser.add_argument('--base_dist', type=str, default='')
    parser.add_argument('--scale', type=str, default='constant')
    parser.add_argument('--fraction', type=float, default=0.02)

    args = parser.parse_args()
    main(args)
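A quick check of the Rosenbrock log-likelihood above: the function's global minimum is 0 at (1, ..., 1), so the log-likelihood peaks at 0 there in any dimension:

import numpy as np

def loglike(z):
    return np.array([-sum(100.0 * (x[1:] - x[:-1] ** 2.0) ** 2.0
                          + (1 - x[:-1]) ** 2.0) for x in z])

print(loglike(np.array([[1.0, 1.0], [0.0, 0.0]])))  # [ 0. -1.]
print(loglike(np.array([[1.0, 1.0, 1.0]])))         # [0.]

The narrow curved valley around the maximum is the standard stress test for how well the flow can model strongly correlated, banana-shaped targets.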
14 changes: 13 additions & 1 deletion examples/nested/gauss.py
@@ -4,23 +4,30 @@

import numpy as np
from scipy.stats import multivariate_normal
import torch

sys.path.append(os.getcwd())


def main(args):

    from nnest import NestedSampler
    from nnest.distributions import GeneralisedNormal

    def loglike(x):
        return multivariate_normal.logpdf(x, mean=np.zeros(args.x_dim),
                                          cov=np.eye(args.x_dim) + args.corr * (1 - np.eye(args.x_dim)))

    def transform(x):
        return 3. * x

    if args.base_dist == 'gen_normal':
        base_dist = GeneralisedNormal(torch.zeros(args.x_dim), torch.ones(args.x_dim), torch.tensor(args.beta))
    else:
        base_dist = None

    sampler = NestedSampler(args.x_dim, loglike, transform=transform, log_dir=args.log_dir, num_live_points=args.num_live_points,
                            hidden_dim=args.hidden_dim, num_layers=args.num_layers, num_blocks=args.num_blocks, num_slow=args.num_slow,
                            use_gpu=args.use_gpu, base_dist=base_dist, scale=args.scale)
    sampler.run(train_iters=args.train_iters, mcmc_steps=args.mcmc_steps, volume_switch=args.switch, noise=args.noise)


@@ -46,6 +53,11 @@ def transform(x):
    parser.add_argument('--num_slow', type=int, default=0)
    parser.add_argument('--corr', type=float, default=0.99)
    parser.add_argument('--log_dir', type=str, default='logs/gauss')
    parser.add_argument('--base_dist', type=str, default='')
    parser.add_argument('--scale', type=str, default='constant')
    parser.add_argument('--beta', type=float, default=8.0)

    args = parser.parse_args()
    main(args)

    print('Expected log Z: %5.4f' % (args.x_dim * np.log(6)))
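On the benchmark printed in the final line: transform maps the sampler's native [-1, 1]^d cube to a uniform prior over [-3, 3]^d, of volume 6^d, and the Gaussian likelihood is normalised, so the analytic evidence is

Z = \int_{[-3,3]^d} \mathcal{L}(\theta) \, \frac{d\theta}{6^d} \approx 6^{-d}, \qquad d \log 6 \approx 3.5835 \;\; (d = 2)

The script prints the magnitude d log 6; whether the log Z that nnest reports carries the minus sign depends on its prior-normalisation convention, which this diff does not show.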