Merge pull request #247 from LilithHafner/lh/format
Run JuliaFormatter.format()
ChrisRackauckas authored Feb 15, 2024
2 parents 46d9f10 + a2a4836 commit 532e9e4
Showing 32 changed files with 263 additions and 243 deletions.
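
The commit body above is the whole change description: the PR mechanically reapplied the project's formatter. For reference, such a pass can be reproduced with JuliaFormatter; a minimal sketch, assuming the repository ships a `.JuliaFormatter.toml` selecting the SciML style, as SciML packages conventionally do:

```julia
using JuliaFormatter

# Reformat all Julia sources under the current directory in place.
# Style and options are read from .JuliaFormatter.toml when present;
# format returns true if every file was already correctly formatted.
format(".")
```

The diffs below are purely cosmetic: argument lists that were aligned under their opening parenthesis are re-indented to a fixed continuation indent, and a few over-long lines are split, with no behavioral changes. The addition/deletion counts differ (263 vs. 243) only because of those line splits.
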
14 changes: 7 additions & 7 deletions docs/make.jl
@@ -8,12 +8,12 @@ ENV["GKSwstype"] = "100"
 include("pages.jl")
 
 makedocs(sitename = "DiffEqParamEstim.jl",
-         authors = "Chris Rackauckas et al.",
-         modules = [DiffEqParamEstim],
-         clean = true, doctest = false, linkcheck = true,
-         format = Documenter.HTML(assets = ["assets/favicon.ico"],
-                                  canonical = "https://docs.sciml.ai/DiffEqParamEstim/stable/"),
-         pages = pages)
+    authors = "Chris Rackauckas et al.",
+    modules = [DiffEqParamEstim],
+    clean = true, doctest = false, linkcheck = true,
+    format = Documenter.HTML(assets = ["assets/favicon.ico"],
+        canonical = "https://docs.sciml.ai/DiffEqParamEstim/stable/"),
+    pages = pages)
 
 deploydocs(repo = "github.com/SciML/DiffEqParamEstim.jl";
-           push_preview = true)
+    push_preview = true)

10 changes: 5 additions & 5 deletions docs/pages.jl
@@ -1,10 +1,10 @@
 pages = ["index.md",
     "getting_started.md",
     "Tutorials" => Any["tutorials/global_optimization.md",
-                       "tutorials/generalized_likelihood.md",
-                       "tutorials/stochastic_evaluations.md",
-                       "tutorials/ensemble.md"],
+        "tutorials/generalized_likelihood.md",
+        "tutorials/stochastic_evaluations.md",
+        "tutorials/ensemble.md"],
     "Methods" => Any["methods/recommended_methods.md",
-                     "methods/optimization_based_methods.md",
-                     "methods/collocation_loss.md"],
+        "methods/optimization_based_methods.md",
+        "methods/collocation_loss.md"]
 ]

34 changes: 17 additions & 17 deletions docs/src/getting_started.md
@@ -77,8 +77,8 @@ function:
 
 ```@example ode
 cost_function = build_loss_objective(prob, Tsit5(), L2Loss(t, data),
-                                     Optimization.AutoForwardDiff(),
-                                     maxiters = 10000, verbose = false)
+    Optimization.AutoForwardDiff(),
+    maxiters = 10000, verbose = false)
 ```
 
 This objective function internally is calling the ODE solver to get solutions
@@ -103,8 +103,8 @@ of parameter values:
 ```@example ode
 vals = 0.0:0.1:10.0
 plot(vals, [cost_function(i) for i in vals], yscale = :log10,
-     xaxis = "Parameter", yaxis = "Cost", title = "1-Parameter Cost Function",
-     lw = 3)
+    xaxis = "Parameter", yaxis = "Cost", title = "1-Parameter Cost Function",
+    lw = 3)
 ```
 
 Here we see that there is a very well-defined minimum in our cost function at
@@ -168,8 +168,8 @@ We can build an objective function and solve the multiple parameter version just
 
 ```@example ode
 cost_function = build_loss_objective(prob, Tsit5(), L2Loss(t, data),
-                                     Optimization.AutoForwardDiff(),
-                                     maxiters = 10000, verbose = false)
+    Optimization.AutoForwardDiff(),
+    maxiters = 10000, verbose = false)
 optprob = Optimization.OptimizationProblem(cost_function, [1.3, 0.8, 2.8, 1.2])
 result_bfgs = solve(optprob, BFGS())
 ```
@@ -184,10 +184,10 @@ differencing loss to the total loss.
 
 ```@example ode
 cost_function = build_loss_objective(prob, Tsit5(),
-                                     L2Loss(t, data, differ_weight = 0.3,
-                                            data_weight = 0.7),
-                                     Optimization.AutoForwardDiff(),
-                                     maxiters = 10000, verbose = false)
+    L2Loss(t, data, differ_weight = 0.3,
+        data_weight = 0.7),
+    Optimization.AutoForwardDiff(),
+    maxiters = 10000, verbose = false)
 optprob = Optimization.OptimizationProblem(cost_function, [1.3, 0.8, 2.8, 1.2])
 result_bfgs = solve(optprob, BFGS())
 ```
@@ -206,14 +206,14 @@ ms_prob = ODEProblem(ms_f1, ms_u0, tspan, ms_p)
 t = collect(range(0, stop = 10, length = 200))
 data = Array(solve(ms_prob, Tsit5(), saveat = t, abstol = 1e-12, reltol = 1e-12))
 bound = Tuple{Float64, Float64}[(0, 10), (0, 10), (0, 10), (0, 10),
-                                (0, 10), (0, 10), (0, 10), (0, 10),
-                                (0, 10), (0, 10), (0, 10), (0, 10),
-                                (0, 10), (0, 10), (0, 10), (0, 10), (0, 10), (0, 10)]
+    (0, 10), (0, 10), (0, 10), (0, 10),
+    (0, 10), (0, 10), (0, 10), (0, 10),
+    (0, 10), (0, 10), (0, 10), (0, 10), (0, 10), (0, 10)]
 ms_obj = multiple_shooting_objective(ms_prob, Tsit5(), L2Loss(t, data),
-                                     Optimization.AutoForwardDiff();
-                                     discontinuity_weight = 1.0, abstol = 1e-12,
-                                     reltol = 1e-12)
+    Optimization.AutoForwardDiff();
+    discontinuity_weight = 1.0, abstol = 1e-12,
+    reltol = 1e-12)
 ```
 
 This creates the objective function that can be passed to an optimizer, from which we can then get the parameter values
@@ -222,7 +222,7 @@ a global optimization method to improve robustness even more:
 
 ```@example ode
 optprob = Optimization.OptimizationProblem(ms_obj, zeros(18), lb = first.(bound),
-                                           ub = last.(bound))
+    ub = last.(bound))
 optsol_ms = solve(optprob, BBO_adaptive_de_rand_1_bin_radiuslimited(), maxiters = 10_000)
 ```
 

4 changes: 2 additions & 2 deletions docs/src/methods/collocation_loss.md
@@ -11,7 +11,7 @@ but is much faster, and is a good method to try first to get in the general
 
 ```julia
 function two_stage_objective(prob::DEProblem, tpoints, data, adtype = SciMLBase.NoAD(), ;
-                             kernel = :Epanechnikov,
-                             loss_func = L2DistLoss)
+        kernel = :Epanechnikov,
+        loss_func = L2DistLoss)
 end
 ```

28 changes: 14 additions & 14 deletions docs/src/methods/optimization_based_methods.md
@@ -9,11 +9,11 @@ and MathProgBase-associated solvers like NLopt.
 
 ```julia
 function build_loss_objective(prob::DEProblem, alg, loss,
-                              adtype = SciMLBase.NoAD(),
-                              regularization = nothing;
-                              priors = nothing,
-                              prob_generator = STANDARD_PROB_GENERATOR,
-                              kwargs...)
+        adtype = SciMLBase.NoAD(),
+        regularization = nothing;
+        priors = nothing,
+        prob_generator = STANDARD_PROB_GENERATOR,
+        kwargs...)
 end
 ```
 
@@ -38,12 +38,12 @@ proceeds as follows:
 
 ```julia
 function multiple_shooting_objective(prob::DiffEqBase.DEProblem, alg, loss,
-                                     adtype = SciMLBase.NoAD(),
-                                     regularization = nothing;
-                                     priors = nothing,
-                                     discontinuity_weight = 1.0,
-                                     prob_generator = STANDARD_PROB_GENERATOR,
-                                     kwargs...)
+        adtype = SciMLBase.NoAD(),
+        regularization = nothing;
+        priors = nothing,
+        discontinuity_weight = 1.0,
+        prob_generator = STANDARD_PROB_GENERATOR,
+        kwargs...)
 end
 ```
 
@@ -67,7 +67,7 @@ cost functions:
 
 ```julia
 L2Loss(t, data; differ_weight = nothing, data_weight = nothing,
-       colloc_grad = nothing, dudt = nothing)
+    colloc_grad = nothing, dudt = nothing)
 ```
 
 where `t` is the set of timepoints which the data are found at, and
@@ -213,6 +213,6 @@ the parameters or a multivariate distribution.
 
 ```julia
 ms_obj = multiple_shooting_objective(ms_prob, Tsit5(), L2Loss(t, data); priors = priors,
-                                     discontinuity_weight = 1.0, abstol = 1e-12,
-                                     reltol = 1e-12)
+    discontinuity_weight = 1.0, abstol = 1e-12,
+    reltol = 1e-12)
 ```

12 changes: 6 additions & 6 deletions docs/src/tutorials/ensemble.md
@@ -44,7 +44,7 @@ initial_conditions = [
     [0.5, 0.5],
     [2.0, 1.0],
     [1.0, 2.0],
-    [2.0, 2.0],
+    [2.0, 2.0]
 ]
 function prob_func(prob, i, repeat)
     ODEProblem(prob.f, initial_conditions[i], prob.tspan, prob.p)
@@ -111,8 +111,8 @@ Put this into build_loss_objective.
 
 ```@example ensemble
 obj = build_loss_objective(enprob, Tsit5(), loss, Optimization.AutoForwardDiff(),
-                           trajectories = N,
-                           saveat = data_times)
+    trajectories = N,
+    saveat = data_times)
 ```
 
 Notice that we added the kwargs for `solve` of the `EnsembleProblem` into this. They get passed to the internal `solve`
@@ -141,9 +141,9 @@ to decrease the tolerance of the ODE solvers via
 
 ```@example ensemble
 obj = build_loss_objective(enprob, Tsit5(), loss, Optimization.AutoForwardDiff(),
-                           trajectories = N,
-                           abstol = 1e-8, reltol = 1e-8,
-                           saveat = data_times)
+    trajectories = N,
+    abstol = 1e-8, reltol = 1e-8,
+    saveat = data_times)
 optprob = OptimizationProblem(obj, [1.3, 0.9], lb = lower, ub = upper)
 result = solve(optprob, BFGS()) #OptimizationOptimJL detects that it's a box constrained problem and use Fminbox wrapper over BFGS
 ```

10 changes: 5 additions & 5 deletions docs/src/tutorials/generalized_likelihood.md
@@ -69,7 +69,7 @@ corresponding to that distribution fit:
 
 ```@example likelihood
 obj = build_loss_objective(prob1, Tsit5(), LogLikeLoss(t, distributions),
-                           maxiters = 10000, verbose = false)
+    maxiters = 10000, verbose = false)
 ```
 
 First, let's use the objective function to plot the likelihood landscape:
@@ -79,8 +79,8 @@ using Plots;
 plotly();
 prange = 0.5:0.1:5.0
 heatmap(prange, prange, [obj([j, i]) for i in prange, j in prange],
-        yscale = :log10, xlabel = "Parameter 1", ylabel = "Parameter 2",
-        title = "Likelihood Landscape")
+    yscale = :log10, xlabel = "Parameter 1", ylabel = "Parameter 2",
+    title = "Likelihood Landscape")
 ```
 
 ![2 Parameter Likelihood](../assets/2paramlike.png)
@@ -92,8 +92,8 @@ one-dimensional slice:
 
 ```julia
 plot(prange, [obj([1.5, i]) for i in prange], lw = 3,
-     title = "Parameter 2 Likelihood (Parameter 1 = 1.5)",
-     xlabel = "Parameter 2", ylabel = "Objective Function Value")
+    title = "Parameter 2 Likelihood (Parameter 1 = 1.5)",
+    xlabel = "Parameter 2", ylabel = "Objective Function Value")
 ```
 
 ![1 Parameter Likelihood](../assets/1paramlike.png)

8 changes: 4 additions & 4 deletions docs/src/tutorials/global_optimization.md
@@ -56,10 +56,10 @@ We can even use things like the Improved Stochastic Ranking Evolution Strategy
 ```@example global_optimization
 optprob = Optimization.OptimizationProblem(obj, [0.2], lb = [-1.0], ub = [5.0])
 res = solve(optprob,
-            OptimizationMOI.MOI.OptimizerWithAttributes(NLopt.Optimizer,
-                                                        "algorithm" => :GN_ISRES,
-                                                        "xtol_rel" => 1e-3,
-                                                        "maxeval" => 10000))
+    OptimizationMOI.MOI.OptimizerWithAttributes(NLopt.Optimizer,
+        "algorithm" => :GN_ISRES,
+        "xtol_rel" => 1e-3,
+        "maxeval" => 10000))
 ```
 
 which is very robust to the initial condition. We can also directly use the NLopt interface as below. The fastest result comes from the

10 changes: 5 additions & 5 deletions docs/src/tutorials/stochastic_evaluations.md
@@ -52,8 +52,8 @@ We use Optim.jl for optimization below
 
 ```@example sde
 obj = build_loss_objective(monte_prob, SOSRI(), L2Loss(t, aggregate_data),
-                           Optimization.AutoForwardDiff(),
-                           maxiters = 10000, verbose = false, trajectories = 1000)
+    Optimization.AutoForwardDiff(),
+    maxiters = 10000, verbose = false, trajectories = 1000)
 optprob = Optimization.OptimizationProblem(obj, [1.0, 0.5])
 result = solve(optprob, Optim.BFGS())
 ```
@@ -68,9 +68,9 @@ Instead, when we use `L2Loss` with first differencing enabled, we get much more
 
 ```@example sde
 obj = build_loss_objective(monte_prob, SRIW1(),
-                           L2Loss(t, aggregate_data, differ_weight = 1.0,
-                                  data_weight = 0.5), Optimization.AutoForwardDiff(),
-                           verbose = false, trajectories = 1000, maxiters = 1000)
+    L2Loss(t, aggregate_data, differ_weight = 1.0,
+        data_weight = 0.5), Optimization.AutoForwardDiff(),
+    verbose = false, trajectories = 1000, maxiters = 1000)
 optprob = Optimization.OptimizationProblem(obj, [1.0, 0.5])
 result = solve(optprob, Optim.BFGS())
 result.original

10 changes: 5 additions & 5 deletions src/DiffEqParamEstim.jl
@@ -7,18 +7,18 @@ import PreallocationTools
 STANDARD_PROB_GENERATOR(prob, p) = remake(prob; u0 = eltype(p).(prob.u0), p = p)
 function STANDARD_PROB_GENERATOR(prob::EnsembleProblem, p)
     EnsembleProblem(remake(prob.prob; u0 = eltype(p).(prob.prob.u0), p = p),
-                    output_func = prob.output_func,
-                    prob_func = prob.prob_func,
-                    reduction = prob.reduction,
-                    u_init = prob.u_init)
+        output_func = prob.output_func,
+        prob_func = prob.prob_func,
+        reduction = prob.reduction,
+        u_init = prob.u_init)
 end
 STANDARD_MS_PROB_GENERATOR = function (prob, p, k)
     t0, tf = prob.tspan
     P, N = length(prob.p), length(prob.u0)
     K = Int((length(p) - P) / N)
     τ = range(t0, tf, length = K + 1)
     remake(prob; u0 = p[(1 + (k - 1) * N):(k * N)], p = p[(end - P + 1):end],
-           tspan = (τ[k], τ[k + 1]))
+        tspan = (τ[k], τ[k + 1]))
 end
 
 include("cost_functions.jl")

12 changes: 6 additions & 6 deletions src/build_loss_objective.jl
@@ -1,16 +1,16 @@
 export build_loss_objective
 
 function build_loss_objective(prob::SciMLBase.AbstractSciMLProblem, alg, loss,
-                              adtype = SciMLBase.NoAD(),
-                              regularization = nothing, args...;
-                              priors = nothing,
-                              prob_generator = STANDARD_PROB_GENERATOR,
-                              kwargs...)
+        adtype = SciMLBase.NoAD(),
+        regularization = nothing, args...;
+        priors = nothing,
+        prob_generator = STANDARD_PROB_GENERATOR,
+        kwargs...)
     cost_function = function (p, _ = nothing)
         tmp_prob = prob_generator(prob, p)
         if loss isa Union{L2Loss, LogLikeLoss}
             sol = solve(tmp_prob, alg, args...; saveat = loss.t, save_everystep = false,
-                        dense = false, kwargs...)
+                dense = false, kwargs...)
         else
            sol = solve(tmp_prob, alg, args...; kwargs...)
        end

20 changes: 12 additions & 8 deletions src/cost_functions.jl
@@ -89,10 +89,12 @@ function (f::L2Loss)(sol::SciMLBase.AbstractSciMLSolution)
         for j in 1:length(sol[i])
             if diff_weight isa Real
                 sumsq += diff_weight *
-                         ((data[j, i] - data[j, i - 1] - sol[j, i] + sol[j, i - 1])^2)
+                         ((data[j, i] - data[j, i - 1] - sol[j, i] +
+                           sol[j, i - 1])^2)
             else
                 sumsq += diff_weight[j, i] *
-                         ((data[j, i] - data[j, i - 1] - sol[j, i] + sol[j, i - 1])^2)
+                         ((data[j, i] - data[j, i - 1] - sol[j, i] +
+                           sol[j, i - 1])^2)
             end
         end
     end
@@ -112,10 +114,12 @@ function (f::L2Loss)(sol::SciMLBase.AbstractSciMLSolution)
         for j in 1:length(sol[i])
             if diff_weight isa Real
                 sumsq += diff_weight *
-                         ((data[j, i] - data[j, i - 1] - sol[j, i] + sol[j, i - 1])^2)
+                         ((data[j, i] - data[j, i - 1] - sol[j, i] +
+                           sol[j, i - 1])^2)
             else
                 sumsq += diff_weight[j, i] *
-                         ((data[j, i] - data[j, i - 1] - sol[j, i] + sol[j, i - 1])^2)
+                         ((data[j, i] - data[j, i - 1] - sol[j, i] +
+                           sol[j, i - 1])^2)
             end
         end
     end
@@ -135,11 +139,11 @@ end
 matrixize(x) = x isa Vector ? reshape(x, 1, length(x)) : x
 
 function L2Loss(t, data; differ_weight = nothing, data_weight = nothing,
-                colloc_grad = nothing,
-                dudt = nothing)
+        colloc_grad = nothing,
+        dudt = nothing)
     L2Loss(t, matrixize(data), matrixize(differ_weight),
-           matrixize(data_weight), matrixize(colloc_grad),
-           colloc_grad == nothing ? nothing : zeros(size(colloc_grad)))
+        matrixize(data_weight), matrixize(colloc_grad),
+        colloc_grad == nothing ? nothing : zeros(size(colloc_grad)))
 end
 
 function (f::L2Loss)(sol::DiffEqBase.AbstractEnsembleSolution)

(Diffs for the remaining 20 of the 32 changed files were not loaded.)