From 117ffadec5cf18ca0e6bcc7e59d808f4b433f8c8 Mon Sep 17 00:00:00 2001
From: "Documenter.jl"
Date: Thu, 24 Aug 2023 08:13:23 +0000
Subject: [PATCH] build based on d5adbf3

---
 dev/apireference/index.html | 104 +-
 dev/changelog/index.html | 2 +-
 dev/examples/FAST_hydro_thermal/index.html | 8 +-
 .../FAST_production_management/index.html | 2 +-
 dev/examples/FAST_quickstart/index.html | 2 +-
 dev/examples/Hydro_thermal/index.html | 21 +-
 dev/examples/SDDP.log | 653 ++--
 dev/examples/SDDP_0.0.log | 6 +-
 dev/examples/SDDP_0.0625.log | 6 +-
 dev/examples/SDDP_0.125.log | 6 +-
 dev/examples/SDDP_0.25.log | 6 +-
 dev/examples/SDDP_0.375.log | 6 +-
 dev/examples/SDDP_0.5.log | 6 +-
 dev/examples/SDDP_0.625.log | 6 +-
 dev/examples/SDDP_0.75.log | 6 +-
 dev/examples/SDDP_0.875.log | 6 +-
 dev/examples/SDDP_1.0.log | 6 +-
 .../index.html | 24 +-
 .../index.html | 22 +-
 .../index.html | 14 +-
 .../index.html | 14 +-
 .../agriculture_mccardle_farm/index.html | 2 +-
 dev/examples/air_conditioning/index.html | 14 +-
 .../air_conditioning_forward/index.html | 2 +-
 dev/examples/all_blacks/index.html | 8 +-
 .../asset_management_simple/index.html | 22 +-
 .../asset_management_stagewise/index.html | 24 +-
 dev/examples/belief/index.html | 24 +-
 dev/examples/biobjective_hydro/index.html | 62 +-
 dev/examples/booking_management/index.html | 2 +-
 dev/examples/generation_expansion/index.html | 24 +-
 dev/examples/hydro_valley/index.html | 2 +-
 .../infinite_horizon_hydro_thermal/index.html | 18 +-
 .../infinite_horizon_trivial/index.html | 12 +-
 dev/examples/no_strong_duality/index.html | 8 +-
 .../objective_state_newsvendor/index.html | 221 +-
 dev/examples/sldp_example_one/index.html | 25 +-
 dev/examples/sldp_example_two/index.html | 38 +-
 dev/examples/stochastic_all_blacks/index.html | 10 +-
 dev/examples/the_farmers_problem/index.html | 10 +-
 dev/examples/vehicle_location/index.html | 2 +-
 dev/explanation/risk/index.html | 4 +-
 dev/explanation/theory_intro.ipynb | 27 +-
 dev/explanation/theory_intro.jl | 24 +-
 dev/explanation/theory_intro/index.html | 310 +-
 .../access_previous_variables/index.html | 2 +-
 .../index.html | 2 +-
 dev/guides/add_a_risk_measure/index.html | 16 +-
 dev/guides/add_integrality/index.html | 2 +-
 .../add_multidimensional_noise/index.html | 2 +-
 .../index.html | 2 +-
 dev/guides/choose_a_stopping_rule/index.html | 2 +-
 dev/guides/create_a_belief_state/index.html | 2 +-
 .../create_a_general_policy_graph/index.html | 2 +-
 dev/guides/debug_a_model/index.html | 2 +-
 .../index.html | 2 +-
 .../index.html | 2 +-
 dev/index.html | 2 +-
 dev/release_notes/index.html | 2 +-
 dev/search/index.html | 2 +-
 dev/search_index.js | 2 +-
 dev/tutorial/SDDP.log | 204 +-
 dev/tutorial/arma/index.html | 54 +-
 dev/tutorial/convex.cuts.json | 2 +-
 dev/tutorial/decision_hazard/index.html | 2 +-
 dev/tutorial/example_milk_producer/index.html | 3069 ++++++++---------
 dev/tutorial/example_newsvendor/index.html | 92 +-
 dev/tutorial/example_reservoir/index.html | 784 +++--
 dev/tutorial/first_steps/index.html | 24 +-
 dev/tutorial/markov_uncertainty/index.html | 10 +-
 dev/tutorial/mdps/index.html | 31 +-
 dev/tutorial/objective_states/index.html | 38 +-
 dev/tutorial/objective_uncertainty/index.html | 12 +-
 dev/tutorial/pglib_opf/index.html | 40 +-
 dev/tutorial/plotting/index.html | 140 +-
 dev/tutorial/spaghetti_plot.html | 2 +-
 dev/tutorial/warnings/index.html | 14 +-
 77 files changed, 3213 insertions(+), 3172 deletions(-)

diff --git a/dev/apireference/index.html b/dev/apireference/index.html
index d39f311e0..e3e5ed417 100644
--- a/dev/apireference/index.html
+++ b/dev/apireference/index.html
@@ -21,7 +21,7 @@
 Nodes
  {}
 Arcs
- {}
source
SDDP.add_nodeFunction
add_node(graph::Graph{T}, node::T) where {T}

Add a node to the graph graph.

Examples

julia> graph = SDDP.Graph(:root);
+ {}
source
SDDP.add_nodeFunction
add_node(graph::Graph{T}, node::T) where {T}

Add a node to the graph graph.

Examples

julia> graph = SDDP.Graph(:root);
 
 julia> SDDP.add_node(graph, :A)
 
@@ -41,7 +41,7 @@
 Nodes
  2
 Arcs
- {}
source
SDDP.add_edgeFunction
add_edge(graph::Graph{T}, edge::Pair{T, T}, probability::Float64) where {T}

Add an edge to the graph graph.

Examples

julia> graph = SDDP.Graph(0);
+ {}
source
SDDP.add_edgeFunction
add_edge(graph::Graph{T}, edge::Pair{T, T}, probability::Float64) where {T}

Add an edge to the graph graph.

Examples

julia> graph = SDDP.Graph(0);
 
 julia> SDDP.add_node(graph, 1)
 
@@ -65,7 +65,7 @@
 Nodes
  A
 Arcs
- root => A w.p. 1.0
source
SDDP.add_ambiguity_setFunction
add_ambiguity_set(
+ root => A w.p. 1.0
source
SDDP.add_ambiguity_setFunction
add_ambiguity_set(
     graph::Graph{T},
     set::Vector{T},
     lipschitz::Vector{Float64},
@@ -98,7 +98,7 @@
  2 => 3 w.p. 1.0
 Partitions
  {1, 2}
- {3}
source
add_ambiguity_set(graph::Graph{T}, set::Vector{T}, lipschitz::Float64)

Add set to the belief partition of graph.

lipschitz is a Lipschitz constant for each node in set. The Lipschitz constant is the maximum slope of the cost-to-go function with respect to the belief state associated with each node at any point in the state-space.

Examples

julia> graph = SDDP.LinearGraph(3);
+ {3}
source
add_ambiguity_set(graph::Graph{T}, set::Vector{T}, lipschitz::Float64)

Add set to the belief partition of graph.

lipschitz is a Lipschitz constant for each node in set. The Lipschitz constant is the maximum slope of the cost-to-go function with respect to the belief state associated with each node at any point in the state-space.

Examples

julia> graph = SDDP.LinearGraph(3);
 
 julia> SDDP.add_ambiguity_set(graph, [1, 2], 1e3)
 
@@ -117,7 +117,7 @@
  2 => 3 w.p. 1.0
 Partitions
  {1, 2}
- {3}
source
SDDP.LinearGraphFunction
LinearGraph(stages::Int)

Create a linear graph with stages number of nodes.

Examples

julia> graph = SDDP.LinearGraph(3)
+ {3}
source
SDDP.LinearGraphFunction
LinearGraph(stages::Int)

Create a linear graph with stages number of nodes.

Examples

julia> graph = SDDP.LinearGraph(3)
 Root
  0
 Nodes
@@ -127,7 +127,7 @@
 Arcs
  0 => 1 w.p. 1.0
  1 => 2 w.p. 1.0
- 2 => 3 w.p. 1.0
source
SDDP.MarkovianGraphFunction
MarkovianGraph(transition_matrices::Vector{Matrix{Float64}})

Construct a Markovian graph from the vector of transition matrices.

transition_matrices[t][i, j] gives the probability of transitioning from Markov state i in stage t - 1 to Markov state j in stage t.

The dimension of the first transition matrix should be (1, N), and transition_matrices[1][1, i] is the probability of transitioning from the root node to the Markov state i.

Examples

julia> graph = SDDP.MarkovianGraph([ones(1, 1), [0.5 0.5], [0.8 0.2; 0.2 0.8]])
+ 2 => 3 w.p. 1.0
source
SDDP.MarkovianGraphFunction
MarkovianGraph(transition_matrices::Vector{Matrix{Float64}})

Construct a Markovian graph from the vector of transition matrices.

transition_matrices[t][i, j] gives the probability of transitioning from Markov state i in stage t - 1 to Markov state j in stage t.

The dimension of the first transition matrix should be (1, N), and transition_matrices[1][1, i] is the probability of transitioning from the root node to the Markov state i.

Examples

julia> graph = SDDP.MarkovianGraph([ones(1, 1), [0.5 0.5], [0.8 0.2; 0.2 0.8]])
 Root
  (0, 1)
 Nodes
@@ -143,7 +143,7 @@
  (2, 1) => (3, 1) w.p. 0.8
  (2, 1) => (3, 2) w.p. 0.2
  (2, 2) => (3, 1) w.p. 0.2
- (2, 2) => (3, 2) w.p. 0.8
source
MarkovianGraph(;
+ (2, 2) => (3, 2) w.p. 0.8
source
MarkovianGraph(;
     stages::Int,
     transition_matrix::Matrix{Float64},
     root_node_transition::Vector{Float64},
@@ -171,11 +171,11 @@
  (2, 1) => (3, 1) w.p. 0.8
  (2, 1) => (3, 2) w.p. 0.2
  (2, 2) => (3, 1) w.p. 0.2
- (2, 2) => (3, 2) w.p. 0.8
source
MarkovianGraph(
+ (2, 2) => (3, 2) w.p. 0.8
source
MarkovianGraph(
     simulator::Function;
     budget::Union{Int,Vector{Int}},
     scenarios::Int = 1000,
-)

Construct a Markovian graph by fitting a Markov chain to scenarios generated by simulator().

budget is the total number of nodes in the resulting Markov chain. This can either be specified as a single Int, in which case we will attempt to intelligently distribute the nodes between stages. Alternatively, budget can be a Vector{Int}, which details the number of Markov states to have in each stage.

source
SDDP.UnicyclicGraphFunction
UnicyclicGraph(discount_factor::Float64; num_nodes::Int = 1)

Construct a graph composed of num_nodes nodes that form a single cycle, with a probability of discount_factor of continuing the cycle.

Examples

julia> graph = SDDP.UnicyclicGraph(0.9; num_nodes = 2)
+)

Construct a Markovian graph by fitting a Markov chain to scenarios generated by simulator().

budget is the total number of nodes in the resulting Markov chain. This can either be specified as a single Int, in which case we will attempt to intelligently distribute the nodes between stages. Alternatively, budget can be a Vector{Int}, which details the number of Markov states to have in each stage.

source
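For illustration, a minimal sketch of fitting a graph from a simulator; the random-walk price process and the budget value below are hypothetical, not part of the docstring:

julia> using SDDP

julia> simulator() = cumsum(1.0 .+ 0.1 .* randn(12))  # hypothetical 12-stage generator returning a Vector{Float64}

julia> graph = SDDP.MarkovianGraph(simulator; budget = 24, scenarios = 100);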
SDDP.UnicyclicGraphFunction
UnicyclicGraph(discount_factor::Float64; num_nodes::Int = 1)

Construct a graph composed of num_nodes nodes that form a single cycle, with a probability of discount_factor of continuing the cycle.

Examples

julia> graph = SDDP.UnicyclicGraph(0.9; num_nodes = 2)
 Root
  0
 Nodes
@@ -184,7 +184,7 @@
 Arcs
  0 => 1 w.p. 1.0
  1 => 2 w.p. 1.0
- 2 => 1 w.p. 0.9
source
SDDP.LinearPolicyGraphFunction
LinearPolicyGraph(builder::Function; stages::Int, kwargs...)

Create a linear policy graph with stages number of stages.

Keyword arguments

  • stages: the number of stages in the graph

  • kwargs: other keyword arguments are passed to SDDP.PolicyGraph.

Examples

julia> SDDP.LinearPolicyGraph(; stages = 2, lower_bound = 0.0) do sp, t
+ 2 => 1 w.p. 0.9
source
SDDP.LinearPolicyGraphFunction
LinearPolicyGraph(builder::Function; stages::Int, kwargs...)

Create a linear policy graph with stages number of stages.

Keyword arguments

  • stages: the number of stages in the graph

  • kwargs: other keyword arguments are passed to SDDP.PolicyGraph.

Examples

julia> SDDP.LinearPolicyGraph(; stages = 2, lower_bound = 0.0) do sp, t
     # ... build model ...
 end
 A policy graph with 2 nodes.
@@ -194,7 +194,7 @@
     # ... build model ...
 end
 A policy graph with 2 nodes.
-Node indices: 1, 2
source
SDDP.MarkovianPolicyGraphFunction
MarkovianPolicyGraph(
+Node indices: 1, 2
source
SDDP.MarkovianPolicyGraphFunction
MarkovianPolicyGraph(
     builder::Function;
     transition_matrices::Vector{Array{Float64,2}},
     kwargs...
@@ -211,7 +211,7 @@
     # ... build model ...
 end
 A policy graph with 5 nodes.
- Node indices: (1, 1), (2, 1), (2, 2), (3, 1), (3, 2)
source
SDDP.PolicyGraphType
PolicyGraph(
+ Node indices: (1, 1), (2, 1), (2, 2), (3, 1), (3, 2)
source
SDDP.PolicyGraphType
PolicyGraph(
     builder::Function,
     graph::Graph{T};
     sense::Symbol = :Min,
@@ -233,49 +233,49 @@
     optimizer = HiGHS.Optimizer,
 ) do subproblem, index
     # ... subproblem definitions ...
-end
source

Subproblem definition

SDDP.@stageobjectiveMacro
@stageobjective(subproblem, expr)

Set the stage-objective of subproblem to expr.

Examples

@stageobjective(subproblem, 2x + y)
source
SDDP.parameterizeFunction
parameterize(
+end
source

Subproblem definition

SDDP.@stageobjectiveMacro
@stageobjective(subproblem, expr)

Set the stage-objective of subproblem to expr.

Examples

@stageobjective(subproblem, 2x + y)
source
SDDP.parameterizeFunction
parameterize(
     modify::Function,
     subproblem::JuMP.Model,
     realizations::Vector{T},
     probability::Vector{Float64} = fill(1.0 / length(realizations), length(realizations))
 ) where {T}

Add a parameterization function modify to subproblem. The modify function takes one argument and modifies subproblem based on the realization of the noise sampled from realizations with corresponding probabilities probability.

In order to conduct an out-of-sample simulation, modify should accept arguments that are not in realizations (but still of type T).

Examples

SDDP.parameterize(subproblem, [1, 2, 3], [0.4, 0.3, 0.3]) do ω
     JuMP.set_upper_bound(x, ω)
-end
source
parameterize(node::Node, noise)

Parameterize node node with the noise noise.

source
SDDP.add_objective_stateFunction
add_objective_state(update::Function, subproblem::JuMP.Model; kwargs...)

Add an objective state variable to subproblem.

Required kwargs are:

  • initial_value: The initial value of the objective state variable at the root node.
  • lipschitz: The lipschitz constant of the objective state variable.

Setting a tight value for the lipschitz constant can significantly improve the speed of convergence.

Optional kwargs are:

  • lower_bound: A valid lower bound for the objective state variable. Can be -Inf.
  • upper_bound: A valid upper bound for the objective state variable. Can be +Inf.

Setting tight values for these optional variables can significantly improve the speed of convergence.

If the objective state is N-dimensional, each keyword argument must be an NTuple{N,Float64}. For example, initial_value = (0.0, 1.0).

source
SDDP.objective_stateFunction
objective_state(subproblem::JuMP.Model)

Return the current objective state of the problem.

Can only be called from SDDP.parameterize.

source
SDDP.NoiseType
Noise(support, probability)

An atom of a discrete random variable at the point of support support and associated probability probability.

source

Training the policy

SDDP.numerical_stability_reportFunction
numerical_stability_report(
+end
source
parameterize(node::Node, noise)

Parameterize node node with the noise noise.

source
SDDP.add_objective_stateFunction
add_objective_state(update::Function, subproblem::JuMP.Model; kwargs...)

Add an objective state variable to subproblem.

Required kwargs are:

  • initial_value: The initial value of the objective state variable at the root node.
  • lipschitz: The lipschitz constant of the objective state variable.

Setting a tight value for the lipschitz constant can significantly improve the speed of convergence.

Optional kwargs are:

  • lower_bound: A valid lower bound for the objective state variable. Can be -Inf.
  • upper_bound: A valid upper bound for the objective state variable. Can be +Inf.

Setting tight values for these optional variables can significantly improve the speed of convergence.

If the objective state is N-dimensional, each keyword argument must be an NTuple{N,Float64}. For example, initial_value = (0.0, 1.0).

source
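A hedged sketch of the call inside a subproblem builder; the fuel-cost name, the ω.fuel_multiplier field, and all numbers are illustrative assumptions, not part of the docstring:

SDDP.add_objective_state(
    subproblem;
    initial_value = 50.0,   # hypothetical initial fuel cost
    lipschitz = 100.0,
    lower_bound = 25.0,
    upper_bound = 125.0,
) do fuel_cost, ω
    # return the next value of the objective state
    return ω.fuel_multiplier * fuel_cost
end

The current value can then be read with SDDP.objective_state(subproblem) from inside SDDP.parameterize.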
SDDP.objective_stateFunction
objective_state(subproblem::JuMP.Model)

Return the current objective state of the problem.

Can only be called from SDDP.parameterize.

source
SDDP.NoiseType
Noise(support, probability)

An atom of a discrete random variable at the point of support support and associated probability probability.

source

Training the policy

SDDP.numerical_stability_reportFunction
numerical_stability_report(
     [io::IO = stdout,]
     model::PolicyGraph;
     by_node::Bool = false,
     print::Bool = true,
     warn::Bool = true,
-)

Print a report identifying possible numeric stability issues.

Keyword arguments

  • If by_node, print a report for each node in the graph.

  • If print, print to io.

  • If warn, warn if the coefficients may cause numerical issues.

source
SDDP.trainFunction
SDDP.train(model::PolicyGraph; kwargs...)

Train the policy for model.

Keyword arguments

  • iteration_limit::Int: number of iterations to conduct before termination.

  • time_limit::Float64: number of seconds to train before termination.

  • stopping_rules: a vector of SDDP.AbstractStoppingRules. Defaults to SimulationStoppingRule.

  • print_level::Int: control the level of printing to the screen. Defaults to 1. Set to 0 to disable all printing.

  • log_file::String: filepath at which to write a log of the training progress. Defaults to SDDP.log.

  • log_frequency::Int: control the frequency with which the logging is outputted (iterations/log). Defaults to 1.

  • log_every_seconds::Float64: control the frequency with which the logging is outputted (seconds/log). Defaults to 0.0.

  • run_numerical_stability_report::Bool: generate (and print) a numerical stability report prior to solve. Defaults to true.

  • refine_at_similar_nodes::Bool: if SDDP can detect that two nodes have the same children, it can cheaply add a cut discovered at one to the other. In almost all cases this should be set to true.

  • cut_deletion_minimum::Int: the minimum number of cuts to cache before deleting cuts from the subproblem. The impact on performance is solver specific; however, smaller values result in smaller subproblems (and therefore quicker solves), at the expense of more time spent performing cut selection.

  • risk_measure: the risk measure to use at each node. Defaults to Expectation.

  • sampling_scheme: a sampling scheme to use on the forward pass of the algorithm. Defaults to InSampleMonteCarlo.

  • backward_sampling_scheme: a backward pass sampling scheme to use on the backward pass of the algorithm. Defaults to CompleteSampler.

  • cut_type: choose between SDDP.SINGLE_CUT and SDDP.MULTI_CUT versions of SDDP.

  • dashboard::Bool: open a visualization of the training over time. Defaults to false.

  • parallel_scheme::AbstractParallelScheme: specify a scheme for solving in parallel. Defaults to Serial().

  • forward_pass::AbstractForwardPass: specify a scheme to use for the forward passes.

  • forward_pass_resampling_probability::Union{Nothing,Float64}: set to a value in (0, 1) to enable RiskAdjustedForwardPass. Defaults to nothing (disabled).

  • add_to_existing_cuts::Bool: set to true to allow training a model that was previously trained. Defaults to false.

  • duality_handler::AbstractDualityHandler: specify a duality handler to use when creating cuts.

  • post_iteration_callback::Function: a callback with the signature post_iteration_callback(::IterationResult) that is evaluated after each iteration of the algorithm.

There is also a special option for infinite horizon problems

  • cycle_discretization_delta: the maximum distance between states allowed on the forward pass. This is for advanced users only and needs to be used in conjunction with a different sampling_scheme.
source
SDDP.termination_statusFunction
termination_status(model::PolicyGraph)::Symbol

Query the reason why the training stopped.

source
SDDP.write_cuts_to_fileFunction
write_cuts_to_file(
+)

Print a report identifying possible numeric stability issues.

Keyword arguments

  • If by_node, print a report for each node in the graph.

  • If print, print to io.

  • If warn, warn if the coefficients may cause numerical issues.

source
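For example, assuming model is an existing SDDP.PolicyGraph, a per-node report can be printed before training:

SDDP.numerical_stability_report(model; by_node = true, warn = true)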
SDDP.trainFunction
SDDP.train(model::PolicyGraph; kwargs...)

Train the policy for model.

Keyword arguments

  • iteration_limit::Int: number of iterations to conduct before termination.

  • time_limit::Float64: number of seconds to train before termination.

  • stopping_rules: a vector of SDDP.AbstractStoppingRules. Defaults to SimulationStoppingRule.

  • print_level::Int: control the level of printing to the screen. Defaults to 1. Set to 0 to disable all printing.

  • log_file::String: filepath at which to write a log of the training progress. Defaults to SDDP.log.

  • log_frequency::Int: control the frequency with which the logging is outputted (iterations/log). Defaults to 1.

  • log_every_seconds::Float64: control the frequency with which the logging is outputted (seconds/log). Defaults to 0.0.

  • run_numerical_stability_report::Bool: generate (and print) a numerical stability report prior to solve. Defaults to true.

  • refine_at_similar_nodes::Bool: if SDDP can detect that two nodes have the same children, it can cheaply add a cut discovered at one to the other. In almost all cases this should be set to true.

  • cut_deletion_minimum::Int: the minimum number of cuts to cache before deleting cuts from the subproblem. The impact on performance is solver specific; however, smaller values result in smaller subproblems (and therefore quicker solves), at the expense of more time spent performing cut selection.

  • risk_measure: the risk measure to use at each node. Defaults to Expectation.

  • sampling_scheme: a sampling scheme to use on the forward pass of the algorithm. Defaults to InSampleMonteCarlo.

  • backward_sampling_scheme: a backward pass sampling scheme to use on the backward pass of the algorithm. Defaults to CompleteSampler.

  • cut_type: choose between SDDP.SINGLE_CUT and SDDP.MULTI_CUT versions of SDDP.

  • dashboard::Bool: open a visualization of the training over time. Defaults to false.

  • parallel_scheme::AbstractParallelScheme: specify a scheme for solving in parallel. Defaults to Serial().

  • forward_pass::AbstractForwardPass: specify a scheme to use for the forward passes.

  • forward_pass_resampling_probability::Union{Nothing,Float64}: set to a value in (0, 1) to enable RiskAdjustedForwardPass. Defaults to nothing (disabled).

  • add_to_existing_cuts::Bool: set to true to allow training a model that was previously trained. Defaults to false.

  • duality_handler::AbstractDualityHandler: specify a duality handler to use when creating cuts.

  • post_iteration_callback::Function: a callback with the signature post_iteration_callback(::IterationResult) that is evaluated after each iteration of the algorithm.

There is also a special option for infinite horizon problems

  • cycle_discretization_delta: the maximum distance between states allowed on the forward pass. This is for advanced users only and needs to be used in conjunction with a different sampling_scheme.
source
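A sketch of a typical call that combines several of the keyword arguments above; the particular values are illustrative only:

SDDP.train(
    model;
    iteration_limit = 100,
    stopping_rules = [SDDP.BoundStalling(10, 1e-4)],
    risk_measure = SDDP.Expectation(),
    log_frequency = 10,
    run_numerical_stability_report = false,
)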
SDDP.termination_statusFunction
termination_status(model::PolicyGraph)::Symbol

Query the reason why the training stopped.

source
SDDP.write_cuts_to_fileFunction
write_cuts_to_file(
     model::PolicyGraph{T},
     filename::String;
     node_name_parser::Function = string,
-) where {T}

Write the cuts that form the policy in model to filename in JSON format.

node_name_parser is a function which converts the name of each node into a string representation. It has the signature: node_name_parser(::T)::String.

See also SDDP.read_cuts_from_file.

source
SDDP.read_cuts_from_fileFunction
read_cuts_from_file(
+) where {T}

Write the cuts that form the policy in model to filename in JSON format.

node_name_parser is a function which converts the name of each node into a string representation. It has the signature: node_name_parser(::T)::String.

See also SDDP.read_cuts_from_file.

source
SDDP.read_cuts_from_fileFunction
read_cuts_from_file(
     model::PolicyGraph{T},
     filename::String;
     node_name_parser::Function = _node_name_parser,
-) where {T}

Read cuts (saved using SDDP.write_cuts_to_file) from filename into model.

Since T can be an arbitrary Julia type, the conversion to JSON is lossy. When reading, read_cuts_from_file only supports T=Int, T=NTuple{N, Int}, and T=Symbol. If you have manually created a policy graph with a different node type T, provide a function node_name_parser with the signature node_name_parser(T, name::String)::T where {T} that returns the name of each node given the string name name.

If node_name_parser returns nothing, those cuts are skipped.

See also SDDP.write_cuts_to_file.

source
SDDP.write_log_to_csvFunction
write_log_to_csv(model::PolicyGraph, filename::String)

Write the log of the most recent training to a csv for post-analysis.

Assumes that the model has been trained via SDDP.train.

source

Stopping rules

SDDP.AbstractStoppingRuleType
AbstractStoppingRule

The abstract type for the stopping-rule interface.

You need to define the following methods:

source
SDDP.stopping_rule_statusFunction
stopping_rule_status(::AbstractStoppingRule)::Symbol

Return a symbol describing the stopping rule.

source
SDDP.convergence_testFunction
convergence_test(
+) where {T}

Read cuts (saved using SDDP.write_cuts_to_file) from filename into model.

Since T can be an arbitrary Julia type, the conversion to JSON is lossy. When reading, read_cuts_from_file only supports T=Int, T=NTuple{N, Int}, and T=Symbol. If you have manually created a policy graph with a different node type T, provide a function node_name_parser with the signature node_name_parser(T, name::String)::T where {T} that returns the name of each node given the string name name.

If node_name_parser returns nothing, those cuts are skipped.

See also SDDP.write_cuts_to_file.

source
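A round-trip sketch, assuming the policy graph uses integer node indices and that new_model is a freshly built copy of the same graph; the filename is arbitrary:

SDDP.write_cuts_to_file(model, "cuts.json")
SDDP.read_cuts_from_file(new_model, "cuts.json")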
SDDP.write_log_to_csvFunction
write_log_to_csv(model::PolicyGraph, filename::String)

Write the log of the most recent training to a csv for post-analysis.

Assumes that the model has been trained via SDDP.train.

source

Stopping rules

SDDP.AbstractStoppingRuleType
AbstractStoppingRule

The abstract type for the stopping-rule interface.

You need to define the following methods:

source
SDDP.stopping_rule_statusFunction
stopping_rule_status(::AbstractStoppingRule)::Symbol

Return a symbol describing the stopping rule.

source
SDDP.convergence_testFunction
convergence_test(
     model::PolicyGraph,
     log::Vector{Log},
     ::AbstractStoppingRule,
-)::Bool

Return a Bool indicating if the algorithm should terminate the training.

source
SDDP.IterationLimitType
IterationLimit(limit::Int)

Terminate the algorithm after limit iterations.

source
SDDP.TimeLimitType
TimeLimit(limit::Float64)

Terminate the algorithm after limit seconds of computation.

source
SDDP.StatisticalType
Statistical(;
+)::Bool

Return a Bool indicating if the algorithm should terminate the training.

source
SDDP.IterationLimitType
IterationLimit(limit::Int)

Terminate the algorithm after limit iterations.

source
SDDP.TimeLimitType
TimeLimit(limit::Float64)

Terminate the algorithm after limit seconds of computation.

source
SDDP.StatisticalType
Statistical(;
     num_replications,
     iteration_period = 1,
     z_score = 1.96,
     verbose = true,
-)

Perform an in-sample Monte Carlo simulation of the policy with num_replications replications every iteration_period iterations. Terminate if the deterministic bound (lower if minimizing) falls into the confidence interval for the mean of the simulated cost. If verbose = true, print the confidence interval.

Note that this test assumes that the simulated values are normally distributed. In infinite horizon models, this is almost never the case. The distribution is usually closer to exponential or log-normal.

source
SDDP.BoundStallingType
BoundStalling(num_previous_iterations::Int, tolerance::Float64)

Terminate the algorithm once the deterministic bound (lower if minimizing, upper if maximizing) fails to improve by more than tolerance in absolute terms for more than num_previous_iterations consecutive iterations, provided it has improved relative to the bound after the first iteration.

Checking for an improvement relative to the first iteration avoids early termination in a situation where the bound fails to improve for the first N iterations. This frequently happens in models with a large number of stages, where it takes time for the cuts to propagate backward enough to modify the bound of the root node.

source
SDDP.StoppingChainType
StoppingChain(rules::AbstractStoppingRule...)

Terminate once all of the rules are satisfied.

This stopping rule short-circuits, so subsequent rules are only tested if the previous rules pass.

Examples

A stopping rule that runs 100 iterations, then checks for the bound stalling:

StoppingChain(IterationLimit(100), BoundStalling(5, 0.1))
source
SDDP.SimulationStoppingRuleType
SimulationStoppingRule(;
+)

Perform an in-sample Monte Carlo simulation of the policy with num_replications replications every iteration_period iterations. Terminate if the deterministic bound (lower if minimizing) falls into the confidence interval for the mean of the simulated cost. If verbose = true, print the confidence interval.

Note that this test assumes that the simulated values are normally distributed. In infinite horizon models, this is almost never the case. The distribution is usually closer to exponential or log-normal.

source
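For example (the replication count and period are illustrative):

SDDP.train(
    model;
    stopping_rules = [SDDP.Statistical(; num_replications = 100, iteration_period = 10)],
)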
SDDP.BoundStallingType
BoundStalling(num_previous_iterations::Int, tolerance::Float64)

Terminate the algorithm once the deterministic bound (lower if minimizing, upper if maximizing) fails to improve by more than tolerance in absolute terms for more than num_previous_iterations consecutive iterations, provided it has improved relative to the bound after the first iteration.

Checking for an improvement relative to the first iteration avoids early termination in a situation where the bound fails to improve for the first N iterations. This frequently happens in models with a large number of stages, where it takes time for the cuts to propagate backward enough to modify the bound of the root node.

source
SDDP.StoppingChainType
StoppingChain(rules::AbstractStoppingRule...)

Terminate once all of the rules are satisfied.

This stopping rule short-circuits, so subsequent rules are only tested if the previous rules pass.

Examples

A stopping rule that runs 100 iterations, then checks for the bound stalling:

StoppingChain(IterationLimit(100), BoundStalling(5, 0.1))
source
SDDP.SimulationStoppingRuleType
SimulationStoppingRule(;
     sampling_scheme::AbstractSamplingScheme = SDDP.InSampleMonteCarlo(),
     replications::Int = -1,
     period::Int = -1,
     distance_tol::Float64 = 1e-2,
     bound_tol::Float64 = 1e-4,
-)

Terminate the algorithm using a mix of heuristics. Unless you know otherwise, this is typically a good default.

Termination criteria

First, we check that the deterministic bound has stabilized. That is, over the last five iterations, the deterministic bound has changed by less than an absolute or relative tolerance of bound_tol.

Then, if we have not done one in the last period iterations, we perform a primal simulation of the policy using replications out-of-sample realizations from sampling_scheme. The realizations are stored and re-used in each simulation. From each simulation, we record the value of the stage objective. We terminate the policy if each of the trajectories in two consecutive simulations differ by less than distance_tol.

By default, replications and period are -1, and SDDP.jl will guess good values for these. Over-ride the default behavior by setting an appropriate value.

Example

SDDP.train(model; stopping_rules = [SimulationStoppingRule()])
source
SDDP.FirstStageStoppingRuleType
FirstStageStoppingRule(; atol::Float64 = 1e-3, iterations::Int = 50)

Terminate the algorithm when the outgoing values of the first-stage state variables have not changed by more than atol for iterations number of consecutive iterations.

Example

SDDP.train(model; stopping_rules = [FirstStageStoppingRule()])
source

Sampling schemes

SDDP.AbstractSamplingSchemeType
AbstractSamplingScheme

The abstract type for the sampling-scheme interface.

You need to define the following methods:

source
SDDP.sample_scenarioFunction
sample_scenario(graph::PolicyGraph{T}, ::AbstractSamplingScheme) where {T}

Sample a scenario from the policy graph graph based on the sampling scheme.

Returns ::Tuple{Vector{Tuple{T, <:Any}}, Bool}, where the first element is the scenario, and the second element is a Boolean flag indicating if the scenario was terminated due to the detection of a cycle.

The scenario is a list of tuples (type Vector{Tuple{T, <:Any}}) where the first component of each tuple is the index of the node, and the second component is the stagewise-independent noise term observed in that node.

source
SDDP.InSampleMonteCarloType
InSampleMonteCarlo(;
+)

Terminate the algorithm using a mix of heuristics. Unless you know otherwise, this is typically a good default.

Termination criteria

First, we check that the deterministic bound has stabilized. That is, over the last five iterations, the deterministic bound has changed by less than an absolute or relative tolerance of bound_tol.

Then, if we have not done one in the last period iterations, we perform a primal simulation of the policy using replications out-of-sample realizations from sampling_scheme. The realizations are stored and re-used in each simulation. From each simulation, we record the value of the stage objective. We terminate the policy if each of the trajectories in two consecutive simulations differ by less than distance_tol.

By default, replications and period are -1, and SDDP.jl will guess good values for these. Over-ride the default behavior by setting an appropriate value.

Example

SDDP.train(model; stopping_rules = [SimulationStoppingRule()])
source
SDDP.FirstStageStoppingRuleType
FirstStageStoppingRule(; atol::Float64 = 1e-3, iterations::Int = 50)

Terminate the algorithm when the outgoing values of the first-stage state variables have not changed by more than atol for iterations number of consecutive iterations.

Example

SDDP.train(model; stopping_rules = [FirstStageStoppingRule()])
source

Sampling schemes

SDDP.AbstractSamplingSchemeType
AbstractSamplingScheme

The abstract type for the sampling-scheme interface.

You need to define the following methods:

source
SDDP.sample_scenarioFunction
sample_scenario(graph::PolicyGraph{T}, ::AbstractSamplingScheme) where {T}

Sample a scenario from the policy graph graph based on the sampling scheme.

Returns ::Tuple{Vector{Tuple{T, <:Any}}, Bool}, where the first element is the scenario, and the second element is a Boolean flag indicating if the scenario was terminated due to the detection of a cycle.

The scenario is a list of tuples (type Vector{Tuple{T, <:Any}}) where the first component of each tuple is the index of the node, and the second component is the stagewise-independent noise term observed in that node.

source
SDDP.InSampleMonteCarloType
InSampleMonteCarlo(;
     max_depth::Int = 0,
     terminate_on_cycle::Function = false,
     terminate_on_dummy_leaf::Function = true,
     rollout_limit::Function = (i::Int) -> typemax(Int),
     initial_node::Any = nothing,
-)

A Monte Carlo sampling scheme using the in-sample data from the policy graph definition.

If terminate_on_cycle, terminate the forward pass once a cycle is detected. If max_depth > 0, return once max_depth nodes have been sampled. If terminate_on_dummy_leaf, terminate the forward pass with probability equal to 1 minus the probability of sampling a child node.

Note that if terminate_on_cycle = false and terminate_on_dummy_leaf = false then max_depth must be set > 0.

Control which node the trajectories start from using initial_node. If it is left as nothing, the root node is used as the starting node.

You can use rollout_limit to set iteration specific depth limits. For example:

InSampleMonteCarlo(rollout_limit = i -> 2 * i)
source
SDDP.OutOfSampleMonteCarloType
OutOfSampleMonteCarlo(
+)

A Monte Carlo sampling scheme using the in-sample data from the policy graph definition.

If terminate_on_cycle, terminate the forward pass once a cycle is detected. If max_depth > 0, return once max_depth nodes have been sampled. If terminate_on_dummy_leaf, terminate the forward pass with probability equal to 1 minus the probability of sampling a child node.

Note that if terminate_on_cycle = false and terminate_on_dummy_leaf = false then max_depth must be set > 0.

Control which node the trajectories start from using initial_node. If it is left as nothing, the root node is used as the starting node.

You can use rollout_limit to set iteration specific depth limits. For example:

InSampleMonteCarlo(rollout_limit = i -> 2 * i)
source
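For an infinite-horizon graph, one sketch (the depth value is illustrative) is to cap the trajectory length instead of relying on the dummy leaf:

SDDP.train(
    model;
    sampling_scheme = SDDP.InSampleMonteCarlo(;
        max_depth = 20,
        terminate_on_dummy_leaf = false,
    ),
)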
SDDP.OutOfSampleMonteCarloType
OutOfSampleMonteCarlo(
     f::Function,
     graph::PolicyGraph;
     use_insample_transition::Bool = false,
@@ -294,7 +294,7 @@
     end
 end

Given linear policy graph graph with T stages:

sampler = OutOfSampleMonteCarlo(graph, use_insample_transition=true) do node
     return [SDDP.Noise(node, 0.3), SDDP.Noise(node + 1, 0.7)]
-end
source
SDDP.HistoricalType
Historical(
+end
source
SDDP.HistoricalType
Historical(
     scenarios::Vector{Vector{Tuple{T,S}}},
     probability::Vector{Float64};
     terminate_on_cycle::Bool = false,
@@ -305,17 +305,17 @@
         [(1, 1.0), (2, 0.0), (3, 0.0)]
     ],
     [0.2, 0.5, 0.3],
-)
source
Historical(
+)
source
Historical(
     scenarios::Vector{Vector{Tuple{T,S}}};
     terminate_on_cycle::Bool = false,
 ) where {T,S}

A deterministic sampling scheme that iterates through the vector of provided scenarios.

Examples

Historical([
     [(1, 0.5), (2, 1.0), (3, 0.5)],
     [(1, 0.5), (2, 0.0), (3, 1.0)],
     [(1, 1.0), (2, 0.0), (3, 0.0)],
-])
source
Historical(
+])
source
Historical(
     scenario::Vector{Tuple{T,S}};
     terminate_on_cycle::Bool = false,
-) where {T,S}

A deterministic sampling scheme that always samples scenario.

Examples

Historical([(1, 0.5), (2, 1.5), (3, 0.75)])
source
SDDP.PSRSamplingSchemeType
PSRSamplingScheme(N::Int; sampling_scheme = InSampleMonteCarlo())

A sampling scheme with N scenarios, similar to how PSR does it.

source
SDDP.SimulatorSamplingSchemeType
SimulatorSamplingScheme(simulator::Function)

Create a sampling scheme based on a univariate scenario generator simulator, which returns a Vector{Float64} when called with no arguments like simulator().

This sampling scheme must be used with a Markovian graph constructed from the same simulator.

The sample space for SDDP.parameterize must be a tuple in which the first element is the Markov state.

This sampling scheme generates a new scenario by calling simulator(), and then picking the sequence of nodes in the Markovian graph that is closest to the new trajectory.

Example

julia> using SDDP
+) where {T,S}

A deterministic sampling scheme that always samples scenario.

Examples

Historical([(1, 0.5), (2, 1.5), (3, 0.75)])
source
SDDP.PSRSamplingSchemeType
PSRSamplingScheme(N::Int; sampling_scheme = InSampleMonteCarlo())

A sampling scheme with N scenarios, similar to how PSR does it.

source
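For example, to revisit the same fixed pool of scenarios on every forward pass (N = 32 is illustrative):

SDDP.train(model; sampling_scheme = SDDP.PSRSamplingScheme(32))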
SDDP.SimulatorSamplingSchemeType
SimulatorSamplingScheme(simulator::Function)

Create a sampling scheme based on a univariate scenario generator simulator, which returns a Vector{Float64} when called with no arguments like simulator().

This sampling scheme must be used with a Markovian graph constructed from the same simulator.

The sample space for SDDP.parameterize must be a tuple in which the first element is the Markov state.

This sampling scheme generates a new scenario by calling simulator(), and then picking the sequence of nodes in the Markovian graph that is closest to the new trajectory.

Example

julia> using SDDP
 
 julia> import HiGHS
 
@@ -347,49 +347,49 @@
            iteration_limit = 10,
            sampling_scheme = SDDP.SimulatorSamplingScheme(simulator),
        )
-
source

Parallel schemes

SDDP.AbstractParallelSchemeType
AbstractParallelScheme

Abstract type for different parallelism schemes.

source
SDDP.SerialType
Serial()

Run SDDP in serial mode.

source
SDDP.AsynchronousType
Asynchronous(
+
source

Parallel schemes

SDDP.AbstractParallelSchemeType
AbstractParallelScheme

Abstract type for different parallelism schemes.

source
SDDP.SerialType
Serial()

Run SDDP in serial mode.

source
SDDP.AsynchronousType
Asynchronous(
     [init_callback::Function,]
     slave_pids::Vector{Int} = workers();
     use_master::Bool = true,
-)

Run SDDP in asynchronous mode on workers with pids slave_pids.

After initializing the models on each worker, call init_callback(model). Note that init_callback is run locally on the worker and not on the master thread.

If use_master is true, iterations are also conducted on the master process.

source
Asynchronous(
+)

Run SDDP in asynchronous mode on workers with pids slave_pids.

After initializing the models on each worker, call init_callback(model). Note that init_callback is run locally on the worker and not on the master thread.

If use_master is true, iterations are also conducted on the master process.

source
Asynchronous(
     solver::Any,
     slave_pids::Vector{Int} = workers();
     use_master::Bool = true,
-)

Run SDDP in asynchronous mode on workers with pids slave_pids.

Set the optimizer on each worker by calling JuMP.set_optimizer(model, solver).

source
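A minimal sketch, assuming HiGHS is installed on every worker and model is an existing policy graph:

using Distributed
Distributed.addprocs(4)            # launch four worker processes
@everywhere using SDDP, HiGHS      # load the packages on every worker
SDDP.train(model; parallel_scheme = SDDP.Asynchronous(HiGHS.Optimizer))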

Forward passes

SDDP.AbstractForwardPassType
AbstractForwardPass

Abstract type for different forward passes.

source
SDDP.DefaultForwardPassType
DefaultForwardPass(; include_last_node::Bool = true)

The default forward pass.

If include_last_node = false and the sample terminated due to a cycle, then the last node (which forms the cycle) is omitted. This can be a useful option to set when training, but it comes at the cost of not knowing which node formed the cycle (if there are multiple possibilities).

source
SDDP.RevisitingForwardPassType
RevisitingForwardPass(
+)

Run SDDP in asynchronous mode workers with pid's slave_pids.

Set the optimizer on each worker by calling JuMP.set_optimizer(model, solver).

source

Forward passes

SDDP.AbstractForwardPassType
AbstractForwardPass

Abstract type for different forward passes.

source
SDDP.DefaultForwardPassType
DefaultForwardPass(; include_last_node::Bool = true)

The default forward pass.

If include_last_node = false and the sample terminated due to a cycle, then the last node (which forms the cycle) is omitted. This can be a useful option to set when training, but it comes at the cost of not knowing which node formed the cycle (if there are multiple possibilities).

source
SDDP.RevisitingForwardPassType
RevisitingForwardPass(
     period::Int = 500;
     sub_pass::AbstractForwardPass = DefaultForwardPass(),
-)

A forward pass scheme that generates period new forward passes (using sub_pass), then revisits all previously explored forward passes. This can be useful to encourage convergence at a diversity of points in the state-space.

Set period = typemax(Int) to disable.

For example, if period = 2, then the forward passes will be revisited as follows: 1, 2, 1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 1, 2, ....

source
SDDP.RiskAdjustedForwardPassType
RiskAdjustedForwardPass(;
+)

A forward pass scheme that generates period new forward passes (using sub_pass), then revisits all previously explored forward passes. This can be useful to encourage convergence at a diversity of points in the state-space.

Set period = typemax(Int) to disable.

For example, if period = 2, then the forward passes will be revisited as follows: 1, 2, 1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 1, 2, ....

source
SDDP.RiskAdjustedForwardPassType
RiskAdjustedForwardPass(;
     forward_pass::AbstractForwardPass,
     risk_measure::AbstractRiskMeasure,
     resampling_probability::Float64,
     rejection_count::Int = 5,
-)

A forward pass that resamples a previous forward pass with resampling_probability probability, and otherwise samples a new forward pass using forward_pass.

The forward pass to revisit is chosen based on the risk-adjusted (using risk_measure) probability of the cumulative stage objectives.

Note that this objective corresponds to the first time we visited the trajectory. Subsequent visits may have improved things, but we don't have the mechanisms in-place to update it. Therefore, remove the forward pass from resampling consideration after rejection_count revisits.

source
SDDP.AlternativeForwardPassType
AlternativeForwardPass(
+)

A forward pass that resamples a previous forward pass with resampling_probability probability, and otherwise samples a new forward pass using forward_pass.

The forward pass to revisit is chosen based on the risk-adjusted (using risk_measure) probability of the cumulative stage objectives.

Note that this objective corresponds to the first time we visited the trajectory. Subsequent visits may have improved things, but we don't have the mechanisms in-place to update it. Therefore, remove the forward pass from resampling consideration after rejection_count revisits.

source
SDDP.AlternativeForwardPassType
AlternativeForwardPass(
     forward_model::SDDP.PolicyGraph{T};
     forward_pass::AbstractForwardPass = DefaultForwardPass(),
-)

A forward pass that simulates using forward_model, which may be different from the model used in the backward pass.

When using this forward pass, you should almost always pass SDDP.AlternativePostIterationCallback to the post_iteration_callback argument of SDDP.train.

This forward pass is most useful when the forward_model is non-convex and we use a convex approximation of the model in the backward pass.

For example, in optimal power flow models, we can use an AC-OPF formulation as the forward_model and a DC-OPF formulation as the backward model.

For more details see the paper:

Rosemberg, A., and Street, A., and Garcia, J.D., and Valladão, D.M., and Silva, T., and Dowson, O. (2021). Assessing the cost of network simplifications in long-term hydrothermal dispatch planning models. IEEE Transactions on Sustainable Energy. 13(1), 196-206.

source
SDDP.AlternativePostIterationCallbackType
AlternativePostIterationCallback(forward_model::PolicyGraph)

A post-iteration callback that should be used whenever SDDP.AlternativeForwardPass is used.

source
SDDP.RegularizedForwardPassType
RegularizedForwardPass(;
+)

A forward pass that simulates using forward_model, which may be different from the model used in the backward pass.

When using this forward pass, you should almost always pass SDDP.AlternativePostIterationCallback to the post_iteration_callback argument of SDDP.train.

This forward pass is most useful when the forward_model is non-convex and we use a convex approximation of the model in the backward pass.

For example, in optimal power flow models, we can use an AC-OPF formulation as the forward_model and a DC-OPF formulation as the backward model.

For more details see the paper:

Rosemberg, A., and Street, A., and Garcia, J.D., and Valladão, D.M., and Silva, T., and Dowson, O. (2021). Assessing the cost of network simplifications in long-term hydrothermal dispatch planning models. IEEE Transactions on Sustainable Energy. 13(1), 196-206.

source
SDDP.AlternativePostIterationCallbackType
AlternativePostIterationCallback(forward_model::PolicyGraph)

A post-iteration callback that should be used whenever SDDP.AlternativeForwardPass is used.

source
SDDP.RegularizedForwardPassType
RegularizedForwardPass(;
     rho::Float64 = 0.05,
     forward_pass::AbstractForwardPass = DefaultForwardPass(),
-)

A forward pass that regularizes the outgoing first-stage state variables with an L-infty trust-region constraint about the previous iteration's solution. Specifically, the bounds of the outgoing state variable x are updated from (l, u) to max(l, x^k - rho * (u - l)) <= x <= min(u, x^k + rho * (u - l)), where x^k is the optimal solution of x in the previous iteration. On the first iteration, the value of the state at the root node is used.

By default, rho is set to 5%, which seems to work well empirically.

Pass a different forward_pass to control the forward pass within the regularized forward pass.

This forward pass is largely intended to be used for investment problems in which the first stage makes a series of capacity decisions that then influence the rest of the graph. An error is thrown if the first stage problem is not deterministic, and states are silently skipped if they do not have finite bounds.

source

Risk Measures

SDDP.AbstractRiskMeasureType
AbstractRiskMeasure

The abstract type for the risk measure interface.

You need to define the following methods:

source
SDDP.adjust_probabilityFunction
adjust_probability(
+)

A forward pass that regularizes the outgoing first-stage state variables with an L-infty trust-region constraint about the previous iteration's solution. Specifically, the bounds of the outgoing state variable x are updated from (l, u) to max(l, x^k - rho * (u - l)) <= x <= min(u, x^k + rho * (u - l)), where x^k is the optimal solution of x in the previous iteration. On the first iteration, the value of the state at the root node is used.

By default, rho is set to 5%, which seems to work well empirically.

Pass a different forward_pass to control the forward pass within the regularized forward pass.

This forward pass is largely intended to be used for investment problems in which the first stage makes a series of capacity decisions that then influence the rest of the graph. An error is thrown if the first stage problem is not deterministic, and states are silently skipped if they do not have finite bounds.

source
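For example (the value of rho is illustrative):

SDDP.train(model; forward_pass = SDDP.RegularizedForwardPass(; rho = 0.1))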

Risk Measures

SDDP.AbstractRiskMeasureType
AbstractRiskMeasure

The abstract type for the risk measure interface.

You need to define the following methods:

source
SDDP.adjust_probabilityFunction
adjust_probability(
     measure::Expectation
     risk_adjusted_probability::Vector{Float64},
     original_probability::Vector{Float64},
     noise_support::Vector{Noise{T}},
     objective_realizations::Vector{Float64},
     is_minimization::Bool,
-) where {T}
source

Duality handlers

SDDP.AbstractDualityHandlerType
AbstractDualityHandler

The abstract type for the duality handler interface.

source
SDDP.ContinuousConicDualityType
ContinuousConicDuality()

Compute dual variables in the backward pass using conic duality, relaxing any binary or integer restrictions as necessary.

Theory

Given the problem

min Cᵢ(x̄, u, w) + θᵢ
+) where {T}
source

Duality handlers

SDDP.AbstractDualityHandlerType
AbstractDualityHandler

The abstract type for the duality handler interface.

source
SDDP.ContinuousConicDualityType
ContinuousConicDuality()

Compute dual variables in the backward pass using conic duality, relaxing any binary or integer restrictions as necessary.

Theory

Given the problem

min Cᵢ(x̄, u, w) + θᵢ
  st (x̄, x′, u) in Xᵢ(w) ∩ S
     x̄ - x == 0          [λ]

where S ⊆ ℝ×ℤ, we relax integrality and use conic duality to solve for λ in the problem:

min Cᵢ(x̄, u, w) + θᵢ
  st (x̄, x′, u) in Xᵢ(w)
-    x̄ - x == 0          [λ]
source
SDDP.LagrangianDualityType
LagrangianDuality(;
+    x̄ - x == 0          [λ]
source
SDDP.LagrangianDualityType
LagrangianDuality(;
     method::LocalImprovementSearch.AbstractSearchMethod =
         LocalImprovementSearch.BFGS(100),
 )

Obtain dual variables in the backward pass using Lagrangian duality.

Arguments

  • method: the LocalImprovementSearch method for maximizing the Lagrangian dual problem.

Theory

Given the problem

min Cᵢ(x̄, u, w) + θᵢ
  st (x̄, x′, u) in Xᵢ(w) ∩ S
     x̄ - x == 0          [λ]

where S ⊆ ℝ×ℤ, we solve the problem max L(λ), where:

L(λ) = min Cᵢ(x̄, u, w) + θᵢ - λ' h(x̄)
-        st (x̄, x′, u) in Xᵢ(w) ∩ S

and where h(x̄) = x̄ - x.

source
SDDP.StrengthenedConicDualityType
StrengthenedConicDuality()

Obtain dual variables in the backward pass using strengthened conic duality.

Theory

Given the problem

min Cᵢ(x̄, u, w) + θᵢ
+        st (x̄, x′, u) in Xᵢ(w) ∩ S

and where h(x̄) = x̄ - x.

source
SDDP.StrengthenedConicDualityType
StrengthenedConicDuality()

Obtain dual variables in the backward pass using strengthened conic duality.

Theory

Given the problem

min Cᵢ(x̄, u, w) + θᵢ
  st (x̄, x′, u) in Xᵢ(w) ∩ S
     x̄ - x == 0          [λ]

we first obtain an estimate for λ using ContinuousConicDuality.

Then, we evaluate the Lagrangian function:

L(λ) = min Cᵢ(x̄, u, w) + θᵢ - λ' (x̄ - x`)
-        st (x̄, x′, u) in Xᵢ(w) ∩ S

to obtain a better estimate of the intercept.

source
SDDP.BanditDualityType
BanditDuality()

Formulates the problem of choosing a duality handler as a multi-armed bandit problem. The arms to choose between are:

Our problem isn't a typical multi-armed bandit for two reasons:

  1. The reward distribution is non-stationary (each arm converges to 0 as it keeps getting pulled).
  2. The distribution of rewards is dependent on the history of the arms that were chosen.

We choose a very simple heuristic: pick the arm with the best mean + 1 standard deviation. That should ensure we consistently pick the arm with the best likelihood of improving the value function.

In future, we should consider discounting the rewards of earlier iterations, and focus more on the more-recent rewards.

source

Simulating the policy

SDDP.simulateFunction
simulate(
+        st (x̄, x′, u) in Xᵢ(w) ∩ S

to obtain a better estimate of the intercept.

source
SDDP.BanditDualityType
BanditDuality()

Formulates the problem of choosing a duality handler as a multi-armed bandit problem. The arms to choose between are:

Our problem isn't a typical multi-armed bandit for two reasons:

  1. The reward distribution is non-stationary (each arm converges to 0 as it keeps getting pulled).
  2. The distribution of rewards is dependent on the history of the arms that were chosen.

We choose a very simple heuristic: pick the arm with the best mean + 1 standard deviation. That should ensure we consistently pick the arm with the best likelihood of improving the value function.

In future, we should consider discounting the rewards of earlier iterations, and focus more on the more-recent rewards.

source

Simulating the policy

SDDP.simulateFunction
simulate(
     model::PolicyGraph,
     number_replications::Int = 1,
     variables::Vector{Symbol} = Symbol[];
@@ -404,65 +404,65 @@
     custom_recorders = Dict{Symbol, Function}(
         :constraint_dual => sp -> JuMP.dual(sp[:my_constraint])
     )
-)

The value of the dual in the first stage of the second replication can be accessed as:

simulation_results[2][1][:constraint_dual]
source
SDDP.calculate_boundFunction
SDDP.calculate_bound(
+)

The value of the dual in the first stage of the second replication can be accessed as:

simulation_results[2][1][:constraint_dual]
source
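A short sketch of a typical post-training use; :x is a hypothetical variable name registered in the subproblems:

simulations = SDDP.simulate(model, 50, [:x])
# :stage_objective is always recorded, so the cost of each replication is:
total_costs = [sum(stage[:stage_objective] for stage in replication) for replication in simulations]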
SDDP.calculate_boundFunction
SDDP.calculate_bound(
     model::PolicyGraph,
     state::Dict{Symbol,Float64} = model.initial_root_state;
     risk_measure::AbstractRiskMeasure = Expectation(),
-)

Calculate the lower bound (if minimizing, otherwise upper bound) of the problem model at the point state, assuming the risk measure at the root node is risk_measure.

source
SDDP.add_all_cutsFunction
add_all_cuts(model::PolicyGraph)

Add all cuts that may have been deleted back into the model.

Explanation

During the solve, SDDP.jl may decide to remove cuts for a variety of reasons.

These can include cuts that define the optimal value function, particularly around the extremes of the state-space (e.g., reservoirs empty).

This function ensures that all cuts discovered are added back into the model.

You should call this after train and before simulate.

source

Decision rules

SDDP.DecisionRuleType
DecisionRule(model::PolicyGraph{T}; node::T)

Create a decision rule for node node in model.

Example

rule = SDDP.DecisionRule(model; node = 1)
source
SDDP.evaluateFunction
evaluate(
+)

Calculate the lower bound (if minimizing, otherwise upper bound) of the problem model at the point state, assuming the risk measure at the root node is risk_measure.

source
SDDP.add_all_cutsFunction
add_all_cuts(model::PolicyGraph)

Add all cuts that may have been deleted back into the model.

Explanation

During the solve, SDDP.jl may decide to remove cuts for a variety of reasons.

These can include cuts that define the optimal value function, particularly around the extremes of the state-space (e.g., reservoirs empty).

This function ensures that all cuts discovered are added back into the model.

You should call this after train and before simulate.

source
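A sketch of the intended call order:

SDDP.train(model; iteration_limit = 100)
SDDP.add_all_cuts(model)
simulations = SDDP.simulate(model, 100)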

Decision rules

SDDP.DecisionRuleType
DecisionRule(model::PolicyGraph{T}; node::T)

Create a decision rule for node node in model.

Example

rule = SDDP.DecisionRule(model; node = 1)
source
SDDP.evaluateFunction
evaluate(
     rule::DecisionRule;
     incoming_state::Dict{Symbol,Float64},
     noise = nothing,
     controls_to_record = Symbol[],
-)

Evaluate the decision rule rule at the point described by the incoming_state and noise.

If the node is deterministic, omit the noise argument.

Pass a list of symbols to controls_to_record to save the optimal primal solution corresponding to the names registered in the model.

source
evaluate(
+)

Evaluate the decision rule rule at the point described by the incoming_state and noise.

If the node is deterministic, omit the noise argument.

Pass a list of symbols to controls_to_record to save the optimal primal solution corresponding to the names registered in the model.

source
evaluate(
     V::ValueFunction,
     point::Dict{Union{Symbol,String},<:Real}
     objective_state = nothing,
     belief_state = nothing
-)

Evaluate the value function V at point in the state-space.

Returns a tuple containing the height of the function, and the subgradient w.r.t. the convex state-variables.

Examples

evaluate(V, Dict(:volume => 1.0))

If the state variable is constructed like @variable(sp, volume[1:4] >= 0, SDDP.State, initial_value = 0.0), use [i] to index the state variable:

evaluate(V, Dict(Symbol("volume[1]") => 1.0))

You can also use strings or symbols for the keys.

evaluate(V, Dict("volume[1]" => 1))
source
evaluate(V::ValueFunction{Nothing, Nothing}; kwargs...)

Evaluate the value function V at the point in the state-space specified by kwargs.

Examples

evaluate(V; volume = 1)
source
evaluate(
     model::PolicyGraph{T},
     validation_scenarios::ValidationScenarios{T,S},
 ) where {T,S}

Evaluate the performance of the policy contained in model after a call to train on the scenarios specified by validation_scenarios.

Examples

model, validation_scenarios = read_from_file("my_model.sof.json")
 train(model; iteration_limit = 100)
simulations = evaluate(model, validation_scenarios)
source

Visualizing the policy

SDDP.SpaghettiPlotType
SDDP.SpaghettiPlot(; stages, scenarios)

Initialize a new SpaghettiPlot with stages stages and scenarios number of replications.

source
SDDP.add_spaghettiFunction
SDDP.add_spaghetti(data_function::Function, plt::SpaghettiPlot; kwargs...)

Description

Add a new figure to the SpaghettiPlot plt, where the y-value of the scenario-th line at x = stage is given by data_function(plt.simulations[scenario][stage]).

Keyword arguments

  • xlabel: set the x-axis label
  • ylabel: set the y-axis label
  • title: set the title of the plot
  • ymin: set the minimum y value
  • ymax: set the maximum y value
  • cumulative: plot the additive accumulation of the value across the stages
  • interpolate: interpolation method for lines between stages.

Defaults to "linear" see the d3 docs for all options.

Examples

simulations = simulate(model, 10)
 plt = SDDP.spaghetti_plot(simulations)
 SDDP.add_spaghetti(plt; title = "Stage objective") do data
     return data[:stage_objective]
end
source
SDDP.publication_plotFunction
SDDP.publication_plot(
     data_function, simulations;
     quantile = [0.0, 0.1, 0.25, 0.5, 0.75, 0.9, 1.0],
     kwargs...)

Create a Plots.jl recipe plot of the simulations.

See Plots.jl for the list of keyword arguments.

Examples

SDDP.publication_plot(simulations; title = "My title") do data
     return data[:stage_objective]
end
source
SDDP.ValueFunctionType
ValueFunction

A representation of the value function. SDDP.jl uses the following unique representation of the value function that is undocumented in the literature.

It supports three types of state variables:

  1. x - convex "resource" states
  2. b - concave "belief" states
  3. y - concave "objective" states

In addition, we have three types of cuts:

  1. Single-cuts (also called "average" cuts in the literature), which involve the risk-adjusted expectation of the cost-to-go.
  2. Multi-cuts, which use a different cost-to-go term for each realization w.
  3. Risk-cuts, which correspond to the facets of the dual interpretation of a coherent risk measure.

Therefore, ValueFunction returns a JuMP model of the following form:

V(x, b, y) = min: μᵀb + νᵀy + θ
              s.t. # "Single" / "Average" cuts
                   μᵀb(j) + νᵀy(j) + θ >= α(j) + xᵀβ(j), ∀ j ∈ J
                   # "Multi" cuts
                   μᵀb(k) + νᵀy(k) + φ(w) >= α(k, w) + xᵀβ(k, w), ∀w ∈ Ω, k ∈ K
                   # "Risk-set" cuts
                  θ ≥ Σ{p(k, w) * φ(w)}_w - μᵀb(k) - νᵀy(k), ∀ k ∈ K
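
As a short, hedged sketch (mirroring the hydro-thermal example elsewhere in this documentation, and assuming model is a trained policy graph with a state variable volume), the value function of the first node can be constructed and queried as follows:

V = SDDP.ValueFunction(model[1])
# Returns the height of the value function and the subgradient with respect to
# the convex state variables:
height, subgradient = SDDP.evaluate(V, Dict(:volume => 100.0))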
source
SDDP.evaluateMethod
evaluate(
     V::ValueFunction,
     point::Dict{Union{Symbol,String},<:Real}
     objective_state = nothing,
     belief_state = nothing
)

Evaluate the value function V at point in the state-space.

Returns a tuple containing the height of the function, and the subgradient w.r.t. the convex state-variables.

Examples

evaluate(V, Dict(:volume => 1.0))

If the state variable is constructed like @variable(sp, volume[1:4] >= 0, SDDP.State, initial_value = 0.0), use [i] to index the state variable:

evaluate(V, Dict(Symbol("volume[1]") => 1.0))

You can also use strings or symbols for the keys.

evaluate(V, Dict("volume[1]" => 1))
source
SDDP.plotFunction
plot(plt::SpaghettiPlot[, filename::String]; open::Bool = true)

Plot the SpaghettiPlot plt to filename. If filename is not given, it will be saved to a temporary directory. If open = true, then a browser window will be opened to display the resulting HTML file.
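
Example

Continuing the add_spaghetti example above (the output filename is an assumption), the plot can be written to disk without opening a browser:

SDDP.plot(plt, "spaghetti.html"; open = false)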

source

Debugging the model

SDDP.write_subproblem_to_fileFunction
write_subproblem_to_file(
     node::Node,
     filename::String;
     throw_error::Bool = false,
)

Write the subproblem contained in node to the file filename.

The throw_error argument is used internally by SDDP.jl. If set, an error will be thrown.

Example

SDDP.write_subproblem_to_file(model[1], "subproblem_1.lp")
source
SDDP.deterministic_equivalentFunction
deterministic_equivalent(
     pg::PolicyGraph{T},
     optimizer = nothing;
     time_limit::Union{Real,Nothing} = 60.0,
)

Form a JuMP model that represents the deterministic equivalent of the problem.

Examples

deterministic_equivalent(model)
deterministic_equivalent(model, HiGHS.Optimizer)
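
Because the returned object is a JuMP model, it can be solved directly. A hedged sketch, assuming HiGHS and JuMP are loaded and the policy graph is small enough for its deterministic equivalent to be tractable:

det = SDDP.deterministic_equivalent(model, HiGHS.Optimizer)
JuMP.optimize!(det)
JuMP.objective_value(det)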
source

StochOptFormat

SDDP.write_to_fileFunction
write_to_file(
     model::PolicyGraph,
     filename::String;
     compression::MOI.FileFormats.AbstractCompressionScheme =
         MOI.FileFormats.AutomaticCompression(),
     kwargs...
)

Write model to filename in the StochOptFormat file format.

Pass an argument to compression to override the default of automatically detecting the file compression to use based on the extension of filename.

See Base.write(::IO, ::PolicyGraph) for information on the keyword arguments that can be provided.

Warning

This function is experimental. See the full warning in Base.write(::IO, ::PolicyGraph).

Examples

write_to_file(model, "my_model.sof.json"; validation_scenarios = 10)
source
SDDP.read_from_fileFunction
read_from_file(
     filename::String;
     compression::MOI.FileFormats.AbstractCompressionScheme =
         MOI.FileFormats.AutomaticCompression(),
     kwargs...
)::Tuple{PolicyGraph, ValidationScenarios}

Return a tuple containing a PolicyGraph object and a ValidationScenarios read from filename in the StochOptFormat file format.

Pass an argument to compression to override the default of automatically detecting the file compression to use based on the extension of filename.

See Base.read(::IO, ::Type{PolicyGraph}) for information on the keyword arguments that can be provided.

Warning

This function is experimental. See the full warning in Base.read(::IO, ::Type{PolicyGraph}).

Examples

model, validation_scenarios = read_from_file("my_model.sof.json")
source
Base.writeMethod
Base.write(
     io::IO,
     model::PolicyGraph;
     validation_scenarios::Union{Nothing,Int,ValidationScenarios} = nothing,
@@ -478,15 +478,15 @@
         date = "2020-07-20",
         description = "Example problem for the SDDP.jl documentation",
     )
end
source
Base.readMethod
Base.read(
     io::IO,
     ::Type{PolicyGraph};
     bound::Float64 = 1e6,
 )::Tuple{PolicyGraph,ValidationScenarios}

Return a tuple containing a PolicyGraph object and a ValidationScenarios read from io in the StochOptFormat file format.

See also: evaluate.

Compatibility

Warning

This function is experimental. Things may change between commits. You should not rely on this functionality as a long-term file format (yet).

In addition to potential changes to the underlying format, only a subset of possible modifications are supported. These include:

  • Additive random variables in the constraints or in the objective
  • Multiplicative random variables in the objective

If your model uses something other than this, this function may throw an error or silently build a non-convex model.

Examples

open("my_model.sof.json", "r") do io
     model, validation_scenarios = read(io, PolicyGraph)
end
source
SDDP.evaluateMethod
evaluate(
     model::PolicyGraph{T},
     validation_scenarios::ValidationScenarios{T,S},
 ) where {T,S}

Evaluate the performance of the policy contained in model after a call to train on the scenarios specified by validation_scenarios.

Examples

model, validation_scenarios = read_from_file("my_model.sof.json")
 train(model; iteration_limit = 100)
simulations = evaluate(model, validation_scenarios)
source
SDDP.ValidationScenariosType
ValidationScenarios{T,S}(scenarios::Vector{ValidationScenario{T,S}})

An AbstractSamplingScheme based on a vector of scenarios.

Each scenario is a vector of Tuple{T, S} where the first element is the node to visit and the second element is the realization of the stagewise-independent noise term. Pass nothing if the node is deterministic.
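
Example

A hedged sketch of constructing a small set of scenarios by hand for a three-stage linear policy graph. The node indices and noise realizations are placeholders and must correspond to the nodes and noise terms defined in model:

scenarios = SDDP.ValidationScenarios([
    SDDP.ValidationScenario([(1, 0.0), (2, 10.0), (3, 50.0)]),
    SDDP.ValidationScenario([(1, 0.0), (2, 30.0), (3, 10.0)]),
])
simulations = SDDP.evaluate(model, scenarios)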

source
SDDP.ValidationScenarioType
ValidationScenario{T,S}(scenario::Vector{Tuple{T,S}})

A single scenario for testing.

See also: ValidationScenarios.

source
diff --git a/dev/changelog/index.html b/dev/changelog/index.html index 5781cd247..b747c1750 100644 --- a/dev/changelog/index.html +++ b/dev/changelog/index.html @@ -1,2 +1,2 @@ -Release notes · SDDP.jl

Release notes

The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.

v1.6.2 (August 24, 2023)

Fixed

  • MSPFormat now detects and exploits stagewise independent lattices (#653)
  • Fixed set_optimizer for models read from file (#654)

Other

  • Fixed typo in pglib_opf.jl (#647)
  • Fixed documentation build and added color (#652)

v1.6.1 (July 20, 2023)

Fixed

  • Fixed bugs in MSPFormat reader (#638) (#639)

Other

  • Clarified OutOfSampleMonteCarlo docstring (#643)

v1.6.0 (July 3, 2023)

Added

Other

v1.5.1 (June 30, 2023)

This release contains a number of minor code changes, but it has a large impact on the content that is printed to screen. In particular, we now log periodically, instead of each iteration, and a "good" stopping rule is used as the default if none are specified. Try using SDDP.train(model) to see the difference.

Other

  • Fixed various typos in the documentation (#617)
  • Fixed printing test after changes in JuMP (#618)
  • Set SimulationStoppingRule as the default stopping rule (#619)
  • Changed the default logging frequency. Pass log_every_seconds = 0.0 to train to revert to the old behavior. (#620)
  • Added example usage with Distributions.jl (@slwu89) (#622)
  • Removed the numerical issue @warn (#627)
  • Improved the quality of docstrings (#630)

v1.5.0 (May 14, 2023)

Added

  • Added the ability to use a different model for the forward pass. This is a novel feature that lets you train better policies when the model is non-convex or does not have a well-defined dual. See the Alternative forward models tutorial in which we train convex and non-convex formulations of the optimal power flow problem. (#611)

Other

  • Updated missing changelog entries (#608)
  • Removed global variables (#610)
  • Converted the Options struct to keyword arguments. This struct was a private implementation detail, but the change is breaking if you developed an extension to SDDP that touched these internals. (#612)
  • Fixed some typos (#613)

v1.4.0 (May 8, 2023)

Added

Fixed

  • Fixed parsing of some MSPFormat files (#602) (#604)
  • Fixed printing in header (#605)

v1.3.0 (May 3, 2023)

Added

  • Added experimental support for SDDP.MSPFormat.read_from_file (#593)

Other

  • Updated to StochOptFormat v0.3 (#600)

v1.2.1 (May 1, 2023)

Fixed

  • Fixed log_every_seconds (#597)

v1.2.0 (May 1, 2023)

Added

Other

  • Tweaked how the log is printed (#588)
  • Updated to StochOptFormat v0.2 (#592)

v1.1.4 (April 10, 2023)

Fixed

  • Logs are now flushed every iteration (#584)

Other

  • Added docstrings to various functions (#581)
  • Minor documentation updates (#580)
  • Clarified integrality documentation (#582)
  • Updated the README (#585)
  • Number of numerical issues is now printed to the log (#586)

v1.1.3 (April 2, 2023)

Other

v1.1.2 (March 18, 2023)

Other

v1.1.1 (March 16, 2023)

Other

  • Fixed email in Project.toml
  • Added notebook to documentation tutorials (#571)

v1.1.0 (January 12, 2023)

Added

v1.0.0 (January 3, 2023)

Although we're bumping MAJOR version, this is a non-breaking release. Going forward:

  • New features will bump the MINOR version
  • Bug fixes, maintenance, and documentation updates will bump the PATCH version
  • We will support only the Long Term Support (currently v1.6.7) and the latest patch (currently v1.8.4) releases of Julia. Updates to the LTS version will bump the MINOR version
  • Updates to the compat bounds of package dependencies will bump the PATCH version.

We do not intend any breaking changes to the public API, which would require a new MAJOR release. The public API is everything defined in the documentation. Anything not in the documentation is considered private and may change in any PATCH release.

Added

Other

  • Updated Plotting tools to use live plots (#563)
  • Added vale as a linter (#565)
  • Improved documentation for initializing a parallel scheme (#566)

v0.4.9 (January 3, 2023)

Added

Other

  • Added tutorial on Markov Decision Processes (#556)
  • Added two-stage newsvendor tutorial (#557)
  • Refactored the layout of the documentation (#554) (#555)
  • Updated copyright to 2023 (#558)
  • Fixed errors in the documentation (#561)

v0.4.8 (December 19, 2022)

Added

Fixed

  • Reverted then fixed (#531) because it failed to account for problems with integer variables (#546) (#551)

v0.4.7 (December 17, 2022)

Added

  • Added initial_node support to InSampleMonteCarlo and OutOfSampleMonteCarlo (#535)

Fixed

  • Rethrow InterruptException when solver is interrupted (#534)
  • Fixed numerical recovery when we need dual solutions (#531) (Thanks @bfpc)
  • Fixed re-using the dashboard = true option between solves (#538)
  • Fixed bug when no @stageobjective is set (now defaults to 0.0) (#539)
  • Fixed errors thrown when invalid inputs are provided to add_objective_state (#540)

Other

  • Drop support for Julia versions prior to 1.6 (#533)
  • Updated versions of dependencies (#522) (#533)
  • Switched to HiGHS in the documentation and tests (#533)
  • Added license headers (#519)
  • Fixed link in air conditioning example (#521) (Thanks @conema)
  • Clarified variable naming in deterministic equivalent (#525) (Thanks @lucasprocessi)
  • Added this change log (#536)
  • Cuts are now written to model.cuts.json when numerical instability is discovered. This can aid debugging because it allows you to reload the cuts as of the iteration that caused the numerical issue (#537)

v0.4.6 (March 25, 2022)

Other

  • Updated to JuMP v1.0 (#517)

v0.4.5 (March 9, 2022)

Fixed

  • Fixed issue with set_silent in a subproblem (#510)

Other

  • Fixed many typos (#500) (#501) (#506) (#511) (Thanks @bfpc)
  • Update to JuMP v0.23 (#514)
  • Added auto-regressive tutorial (#507)

v0.4.4 (December 11, 2021)

Added

  • Added BanditDuality (#471)
  • Added benchmark scripts (#475) (#476) (#490)
  • write_cuts_to_file now saves visited states (#468)

Fixed

  • Fixed BoundStalling in a deterministic policy (#470) (#474)
  • Fixed magnitude warning with zero coefficients (#483)

Other

  • Improvements to LagrangianDuality (#481) (#482) (#487)
  • Improvements to StrengthenedConicDuality (#486)
  • Switch to functional form for the tests (#478)
  • Fixed typos (#472) (Thanks @vfdev-5)
  • Update to JuMP v0.22 (#498)

v0.4.3 (August 31, 2021)

Added

  • Added biobjective solver (#462)
  • Added forward_pass_callback (#466)

Other

  • Update tutorials and documentation (#459) (#465)
  • Organize how paper materials are stored (#464)

v0.4.2 (August 24, 2021)

Fixed

  • Fixed a bug in Lagrangian duality (#457)

v0.4.1 (August 23, 2021)

Other

  • Minor changes to our implementation of LagrangianDuality (#454) (#455)

v0.4.0 (August 17, 2021)

Breaking

  • A large refactoring for how we handle stochastic integer programs. This added support for things like SDDP.ContinuousConicDuality and SDDP.LagrangianDuality. It was breaking because we removed the integrality_handler argument to PolicyGraph. (#499) (#453)

Other

  • Documentation improvements (#447) (#448) (#450)

v0.3.17 (July 6, 2021)

Added

Other

  • Display more model attributes (#438)
  • Documentation improvements (#433) (#437) (#439)

v0.3.16 (June 17, 2021)

Added

Other

  • Update risk measure docstrings (#418)

v0.3.15 (June 1, 2021)

Added

Fixed

  • Fixed scoping bug in SDDP.@stageobjective (#407)
  • Fixed a bug when the initial point is infeasible (#411)
  • Set subproblems to silent by default (#409)

Other

  • Add JuliaFormatter (#412)
  • Documentation improvements (#406) (#408)

v0.3.14 (March 30, 2021)

Fixed

  • Fixed O(N^2) behavior in get_same_children (#393)

v0.3.13 (March 27, 2021)

Fixed

  • Fixed bug in print.jl
  • Fixed compat of Reexport (#388)

v0.3.12 (March 22, 2021)

Added

  • Added problem statistics to header (#385) (#386)

Fixed

  • Fixed subtypes in visualization (#384)

v0.3.11 (March 22, 2021)

Fixed

  • Fixed constructor in direct mode (#383)

Other

  • Fix documentation (#379)

v0.3.10 (February 23, 2021)

Fixed

  • Fixed seriescolor in publication plot (#376)

v0.3.9 (February 20, 2021)

Added

  • Add option to simulate with different incoming state (#372)
  • Added warning for cuts with high dynamic range (#373)

Fixed

  • Fixed seriesalpha in publication plot (#375)

v0.3.8 (January 19, 2021)

Other

  • Documentation improvements (#367) (#369) (#370)

v0.3.7 (January 8, 2021)

Other

  • Documentation improvements (#362) (#363) (#365) (#366)
  • Bump copyright (#364)

v0.3.6 (December 17, 2020)

Other

  • Fix typos (#358)
  • Collapse navigation bar in docs (#359)
  • Update TagBot.yml (#361)

v0.3.5 (November 18, 2020)

Other

  • Update citations (#348)
  • Switch to GitHub actions (#355)

v0.3.4 (August 25, 2020)

Added

  • Added non-uniform distributionally robust risk measure (#328)
  • Added numerical recovery functions (#330)
  • Added experimental StochOptFormat (#332) (#336) (#337) (#341) (#343) (#344)
  • Added entropic risk measure (#347)

Other

  • Documentation improvements (#327) (#333) (#339) (#340)

v0.3.3 (June 19, 2020)

Added

  • Added asynchronous support for price and belief states (#325)
  • Added ForwardPass plug-in system (#320)

Fixed

  • Fix check for probabilities in Markovian graph (#322)

v0.3.2 (April 6, 2020)

Added

Other

  • Improve error message in deterministic equivalent (#312)
  • Update to RecipesBase 1.0 (#313)

v0.3.1 (February 26, 2020)

Fixed

  • Fixed filename in integrality_handlers.jl (#304)

v0.3.0 (February 20, 2020)

Breaking

  • Breaking changes to update to JuMP v0.21 (#300).

v0.2.4 (February 7, 2020)

Added

  • Added a counter for the number of total subproblem solves (#301)

Other

  • Update formatter (#298)
  • Added tests (#299)

v0.2.3 (January 24, 2020)

Added

  • Added support for convex risk measures (#294)

Fixed

  • Fixed bug when subproblem is infeasible (#296)
  • Fixed bug in deterministic equivalent (#297)

Other

  • Added example from IJOC paper (#293)

v0.2.2 (January 10, 2020)

Fixed

  • Fixed flaky time limit in tests (#291)

Other

  • Removed MathOptFormat.jl (#289)
  • Update copyright (#290)

v0.2.1 (December 19, 2019)

Added

  • Added support for approximating a Markov lattice (#282) (#285)
  • Add tools for visualizing the value function (#272) (#286)
  • Write .mof.json files on error (#284)

Other

  • Improve documentation (#281) (#283)
  • Update tests for Julia 1.3 (#287)

v0.2.0 (December 16, 2019)

This version added the asynchronous parallel implementation with a few minor breaking changes in how we iterated internally. It didn't break basic user-facing models, only implementations that implemented some of the extension features. It probably could have been a v1.1 release.

Added

  • Added asynchronous parallel implementation (#277)
  • Added roll-out algorithm for cyclic graphs (#279)

Other

  • Improved error messages in PolicyGraph (#271)
  • Added JuliaFormatter (#273) (#276)
  • Fixed compat bounds (#274) (#278)
  • Added documentation for simulating non-standard graphs (#280)

v0.1.0 (October 17, 2019)

A complete rewrite of SDDP.jl based on the policy graph framework. This was essentially a new package. It has minimal code in common with the previous implementation.

Development started on September 28, 2018 in Kokako.jl, and the code was merged into SDDP.jl on March 14, 2019.

The pull request SDDP.jl#180 lists the 29 issues that the rewrite closed.

v0.0.1 (April 18, 2018)

Initial release. Development had been underway since January 22, 2016 in the StochDualDynamicProgram.jl repository. The last development commit there was April 5, 2017. Work then continued in this repository for a year before the first tagged release.

diff --git a/dev/examples/FAST_hydro_thermal/index.html b/dev/examples/FAST_hydro_thermal/index.html index 5d605fe3f..567dc3a17 100644 --- a/dev/examples/FAST_hydro_thermal/index.html +++ b/dev/examples/FAST_hydro_thermal/index.html @@ -60,13 +60,13 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 0.000000e+00 1.000000e+01 2.658129e-03 5 1 - 20 0.000000e+00 1.000000e+01 7.234311e-02 104 1 + 1 0.000000e+00 1.000000e+01 2.280951e-03 5 1 + 20 0.000000e+00 1.000000e+01 1.544189e-02 104 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 7.234311e-02 +total time (s) : 1.544189e-02 total solves : 104 best bound : 1.000000e+01 simulation ci : 1.100000e+01 ± 4.474009e+00 numeric issues : 0 --------------------------------------------------------------------

This page was generated using Literate.jl.

+-------------------------------------------------------------------

This page was generated using Literate.jl.

diff --git a/dev/examples/FAST_production_management/index.html b/dev/examples/FAST_production_management/index.html index 940f4e790..599437916 100644 --- a/dev/examples/FAST_production_management/index.html +++ b/dev/examples/FAST_production_management/index.html @@ -31,4 +31,4 @@ end fast_production_management(cut_type = SDDP.SINGLE_CUT) -fast_production_management(cut_type = SDDP.MULTI_CUT)
Test Passed

This page was generated using Literate.jl.

+fast_production_management(cut_type = SDDP.MULTI_CUT)
Test Passed

This page was generated using Literate.jl.

diff --git a/dev/examples/FAST_quickstart/index.html b/dev/examples/FAST_quickstart/index.html index 125e2f194..2b4c98cd0 100644 --- a/dev/examples/FAST_quickstart/index.html +++ b/dev/examples/FAST_quickstart/index.html @@ -29,4 +29,4 @@ @test SDDP.calculate_bound(model) == -2 end -fast_quickstart()
Test Passed

This page was generated using Literate.jl.

+fast_quickstart()
Test Passed

This page was generated using Literate.jl.

diff --git a/dev/examples/Hydro_thermal/index.html b/dev/examples/Hydro_thermal/index.html index 33c35cd96..885c2697b 100644 --- a/dev/examples/Hydro_thermal/index.html +++ b/dev/examples/Hydro_thermal/index.html @@ -55,19 +55,16 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 4.610000e+02 1.053980e+02 2.112069e-01 447 1 - 2 2.247753e+01 1.105784e+02 1.480200e+00 7125 1 - 19 5.001305e+02 2.310660e+02 2.495041e+00 11664 1 - 37 2.046146e+02 2.353603e+02 3.500531e+00 15486 1 - 54 5.678596e+02 2.361410e+02 4.571336e+00 19509 1 - 67 1.350000e+01 2.363330e+02 5.572975e+00 22920 1 - 72 9.559445e+01 2.363530e+02 7.193695e+00 30522 1 - 89 3.000833e+01 2.364000e+02 8.201371e+00 33753 1 - 92 1.190877e+02 2.364041e+02 9.656465e+00 40749 1 - 100 3.549607e+01 2.364137e+02 1.007170e+01 41973 1 + 1 4.610000e+02 1.053980e+02 1.672618e-01 447 1 + 7 4.144003e+02 2.211882e+02 1.196267e+00 9384 1 + 38 2.386505e+02 2.354263e+02 2.213321e+00 15705 1 + 59 7.875685e+02 2.362676e+02 3.234388e+00 21156 1 + 72 9.559445e+01 2.363530e+02 4.553599e+00 30522 1 + 92 1.190877e+02 2.364041e+02 6.017122e+00 40749 1 + 100 3.549607e+01 2.364137e+02 6.274942e+00 41973 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 1.007170e+01 +total time (s) : 6.274942e+00 total solves : 41973 best bound : 2.364137e+02 simulation ci : 2.152381e+02 ± 4.478506e+01 @@ -75,4 +72,4 @@ -------------------------------------------------------------------

Simulating the policy

After training, we can simulate the policy using SDDP.simulate.

sims = SDDP.simulate(model, 100, [:g_t])
 mu = round(mean([s[1][:g_t] for s in sims]), digits = 2)
 println("On average, $(mu) units of thermal are used in the first stage.")
On average, 1.6 units of thermal are used in the first stage.

Extracting the water values

Finally, we can use SDDP.ValueFunction and SDDP.evaluate to obtain and evaluate the value function at different points in the state-space. Note that since we are minimizing, the price has a negative sign: each additional unit of water leads to a decrease in the expected long-run cost.

V = SDDP.ValueFunction(model[1])
-cost, price = SDDP.evaluate(V, x = 10)
(233.51884639362532, Dict(:x => -0.6430453552131385))

This page was generated using Literate.jl.

+cost, price = SDDP.evaluate(V, x = 10)
(233.51884639362532, Dict(:x => -0.6430453552131385))

This page was generated using Literate.jl.

diff --git a/dev/examples/SDDP.log b/dev/examples/SDDP.log index 0a9f38ca4..a18ea6fd1 100644 --- a/dev/examples/SDDP.log +++ b/dev/examples/SDDP.log @@ -25,11 +25,11 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 0.000000e+00 1.000000e+01 2.658129e-03 5 1 - 20 0.000000e+00 1.000000e+01 7.234311e-02 104 1 + 1 0.000000e+00 1.000000e+01 2.280951e-03 5 1 + 20 0.000000e+00 1.000000e+01 1.544189e-02 104 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 7.234311e-02 +total time (s) : 1.544189e-02 total solves : 104 best bound : 1.000000e+01 simulation ci : 1.100000e+01 ± 4.474009e+00 @@ -61,17 +61,17 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 5 -4.260000e+01 -2.396000e+01 1.107407e-02 52 1 - 10 -2.396000e+01 -2.396000e+01 1.697993e-02 92 1 - 15 -5.320000e+00 -2.396000e+01 2.333307e-02 132 1 - 20 -4.260000e+01 -2.396000e+01 3.006291e-02 172 1 - 25 -2.396000e+01 -2.396000e+01 3.943610e-02 224 1 - 30 -4.260000e+01 -2.396000e+01 4.719210e-02 264 1 - 35 -2.396000e+01 -2.396000e+01 5.684710e-02 304 1 - 40 -2.396000e+01 -2.396000e+01 6.614709e-02 344 1 + 5 -4.260000e+01 -2.396000e+01 7.133007e-03 52 1 + 10 -2.396000e+01 -2.396000e+01 1.086497e-02 92 1 + 15 -5.320000e+00 -2.396000e+01 1.478791e-02 132 1 + 20 -4.260000e+01 -2.396000e+01 1.906395e-02 172 1 + 25 -2.396000e+01 -2.396000e+01 2.456999e-02 224 1 + 30 -4.260000e+01 -2.396000e+01 2.970195e-02 264 1 + 35 -2.396000e+01 -2.396000e+01 3.512287e-02 304 1 + 40 -2.396000e+01 -2.396000e+01 4.095292e-02 344 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 6.614709e-02 +total time (s) : 4.095292e-02 total solves : 344 best bound : -2.396000e+01 simulation ci : -2.264814e+01 ± 3.665804e+00 @@ -81,21 +81,21 @@ numeric issues : 0 ──────────────────────────────────────────────────────────────────────────────── Time Allocations ─────────────────────── ──────────────────────── - Tot / % measured: 75.7ms / 80.3% 7.69MiB / 85.9% + Tot / % measured: 49.3ms / 75.2% 7.69MiB / 85.9% Section ncalls time %tot avg alloc %tot avg ──────────────────────────────────────────────────────────────────────────────── - backward_pass 40 37.4ms 61.6% 936μs 5.60MiB 84.7% 143KiB - solve_subproblem 160 20.6ms 34.0% 129μs 768KiB 11.4% 4.80KiB - get_dual_solution 160 1.05ms 1.7% 6.58μs 160KiB 2.4% 1.00KiB - prepare_backward... 80 88.4μs 0.1% 1.11μs 12.5KiB 0.2% 160B - forward_pass 40 15.1ms 24.8% 378μs 785KiB 11.6% 19.6KiB - solve_subproblem 120 13.6ms 22.4% 114μs 579KiB 8.6% 4.83KiB - get_dual_solution 120 243μs 0.4% 2.03μs 65.6KiB 1.0% 560B - sample_scenario 40 267μs 0.4% 6.68μs 36.9KiB 0.5% 944B - calculate_bound 40 8.20ms 13.5% 205μs 227KiB 3.4% 5.67KiB - get_dual_solution 40 79.8μs 0.1% 2.00μs 21.9KiB 0.3% 560B - get_dual_solution 36 58.5μs 0.1% 1.62μs 19.7KiB 0.3% 560B + backward_pass 40 23.8ms 64.2% 596μs 5.60MiB 84.7% 143KiB + solve_subproblem 160 11.8ms 31.7% 73.6μs 768KiB 11.4% 4.80KiB + get_dual_solution 160 568μs 1.5% 3.55μs 160KiB 2.4% 1.00KiB + prepare_backward... 
80 57.6μs 0.2% 720ns 12.5KiB 0.2% 160B + forward_pass 40 7.90ms 21.3% 197μs 785KiB 11.6% 19.6KiB + solve_subproblem 120 7.05ms 19.0% 58.8μs 579KiB 8.6% 4.83KiB + get_dual_solution 120 102μs 0.3% 849ns 65.6KiB 1.0% 560B + sample_scenario 40 180μs 0.5% 4.49μs 36.9KiB 0.5% 944B + calculate_bound 40 5.37ms 14.5% 134μs 227KiB 3.4% 5.67KiB + get_dual_solution 40 43.1μs 0.1% 1.08μs 21.9KiB 0.3% 560B + get_dual_solution 36 26.5μs 0.1% 736ns 19.7KiB 0.3% 560B ──────────────────────────────────────────────────────────────────────────────── ------------------------------------------------------------------- @@ -123,17 +123,17 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 5 -2.396000e+01 -2.396000e+01 9.634972e-03 52 1 - 10 -2.396000e+01 -2.396000e+01 1.653099e-02 92 1 - 15 -4.260000e+01 -2.396000e+01 2.387404e-02 132 1 - 20 -4.260000e+01 -2.396000e+01 3.270102e-02 172 1 - 25 -5.320000e+00 -2.396000e+01 4.389501e-02 224 1 - 30 -5.320000e+00 -2.396000e+01 5.470300e-02 264 1 - 35 -2.396000e+01 -2.396000e+01 1.128469e-01 304 1 - 40 -5.320000e+00 -2.396000e+01 1.258509e-01 344 1 + 5 -2.396000e+01 -2.396000e+01 6.838799e-03 52 1 + 10 -2.396000e+01 -2.396000e+01 1.101780e-02 92 1 + 15 -4.260000e+01 -2.396000e+01 1.582098e-02 132 1 + 20 -4.260000e+01 -2.396000e+01 2.139497e-02 172 1 + 25 -5.320000e+00 -2.396000e+01 2.855301e-02 224 1 + 30 -5.320000e+00 -2.396000e+01 3.566790e-02 264 1 + 35 -2.396000e+01 -2.396000e+01 4.368091e-02 304 1 + 40 -5.320000e+00 -2.396000e+01 5.257487e-02 344 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 1.258509e-01 +total time (s) : 5.257487e-02 total solves : 344 best bound : -2.396000e+01 simulation ci : -1.957570e+01 ± 4.663827e+00 @@ -143,21 +143,21 @@ numeric issues : 0 ──────────────────────────────────────────────────────────────────────────────── Time Allocations ─────────────────────── ──────────────────────── - Tot / % measured: 132ms / 91.3% 13.4MiB / 92.4% + Tot / % measured: 57.0ms / 85.8% 13.4MiB / 92.4% Section ncalls time %tot avg alloc %tot avg ──────────────────────────────────────────────────────────────────────────────── - backward_pass 40 97.8ms 81.2% 2.45ms 11.3MiB 91.8% 290KiB - solve_subproblem 160 20.5ms 17.0% 128μs 770KiB 6.1% 4.81KiB - get_dual_solution 160 1.09ms 0.9% 6.82μs 160KiB 1.3% 1.00KiB - prepare_backward... 80 106μs 0.1% 1.32μs 12.5KiB 0.1% 160B - forward_pass 40 13.7ms 11.3% 342μs 785KiB 6.2% 19.6KiB - solve_subproblem 120 12.2ms 10.1% 102μs 579KiB 4.6% 4.83KiB - get_dual_solution 120 219μs 0.2% 1.83μs 65.6KiB 0.5% 560B - sample_scenario 40 276μs 0.2% 6.89μs 36.8KiB 0.3% 941B - calculate_bound 40 8.85ms 7.4% 221μs 229KiB 1.8% 5.72KiB - get_dual_solution 40 80.0μs 0.1% 2.00μs 21.9KiB 0.2% 560B - get_dual_solution 36 67.3μs 0.1% 1.87μs 19.7KiB 0.2% 560B + backward_pass 40 35.0ms 71.7% 876μs 11.4MiB 91.8% 291KiB + solve_subproblem 160 12.3ms 25.2% 76.9μs 770KiB 6.1% 4.81KiB + get_dual_solution 160 575μs 1.2% 3.60μs 160KiB 1.3% 1.00KiB + prepare_backward... 
80 61.7μs 0.1% 771ns 12.5KiB 0.1% 160B + forward_pass 40 7.70ms 15.8% 192μs 785KiB 6.2% 19.6KiB + solve_subproblem 120 6.83ms 14.0% 56.9μs 579KiB 4.6% 4.83KiB + get_dual_solution 120 111μs 0.2% 928ns 65.6KiB 0.5% 560B + sample_scenario 40 181μs 0.4% 4.53μs 36.8KiB 0.3% 941B + calculate_bound 40 6.09ms 12.5% 152μs 229KiB 1.8% 5.72KiB + get_dual_solution 40 49.1μs 0.1% 1.23μs 21.9KiB 0.2% 560B + get_dual_solution 36 24.6μs 0.1% 683ns 19.7KiB 0.2% 560B ──────────────────────────────────────────────────────────────────────────────── ------------------------------------------------------------------- @@ -185,11 +185,11 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 0.000000e+00 -2.500000e+00 2.443075e-03 5 1 - 40 -2.000000e+00 -2.000000e+00 3.936005e-02 208 1 + 1 0.000000e+00 -2.500000e+00 1.903057e-03 5 1 + 40 -2.000000e+00 -2.000000e+00 2.530003e-02 208 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 3.936005e-02 +total time (s) : 2.530003e-02 total solves : 208 best bound : -2.000000e+00 simulation ci : -1.912500e+00 ± 1.209829e-01 @@ -221,19 +221,16 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 4.610000e+02 1.053980e+02 2.112069e-01 447 1 - 2 2.247753e+01 1.105784e+02 1.480200e+00 7125 1 - 19 5.001305e+02 2.310660e+02 2.495041e+00 11664 1 - 37 2.046146e+02 2.353603e+02 3.500531e+00 15486 1 - 54 5.678596e+02 2.361410e+02 4.571336e+00 19509 1 - 67 1.350000e+01 2.363330e+02 5.572975e+00 22920 1 - 72 9.559445e+01 2.363530e+02 7.193695e+00 30522 1 - 89 3.000833e+01 2.364000e+02 8.201371e+00 33753 1 - 92 1.190877e+02 2.364041e+02 9.656465e+00 40749 1 - 100 3.549607e+01 2.364137e+02 1.007170e+01 41973 1 + 1 4.610000e+02 1.053980e+02 1.672618e-01 447 1 + 7 4.144003e+02 2.211882e+02 1.196267e+00 9384 1 + 38 2.386505e+02 2.354263e+02 2.213321e+00 15705 1 + 59 7.875685e+02 2.362676e+02 3.234388e+00 21156 1 + 72 9.559445e+01 2.363530e+02 4.553599e+00 30522 1 + 92 1.190877e+02 2.364041e+02 6.017122e+00 40749 1 + 100 3.549607e+01 2.364137e+02 6.274942e+00 41973 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 1.007170e+01 +total time (s) : 6.274942e+00 total solves : 41973 best bound : 2.364137e+02 simulation ci : 2.152381e+02 ± 4.478506e+01 @@ -266,19 +263,19 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 -4.803403e+00 -4.448049e+00 4.677169e-01 1900 1 - 20 -3.800496e+00 -4.400793e+00 6.814821e-01 3300 1 - 30 -4.183675e+00 -4.383006e+00 9.143960e-01 4700 1 - 40 -4.471246e+00 -4.372586e+00 1.160897e+00 6100 1 - 50 -4.387360e+00 -4.364331e+00 1.413388e+00 7500 1 - 60 -4.955582e+00 -4.359563e+00 1.733061e+00 8900 1 - 70 -4.008356e+00 -4.357128e+00 1.990283e+00 10300 1 - 80 -4.078852e+00 -4.355430e+00 2.387869e+00 12200 1 - 90 -4.083367e+00 -4.353973e+00 2.664239e+00 13600 1 - 100 -4.610569e+00 -4.352379e+00 2.982219e+00 15000 1 + 10 -4.803403e+00 -4.448049e+00 2.795870e-01 1900 1 + 20 -3.800496e+00 -4.400793e+00 4.447761e-01 3300 1 + 30 -4.183675e+00 -4.383006e+00 
5.797939e-01 4700 1 + 40 -4.471246e+00 -4.372586e+00 7.217591e-01 6100 1 + 50 -4.387360e+00 -4.364331e+00 8.710771e-01 7500 1 + 60 -4.955582e+00 -4.359563e+00 1.026326e+00 8900 1 + 70 -4.008356e+00 -4.357128e+00 1.221396e+00 10300 1 + 80 -4.078852e+00 -4.355430e+00 1.464553e+00 12200 1 + 90 -4.083367e+00 -4.353973e+00 1.629871e+00 13600 1 + 100 -4.610569e+00 -4.352379e+00 1.796398e+00 15000 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 2.982219e+00 +total time (s) : 1.796398e+00 total solves : 15000 best bound : -4.352379e+00 simulation ci : -4.343962e+00 ± 8.572490e-02 @@ -310,18 +307,18 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 -1.433090e+00 -1.475071e+00 1.324730e-01 1050 1 - 20 -1.332851e+00 -1.471658e+00 2.114601e-01 1600 1 - 30 -1.749788e+00 -1.471489e+00 3.764710e-01 2650 1 - 40 -1.708433e+00 -1.471241e+00 4.645541e-01 3200 1 - 50 -1.556006e+00 -1.471138e+00 7.030370e-01 4250 1 - 60 -1.439099e+00 -1.471079e+00 7.905171e-01 4800 1 - 70 -1.518223e+00 -1.471079e+00 9.709320e-01 5850 1 - 80 -1.638423e+00 -1.471075e+00 1.058508e+00 6400 1 - 85 -1.552892e+00 -1.471075e+00 1.103363e+00 6675 1 + 10 -1.433090e+00 -1.475071e+00 6.740212e-02 1050 1 + 20 -1.332851e+00 -1.471658e+00 1.067061e-01 1600 1 + 30 -1.749788e+00 -1.471489e+00 1.950490e-01 2650 1 + 40 -1.708433e+00 -1.471241e+00 2.398951e-01 3200 1 + 50 -1.556006e+00 -1.471138e+00 3.334711e-01 4250 1 + 60 -1.439099e+00 -1.471079e+00 3.828590e-01 4800 1 + 70 -1.518223e+00 -1.471079e+00 5.200610e-01 5850 1 + 80 -1.638423e+00 -1.471075e+00 5.707490e-01 6400 1 + 85 -1.552892e+00 -1.471075e+00 5.964820e-01 6675 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 1.103363e+00 +total time (s) : 5.964820e-01 total solves : 6675 best bound : -1.471075e+00 simulation ci : -1.456520e+00 ± 4.264305e-02 @@ -355,14 +352,14 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 3.455904e+05 3.147347e+05 1.321006e-02 54 1 - 20 3.336455e+05 3.402383e+05 2.419806e-02 104 1 - 30 3.337559e+05 3.403155e+05 3.792405e-02 158 1 - 40 3.337559e+05 3.403155e+05 5.038786e-02 208 1 - 48 3.337559e+05 3.403155e+05 6.127095e-02 248 1 + 10 3.455904e+05 3.147347e+05 8.907795e-03 54 1 + 20 3.336455e+05 3.402383e+05 1.595378e-02 104 1 + 30 3.337559e+05 3.403155e+05 2.475882e-02 158 1 + 40 3.337559e+05 3.403155e+05 3.307700e-02 208 1 + 48 3.337559e+05 3.403155e+05 4.082084e-02 248 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 6.127095e-02 +total time (s) : 4.082084e-02 total solves : 248 best bound : 3.403155e+05 simulation ci : 1.298476e+08 ± 1.785863e+08 @@ -397,14 +394,14 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 4.403329e+05 3.509666e+05 2.340293e-02 92 1 - 20 4.055335e+05 4.054833e+05 4.399490e-02 172 1 - 30 3.959476e+05 4.067125e+05 6.681895e-02 264 1 - 40 3.959476e+05 4.067125e+05 9.112597e-02 344 1 - 47 4.497721e+05 4.067125e+05 1.094100e-01 400 1 + 10 
4.403329e+05 3.509666e+05 1.446986e-02 92 1 + 20 4.055335e+05 4.054833e+05 2.680182e-02 172 1 + 30 3.959476e+05 4.067125e+05 4.148293e-02 264 1 + 40 3.959476e+05 4.067125e+05 5.748487e-02 344 1 + 47 4.497721e+05 4.067125e+05 6.961894e-02 400 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 1.094100e-01 +total time (s) : 6.961894e-02 total solves : 400 best bound : 4.067125e+05 simulation ci : 2.870354e+07 ± 3.809366e+07 @@ -438,11 +435,11 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 8.316000e+03 0.000000e+00 1.641929e-01 14 1 - 40 8.021230e+03 4.074139e+03 3.871439e-01 776 1 + 1 8.316000e+03 0.000000e+00 1.270111e-01 14 1 + 40 8.021230e+03 4.074139e+03 3.083441e-01 776 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 3.871439e-01 +total time (s) : 3.083441e-01 total solves : 776 best bound : 4.074139e+03 simulation ci : 4.270757e+03 ± 6.295085e+02 @@ -475,11 +472,11 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1L 3.000000e+04 6.166667e+04 2.236040e-01 8 1 - 40L 5.500000e+04 6.250000e+04 6.454720e-01 344 1 + 1L 3.000000e+04 6.166667e+04 1.674521e-01 8 1 + 40L 5.500000e+04 6.250000e+04 4.686260e-01 344 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 6.454720e-01 +total time (s) : 4.686260e-01 total solves : 344 best bound : 6.250000e+04 simulation ci : 5.948750e+04 ± 5.671019e+03 @@ -512,11 +509,11 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 1.100000e+05 6.250000e+04 5.578041e-03 8 1 - 20 5.500000e+04 6.250000e+04 8.753991e-02 172 1 + 1 1.100000e+05 6.250000e+04 4.128933e-03 8 1 + 20 5.500000e+04 6.250000e+04 6.006908e-02 172 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 8.753991e-02 +total time (s) : 6.006908e-02 total solves : 172 best bound : 6.250000e+04 simulation ci : 6.250000e+04 ± 8.865729e+03 @@ -548,11 +545,11 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 3.000000e+04 6.250000e+04 6.616831e-03 5 1 - 10 4.000000e+04 6.250000e+04 3.158283e-02 62 1 + 1 3.000000e+04 6.250000e+04 5.136967e-03 5 1 + 10 4.000000e+04 6.250000e+04 2.192712e-02 62 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 3.158283e-02 +total time (s) : 2.192712e-02 total solves : 62 best bound : 6.250000e+04 simulation ci : 6.350000e+04 ± 1.453942e+04 @@ -585,11 +582,11 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1L 6.000000e+00 9.000000e+00 6.429219e-02 6 1 - 20L 9.000000e+00 9.000000e+00 1.416581e-01 123 1 + 1L 6.000000e+00 9.000000e+00 
5.192113e-02 6 1 + 20L 9.000000e+00 9.000000e+00 1.030860e-01 123 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 1.416581e-01 +total time (s) : 1.030860e-01 total solves : 123 best bound : 9.000000e+00 simulation ci : 8.850000e+00 ± 2.940000e-01 @@ -621,18 +618,18 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 5 0.000000e+00 7.223982e-02 2.024889e-02 87 1 - 10 -1.109375e+01 2.605769e-01 3.028989e-02 142 1 - 15 0.000000e+00 1.417586e+00 4.188991e-02 197 1 - 20 1.080025e-12 1.514085e+00 5.375195e-02 252 1 - 25 4.864000e+01 1.514085e+00 1.593828e-01 339 1 - 30 4.864000e+01 1.514085e+00 1.729090e-01 394 1 - 35 -4.263256e-14 1.514085e+00 1.867809e-01 449 1 - 40 -8.870299e+00 1.514085e+00 2.012670e-01 504 1 - 44 4.864000e+01 1.514085e+00 2.164669e-01 548 1 + 5 0.000000e+00 7.223982e-02 1.305914e-02 87 1 + 10 -1.109375e+01 2.605769e-01 1.986408e-02 142 1 + 15 0.000000e+00 1.417586e+00 2.725315e-02 197 1 + 20 1.080025e-12 1.514085e+00 3.567910e-02 252 1 + 25 4.864000e+01 1.514085e+00 1.238091e-01 339 1 + 30 4.864000e+01 1.514085e+00 1.336060e-01 394 1 + 35 -4.263256e-14 1.514085e+00 1.434832e-01 449 1 + 40 -8.870299e+00 1.514085e+00 1.544340e-01 504 1 + 44 4.864000e+01 1.514085e+00 1.637661e-01 548 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 2.164669e-01 +total time (s) : 1.637661e-01 total solves : 548 best bound : 1.514085e+00 simulation ci : 3.658316e+00 ± 6.622759e+00 @@ -664,14 +661,14 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 2.735160e+01 1.397033e+00 2.560239e-01 278 1 - 20 1.102316e+01 1.288927e+00 2.896209e-01 428 1 - 30 -5.003795e+01 1.278410e+00 3.469839e-01 706 1 - 40 -1.787775e+01 1.278410e+00 3.896420e-01 856 1 - 46 1.111084e+01 1.278410e+00 4.167969e-01 946 1 + 10 2.735160e+01 1.397033e+00 2.061160e-01 278 1 + 20 1.102316e+01 1.288927e+00 2.298601e-01 428 1 + 30 -5.003795e+01 1.278410e+00 2.969160e-01 706 1 + 40 -1.787775e+01 1.278410e+00 3.245740e-01 856 1 + 46 1.111084e+01 1.278410e+00 3.417661e-01 946 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 4.167969e-01 +total time (s) : 3.417661e-01 total solves : 946 best bound : 1.278410e+00 simulation ci : 1.179501e+00 ± 6.843349e+00 @@ -703,13 +700,13 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 1.111084e+01 1.278410e+00 6.419611e-02 278 1 - 20 8.362212e+00 1.278410e+00 1.165061e-01 428 1 - 30 8.859026e+00 1.278410e+00 1.973350e-01 706 1 - 40 8.288929e+00 1.278410e+00 3.121021e-01 856 1 + 10 1.111084e+01 1.278410e+00 4.049778e-02 278 1 + 20 8.362212e+00 1.278410e+00 7.501578e-02 428 1 + 30 8.859026e+00 1.278410e+00 1.288450e-01 706 1 + 40 8.288929e+00 1.278410e+00 1.809900e-01 856 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 3.121021e-01 +total time (s) : 1.809900e-01 total solves : 856 best bound : 1.278410e+00 simulation ci : 3.734142e+00 ± 
6.085569e+00 @@ -743,19 +740,19 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 2.345457e+01 1.065473e+01 1.731448e+00 2924 1 - 20 9.845555e+00 1.230410e+01 1.970252e+00 3576 1 - 30 2.844000e+01 1.501681e+01 2.322809e+00 4300 1 - 40 1.135141e+01 1.631382e+01 2.610750e+00 4944 1 - 50 3.598122e+01 1.763877e+01 3.661174e+00 8140 1 - 60 1.052329e+01 1.772819e+01 4.158501e+00 9072 1 - 70 6.571294e+01 1.922883e+01 4.867149e+00 10372 1 - 80 1.575331e+01 1.954102e+01 5.215514e+00 10984 1 - 90 4.004597e+01 1.969810e+01 5.655994e+00 11684 1 - 100 1.325329e+01 1.999494e+01 6.338120e+00 12672 1 + 10 2.345457e+01 1.065473e+01 1.224722e+00 2924 1 + 20 9.845555e+00 1.230410e+01 1.373981e+00 3576 1 + 30 2.844000e+01 1.501681e+01 1.566975e+00 4300 1 + 40 1.135141e+01 1.631382e+01 1.786906e+00 4944 1 + 50 3.598122e+01 1.763877e+01 2.433231e+00 8140 1 + 60 1.052329e+01 1.772819e+01 2.785973e+00 9072 1 + 70 6.571294e+01 1.922883e+01 3.289745e+00 10372 1 + 80 1.575331e+01 1.954102e+01 3.553972e+00 10984 1 + 90 4.004597e+01 1.969810e+01 3.883928e+00 11684 1 + 100 1.325329e+01 1.999494e+01 4.414739e+00 12672 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 6.338120e+00 +total time (s) : 4.414739e+00 total solves : 12672 best bound : 1.999494e+01 simulation ci : 2.304322e+01 ± 5.358983e+00 @@ -790,17 +787,17 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 5 5.000000e+00 9.529528e+00 2.249851e-01 235 1 - 10 6.000000e+00 9.060778e+00 2.648990e-01 310 1 - 15 5.000000e+00 9.060778e+00 3.059101e-01 385 1 - 20 1.000000e+01 9.060778e+00 3.485739e-01 460 1 - 25 4.000000e+00 9.060778e+00 4.658220e-01 695 1 - 30 1.000000e+01 9.060778e+00 5.100660e-01 770 1 - 35 1.000000e+01 9.060778e+00 5.549750e-01 845 1 - 40 1.000000e+01 9.060778e+00 6.021819e-01 920 1 + 5 5.000000e+00 9.529528e+00 1.671011e-01 235 1 + 10 6.000000e+00 9.060778e+00 1.955440e-01 310 1 + 15 5.000000e+00 9.060778e+00 2.252462e-01 385 1 + 20 1.000000e+01 9.060778e+00 2.561061e-01 460 1 + 25 4.000000e+00 9.060778e+00 3.375940e-01 695 1 + 30 1.000000e+01 9.060778e+00 3.697150e-01 770 1 + 35 1.000000e+01 9.060778e+00 4.025002e-01 845 1 + 40 1.000000e+01 9.060778e+00 4.361911e-01 920 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 6.021819e-01 +total time (s) : 4.361911e-01 total solves : 920 best bound : 9.060778e+00 simulation ci : 7.500000e+00 ± 8.274012e-01 @@ -835,14 +832,14 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 8.000000e+00 7.326151e+00 2.472630e-01 510 1 - 20 9.000000e+00 6.895833e+00 3.471880e-01 720 1 - 30 2.000000e+00 6.837694e+00 5.872600e-01 1230 1 - 40 1.200000e+01 6.837694e+00 6.908810e-01 1440 1 - 46 2.000000e+00 6.837694e+00 7.547910e-01 1566 1 + 10 8.000000e+00 7.326151e+00 1.185770e-01 510 1 + 20 9.000000e+00 6.895833e+00 1.867509e-01 720 1 + 30 2.000000e+00 6.837694e+00 3.818009e-01 1230 1 + 40 1.200000e+01 6.837694e+00 4.552438e-01 1440 1 + 46 2.000000e+00 6.837694e+00 5.003798e-01 1566 1 
------------------------------------------------------------------- status : simulation_stopping -total time (s) : 7.547910e-01 +total time (s) : 5.003798e-01 total solves : 1566 best bound : 6.837694e+00 simulation ci : 6.043478e+00 ± 8.172778e-01 @@ -876,13 +873,13 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 2.299661e+06 2.074790e+06 1.013655e+00 920 1 - 20 1.049667e+06 2.078257e+06 1.455700e+00 1340 1 - 30 4.986275e+04 2.078257e+06 2.512509e+00 2260 1 - 40 4.965301e+04 2.078257e+06 2.988097e+00 2680 1 + 10 2.299661e+06 2.074790e+06 7.858400e-01 920 1 + 20 1.049667e+06 2.078257e+06 1.112053e+00 1340 1 + 30 4.986275e+04 2.078257e+06 1.931308e+00 2260 1 + 40 4.965301e+04 2.078257e+06 2.296945e+00 2680 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 2.988097e+00 +total time (s) : 2.296945e+00 total solves : 2680 best bound : 2.078257e+06 simulation ci : 1.628720e+06 ± 4.829316e+05 @@ -916,14 +913,14 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10L 2.299674e+06 2.079240e+06 1.848254e+00 920 1 - 20L 1.799859e+06 2.079457e+06 3.375839e+00 1340 1 - 30L 3.549779e+06 2.079457e+06 5.618303e+00 2260 1 - 40L 2.998530e+05 2.079457e+06 7.207332e+00 2680 1 - 43L 1.299465e+06 2.079457e+06 7.621044e+00 2806 1 + 10L 2.299674e+06 2.079240e+06 1.467866e+00 920 1 + 20L 1.799859e+06 2.079457e+06 2.664228e+00 1340 1 + 30L 3.549779e+06 2.079457e+06 4.478271e+00 2260 1 + 40L 2.998530e+05 2.079457e+06 5.799426e+00 2680 1 + 43L 1.299465e+06 2.079457e+06 6.154734e+00 2806 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 7.621044e+00 +total time (s) : 6.154734e+00 total solves : 2806 best bound : 2.079457e+06 simulation ci : 2.036666e+06 ± 5.358445e+05 @@ -955,12 +952,12 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 100 0.000000e+00 1.189487e+02 1.059837e+00 3552 1 - 200 2.500000e+00 1.191658e+02 1.498708e+00 5858 1 - 287 2.500000e+01 1.191667e+02 1.758297e+00 6763 1 + 100 0.000000e+00 1.189487e+02 7.221689e-01 3552 1 + 200 2.500000e+00 1.191658e+02 9.743721e-01 5858 1 + 287 2.500000e+01 1.191667e+02 1.101237e+00 6763 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 1.758297e+00 +total time (s) : 1.101237e+00 total solves : 6763 best bound : 1.191667e+02 simulation ci : 2.238676e+01 ± 3.878136e+00 @@ -992,12 +989,12 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 100 1.500000e+02 1.191155e+02 4.206419e-01 1912 1 - 200 2.500000e+00 1.191666e+02 8.327479e-01 3828 1 - 207 5.000000e+01 1.191666e+02 8.553789e-01 3901 1 + 100 1.500000e+02 1.191155e+02 2.564540e-01 1912 1 + 200 2.500000e+00 1.191666e+02 5.028541e-01 3828 1 + 207 5.000000e+01 1.191666e+02 5.154240e-01 3901 1 ------------------------------------------------------------------- status : 
simulation_stopping -total time (s) : 8.553789e-01 +total time (s) : 5.154240e-01 total solves : 3901 best bound : 1.191666e+02 simulation ci : 1.985507e+01 ± 4.295996e+00 @@ -1028,13 +1025,13 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 1.000000e+01 1.997380e+01 1.151390e-01 1002 1 - 20 2.800000e+01 2.000000e+01 1.654048e-01 1284 1 - 30 8.000000e+00 2.000000e+01 3.178349e-01 2282 1 - 40 2.000000e+00 2.000000e+01 3.560450e-01 2464 1 + 10 1.000000e+01 1.997380e+01 5.251503e-02 1002 1 + 20 2.800000e+01 2.000000e+01 8.010602e-02 1284 1 + 30 8.000000e+00 2.000000e+01 1.654611e-01 2282 1 + 40 2.000000e+00 2.000000e+01 1.871161e-01 2464 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 3.560450e-01 +total time (s) : 1.871161e-01 total solves : 2464 best bound : 2.000000e+01 simulation ci : 1.720000e+01 ± 4.196227e+00 @@ -1065,11 +1062,11 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 7.000000e+00 1.992188e+00 3.556967e-03 15 1 - 40 1.000000e+00 2.000000e+00 7.783103e-02 612 1 + 1 7.000000e+00 1.992188e+00 2.629042e-03 15 1 + 40 1.000000e+00 2.000000e+00 4.314399e-02 612 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 7.783103e-02 +total time (s) : 4.314399e-02 total solves : 612 best bound : 2.000000e+00 simulation ci : 2.025000e+00 ± 4.627975e-01 @@ -1102,82 +1099,118 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 3.412500e+00 5.484827e+00 3.432472e-01 1650 1 - 20 4.406250e+00 4.472007e+00 5.534871e-01 3000 1 - 30 4.875000e+00 4.446365e+00 7.405791e-01 4350 1 - 40 3.975000e+00 4.440303e+00 9.366941e-01 5700 1 - 50 4.062500e+00 4.438364e+00 1.197326e+00 7350 1 - 60 4.162500e+00 4.096084e+00 1.439869e+00 8700 1 - 70 5.575000e+00 4.089659e+00 1.656491e+00 10050 1 - 80 4.612500e+00 4.089204e+00 1.939506e+00 11700 1 - 90 4.312500e+00 4.088025e+00 2.164094e+00 13050 1 - 100 2.775000e+00 4.086832e+00 2.494948e+00 14700 1 - 110 4.375000e+00 4.086730e+00 2.715922e+00 16050 1 - 120 4.350000e+00 4.086313e+00 2.952486e+00 17400 1 - 130 2.987500e+00 4.086047e+00 3.180989e+00 18750 1 - 140 4.268750e+00 4.085989e+00 3.416775e+00 20100 1 - 150 3.631250e+00 4.085945e+00 3.667883e+00 21450 1 - 160 4.475000e+00 4.085747e+00 3.913039e+00 22800 1 - 170 4.175000e+00 4.085718e+00 4.148523e+00 24150 1 - 180 4.662500e+00 4.085706e+00 4.386923e+00 25500 1 - 190 3.893750e+00 4.085706e+00 4.628960e+00 26850 1 - 200 3.068750e+00 4.085689e+00 4.978849e+00 28500 1 - 210 2.950000e+00 4.085670e+00 5.236434e+00 29850 1 - 220 4.312500e+00 4.085656e+00 5.499782e+00 31200 1 - 230 3.975000e+00 4.085385e+00 5.784419e+00 32550 1 - 240 5.112500e+00 4.085365e+00 6.080950e+00 33900 1 - 250 3.350000e+00 4.085331e+00 6.345343e+00 35250 1 - 260 2.812500e+00 4.085150e+00 6.599374e+00 36600 1 - 270 5.000000e+00 4.085119e+00 6.876791e+00 37950 1 - 280 4.781250e+00 4.085051e+00 7.154335e+00 39300 1 - 290 3.125000e+00 4.084915e+00 7.449913e+00 40650 1 - 300 4.875000e+00 4.084899e+00 7.835534e+00 42300 1 - 
310 4.781250e+00 4.084887e+00 8.113704e+00 43650 1 - 320 3.093750e+00 4.084887e+00 8.400054e+00 45000 1 - 330 5.212500e+00 4.084874e+00 8.701667e+00 46350 1 - 340 4.375000e+00 4.084871e+00 8.984442e+00 47700 1 - 350 3.612500e+00 4.084867e+00 9.255274e+00 49050 1 - 360 5.125000e+00 4.084862e+00 9.540174e+00 50400 1 - 370 2.775000e+00 4.084862e+00 9.833343e+00 51750 1 - 380 4.375000e+00 4.084862e+00 1.014810e+01 53100 1 - 390 5.087500e+00 4.084862e+00 1.042538e+01 54450 1 - 400 2.925000e+00 4.084862e+00 1.080535e+01 56100 1 - 410 5.250000e+00 4.084862e+00 1.109867e+01 57450 1 - 420 3.862500e+00 4.084862e+00 1.140318e+01 58800 1 - 430 4.500000e+00 4.084862e+00 1.169591e+01 60150 1 - 440 2.737500e+00 4.084862e+00 1.197902e+01 61500 1 - 450 5.375000e+00 4.084862e+00 1.227756e+01 62850 1 - 460 3.125000e+00 4.084862e+00 1.259802e+01 64200 1 - 470 5.000000e+00 4.084832e+00 1.290254e+01 65550 1 - 480 5.000000e+00 4.084824e+00 1.318937e+01 66900 1 - 490 4.562500e+00 4.084824e+00 1.345930e+01 68250 1 - 500 4.500000e+00 4.084824e+00 1.389648e+01 69900 1 - 510 3.850000e+00 4.084824e+00 1.419217e+01 71250 1 - 520 3.493750e+00 4.084824e+00 1.449167e+01 72600 1 - 530 4.712500e+00 4.084819e+00 1.480202e+01 73950 1 - 540 5.100000e+00 4.084816e+00 1.511662e+01 75300 1 - 550 4.062500e+00 4.084816e+00 1.543988e+01 76650 1 - 560 4.412500e+00 4.084816e+00 1.575876e+01 78000 1 - 570 4.487500e+00 4.084816e+00 1.609040e+01 79350 1 - 580 4.862500e+00 4.084816e+00 1.639804e+01 80700 1 - 590 4.000000e+00 4.084816e+00 1.672969e+01 82050 1 - 600 2.950000e+00 4.084814e+00 1.716352e+01 83700 1 - 610 2.812500e+00 4.084812e+00 1.746830e+01 85050 1 - 620 4.425000e+00 4.084806e+00 1.778480e+01 86400 1 - 630 5.175000e+00 4.084804e+00 1.809781e+01 87750 1 - 640 4.556250e+00 4.084800e+00 1.839636e+01 89100 1 - 650 2.818750e+00 4.084796e+00 1.870496e+01 90450 1 - 660 3.187500e+00 4.084794e+00 1.900543e+01 91800 1 - 670 4.312500e+00 4.084765e+00 1.933784e+01 93150 1 - 680 5.750000e+00 4.084762e+00 1.963982e+01 94500 1 - 690 4.425000e+00 4.084754e+00 1.994470e+01 95850 1 - 692 5.250000e+00 4.084754e+00 2.001155e+01 96120 1 + 10 3.412500e+00 5.484827e+00 2.254689e-01 1650 1 + 20 4.406250e+00 4.472007e+00 3.383160e-01 3000 1 + 30 4.875000e+00 4.446365e+00 4.322689e-01 4350 1 + 40 3.975000e+00 4.440303e+00 5.358810e-01 5700 1 + 50 4.062500e+00 4.438364e+00 6.833451e-01 7350 1 + 60 4.162500e+00 4.096084e+00 7.914360e-01 8700 1 + 70 5.575000e+00 4.089659e+00 9.444220e-01 10050 1 + 80 4.612500e+00 4.089204e+00 1.110530e+00 11700 1 + 90 4.312500e+00 4.088025e+00 1.240632e+00 13050 1 + 100 2.775000e+00 4.086832e+00 1.411161e+00 14700 1 + 110 4.375000e+00 4.086730e+00 1.570415e+00 16050 1 + 120 4.350000e+00 4.086313e+00 1.705659e+00 17400 1 + 130 2.987500e+00 4.086047e+00 1.836985e+00 18750 1 + 140 4.268750e+00 4.085989e+00 1.978179e+00 20100 1 + 150 3.631250e+00 4.085945e+00 2.130887e+00 21450 1 + 160 4.475000e+00 4.085747e+00 2.269605e+00 22800 1 + 170 4.175000e+00 4.085718e+00 2.408035e+00 24150 1 + 180 4.662500e+00 4.085706e+00 2.547780e+00 25500 1 + 190 3.893750e+00 4.085706e+00 2.693811e+00 26850 1 + 200 3.068750e+00 4.085689e+00 2.910456e+00 28500 1 + 210 2.950000e+00 4.085670e+00 3.060071e+00 29850 1 + 220 4.312500e+00 4.085656e+00 3.217498e+00 31200 1 + 230 3.975000e+00 4.085385e+00 3.372842e+00 32550 1 + 240 5.112500e+00 4.085365e+00 3.535348e+00 33900 1 + 250 3.350000e+00 4.085331e+00 3.712220e+00 35250 1 + 260 2.812500e+00 4.085150e+00 3.870849e+00 36600 1 + 270 5.000000e+00 4.085119e+00 4.031426e+00 37950 1 + 280 4.781250e+00 
4.085051e+00 4.200393e+00 39300 1 + 290 3.125000e+00 4.084915e+00 4.390983e+00 40650 1 + 300 4.875000e+00 4.084899e+00 4.613061e+00 42300 1 + 310 4.781250e+00 4.084887e+00 4.782663e+00 43650 1 + 320 3.093750e+00 4.084887e+00 4.958242e+00 45000 1 + 330 5.212500e+00 4.084874e+00 5.152423e+00 46350 1 + 340 4.375000e+00 4.084871e+00 5.326854e+00 47700 1 + 350 3.612500e+00 4.084867e+00 5.496131e+00 49050 1 + 360 5.125000e+00 4.084862e+00 5.673648e+00 50400 1 + 370 2.775000e+00 4.084862e+00 5.843172e+00 51750 1 + 380 4.375000e+00 4.084862e+00 6.044040e+00 53100 1 + 390 5.087500e+00 4.084862e+00 6.215716e+00 54450 1 + 400 2.925000e+00 4.084862e+00 6.445403e+00 56100 1 + 410 5.250000e+00 4.084862e+00 6.621290e+00 57450 1 + 420 3.862500e+00 4.084862e+00 6.815795e+00 58800 1 + 430 4.500000e+00 4.084862e+00 6.997721e+00 60150 1 + 440 2.737500e+00 4.084862e+00 7.180048e+00 61500 1 + 450 5.375000e+00 4.084862e+00 7.362429e+00 62850 1 + 460 3.125000e+00 4.084862e+00 7.542587e+00 64200 1 + 470 5.000000e+00 4.084832e+00 7.754080e+00 65550 1 + 480 5.000000e+00 4.084824e+00 7.932283e+00 66900 1 + 490 4.562500e+00 4.084824e+00 8.097970e+00 68250 1 + 500 4.500000e+00 4.084824e+00 8.343433e+00 69900 1 + 510 3.850000e+00 4.084824e+00 8.557755e+00 71250 1 + 520 3.493750e+00 4.084824e+00 8.743363e+00 72600 1 + 530 4.712500e+00 4.084819e+00 8.935032e+00 73950 1 + 540 5.100000e+00 4.084816e+00 9.129475e+00 75300 1 + 550 4.062500e+00 4.084816e+00 9.338104e+00 76650 1 + 560 4.412500e+00 4.084816e+00 9.537376e+00 78000 1 + 570 4.487500e+00 4.084816e+00 9.731502e+00 79350 1 + 580 4.862500e+00 4.084816e+00 9.921464e+00 80700 1 + 590 4.000000e+00 4.084816e+00 1.013443e+01 82050 1 + 600 2.950000e+00 4.084814e+00 1.038858e+01 83700 1 + 610 2.812500e+00 4.084812e+00 1.058024e+01 85050 1 + 620 4.425000e+00 4.084806e+00 1.077855e+01 86400 1 + 630 5.175000e+00 4.084804e+00 1.098634e+01 87750 1 + 640 4.556250e+00 4.084800e+00 1.117329e+01 89100 1 + 650 2.818750e+00 4.084796e+00 1.136360e+01 90450 1 + 660 3.187500e+00 4.084794e+00 1.154789e+01 91800 1 + 670 4.312500e+00 4.084765e+00 1.175926e+01 93150 1 + 680 5.750000e+00 4.084762e+00 1.195140e+01 94500 1 + 690 4.425000e+00 4.084754e+00 1.214363e+01 95850 1 + 700 3.975000e+00 4.084754e+00 1.241234e+01 97500 1 + 710 3.693750e+00 4.084754e+00 1.262139e+01 98850 1 + 720 2.600000e+00 4.084753e+00 1.280806e+01 100200 1 + 730 3.875000e+00 4.084743e+00 1.300440e+01 101550 1 + 740 4.387500e+00 4.084743e+00 1.320735e+01 102900 1 + 750 4.875000e+00 4.084743e+00 1.339323e+01 104250 1 + 760 4.675000e+00 4.084743e+00 1.360433e+01 105600 1 + 770 4.275000e+00 4.084743e+00 1.378093e+01 106950 1 + 780 3.512500e+00 4.084743e+00 1.395776e+01 108300 1 + 790 4.881250e+00 4.084743e+00 1.415336e+01 109650 1 + 800 5.225000e+00 4.084743e+00 1.443056e+01 111300 1 + 810 3.493750e+00 4.084743e+00 1.463764e+01 112650 1 + 820 4.875000e+00 4.084743e+00 1.484532e+01 114000 1 + 830 4.125000e+00 4.084743e+00 1.504573e+01 115350 1 + 840 3.925000e+00 4.084743e+00 1.526631e+01 116700 1 + 850 3.900000e+00 4.084743e+00 1.545965e+01 118050 1 + 860 3.450000e+00 4.084743e+00 1.565771e+01 119400 1 + 870 4.437500e+00 4.084743e+00 1.585214e+01 120750 1 + 880 4.106250e+00 4.084743e+00 1.606597e+01 122100 1 + 890 4.518750e+00 4.084743e+00 1.625164e+01 123450 1 + 900 3.750000e+00 4.084743e+00 1.653199e+01 125100 1 + 910 4.112500e+00 4.084743e+00 1.673681e+01 126450 1 + 920 4.762500e+00 4.084743e+00 1.697430e+01 127800 1 + 930 5.525000e+00 4.084743e+00 1.718263e+01 129150 1 + 940 3.325000e+00 4.084743e+00 1.738831e+01 130500 1 
+ 950 4.475000e+00 4.084743e+00 1.759303e+01 131850 1 + 960 4.918750e+00 4.084743e+00 1.782661e+01 133200 1 + 970 3.856250e+00 4.084743e+00 1.802650e+01 134550 1 + 980 3.131250e+00 4.084743e+00 1.823838e+01 135900 1 + 990 2.900000e+00 4.084743e+00 1.844719e+01 137250 1 + 1000 4.900000e+00 4.084743e+00 1.875922e+01 138900 1 + 1010 3.437500e+00 4.084743e+00 1.897145e+01 140250 1 + 1020 4.350000e+00 4.084743e+00 1.918633e+01 141600 1 + 1030 4.100000e+00 4.084743e+00 1.941215e+01 142950 1 + 1040 2.437500e+00 4.084743e+00 1.962467e+01 144300 1 + 1050 4.418750e+00 4.084743e+00 1.983910e+01 145650 1 + 1058 3.950000e+00 4.084743e+00 2.000340e+01 146730 1 ------------------------------------------------------------------- status : time_limit -total time (s) : 2.001155e+01 -total solves : 96120 -best bound : 4.084754e+00 -simulation ci : 4.105699e+00 ± 5.584226e-02 +total time (s) : 2.000340e+01 +total solves : 146730 +best bound : 4.084743e+00 +simulation ci : 4.101235e+00 ± 4.453673e-02 numeric issues : 0 ------------------------------------------------------------------- @@ -1207,25 +1240,26 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 4.058125e+00 4.910287e+00 4.178090e-01 1650 1 - 20 4.456250e+00 4.054763e+00 1.031706e+00 3000 1 - 30 3.600000e+00 4.051318e+00 1.745694e+00 4350 1 - 40 4.781250e+00 4.048923e+00 2.988870e+00 6000 1 - 50 5.343750e+00 4.042879e+00 4.143216e+00 7350 1 - 60 3.312500e+00 4.041955e+00 6.177441e+00 9000 1 - 70 4.875000e+00 4.040976e+00 7.572112e+00 10350 1 - 80 3.287500e+00 4.040025e+00 1.005774e+01 12000 1 - 90 3.075000e+00 4.039898e+00 1.198613e+01 13350 1 - 100 5.037500e+00 4.039822e+00 1.512891e+01 15000 1 - 110 3.937500e+00 4.039709e+00 1.750257e+01 16350 1 - 120 4.250000e+00 4.039709e+00 1.995563e+01 17700 1 - 121 5.625000e+00 4.039709e+00 2.020632e+01 17835 1 + 10 4.875000e+00 4.086989e+00 2.782941e-01 1650 1 + 20 5.625000e+00 4.067007e+00 7.786269e-01 3000 1 + 30 4.275000e+00 4.044851e+00 1.437763e+00 4350 1 + 40 6.000000e+00 4.040268e+00 2.244983e+00 5700 1 + 50 5.225000e+00 4.037899e+00 3.653688e+00 7350 1 + 60 4.425000e+00 4.037712e+00 4.793282e+00 8700 1 + 70 4.612500e+00 4.037493e+00 6.681571e+00 10350 1 + 80 4.112500e+00 4.037498e+00 8.147209e+00 11700 1 + 90 4.281250e+00 4.037323e+00 1.045427e+01 13350 1 + 100 3.450000e+00 4.037312e+00 1.215458e+01 14700 1 + 110 3.668750e+00 4.037275e+00 1.410500e+01 16050 1 + 120 3.187500e+00 4.037273e+00 1.613232e+01 17400 1 + 130 4.431250e+00 4.037268e+00 1.833190e+01 18750 1 + 138 3.068750e+00 4.037268e+00 2.010973e+01 19830 1 ------------------------------------------------------------------- status : time_limit -total time (s) : 2.020632e+01 -total solves : 17835 -best bound : 4.039709e+00 -simulation ci : 4.121193e+00 ± 1.328739e-01 +total time (s) : 2.010973e+01 +total solves : 19830 +best bound : 4.037268e+00 +simulation ci : 4.112689e+00 ± 1.244438e-01 numeric issues : 0 ------------------------------------------------------------------- @@ -1256,21 +1290,18 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 3.750267e+00 1.163701e+00 5.554860e-01 1680 1 - 20 3.644642e+00 1.164225e+00 7.315691e-01 2560 1 - 30 3.064816e+00 1.164649e+00 1.303535e+00 4240 1 - 40 3.096506e+00 
1.164760e+00 1.473254e+00 5120 1 - 50 2.765327e+00 1.164760e+00 2.104778e+00 6800 1 - 60 3.179709e+00 1.164760e+00 2.295238e+00 7680 1 - 70 2.897036e+00 1.164760e+00 2.863767e+00 9360 1 - 80 3.339910e+00 1.164760e+00 3.082773e+00 10240 1 - 86 3.218883e+00 1.164760e+00 3.189717e+00 10768 1 + 10 2.836537e+00 1.166329e+00 3.452880e-01 1680 1 + 20 3.351530e+00 1.167299e+00 4.753141e-01 2560 1 + 30 3.895803e+00 1.167410e+00 8.361731e-01 4240 1 + 40 2.911076e+00 1.167410e+00 9.411321e-01 5120 1 + 50 3.906713e+00 1.167410e+00 1.314685e+00 6800 1 + 60 3.431394e+00 1.167410e+00 1.422767e+00 7680 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 3.189717e+00 -total solves : 10768 -best bound : 1.164760e+00 -simulation ci : 3.201625e+00 ± 7.807606e-02 +total time (s) : 1.422767e+00 +total solves : 7680 +best bound : 1.167410e+00 +simulation ci : 3.208796e+00 ± 1.181263e-01 numeric issues : 0 ------------------------------------------------------------------- @@ -1302,16 +1333,16 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 -4.700000e+01 -5.809615e+01 6.410384e-02 78 1 - 20 -9.800000e+01 -5.809615e+01 1.340530e-01 148 1 - 30 -4.000000e+01 -5.809615e+01 2.188058e-01 226 1 - 40 -4.700000e+01 -5.809615e+01 2.932930e-01 296 1 + 10 -4.700000e+01 -5.809615e+01 8.398294e-02 78 1 + 20 -4.000000e+01 -5.809615e+01 1.402409e-01 148 1 + 30 -9.800000e+01 -5.809615e+01 2.061980e-01 226 1 + 40 -4.000000e+01 -5.809615e+01 2.635651e-01 296 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 2.932930e-01 +total time (s) : 2.635651e-01 total solves : 296 best bound : -5.809615e+01 -simulation ci : -6.286250e+01 ± 8.312391e+00 +simulation ci : -5.206250e+01 ± 6.856606e+00 numeric issues : 0 ------------------------------------------------------------------- @@ -1343,16 +1374,16 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 -4.700000e+01 -6.196125e+01 7.824302e-02 138 1 - 20 -9.800000e+01 -6.196125e+01 1.504829e-01 258 1 - 30 -4.000000e+01 -6.196125e+01 2.505190e-01 396 1 - 40 -4.700000e+01 -6.196125e+01 3.234499e-01 516 1 + 10 -6.300000e+01 -6.196125e+01 5.639505e-02 138 1 + 20 -4.000000e+01 -6.196125e+01 1.121299e-01 258 1 + 30 -4.000000e+01 -6.196125e+01 1.874909e-01 396 1 + 40 -4.700000e+01 -6.196125e+01 2.442739e-01 516 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 3.234499e-01 +total time (s) : 2.442739e-01 total solves : 516 best bound : -6.196125e+01 -simulation ci : -5.676250e+01 ± 6.342977e+00 +simulation ci : -5.711250e+01 ± 5.414443e+00 numeric issues : 0 ------------------------------------------------------------------- @@ -1384,16 +1415,16 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 -4.000000e+01 -6.546793e+01 1.386349e-01 462 1 - 20 -8.200000e+01 -6.546793e+01 2.457199e-01 852 1 - 30 -4.700000e+01 -6.546793e+01 4.443669e-01 1314 1 - 40 -4.000000e+01 -6.546793e+01 5.555439e-01 1704 1 + 10 
-5.600000e+01 -6.546793e+01 9.024096e-02 462 1 + 20 -5.600000e+01 -6.546793e+01 1.641209e-01 852 1 + 30 -7.000000e+01 -6.546793e+01 3.035190e-01 1314 1 + 40 -8.200000e+01 -6.546793e+01 3.777130e-01 1704 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 5.555439e-01 +total time (s) : 3.777130e-01 total solves : 1704 best bound : -6.546793e+01 -simulation ci : -5.901250e+01 ± 5.394280e+00 +simulation ci : -6.286250e+01 ± 4.631696e+00 numeric issues : 0 ------------------------------------------------------------------- @@ -1424,14 +1455,14 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1L 6.000000e+00 1.414815e+01 7.273006e-02 11 1 - 40L 6.000000e+00 8.000000e+00 6.659911e-01 602 1 + 1L 1.200000e+01 1.414815e+01 6.028605e-02 11 1 + 40L 6.000000e+00 8.000000e+00 5.141962e-01 602 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 6.659911e-01 +total time (s) : 5.141962e-01 total solves : 602 best bound : 8.000000e+00 -simulation ci : 7.875000e+00 ± 9.341687e-01 +simulation ci : 8.625000e+00 ± 8.978763e-01 numeric issues : 0 ------------------------------------------------------------------- @@ -1462,14 +1493,14 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 -9.800000e+04 4.922260e+05 1.489201e-01 6 1 - 20 1.093500e+05 1.083900e+05 1.828170e-01 126 1 + 1 -9.800000e+04 4.922260e+05 1.193850e-01 6 1 + 20 1.093500e+05 1.083900e+05 1.346772e-01 126 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 1.828170e-01 +total time (s) : 1.346772e-01 total solves : 126 best bound : 1.083900e+05 -simulation ci : 5.235835e+04 ± 2.689531e+04 +simulation ci : 9.548810e+04 ± 3.405034e+04 numeric issues : 0 ------------------------------------------------------------------- diff --git a/dev/examples/SDDP_0.0.log b/dev/examples/SDDP_0.0.log index c08860098..143fab5e9 100644 --- a/dev/examples/SDDP_0.0.log +++ b/dev/examples/SDDP_0.0.log @@ -19,11 +19,11 @@ subproblem structure ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 0.000000e+00 0.000000e+00 1.277995e-02 36 1 - 10 0.000000e+00 0.000000e+00 9.096503e-02 660 1 + 1 0.000000e+00 0.000000e+00 9.142876e-03 36 1 + 10 0.000000e+00 0.000000e+00 7.928395e-02 660 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 9.096503e-02 +total time (s) : 7.928395e-02 total solves : 660 best bound : 0.000000e+00 simulation ci : 0.000000e+00 ± 0.000000e+00 diff --git a/dev/examples/SDDP_0.0625.log b/dev/examples/SDDP_0.0625.log index a99356ec0..f456c12d8 100644 --- a/dev/examples/SDDP_0.0625.log +++ b/dev/examples/SDDP_0.0625.log @@ -20,11 +20,11 @@ subproblem structure ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 7.187500e+01 5.937500e+01 5.764961e-03 6075 1 - 10 7.812500e+01 5.938557e+01 1.271439e-01 6699 1 + 1 
7.187500e+01 5.937500e+01 3.389835e-03 6075 1 + 10 7.812500e+01 5.938557e+01 6.928802e-02 6699 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 1.271439e-01 +total time (s) : 6.928802e-02 total solves : 6699 best bound : 5.938557e+01 simulation ci : 6.668502e+01 ± 1.171871e+01 diff --git a/dev/examples/SDDP_0.125.log b/dev/examples/SDDP_0.125.log index f7c8e512c..5e63c63ed 100644 --- a/dev/examples/SDDP_0.125.log +++ b/dev/examples/SDDP_0.125.log @@ -20,11 +20,11 @@ subproblem structure ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 1.300000e+02 1.129545e+02 5.549908e-03 3391 1 - 10 8.812500e+01 1.129771e+02 1.324220e-01 4015 1 + 1 1.300000e+02 1.129545e+02 3.108978e-03 3391 1 + 10 8.812500e+01 1.129771e+02 6.695819e-02 4015 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 1.324220e-01 +total time (s) : 6.695819e-02 total solves : 4015 best bound : 1.129771e+02 simulation ci : 1.146875e+02 ± 2.642851e+01 diff --git a/dev/examples/SDDP_0.25.log b/dev/examples/SDDP_0.25.log index d1a3c0801..6b5e0dc5e 100644 --- a/dev/examples/SDDP_0.25.log +++ b/dev/examples/SDDP_0.25.log @@ -20,11 +20,11 @@ subproblem structure ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 2.250000e+02 2.030682e+02 5.362034e-03 2049 1 - 10 1.487500e+02 2.052545e+02 1.354401e-01 2673 1 + 1 2.250000e+02 2.030682e+02 3.034115e-03 2049 1 + 10 1.487500e+02 2.052545e+02 6.790113e-02 2673 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 1.354401e-01 +total time (s) : 6.790113e-02 total solves : 2673 best bound : 2.052545e+02 simulation ci : 2.246299e+02 ± 2.511808e+01 diff --git a/dev/examples/SDDP_0.375.log b/dev/examples/SDDP_0.375.log index 50fbca62a..7da864a56 100644 --- a/dev/examples/SDDP_0.375.log +++ b/dev/examples/SDDP_0.375.log @@ -20,11 +20,11 @@ subproblem structure ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 1.562500e+02 2.773056e+02 6.232977e-03 4062 1 - 10 3.625000e+02 2.796065e+02 2.071030e-01 4686 1 + 1 1.562500e+02 2.773056e+02 3.480196e-03 4062 1 + 10 3.625000e+02 2.796065e+02 6.888819e-02 4686 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 2.071030e-01 +total time (s) : 6.888819e-02 total solves : 4686 best bound : 2.796065e+02 simulation ci : 2.556250e+02 ± 6.667722e+01 diff --git a/dev/examples/SDDP_0.5.log b/dev/examples/SDDP_0.5.log index 2250adc09..0808936ea 100644 --- a/dev/examples/SDDP_0.5.log +++ b/dev/examples/SDDP_0.5.log @@ -20,11 +20,11 @@ subproblem structure ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 5.675000e+02 3.346337e+02 5.506992e-03 1378 1 - 10 4.370755e+02 3.468727e+02 1.294410e-01 2002 1 + 1 5.675000e+02 3.346337e+02 3.026962e-03 1378 1 + 10 4.370755e+02 3.468727e+02 6.502700e-02 2002 1 ------------------------------------------------------------------- status : iteration_limit 
-total time (s) : 1.294410e-01 +total time (s) : 6.502700e-02 total solves : 2002 best bound : 3.468727e+02 simulation ci : 3.604575e+02 ± 7.003395e+01 diff --git a/dev/examples/SDDP_0.625.log b/dev/examples/SDDP_0.625.log index 49b34e9cb..c301d74a0 100644 --- a/dev/examples/SDDP_0.625.log +++ b/dev/examples/SDDP_0.625.log @@ -20,11 +20,11 @@ subproblem structure ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 6.775000e+02 4.069295e+02 6.358147e-03 4733 1 - 10 6.445745e+02 4.078859e+02 1.411211e-01 5357 1 + 1 6.775000e+02 4.069295e+02 3.355026e-03 4733 1 + 10 6.445745e+02 4.078859e+02 1.111910e-01 5357 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 1.411211e-01 +total time (s) : 1.111910e-01 total solves : 5357 best bound : 4.078859e+02 simulation ci : 5.066792e+02 ± 1.075729e+02 diff --git a/dev/examples/SDDP_0.75.log b/dev/examples/SDDP_0.75.log index ab44dcd6b..5d0949192 100644 --- a/dev/examples/SDDP_0.75.log +++ b/dev/examples/SDDP_0.75.log @@ -20,11 +20,11 @@ subproblem structure ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 5.658333e+02 4.640036e+02 7.082939e-03 2720 1 - 10 5.225000e+02 4.660290e+02 1.464138e-01 3344 1 + 1 5.658333e+02 4.640036e+02 3.440142e-03 2720 1 + 10 5.225000e+02 4.660290e+02 7.340717e-02 3344 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 1.464138e-01 +total time (s) : 7.340717e-02 total solves : 3344 best bound : 4.660290e+02 simulation ci : 5.192132e+02 ± 9.666502e+01 diff --git a/dev/examples/SDDP_0.875.log b/dev/examples/SDDP_0.875.log index 7b683eb5e..2f5fa941b 100644 --- a/dev/examples/SDDP_0.875.log +++ b/dev/examples/SDDP_0.875.log @@ -20,11 +20,11 @@ subproblem structure ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 2.506250e+02 5.205513e+02 7.408857e-03 5404 1 - 10 2.631250e+02 5.213230e+02 1.474149e-01 6028 1 + 1 2.506250e+02 5.205513e+02 4.287958e-03 5404 1 + 10 2.631250e+02 5.213230e+02 8.214092e-02 6028 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 1.474149e-01 +total time (s) : 8.214092e-02 total solves : 6028 best bound : 5.213230e+02 simulation ci : 3.949359e+02 ± 1.019543e+02 diff --git a/dev/examples/SDDP_1.0.log b/dev/examples/SDDP_1.0.log index 317c7c086..3d91ce8ab 100644 --- a/dev/examples/SDDP_1.0.log +++ b/dev/examples/SDDP_1.0.log @@ -20,11 +20,11 @@ subproblem structure ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 8.650000e+02 5.500000e+02 5.427122e-03 707 1 - 10 2.950000e+02 5.736664e+02 1.132510e-01 1331 1 + 1 8.650000e+02 5.500000e+02 2.943039e-03 707 1 + 10 2.950000e+02 5.736664e+02 5.830884e-02 1331 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 1.132510e-01 +total time (s) : 5.830884e-02 total solves : 1331 best bound : 5.736664e+02 simulation ci : 5.385000e+02 ± 1.500112e+02 diff --git 
a/dev/examples/StochDynamicProgramming.jl_multistock/index.html b/dev/examples/StochDynamicProgramming.jl_multistock/index.html index 77aab5124..83d93f543 100644 --- a/dev/examples/StochDynamicProgramming.jl_multistock/index.html +++ b/dev/examples/StochDynamicProgramming.jl_multistock/index.html @@ -76,21 +76,21 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 -4.803403e+00 -4.448049e+00 4.677169e-01 1900 1 - 20 -3.800496e+00 -4.400793e+00 6.814821e-01 3300 1 - 30 -4.183675e+00 -4.383006e+00 9.143960e-01 4700 1 - 40 -4.471246e+00 -4.372586e+00 1.160897e+00 6100 1 - 50 -4.387360e+00 -4.364331e+00 1.413388e+00 7500 1 - 60 -4.955582e+00 -4.359563e+00 1.733061e+00 8900 1 - 70 -4.008356e+00 -4.357128e+00 1.990283e+00 10300 1 - 80 -4.078852e+00 -4.355430e+00 2.387869e+00 12200 1 - 90 -4.083367e+00 -4.353973e+00 2.664239e+00 13600 1 - 100 -4.610569e+00 -4.352379e+00 2.982219e+00 15000 1 + 10 -4.803403e+00 -4.448049e+00 2.795870e-01 1900 1 + 20 -3.800496e+00 -4.400793e+00 4.447761e-01 3300 1 + 30 -4.183675e+00 -4.383006e+00 5.797939e-01 4700 1 + 40 -4.471246e+00 -4.372586e+00 7.217591e-01 6100 1 + 50 -4.387360e+00 -4.364331e+00 8.710771e-01 7500 1 + 60 -4.955582e+00 -4.359563e+00 1.026326e+00 8900 1 + 70 -4.008356e+00 -4.357128e+00 1.221396e+00 10300 1 + 80 -4.078852e+00 -4.355430e+00 1.464553e+00 12200 1 + 90 -4.083367e+00 -4.353973e+00 1.629871e+00 13600 1 + 100 -4.610569e+00 -4.352379e+00 1.796398e+00 15000 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 2.982219e+00 +total time (s) : 1.796398e+00 total solves : 15000 best bound : -4.352379e+00 simulation ci : -4.343962e+00 ± 8.572490e-02 numeric issues : 0 --------------------------------------------------------------------

This page was generated using Literate.jl.

+-------------------------------------------------------------------

This page was generated using Literate.jl.

diff --git a/dev/examples/StochDynamicProgramming.jl_stock/index.html b/dev/examples/StochDynamicProgramming.jl_stock/index.html index dac6023ec..f0d63c9c7 100644 --- a/dev/examples/StochDynamicProgramming.jl_stock/index.html +++ b/dev/examples/StochDynamicProgramming.jl_stock/index.html @@ -53,20 +53,20 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 -1.433090e+00 -1.475071e+00 1.324730e-01 1050 1 - 20 -1.332851e+00 -1.471658e+00 2.114601e-01 1600 1 - 30 -1.749788e+00 -1.471489e+00 3.764710e-01 2650 1 - 40 -1.708433e+00 -1.471241e+00 4.645541e-01 3200 1 - 50 -1.556006e+00 -1.471138e+00 7.030370e-01 4250 1 - 60 -1.439099e+00 -1.471079e+00 7.905171e-01 4800 1 - 70 -1.518223e+00 -1.471079e+00 9.709320e-01 5850 1 - 80 -1.638423e+00 -1.471075e+00 1.058508e+00 6400 1 - 85 -1.552892e+00 -1.471075e+00 1.103363e+00 6675 1 + 10 -1.433090e+00 -1.475071e+00 6.740212e-02 1050 1 + 20 -1.332851e+00 -1.471658e+00 1.067061e-01 1600 1 + 30 -1.749788e+00 -1.471489e+00 1.950490e-01 2650 1 + 40 -1.708433e+00 -1.471241e+00 2.398951e-01 3200 1 + 50 -1.556006e+00 -1.471138e+00 3.334711e-01 4250 1 + 60 -1.439099e+00 -1.471079e+00 3.828590e-01 4800 1 + 70 -1.518223e+00 -1.471079e+00 5.200610e-01 5850 1 + 80 -1.638423e+00 -1.471075e+00 5.707490e-01 6400 1 + 85 -1.552892e+00 -1.471075e+00 5.964820e-01 6675 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 1.103363e+00 +total time (s) : 5.964820e-01 total solves : 6675 best bound : -1.471075e+00 simulation ci : -1.456520e+00 ± 4.264305e-02 numeric issues : 0 --------------------------------------------------------------------

This page was generated using Literate.jl.

+-------------------------------------------------------------------

This page was generated using Literate.jl.

diff --git a/dev/examples/StructDualDynProg.jl_prob5.2_2stages/index.html b/dev/examples/StructDualDynProg.jl_prob5.2_2stages/index.html index e5c20d5c3..7f4a9adee 100644 --- a/dev/examples/StructDualDynProg.jl_prob5.2_2stages/index.html +++ b/dev/examples/StructDualDynProg.jl_prob5.2_2stages/index.html @@ -81,16 +81,16 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 3.455904e+05 3.147347e+05 1.321006e-02 54 1 - 20 3.336455e+05 3.402383e+05 2.419806e-02 104 1 - 30 3.337559e+05 3.403155e+05 3.792405e-02 158 1 - 40 3.337559e+05 3.403155e+05 5.038786e-02 208 1 - 48 3.337559e+05 3.403155e+05 6.127095e-02 248 1 + 10 3.455904e+05 3.147347e+05 8.907795e-03 54 1 + 20 3.336455e+05 3.402383e+05 1.595378e-02 104 1 + 30 3.337559e+05 3.403155e+05 2.475882e-02 158 1 + 40 3.337559e+05 3.403155e+05 3.307700e-02 208 1 + 48 3.337559e+05 3.403155e+05 4.082084e-02 248 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 6.127095e-02 +total time (s) : 4.082084e-02 total solves : 248 best bound : 3.403155e+05 simulation ci : 1.298476e+08 ± 1.785863e+08 numeric issues : 0 --------------------------------------------------------------------

This page was generated using Literate.jl.

+-------------------------------------------------------------------

This page was generated using Literate.jl.

diff --git a/dev/examples/StructDualDynProg.jl_prob5.2_3stages/index.html b/dev/examples/StructDualDynProg.jl_prob5.2_3stages/index.html index b23b2d533..8fc3d822c 100644 --- a/dev/examples/StructDualDynProg.jl_prob5.2_3stages/index.html +++ b/dev/examples/StructDualDynProg.jl_prob5.2_3stages/index.html @@ -77,16 +77,16 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 4.403329e+05 3.509666e+05 2.340293e-02 92 1 - 20 4.055335e+05 4.054833e+05 4.399490e-02 172 1 - 30 3.959476e+05 4.067125e+05 6.681895e-02 264 1 - 40 3.959476e+05 4.067125e+05 9.112597e-02 344 1 - 47 4.497721e+05 4.067125e+05 1.094100e-01 400 1 + 10 4.403329e+05 3.509666e+05 1.446986e-02 92 1 + 20 4.055335e+05 4.054833e+05 2.680182e-02 172 1 + 30 3.959476e+05 4.067125e+05 4.148293e-02 264 1 + 40 3.959476e+05 4.067125e+05 5.748487e-02 344 1 + 47 4.497721e+05 4.067125e+05 6.961894e-02 400 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 1.094100e-01 +total time (s) : 6.961894e-02 total solves : 400 best bound : 4.067125e+05 simulation ci : 2.870354e+07 ± 3.809366e+07 numeric issues : 0 --------------------------------------------------------------------

This page was generated using Literate.jl.

+-------------------------------------------------------------------

This page was generated using Literate.jl.

diff --git a/dev/examples/agriculture_mccardle_farm/index.html b/dev/examples/agriculture_mccardle_farm/index.html index 6508635c3..e407d5ca5 100644 --- a/dev/examples/agriculture_mccardle_farm/index.html +++ b/dev/examples/agriculture_mccardle_farm/index.html @@ -120,4 +120,4 @@ @test SDDP.calculate_bound(model) ≈ 4074.1391 atol = 1e-5 end -test_mccardle_farm_model()
Test Passed

This page was generated using Literate.jl.

+test_mccardle_farm_model()
Test Passed

This page was generated using Literate.jl.

diff --git a/dev/examples/air_conditioning/index.html b/dev/examples/air_conditioning/index.html index 1755d6aeb..17d3fe819 100644 --- a/dev/examples/air_conditioning/index.html +++ b/dev/examples/air_conditioning/index.html @@ -62,11 +62,11 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1L 3.000000e+04 6.166667e+04 2.236040e-01 8 1 - 40L 5.500000e+04 6.250000e+04 6.454720e-01 344 1 + 1L 3.000000e+04 6.166667e+04 1.674521e-01 8 1 + 40L 5.500000e+04 6.250000e+04 4.686260e-01 344 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 6.454720e-01 +total time (s) : 4.686260e-01 total solves : 344 best bound : 6.250000e+04 simulation ci : 5.948750e+04 ± 5.671019e+03 @@ -99,13 +99,13 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 1.100000e+05 6.250000e+04 5.578041e-03 8 1 - 20 5.500000e+04 6.250000e+04 8.753991e-02 172 1 + 1 1.100000e+05 6.250000e+04 4.128933e-03 8 1 + 20 5.500000e+04 6.250000e+04 6.006908e-02 172 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 8.753991e-02 +total time (s) : 6.006908e-02 total solves : 172 best bound : 6.250000e+04 simulation ci : 6.250000e+04 ± 8.865729e+03 numeric issues : 0 --------------------------------------------------------------------

This page was generated using Literate.jl.

+-------------------------------------------------------------------

This page was generated using Literate.jl.

diff --git a/dev/examples/air_conditioning_forward/index.html b/dev/examples/air_conditioning_forward/index.html index 19d088a37..f1227f56a 100644 --- a/dev/examples/air_conditioning_forward/index.html +++ b/dev/examples/air_conditioning_forward/index.html @@ -33,4 +33,4 @@ iteration_limit = 10, ) Test.@test isapprox(SDDP.calculate_bound(non_convex), 62_500.0, atol = 0.1) -Test.@test isapprox(SDDP.calculate_bound(convex), 62_500.0, atol = 0.1)
Test Passed

This page was generated using Literate.jl.

+Test.@test isapprox(SDDP.calculate_bound(convex), 62_500.0, atol = 0.1)
Test Passed

This page was generated using Literate.jl.

diff --git a/dev/examples/all_blacks/index.html b/dev/examples/all_blacks/index.html index 15132fa03..ba8d592f4 100644 --- a/dev/examples/all_blacks/index.html +++ b/dev/examples/all_blacks/index.html @@ -57,13 +57,13 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1L 6.000000e+00 9.000000e+00 6.429219e-02 6 1 - 20L 9.000000e+00 9.000000e+00 1.416581e-01 123 1 + 1L 6.000000e+00 9.000000e+00 5.192113e-02 6 1 + 20L 9.000000e+00 9.000000e+00 1.030860e-01 123 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 1.416581e-01 +total time (s) : 1.030860e-01 total solves : 123 best bound : 9.000000e+00 simulation ci : 8.850000e+00 ± 2.940000e-01 numeric issues : 0 --------------------------------------------------------------------

This page was generated using Literate.jl.

+-------------------------------------------------------------------

This page was generated using Literate.jl.

diff --git a/dev/examples/asset_management_simple/index.html b/dev/examples/asset_management_simple/index.html index 43a4755fc..4d422624c 100644 --- a/dev/examples/asset_management_simple/index.html +++ b/dev/examples/asset_management_simple/index.html @@ -70,20 +70,20 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 5 0.000000e+00 7.223982e-02 2.024889e-02 87 1 - 10 -1.109375e+01 2.605769e-01 3.028989e-02 142 1 - 15 0.000000e+00 1.417586e+00 4.188991e-02 197 1 - 20 1.080025e-12 1.514085e+00 5.375195e-02 252 1 - 25 4.864000e+01 1.514085e+00 1.593828e-01 339 1 - 30 4.864000e+01 1.514085e+00 1.729090e-01 394 1 - 35 -4.263256e-14 1.514085e+00 1.867809e-01 449 1 - 40 -8.870299e+00 1.514085e+00 2.012670e-01 504 1 - 44 4.864000e+01 1.514085e+00 2.164669e-01 548 1 + 5 0.000000e+00 7.223982e-02 1.305914e-02 87 1 + 10 -1.109375e+01 2.605769e-01 1.986408e-02 142 1 + 15 0.000000e+00 1.417586e+00 2.725315e-02 197 1 + 20 1.080025e-12 1.514085e+00 3.567910e-02 252 1 + 25 4.864000e+01 1.514085e+00 1.238091e-01 339 1 + 30 4.864000e+01 1.514085e+00 1.336060e-01 394 1 + 35 -4.263256e-14 1.514085e+00 1.434832e-01 449 1 + 40 -8.870299e+00 1.514085e+00 1.544340e-01 504 1 + 44 4.864000e+01 1.514085e+00 1.637661e-01 548 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 2.164669e-01 +total time (s) : 1.637661e-01 total solves : 548 best bound : 1.514085e+00 simulation ci : 3.658316e+00 ± 6.622759e+00 numeric issues : 0 --------------------------------------------------------------------

This page was generated using Literate.jl.

+-------------------------------------------------------------------

This page was generated using Literate.jl.

diff --git a/dev/examples/asset_management_stagewise/index.html b/dev/examples/asset_management_stagewise/index.html index b00f86afa..64e8b23f7 100644 --- a/dev/examples/asset_management_stagewise/index.html +++ b/dev/examples/asset_management_stagewise/index.html @@ -87,14 +87,14 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 2.735160e+01 1.397033e+00 2.560239e-01 278 1 - 20 1.102316e+01 1.288927e+00 2.896209e-01 428 1 - 30 -5.003795e+01 1.278410e+00 3.469839e-01 706 1 - 40 -1.787775e+01 1.278410e+00 3.896420e-01 856 1 - 46 1.111084e+01 1.278410e+00 4.167969e-01 946 1 + 10 2.735160e+01 1.397033e+00 2.061160e-01 278 1 + 20 1.102316e+01 1.288927e+00 2.298601e-01 428 1 + 30 -5.003795e+01 1.278410e+00 2.969160e-01 706 1 + 40 -1.787775e+01 1.278410e+00 3.245740e-01 856 1 + 46 1.111084e+01 1.278410e+00 3.417661e-01 946 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 4.167969e-01 +total time (s) : 3.417661e-01 total solves : 946 best bound : 1.278410e+00 simulation ci : 1.179501e+00 ± 6.843349e+00 @@ -126,15 +126,15 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 1.111084e+01 1.278410e+00 6.419611e-02 278 1 - 20 8.362212e+00 1.278410e+00 1.165061e-01 428 1 - 30 8.859026e+00 1.278410e+00 1.973350e-01 706 1 - 40 8.288929e+00 1.278410e+00 3.121021e-01 856 1 + 10 1.111084e+01 1.278410e+00 4.049778e-02 278 1 + 20 8.362212e+00 1.278410e+00 7.501578e-02 428 1 + 30 8.859026e+00 1.278410e+00 1.288450e-01 706 1 + 40 8.288929e+00 1.278410e+00 1.809900e-01 856 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 3.121021e-01 +total time (s) : 1.809900e-01 total solves : 856 best bound : 1.278410e+00 simulation ci : 3.734142e+00 ± 6.085569e+00 numeric issues : 0 --------------------------------------------------------------------

This page was generated using Literate.jl.

+-------------------------------------------------------------------

This page was generated using Literate.jl.

diff --git a/dev/examples/belief/index.html b/dev/examples/belief/index.html index f1f477388..3d04258ef 100644 --- a/dev/examples/belief/index.html +++ b/dev/examples/belief/index.html @@ -89,21 +89,21 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 2.345457e+01 1.065473e+01 1.731448e+00 2924 1 - 20 9.845555e+00 1.230410e+01 1.970252e+00 3576 1 - 30 2.844000e+01 1.501681e+01 2.322809e+00 4300 1 - 40 1.135141e+01 1.631382e+01 2.610750e+00 4944 1 - 50 3.598122e+01 1.763877e+01 3.661174e+00 8140 1 - 60 1.052329e+01 1.772819e+01 4.158501e+00 9072 1 - 70 6.571294e+01 1.922883e+01 4.867149e+00 10372 1 - 80 1.575331e+01 1.954102e+01 5.215514e+00 10984 1 - 90 4.004597e+01 1.969810e+01 5.655994e+00 11684 1 - 100 1.325329e+01 1.999494e+01 6.338120e+00 12672 1 + 10 2.345457e+01 1.065473e+01 1.224722e+00 2924 1 + 20 9.845555e+00 1.230410e+01 1.373981e+00 3576 1 + 30 2.844000e+01 1.501681e+01 1.566975e+00 4300 1 + 40 1.135141e+01 1.631382e+01 1.786906e+00 4944 1 + 50 3.598122e+01 1.763877e+01 2.433231e+00 8140 1 + 60 1.052329e+01 1.772819e+01 2.785973e+00 9072 1 + 70 6.571294e+01 1.922883e+01 3.289745e+00 10372 1 + 80 1.575331e+01 1.954102e+01 3.553972e+00 10984 1 + 90 4.004597e+01 1.969810e+01 3.883928e+00 11684 1 + 100 1.325329e+01 1.999494e+01 4.414739e+00 12672 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 6.338120e+00 +total time (s) : 4.414739e+00 total solves : 12672 best bound : 1.999494e+01 simulation ci : 2.304322e+01 ± 5.358983e+00 numeric issues : 0 --------------------------------------------------------------------

This page was generated using Literate.jl.

+-------------------------------------------------------------------

This page was generated using Literate.jl.

diff --git a/dev/examples/biobjective_hydro/index.html b/dev/examples/biobjective_hydro/index.html index fabef6a6a..bb423ae84 100644 --- a/dev/examples/biobjective_hydro/index.html +++ b/dev/examples/biobjective_hydro/index.html @@ -76,11 +76,11 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 0.000000e+00 0.000000e+00 1.277995e-02 36 1 - 10 0.000000e+00 0.000000e+00 9.096503e-02 660 1 + 1 0.000000e+00 0.000000e+00 9.142876e-03 36 1 + 10 0.000000e+00 0.000000e+00 7.928395e-02 660 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 9.096503e-02 +total time (s) : 7.928395e-02 total solves : 660 best bound : 0.000000e+00 simulation ci : 0.000000e+00 ± 0.000000e+00 @@ -109,11 +109,11 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 8.650000e+02 5.500000e+02 5.427122e-03 707 1 - 10 2.950000e+02 5.736664e+02 1.132510e-01 1331 1 + 1 8.650000e+02 5.500000e+02 2.943039e-03 707 1 + 10 2.950000e+02 5.736664e+02 5.830884e-02 1331 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 1.132510e-01 +total time (s) : 5.830884e-02 total solves : 1331 best bound : 5.736664e+02 simulation ci : 5.385000e+02 ± 1.500112e+02 @@ -142,11 +142,11 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 5.675000e+02 3.346337e+02 5.506992e-03 1378 1 - 10 4.370755e+02 3.468727e+02 1.294410e-01 2002 1 + 1 5.675000e+02 3.346337e+02 3.026962e-03 1378 1 + 10 4.370755e+02 3.468727e+02 6.502700e-02 2002 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 1.294410e-01 +total time (s) : 6.502700e-02 total solves : 2002 best bound : 3.468727e+02 simulation ci : 3.604575e+02 ± 7.003395e+01 @@ -175,11 +175,11 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 2.250000e+02 2.030682e+02 5.362034e-03 2049 1 - 10 1.487500e+02 2.052545e+02 1.354401e-01 2673 1 + 1 2.250000e+02 2.030682e+02 3.034115e-03 2049 1 + 10 1.487500e+02 2.052545e+02 6.790113e-02 2673 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 1.354401e-01 +total time (s) : 6.790113e-02 total solves : 2673 best bound : 2.052545e+02 simulation ci : 2.246299e+02 ± 2.511808e+01 @@ -208,11 +208,11 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 5.658333e+02 4.640036e+02 7.082939e-03 2720 1 - 10 5.225000e+02 4.660290e+02 1.464138e-01 3344 1 + 1 5.658333e+02 4.640036e+02 3.440142e-03 2720 1 + 10 5.225000e+02 4.660290e+02 7.340717e-02 3344 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 1.464138e-01 +total time (s) : 7.340717e-02 total solves : 3344 best bound : 4.660290e+02 simulation ci : 5.192132e+02 ± 9.666502e+01 @@ -241,11 +241,11 @@ 
------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 1.300000e+02 1.129545e+02 5.549908e-03 3391 1 - 10 8.812500e+01 1.129771e+02 1.324220e-01 4015 1 + 1 1.300000e+02 1.129545e+02 3.108978e-03 3391 1 + 10 8.812500e+01 1.129771e+02 6.695819e-02 4015 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 1.324220e-01 +total time (s) : 6.695819e-02 total solves : 4015 best bound : 1.129771e+02 simulation ci : 1.146875e+02 ± 2.642851e+01 @@ -274,11 +274,11 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 1.562500e+02 2.773056e+02 6.232977e-03 4062 1 - 10 3.625000e+02 2.796065e+02 2.071030e-01 4686 1 + 1 1.562500e+02 2.773056e+02 3.480196e-03 4062 1 + 10 3.625000e+02 2.796065e+02 6.888819e-02 4686 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 2.071030e-01 +total time (s) : 6.888819e-02 total solves : 4686 best bound : 2.796065e+02 simulation ci : 2.556250e+02 ± 6.667722e+01 @@ -307,11 +307,11 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 6.775000e+02 4.069295e+02 6.358147e-03 4733 1 - 10 6.445745e+02 4.078859e+02 1.411211e-01 5357 1 + 1 6.775000e+02 4.069295e+02 3.355026e-03 4733 1 + 10 6.445745e+02 4.078859e+02 1.111910e-01 5357 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 1.411211e-01 +total time (s) : 1.111910e-01 total solves : 5357 best bound : 4.078859e+02 simulation ci : 5.066792e+02 ± 1.075729e+02 @@ -340,11 +340,11 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 2.506250e+02 5.205513e+02 7.408857e-03 5404 1 - 10 2.631250e+02 5.213230e+02 1.474149e-01 6028 1 + 1 2.506250e+02 5.205513e+02 4.287958e-03 5404 1 + 10 2.631250e+02 5.213230e+02 8.214092e-02 6028 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 1.474149e-01 +total time (s) : 8.214092e-02 total solves : 6028 best bound : 5.213230e+02 simulation ci : 3.949359e+02 ± 1.019543e+02 @@ -373,13 +373,13 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 7.187500e+01 5.937500e+01 5.764961e-03 6075 1 - 10 7.812500e+01 5.938557e+01 1.271439e-01 6699 1 + 1 7.187500e+01 5.937500e+01 3.389835e-03 6075 1 + 10 7.812500e+01 5.938557e+01 6.928802e-02 6699 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 1.271439e-01 +total time (s) : 6.928802e-02 total solves : 6699 best bound : 5.938557e+01 simulation ci : 6.668502e+01 ± 1.171871e+01 numeric issues : 0 --------------------------------------------------------------------

This page was generated using Literate.jl.

+-------------------------------------------------------------------

This page was generated using Literate.jl.

diff --git a/dev/examples/booking_management/index.html b/dev/examples/booking_management/index.html index 9b02f090d..c28c91627 100644 --- a/dev/examples/booking_management/index.html +++ b/dev/examples/booking_management/index.html @@ -92,4 +92,4 @@ end end -booking_management(SDDP.ContinuousConicDuality())
Test Passed

New version of HiGHS stalls booking_management(SDDP.LagrangianDuality())


This page was generated using Literate.jl.

+booking_management(SDDP.ContinuousConicDuality())
Test Passed

New version of HiGHS stalls booking_management(SDDP.LagrangianDuality())


This page was generated using Literate.jl.

diff --git a/dev/examples/generation_expansion/index.html b/dev/examples/generation_expansion/index.html index 302075e5c..d03907961 100644 --- a/dev/examples/generation_expansion/index.html +++ b/dev/examples/generation_expansion/index.html @@ -111,13 +111,13 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 2.299661e+06 2.074790e+06 1.013655e+00 920 1 - 20 1.049667e+06 2.078257e+06 1.455700e+00 1340 1 - 30 4.986275e+04 2.078257e+06 2.512509e+00 2260 1 - 40 4.965301e+04 2.078257e+06 2.988097e+00 2680 1 + 10 2.299661e+06 2.074790e+06 7.858400e-01 920 1 + 20 1.049667e+06 2.078257e+06 1.112053e+00 1340 1 + 30 4.986275e+04 2.078257e+06 1.931308e+00 2260 1 + 40 4.965301e+04 2.078257e+06 2.296945e+00 2680 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 2.988097e+00 +total time (s) : 2.296945e+00 total solves : 2680 best bound : 2.078257e+06 simulation ci : 1.628720e+06 ± 4.829316e+05 @@ -151,16 +151,16 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10L 2.299674e+06 2.079240e+06 1.848254e+00 920 1 - 20L 1.799859e+06 2.079457e+06 3.375839e+00 1340 1 - 30L 3.549779e+06 2.079457e+06 5.618303e+00 2260 1 - 40L 2.998530e+05 2.079457e+06 7.207332e+00 2680 1 - 43L 1.299465e+06 2.079457e+06 7.621044e+00 2806 1 + 10L 2.299674e+06 2.079240e+06 1.467866e+00 920 1 + 20L 1.799859e+06 2.079457e+06 2.664228e+00 1340 1 + 30L 3.549779e+06 2.079457e+06 4.478271e+00 2260 1 + 40L 2.998530e+05 2.079457e+06 5.799426e+00 2680 1 + 43L 1.299465e+06 2.079457e+06 6.154734e+00 2806 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 7.621044e+00 +total time (s) : 6.154734e+00 total solves : 2806 best bound : 2.079457e+06 simulation ci : 2.036666e+06 ± 5.358445e+05 numeric issues : 0 --------------------------------------------------------------------

This page was generated using Literate.jl.

+-------------------------------------------------------------------

This page was generated using Literate.jl.

diff --git a/dev/examples/hydro_valley/index.html b/dev/examples/hydro_valley/index.html index e640885fa..b1dd2dc04 100644 --- a/dev/examples/hydro_valley/index.html +++ b/dev/examples/hydro_valley/index.html @@ -274,4 +274,4 @@ ### = $835 end -test_hydro_valley_model()
Test Passed

This page was generated using Literate.jl.

+test_hydro_valley_model()
Test Passed

This page was generated using Literate.jl.

diff --git a/dev/examples/infinite_horizon_hydro_thermal/index.html b/dev/examples/infinite_horizon_hydro_thermal/index.html index a080415be..b70c76c73 100644 --- a/dev/examples/infinite_horizon_hydro_thermal/index.html +++ b/dev/examples/infinite_horizon_hydro_thermal/index.html @@ -88,12 +88,12 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 100 0.000000e+00 1.189487e+02 1.059837e+00 3552 1 - 200 2.500000e+00 1.191658e+02 1.498708e+00 5858 1 - 287 2.500000e+01 1.191667e+02 1.758297e+00 6763 1 + 100 0.000000e+00 1.189487e+02 7.221689e-01 3552 1 + 200 2.500000e+00 1.191658e+02 9.743721e-01 5858 1 + 287 2.500000e+01 1.191667e+02 1.101237e+00 6763 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 1.758297e+00 +total time (s) : 1.101237e+00 total solves : 6763 best bound : 1.191667e+02 simulation ci : 2.238676e+01 ± 3.878136e+00 @@ -126,16 +126,16 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 100 1.500000e+02 1.191155e+02 4.206419e-01 1912 1 - 200 2.500000e+00 1.191666e+02 8.327479e-01 3828 1 - 207 5.000000e+01 1.191666e+02 8.553789e-01 3901 1 + 100 1.500000e+02 1.191155e+02 2.564540e-01 1912 1 + 200 2.500000e+00 1.191666e+02 5.028541e-01 3828 1 + 207 5.000000e+01 1.191666e+02 5.154240e-01 3901 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 8.553789e-01 +total time (s) : 5.154240e-01 total solves : 3901 best bound : 1.191666e+02 simulation ci : 1.985507e+01 ± 4.295996e+00 numeric issues : 0 ------------------------------------------------------------------- -Confidence_interval = 125.04 ± 13.82

This page was generated using Literate.jl.

+Confidence_interval = 125.04 ± 13.82

This page was generated using Literate.jl.

diff --git a/dev/examples/infinite_horizon_trivial/index.html b/dev/examples/infinite_horizon_trivial/index.html index c943abcc8..07e57f710 100644 --- a/dev/examples/infinite_horizon_trivial/index.html +++ b/dev/examples/infinite_horizon_trivial/index.html @@ -45,15 +45,15 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 1.000000e+01 1.997380e+01 1.151390e-01 1002 1 - 20 2.800000e+01 2.000000e+01 1.654048e-01 1284 1 - 30 8.000000e+00 2.000000e+01 3.178349e-01 2282 1 - 40 2.000000e+00 2.000000e+01 3.560450e-01 2464 1 + 10 1.000000e+01 1.997380e+01 5.251503e-02 1002 1 + 20 2.800000e+01 2.000000e+01 8.010602e-02 1284 1 + 30 8.000000e+00 2.000000e+01 1.654611e-01 2282 1 + 40 2.000000e+00 2.000000e+01 1.871161e-01 2464 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 3.560450e-01 +total time (s) : 1.871161e-01 total solves : 2464 best bound : 2.000000e+01 simulation ci : 1.720000e+01 ± 4.196227e+00 numeric issues : 0 --------------------------------------------------------------------

This page was generated using Literate.jl.

+-------------------------------------------------------------------

This page was generated using Literate.jl.

diff --git a/dev/examples/no_strong_duality/index.html b/dev/examples/no_strong_duality/index.html index 08097a488..d87a8f22e 100644 --- a/dev/examples/no_strong_duality/index.html +++ b/dev/examples/no_strong_duality/index.html @@ -44,13 +44,13 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 7.000000e+00 1.992188e+00 3.556967e-03 15 1 - 40 1.000000e+00 2.000000e+00 7.783103e-02 612 1 + 1 7.000000e+00 1.992188e+00 2.629042e-03 15 1 + 40 1.000000e+00 2.000000e+00 4.314399e-02 612 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 7.783103e-02 +total time (s) : 4.314399e-02 total solves : 612 best bound : 2.000000e+00 simulation ci : 2.025000e+00 ± 4.627975e-01 numeric issues : 0 --------------------------------------------------------------------

This page was generated using Literate.jl.

+-------------------------------------------------------------------

This page was generated using Literate.jl.

diff --git a/dev/examples/objective_state_newsvendor/index.html b/dev/examples/objective_state_newsvendor/index.html index 1010facde..8853da728 100644 --- a/dev/examples/objective_state_newsvendor/index.html +++ b/dev/examples/objective_state_newsvendor/index.html @@ -89,82 +89,118 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 3.412500e+00 5.484827e+00 3.432472e-01 1650 1 - 20 4.406250e+00 4.472007e+00 5.534871e-01 3000 1 - 30 4.875000e+00 4.446365e+00 7.405791e-01 4350 1 - 40 3.975000e+00 4.440303e+00 9.366941e-01 5700 1 - 50 4.062500e+00 4.438364e+00 1.197326e+00 7350 1 - 60 4.162500e+00 4.096084e+00 1.439869e+00 8700 1 - 70 5.575000e+00 4.089659e+00 1.656491e+00 10050 1 - 80 4.612500e+00 4.089204e+00 1.939506e+00 11700 1 - 90 4.312500e+00 4.088025e+00 2.164094e+00 13050 1 - 100 2.775000e+00 4.086832e+00 2.494948e+00 14700 1 - 110 4.375000e+00 4.086730e+00 2.715922e+00 16050 1 - 120 4.350000e+00 4.086313e+00 2.952486e+00 17400 1 - 130 2.987500e+00 4.086047e+00 3.180989e+00 18750 1 - 140 4.268750e+00 4.085989e+00 3.416775e+00 20100 1 - 150 3.631250e+00 4.085945e+00 3.667883e+00 21450 1 - 160 4.475000e+00 4.085747e+00 3.913039e+00 22800 1 - 170 4.175000e+00 4.085718e+00 4.148523e+00 24150 1 - 180 4.662500e+00 4.085706e+00 4.386923e+00 25500 1 - 190 3.893750e+00 4.085706e+00 4.628960e+00 26850 1 - 200 3.068750e+00 4.085689e+00 4.978849e+00 28500 1 - 210 2.950000e+00 4.085670e+00 5.236434e+00 29850 1 - 220 4.312500e+00 4.085656e+00 5.499782e+00 31200 1 - 230 3.975000e+00 4.085385e+00 5.784419e+00 32550 1 - 240 5.112500e+00 4.085365e+00 6.080950e+00 33900 1 - 250 3.350000e+00 4.085331e+00 6.345343e+00 35250 1 - 260 2.812500e+00 4.085150e+00 6.599374e+00 36600 1 - 270 5.000000e+00 4.085119e+00 6.876791e+00 37950 1 - 280 4.781250e+00 4.085051e+00 7.154335e+00 39300 1 - 290 3.125000e+00 4.084915e+00 7.449913e+00 40650 1 - 300 4.875000e+00 4.084899e+00 7.835534e+00 42300 1 - 310 4.781250e+00 4.084887e+00 8.113704e+00 43650 1 - 320 3.093750e+00 4.084887e+00 8.400054e+00 45000 1 - 330 5.212500e+00 4.084874e+00 8.701667e+00 46350 1 - 340 4.375000e+00 4.084871e+00 8.984442e+00 47700 1 - 350 3.612500e+00 4.084867e+00 9.255274e+00 49050 1 - 360 5.125000e+00 4.084862e+00 9.540174e+00 50400 1 - 370 2.775000e+00 4.084862e+00 9.833343e+00 51750 1 - 380 4.375000e+00 4.084862e+00 1.014810e+01 53100 1 - 390 5.087500e+00 4.084862e+00 1.042538e+01 54450 1 - 400 2.925000e+00 4.084862e+00 1.080535e+01 56100 1 - 410 5.250000e+00 4.084862e+00 1.109867e+01 57450 1 - 420 3.862500e+00 4.084862e+00 1.140318e+01 58800 1 - 430 4.500000e+00 4.084862e+00 1.169591e+01 60150 1 - 440 2.737500e+00 4.084862e+00 1.197902e+01 61500 1 - 450 5.375000e+00 4.084862e+00 1.227756e+01 62850 1 - 460 3.125000e+00 4.084862e+00 1.259802e+01 64200 1 - 470 5.000000e+00 4.084832e+00 1.290254e+01 65550 1 - 480 5.000000e+00 4.084824e+00 1.318937e+01 66900 1 - 490 4.562500e+00 4.084824e+00 1.345930e+01 68250 1 - 500 4.500000e+00 4.084824e+00 1.389648e+01 69900 1 - 510 3.850000e+00 4.084824e+00 1.419217e+01 71250 1 - 520 3.493750e+00 4.084824e+00 1.449167e+01 72600 1 - 530 4.712500e+00 4.084819e+00 1.480202e+01 73950 1 - 540 5.100000e+00 4.084816e+00 1.511662e+01 75300 1 - 550 4.062500e+00 4.084816e+00 1.543988e+01 76650 1 - 560 4.412500e+00 4.084816e+00 1.575876e+01 78000 1 - 570 4.487500e+00 4.084816e+00 1.609040e+01 79350 1 - 580 4.862500e+00 4.084816e+00 1.639804e+01 80700 1 - 590 4.000000e+00 
4.084816e+00 1.672969e+01 82050 1 - 600 2.950000e+00 4.084814e+00 1.716352e+01 83700 1 - 610 2.812500e+00 4.084812e+00 1.746830e+01 85050 1 - 620 4.425000e+00 4.084806e+00 1.778480e+01 86400 1 - 630 5.175000e+00 4.084804e+00 1.809781e+01 87750 1 - 640 4.556250e+00 4.084800e+00 1.839636e+01 89100 1 - 650 2.818750e+00 4.084796e+00 1.870496e+01 90450 1 - 660 3.187500e+00 4.084794e+00 1.900543e+01 91800 1 - 670 4.312500e+00 4.084765e+00 1.933784e+01 93150 1 - 680 5.750000e+00 4.084762e+00 1.963982e+01 94500 1 - 690 4.425000e+00 4.084754e+00 1.994470e+01 95850 1 - 692 5.250000e+00 4.084754e+00 2.001155e+01 96120 1 + 10 3.412500e+00 5.484827e+00 2.254689e-01 1650 1 + 20 4.406250e+00 4.472007e+00 3.383160e-01 3000 1 + 30 4.875000e+00 4.446365e+00 4.322689e-01 4350 1 + 40 3.975000e+00 4.440303e+00 5.358810e-01 5700 1 + 50 4.062500e+00 4.438364e+00 6.833451e-01 7350 1 + 60 4.162500e+00 4.096084e+00 7.914360e-01 8700 1 + 70 5.575000e+00 4.089659e+00 9.444220e-01 10050 1 + 80 4.612500e+00 4.089204e+00 1.110530e+00 11700 1 + 90 4.312500e+00 4.088025e+00 1.240632e+00 13050 1 + 100 2.775000e+00 4.086832e+00 1.411161e+00 14700 1 + 110 4.375000e+00 4.086730e+00 1.570415e+00 16050 1 + 120 4.350000e+00 4.086313e+00 1.705659e+00 17400 1 + 130 2.987500e+00 4.086047e+00 1.836985e+00 18750 1 + 140 4.268750e+00 4.085989e+00 1.978179e+00 20100 1 + 150 3.631250e+00 4.085945e+00 2.130887e+00 21450 1 + 160 4.475000e+00 4.085747e+00 2.269605e+00 22800 1 + 170 4.175000e+00 4.085718e+00 2.408035e+00 24150 1 + 180 4.662500e+00 4.085706e+00 2.547780e+00 25500 1 + 190 3.893750e+00 4.085706e+00 2.693811e+00 26850 1 + 200 3.068750e+00 4.085689e+00 2.910456e+00 28500 1 + 210 2.950000e+00 4.085670e+00 3.060071e+00 29850 1 + 220 4.312500e+00 4.085656e+00 3.217498e+00 31200 1 + 230 3.975000e+00 4.085385e+00 3.372842e+00 32550 1 + 240 5.112500e+00 4.085365e+00 3.535348e+00 33900 1 + 250 3.350000e+00 4.085331e+00 3.712220e+00 35250 1 + 260 2.812500e+00 4.085150e+00 3.870849e+00 36600 1 + 270 5.000000e+00 4.085119e+00 4.031426e+00 37950 1 + 280 4.781250e+00 4.085051e+00 4.200393e+00 39300 1 + 290 3.125000e+00 4.084915e+00 4.390983e+00 40650 1 + 300 4.875000e+00 4.084899e+00 4.613061e+00 42300 1 + 310 4.781250e+00 4.084887e+00 4.782663e+00 43650 1 + 320 3.093750e+00 4.084887e+00 4.958242e+00 45000 1 + 330 5.212500e+00 4.084874e+00 5.152423e+00 46350 1 + 340 4.375000e+00 4.084871e+00 5.326854e+00 47700 1 + 350 3.612500e+00 4.084867e+00 5.496131e+00 49050 1 + 360 5.125000e+00 4.084862e+00 5.673648e+00 50400 1 + 370 2.775000e+00 4.084862e+00 5.843172e+00 51750 1 + 380 4.375000e+00 4.084862e+00 6.044040e+00 53100 1 + 390 5.087500e+00 4.084862e+00 6.215716e+00 54450 1 + 400 2.925000e+00 4.084862e+00 6.445403e+00 56100 1 + 410 5.250000e+00 4.084862e+00 6.621290e+00 57450 1 + 420 3.862500e+00 4.084862e+00 6.815795e+00 58800 1 + 430 4.500000e+00 4.084862e+00 6.997721e+00 60150 1 + 440 2.737500e+00 4.084862e+00 7.180048e+00 61500 1 + 450 5.375000e+00 4.084862e+00 7.362429e+00 62850 1 + 460 3.125000e+00 4.084862e+00 7.542587e+00 64200 1 + 470 5.000000e+00 4.084832e+00 7.754080e+00 65550 1 + 480 5.000000e+00 4.084824e+00 7.932283e+00 66900 1 + 490 4.562500e+00 4.084824e+00 8.097970e+00 68250 1 + 500 4.500000e+00 4.084824e+00 8.343433e+00 69900 1 + 510 3.850000e+00 4.084824e+00 8.557755e+00 71250 1 + 520 3.493750e+00 4.084824e+00 8.743363e+00 72600 1 + 530 4.712500e+00 4.084819e+00 8.935032e+00 73950 1 + 540 5.100000e+00 4.084816e+00 9.129475e+00 75300 1 + 550 4.062500e+00 4.084816e+00 9.338104e+00 76650 1 + 560 4.412500e+00 4.084816e+00 
9.537376e+00 78000 1 + 570 4.487500e+00 4.084816e+00 9.731502e+00 79350 1 + 580 4.862500e+00 4.084816e+00 9.921464e+00 80700 1 + 590 4.000000e+00 4.084816e+00 1.013443e+01 82050 1 + 600 2.950000e+00 4.084814e+00 1.038858e+01 83700 1 + 610 2.812500e+00 4.084812e+00 1.058024e+01 85050 1 + 620 4.425000e+00 4.084806e+00 1.077855e+01 86400 1 + 630 5.175000e+00 4.084804e+00 1.098634e+01 87750 1 + 640 4.556250e+00 4.084800e+00 1.117329e+01 89100 1 + 650 2.818750e+00 4.084796e+00 1.136360e+01 90450 1 + 660 3.187500e+00 4.084794e+00 1.154789e+01 91800 1 + 670 4.312500e+00 4.084765e+00 1.175926e+01 93150 1 + 680 5.750000e+00 4.084762e+00 1.195140e+01 94500 1 + 690 4.425000e+00 4.084754e+00 1.214363e+01 95850 1 + 700 3.975000e+00 4.084754e+00 1.241234e+01 97500 1 + 710 3.693750e+00 4.084754e+00 1.262139e+01 98850 1 + 720 2.600000e+00 4.084753e+00 1.280806e+01 100200 1 + 730 3.875000e+00 4.084743e+00 1.300440e+01 101550 1 + 740 4.387500e+00 4.084743e+00 1.320735e+01 102900 1 + 750 4.875000e+00 4.084743e+00 1.339323e+01 104250 1 + 760 4.675000e+00 4.084743e+00 1.360433e+01 105600 1 + 770 4.275000e+00 4.084743e+00 1.378093e+01 106950 1 + 780 3.512500e+00 4.084743e+00 1.395776e+01 108300 1 + 790 4.881250e+00 4.084743e+00 1.415336e+01 109650 1 + 800 5.225000e+00 4.084743e+00 1.443056e+01 111300 1 + 810 3.493750e+00 4.084743e+00 1.463764e+01 112650 1 + 820 4.875000e+00 4.084743e+00 1.484532e+01 114000 1 + 830 4.125000e+00 4.084743e+00 1.504573e+01 115350 1 + 840 3.925000e+00 4.084743e+00 1.526631e+01 116700 1 + 850 3.900000e+00 4.084743e+00 1.545965e+01 118050 1 + 860 3.450000e+00 4.084743e+00 1.565771e+01 119400 1 + 870 4.437500e+00 4.084743e+00 1.585214e+01 120750 1 + 880 4.106250e+00 4.084743e+00 1.606597e+01 122100 1 + 890 4.518750e+00 4.084743e+00 1.625164e+01 123450 1 + 900 3.750000e+00 4.084743e+00 1.653199e+01 125100 1 + 910 4.112500e+00 4.084743e+00 1.673681e+01 126450 1 + 920 4.762500e+00 4.084743e+00 1.697430e+01 127800 1 + 930 5.525000e+00 4.084743e+00 1.718263e+01 129150 1 + 940 3.325000e+00 4.084743e+00 1.738831e+01 130500 1 + 950 4.475000e+00 4.084743e+00 1.759303e+01 131850 1 + 960 4.918750e+00 4.084743e+00 1.782661e+01 133200 1 + 970 3.856250e+00 4.084743e+00 1.802650e+01 134550 1 + 980 3.131250e+00 4.084743e+00 1.823838e+01 135900 1 + 990 2.900000e+00 4.084743e+00 1.844719e+01 137250 1 + 1000 4.900000e+00 4.084743e+00 1.875922e+01 138900 1 + 1010 3.437500e+00 4.084743e+00 1.897145e+01 140250 1 + 1020 4.350000e+00 4.084743e+00 1.918633e+01 141600 1 + 1030 4.100000e+00 4.084743e+00 1.941215e+01 142950 1 + 1040 2.437500e+00 4.084743e+00 1.962467e+01 144300 1 + 1050 4.418750e+00 4.084743e+00 1.983910e+01 145650 1 + 1058 3.950000e+00 4.084743e+00 2.000340e+01 146730 1 ------------------------------------------------------------------- status : time_limit -total time (s) : 2.001155e+01 -total solves : 96120 -best bound : 4.084754e+00 -simulation ci : 4.105699e+00 ± 5.584226e-02 +total time (s) : 2.000340e+01 +total solves : 146730 +best bound : 4.084743e+00 +simulation ci : 4.101235e+00 ± 4.453673e-02 numeric issues : 0 ------------------------------------------------------------------- @@ -194,24 +230,25 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 4.058125e+00 4.910287e+00 4.178090e-01 1650 1 - 20 4.456250e+00 4.054763e+00 1.031706e+00 3000 1 - 30 3.600000e+00 4.051318e+00 1.745694e+00 4350 1 - 40 4.781250e+00 4.048923e+00 2.988870e+00 6000 1 - 50 
5.343750e+00 4.042879e+00 4.143216e+00 7350 1 - 60 3.312500e+00 4.041955e+00 6.177441e+00 9000 1 - 70 4.875000e+00 4.040976e+00 7.572112e+00 10350 1 - 80 3.287500e+00 4.040025e+00 1.005774e+01 12000 1 - 90 3.075000e+00 4.039898e+00 1.198613e+01 13350 1 - 100 5.037500e+00 4.039822e+00 1.512891e+01 15000 1 - 110 3.937500e+00 4.039709e+00 1.750257e+01 16350 1 - 120 4.250000e+00 4.039709e+00 1.995563e+01 17700 1 - 121 5.625000e+00 4.039709e+00 2.020632e+01 17835 1 + 10 4.875000e+00 4.086989e+00 2.782941e-01 1650 1 + 20 5.625000e+00 4.067007e+00 7.786269e-01 3000 1 + 30 4.275000e+00 4.044851e+00 1.437763e+00 4350 1 + 40 6.000000e+00 4.040268e+00 2.244983e+00 5700 1 + 50 5.225000e+00 4.037899e+00 3.653688e+00 7350 1 + 60 4.425000e+00 4.037712e+00 4.793282e+00 8700 1 + 70 4.612500e+00 4.037493e+00 6.681571e+00 10350 1 + 80 4.112500e+00 4.037498e+00 8.147209e+00 11700 1 + 90 4.281250e+00 4.037323e+00 1.045427e+01 13350 1 + 100 3.450000e+00 4.037312e+00 1.215458e+01 14700 1 + 110 3.668750e+00 4.037275e+00 1.410500e+01 16050 1 + 120 3.187500e+00 4.037273e+00 1.613232e+01 17400 1 + 130 4.431250e+00 4.037268e+00 1.833190e+01 18750 1 + 138 3.068750e+00 4.037268e+00 2.010973e+01 19830 1 ------------------------------------------------------------------- status : time_limit -total time (s) : 2.020632e+01 -total solves : 17835 -best bound : 4.039709e+00 -simulation ci : 4.121193e+00 ± 1.328739e-01 +total time (s) : 2.010973e+01 +total solves : 19830 +best bound : 4.037268e+00 +simulation ci : 4.112689e+00 ± 1.244438e-01 numeric issues : 0 --------------------------------------------------------------------

This page was generated using Literate.jl.

+-------------------------------------------------------------------

This page was generated using Literate.jl.

diff --git a/dev/examples/sldp_example_one/index.html b/dev/examples/sldp_example_one/index.html index 9503b7a23..f4f70aab3 100644 --- a/dev/examples/sldp_example_one/index.html +++ b/dev/examples/sldp_example_one/index.html @@ -61,20 +61,17 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 3.750267e+00 1.163701e+00 5.554860e-01 1680 1 - 20 3.644642e+00 1.164225e+00 7.315691e-01 2560 1 - 30 3.064816e+00 1.164649e+00 1.303535e+00 4240 1 - 40 3.096506e+00 1.164760e+00 1.473254e+00 5120 1 - 50 2.765327e+00 1.164760e+00 2.104778e+00 6800 1 - 60 3.179709e+00 1.164760e+00 2.295238e+00 7680 1 - 70 2.897036e+00 1.164760e+00 2.863767e+00 9360 1 - 80 3.339910e+00 1.164760e+00 3.082773e+00 10240 1 - 86 3.218883e+00 1.164760e+00 3.189717e+00 10768 1 + 10 2.836537e+00 1.166329e+00 3.452880e-01 1680 1 + 20 3.351530e+00 1.167299e+00 4.753141e-01 2560 1 + 30 3.895803e+00 1.167410e+00 8.361731e-01 4240 1 + 40 2.911076e+00 1.167410e+00 9.411321e-01 5120 1 + 50 3.906713e+00 1.167410e+00 1.314685e+00 6800 1 + 60 3.431394e+00 1.167410e+00 1.422767e+00 7680 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 3.189717e+00 -total solves : 10768 -best bound : 1.164760e+00 -simulation ci : 3.201625e+00 ± 7.807606e-02 +total time (s) : 1.422767e+00 +total solves : 7680 +best bound : 1.167410e+00 +simulation ci : 3.208796e+00 ± 1.181263e-01 numeric issues : 0 --------------------------------------------------------------------

This page was generated using Literate.jl.

+-------------------------------------------------------------------

This page was generated using Literate.jl.

diff --git a/dev/examples/sldp_example_two/index.html b/dev/examples/sldp_example_two/index.html index 31cf010b8..ab9ff7e2b 100644 --- a/dev/examples/sldp_example_two/index.html +++ b/dev/examples/sldp_example_two/index.html @@ -88,16 +88,16 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 -4.700000e+01 -5.809615e+01 6.410384e-02 78 1 - 20 -9.800000e+01 -5.809615e+01 1.340530e-01 148 1 - 30 -4.000000e+01 -5.809615e+01 2.188058e-01 226 1 - 40 -4.700000e+01 -5.809615e+01 2.932930e-01 296 1 + 10 -4.700000e+01 -5.809615e+01 8.398294e-02 78 1 + 20 -4.000000e+01 -5.809615e+01 1.402409e-01 148 1 + 30 -9.800000e+01 -5.809615e+01 2.061980e-01 226 1 + 40 -4.000000e+01 -5.809615e+01 2.635651e-01 296 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 2.932930e-01 +total time (s) : 2.635651e-01 total solves : 296 best bound : -5.809615e+01 -simulation ci : -6.286250e+01 ± 8.312391e+00 +simulation ci : -5.206250e+01 ± 6.856606e+00 numeric issues : 0 ------------------------------------------------------------------- @@ -129,16 +129,16 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 -4.700000e+01 -6.196125e+01 7.824302e-02 138 1 - 20 -9.800000e+01 -6.196125e+01 1.504829e-01 258 1 - 30 -4.000000e+01 -6.196125e+01 2.505190e-01 396 1 - 40 -4.700000e+01 -6.196125e+01 3.234499e-01 516 1 + 10 -6.300000e+01 -6.196125e+01 5.639505e-02 138 1 + 20 -4.000000e+01 -6.196125e+01 1.121299e-01 258 1 + 30 -4.000000e+01 -6.196125e+01 1.874909e-01 396 1 + 40 -4.700000e+01 -6.196125e+01 2.442739e-01 516 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 3.234499e-01 +total time (s) : 2.442739e-01 total solves : 516 best bound : -6.196125e+01 -simulation ci : -5.676250e+01 ± 6.342977e+00 +simulation ci : -5.711250e+01 ± 5.414443e+00 numeric issues : 0 ------------------------------------------------------------------- @@ -170,15 +170,15 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 -4.000000e+01 -6.546793e+01 1.386349e-01 462 1 - 20 -8.200000e+01 -6.546793e+01 2.457199e-01 852 1 - 30 -4.700000e+01 -6.546793e+01 4.443669e-01 1314 1 - 40 -4.000000e+01 -6.546793e+01 5.555439e-01 1704 1 + 10 -5.600000e+01 -6.546793e+01 9.024096e-02 462 1 + 20 -5.600000e+01 -6.546793e+01 1.641209e-01 852 1 + 30 -7.000000e+01 -6.546793e+01 3.035190e-01 1314 1 + 40 -8.200000e+01 -6.546793e+01 3.777130e-01 1704 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 5.555439e-01 +total time (s) : 3.777130e-01 total solves : 1704 best bound : -6.546793e+01 -simulation ci : -5.901250e+01 ± 5.394280e+00 +simulation ci : -6.286250e+01 ± 4.631696e+00 numeric issues : 0 --------------------------------------------------------------------

This page was generated using Literate.jl.

+-------------------------------------------------------------------

This page was generated using Literate.jl.

diff --git a/dev/examples/stochastic_all_blacks/index.html b/dev/examples/stochastic_all_blacks/index.html index 7beb1a086..dcac1fb21 100644 --- a/dev/examples/stochastic_all_blacks/index.html +++ b/dev/examples/stochastic_all_blacks/index.html @@ -73,13 +73,13 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1L 6.000000e+00 1.414815e+01 7.273006e-02 11 1 - 40L 6.000000e+00 8.000000e+00 6.659911e-01 602 1 + 1L 1.200000e+01 1.414815e+01 6.028605e-02 11 1 + 40L 6.000000e+00 8.000000e+00 5.141962e-01 602 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 6.659911e-01 +total time (s) : 5.141962e-01 total solves : 602 best bound : 8.000000e+00 -simulation ci : 7.875000e+00 ± 9.341687e-01 +simulation ci : 8.625000e+00 ± 8.978763e-01 numeric issues : 0 --------------------------------------------------------------------

This page was generated using Literate.jl.

+-------------------------------------------------------------------

This page was generated using Literate.jl.

diff --git a/dev/examples/the_farmers_problem/index.html b/dev/examples/the_farmers_problem/index.html index b59ae0c50..2743beb0b 100644 --- a/dev/examples/the_farmers_problem/index.html +++ b/dev/examples/the_farmers_problem/index.html @@ -121,13 +121,13 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 -9.800000e+04 4.922260e+05 1.489201e-01 6 1 - 20 1.093500e+05 1.083900e+05 1.828170e-01 126 1 + 1 -9.800000e+04 4.922260e+05 1.193850e-01 6 1 + 20 1.093500e+05 1.083900e+05 1.346772e-01 126 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 1.828170e-01 +total time (s) : 1.346772e-01 total solves : 126 best bound : 1.083900e+05 -simulation ci : 5.235835e+04 ± 2.689531e+04 +simulation ci : 9.548810e+04 ± 3.405034e+04 numeric issues : 0 --------------------------------------------------------------------

Checking the policy

Birge and Louveaux report that the optimal objective value is $108,390. Check that we got the correct solution using SDDP.calculate_bound:

@assert isapprox(SDDP.calculate_bound(model), 108_390.0, atol = 0.1)

This page was generated using Literate.jl.

+-------------------------------------------------------------------

Checking the policy

Birge and Louveaux report that the optimal objective value is $108,390. Check that we got the correct solution using SDDP.calculate_bound:

@assert isapprox(SDDP.calculate_bound(model), 108_390.0, atol = 0.1)

This page was generated using Literate.jl.

diff --git a/dev/examples/vehicle_location/index.html b/dev/examples/vehicle_location/index.html index f241b0922..0ceb41604 100644 --- a/dev/examples/vehicle_location/index.html +++ b/dev/examples/vehicle_location/index.html @@ -104,4 +104,4 @@ end # TODO(odow): find out why this fails -# vehicle_location_model(SDDP.ContinuousConicDuality())
vehicle_location_model (generic function with 1 method)

This page was generated using Literate.jl.

+# vehicle_location_model(SDDP.ContinuousConicDuality())
vehicle_location_model (generic function with 1 method)

This page was generated using Literate.jl.

diff --git a/dev/explanation/risk/index.html b/dev/explanation/risk/index.html index 946faf54b..7b08ebb40 100644 --- a/dev/explanation/risk/index.html +++ b/dev/explanation/risk/index.html @@ -519,7 +519,7 @@ | | | Adding cut : 100 volume_out + cost_to_go ≥ 29998.59466753869 | Finished iteration | | lower_bound = 14998.594667538693 -Upper bound = 10674.3276220125 ± 877.7527375844818

Finally, evaluate the decision rule:

evaluate_policy(
+Upper bound = 10149.565554802874 ± 814.5843881517737

Finally, evaluate the decision rule:

evaluate_policy(
     model;
     node = 1,
     incoming_state = Dict(:volume => 150.0),
@@ -532,4 +532,4 @@
   :volume_in          => 150.0
   :thermal_generation => 125.0
   :hydro_generation   => 25.0
-  :cost_to_go         => 9998.59
Info

For this trivial example, the risk-averse policy isn't very different from the policy obtained using the expectation risk-measure. If you try it on some bigger/more interesting problems, you should see the expected cost increase, and the upper tail of the policy decrease.


This page was generated using Literate.jl.

+  :cost_to_go         => 9998.59
Info

For this trivial example, the risk-averse policy isn't very different from the policy obtained using the expectation risk-measure. If you try it on some bigger/more interesting problems, you should see the expected cost increase, and the upper tail of the policy decrease.


This page was generated using Literate.jl.

diff --git a/dev/explanation/theory_intro.ipynb b/dev/explanation/theory_intro.ipynb index 0ea2104ad..da27f88a7 100644 --- a/dev/explanation/theory_intro.ipynb +++ b/dev/explanation/theory_intro.ipynb @@ -329,19 +329,32 @@ "$V_i(x, \\omega)$ can be formulated as a linear program (this also works for\n", "convex programs, but the math is more involved), then we can make some\n", "progress by noticing that $x$ only appears as a right-hand side term of the\n", - "fishing constraint $\\bar{x} = x$." + "fishing constraint $\\bar{x} = x$. Therefore, $V_i(x, \\cdot)$ is convex with\n", + "respect to $x$ for fixed $\\omega$. (If you have not seen this result before,\n", + "try to prove it.)" ], "metadata": {} }, { "cell_type": "markdown", "source": [ - "Therefore, $V_i(x, \\cdot)$ is convex with respect to $x$ for fixed $\\omega$.\n", - "Moreover, if we implement the constraint $\\bar{x} = x$ by setting the lower-\n", - "and upper bounds of $\\bar{x}$ to $x$, then the reduced cost of the decision\n", - "variable $\\bar{x}$ is a subgradient of the function $V_i$ with respect to $x$!\n", - "(This is the algorithmic simplification that leads us to add $\\bar{x}$ and the\n", - "fishing constraint $\\bar{x} = x$.)" + "The fishing constraint $\\bar{x} = x$ has an associated dual variable. The\n", + "economic interpretation of this dual variable is that it represents the change\n", + "in the objective function if the right-hand side $x$ is increased on the scale\n", + "of one unit. In other words, and with a slight abuse of notation, it is the\n", + "value $\\frac{d}{dx} V_i(x, \\omega)$. (Because $V_i$ is not differentiable, it\n", + "is a [subgradient](https://en.wikipedia.org/wiki/Subderivative) instead of a\n", + "derivative.)" + ], + "metadata": {} + }, + { + "cell_type": "markdown", + "source": [ + "If we implement the constraint $\\bar{x} = x$ by setting the lower- and upper\n", + "bounds of $\\bar{x}$ to $x$, then the [reduced cost](https://en.wikipedia.org/wiki/Reduced_cost)\n", + "of the decision variable $\\bar{x}$ is the subgradient, and we do not need to\n", + "explicitly add the fishing constraint as a row to the constraint matrix." ], "metadata": {} }, diff --git a/dev/explanation/theory_intro.jl b/dev/explanation/theory_intro.jl index e04797801..dbe3cab85 100644 --- a/dev/explanation/theory_intro.jl +++ b/dev/explanation/theory_intro.jl @@ -188,14 +188,22 @@ end # $V_i(x, \omega)$ can be formulated as a linear program (this also works for # convex programs, but the math is more involved), then we can make some # progress by noticing that $x$ only appears as a right-hand side term of the -# fishing constraint $\bar{x} = x$. - -# Therefore, $V_i(x, \cdot)$ is convex with respect to $x$ for fixed $\omega$. -# Moreover, if we implement the constraint $\bar{x} = x$ by setting the lower- -# and upper bounds of $\bar{x}$ to $x$, then the reduced cost of the decision -# variable $\bar{x}$ is a subgradient of the function $V_i$ with respect to $x$! -# (This is the algorithmic simplification that leads us to add $\bar{x}$ and the -# fishing constraint $\bar{x} = x$.) +# fishing constraint $\bar{x} = x$. Therefore, $V_i(x, \cdot)$ is convex with +# respect to $x$ for fixed $\omega$. (If you have not seen this result before, +# try to prove it.) + +# The fishing constraint $\bar{x} = x$ has an associated dual variable. The +# economic interpretation of this dual variable is that it represents the change +# in the objective function if the right-hand side $x$ is increased on the scale +# of one unit. 
In other words, and with a slight abuse of notation, it is the +# value $\frac{d}{dx} V_i(x, \omega)$. (Because $V_i$ is not differentiable, it +# is a [subgradient](https://en.wikipedia.org/wiki/Subderivative) instead of a +# derivative.) + +# If we implement the constraint $\bar{x} = x$ by setting the lower- and upper +# bounds of $\bar{x}$ to $x$, then the [reduced cost](https://en.wikipedia.org/wiki/Reduced_cost) +# of the decision variable $\bar{x}$ is the subgradient, and we do not need to +# explicitly add the fishing constraint as a row to the constraint matrix. # !!! tip # The subproblem can have binary and integer variables, but you'll need to diff --git a/dev/explanation/theory_intro/index.html b/dev/explanation/theory_intro/index.html index 97d79fc12..0b136aa72 100644 --- a/dev/explanation/theory_intro/index.html +++ b/dev/explanation/theory_intro/index.html @@ -85,7 +85,7 @@ & x^\prime = T_i(\bar{x}, u, \omega) \\ & u \in U_i(\bar{x}, \omega) \\ & \bar{x} = x, -\end{aligned}\]

where our decision rule, $\pi_i(x, \omega)$, solves this optimization problem and returns a $u^*$ corresponding to an optimal solution. Moreover, we alluded to the fact that the cost-to-go term (the nasty recursive expectation) makes this problem intractable to solve.

However, if, excluding the cost-to-go term (i.e., the SP formulation), $V_i(x, \omega)$ can be formulated as a linear program (this also works for convex programs, but the math is more involved), then we can make some progress by noticing that $x$ only appears as a right-hand side term of the fishing constraint $\bar{x} = x$.

Therefore, $V_i(x, \cdot)$ is convex with respect to $x$ for fixed $\omega$. Moreover, if we implement the constraint $\bar{x} = x$ by setting the lower- and upper bounds of $\bar{x}$ to $x$, then the reduced cost of the decision variable $\bar{x}$ is a subgradient of the function $V_i$ with respect to $x$! (This is the algorithmic simplification that leads us to add $\bar{x}$ and the fishing constraint $\bar{x} = x$.)

Tip

The subproblem can have binary and integer variables, but you'll need to use Lagrangian duality to compute a subgradient!

Stochastic dual dynamic programming converts this problem into a tractable form by applying Kelley's cutting plane algorithm to the $V_j$ functions in the cost-to-go term:

\[\begin{aligned} +\end{aligned}\]

where our decision rule, $\pi_i(x, \omega)$, solves this optimization problem and returns a $u^*$ corresponding to an optimal solution. Moreover, we alluded to the fact that the cost-to-go term (the nasty recursive expectation) makes this problem intractable to solve.

However, if, excluding the cost-to-go term (i.e., the SP formulation), $V_i(x, \omega)$ can be formulated as a linear program (this also works for convex programs, but the math is more involved), then we can make some progress by noticing that $x$ only appears as a right-hand side term of the fishing constraint $\bar{x} = x$. Therefore, $V_i(x, \cdot)$ is convex with respect to $x$ for fixed $\omega$. (If you have not seen this result before, try to prove it.)

The fishing constraint $\bar{x} = x$ has an associated dual variable. The economic interpretation of this dual variable is that it represents the change in the objective function if the right-hand side $x$ is increased on the scale of one unit. In other words, and with a slight abuse of notation, it is the value $\frac{d}{dx} V_i(x, \omega)$. (Because $V_i$ is not differentiable, it is a subgradient instead of a derivative.)

If we implement the constraint $\bar{x} = x$ by setting the lower- and upper bounds of $\bar{x}$ to $x$, then the reduced cost of the decision variable $\bar{x}$ is the subgradient, and we do not need to explicitly add the fishing constraint as a row to the constraint matrix.
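As a minimal, self-contained sketch of that trick (not part of the tutorial's implementation; the demand of 150, the cost of 50 per unit, and the fixed value x = 100 are made-up numbers, and HiGHS is assumed as the solver), we can fix the bounds of the copy variable and read off the subgradient with JuMP's reduced_cost:

using JuMP, HiGHS
# A toy stand-in for V_i(x, ω): meet a demand of 150 units from the incoming
# state plus generation u, at a cost of 50 per unit of u.
model = Model(HiGHS.Optimizer)
set_silent(model)
@variable(model, x_bar)                  # the copy of the incoming state
@variable(model, u >= 0)
@constraint(model, x_bar + u >= 150)
@objective(model, Min, 50 * u)
fix(x_bar, 100.0; force = true)          # implement x̄ = x by fixing the bounds of x̄
optimize!(model)
objective_value(model)                   # V_i(100, ω) = 2500
reduced_cost(x_bar)                      # a subgradient of V_i with respect to x; here -50

Increasing x by one unit lets u decrease by one unit and saves 50 in cost, which matches the reduced cost of -50.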

Tip

The subproblem can have binary and integer variables, but you'll need to use Lagrangian duality to compute a subgradient!

Stochastic dual dynamic programming converts this problem into a tractable form by applying Kelley's cutting plane algorithm to the $V_j$ functions in the cost-to-go term:

\[\begin{aligned} V_i^K(x, \omega) = \min\limits_{\bar{x}, x^\prime, u} \;\; & C_i(\bar{x}, u, \omega) + \theta\\ & x^\prime = T_i(\bar{x}, u, \omega) \\ & u \in U_i(\bar{x}, \omega) \\ @@ -196,9 +196,9 @@ return error("We should never get here because P should sum to 1.0.") end

sample_uncertainty (generic function with 1 method)
Note

rand() samples a uniform random variable in [0, 1).

For example:

for i in 1:3
     println("ω = ", sample_uncertainty(model.nodes[1].uncertainty))
-end
ω = 0.0
-ω = 0.0
-ω = 50.0

It's also going to be useful to define a function that generates a random walk through the nodes of the graph:

function sample_next_node(model::PolicyGraph, current::Int)
+end
ω = 50.0
+ω = 50.0
+ω = 100.0

It's also going to be useful to define a function that generates a random walk through the nodes of the graph:

function sample_next_node(model::PolicyGraph, current::Int)
     if length(model.arcs[current]) == 0
         # No outgoing arcs!
         return nothing
@@ -363,20 +363,20 @@
 end
train (generic function with 1 method)

Using our model we defined earlier, we can go:

train(model; iteration_limit = 3, replications = 100)
Starting iteration 1
 | Forward Pass
 | | Visiting node 1
-| | | ω = 100.0
+| | | ω = 0.0
 | | | x = Dict(:volume => 200.0)
 | | | x′ = Dict(:volume => 0.0)
 | | | C(x, u, ω) = 0.0
 | | Visiting node 2
-| | | ω = 50.0
+| | | ω = 100.0
 | | | x = Dict(:volume => 0.0)
 | | | x′ = Dict(:volume => 0.0)
-| | | C(x, u, ω) = 10000.0
+| | | C(x, u, ω) = 5000.0
 | | Visiting node 3
-| | | ω = 50.0
+| | | ω = 100.0
 | | | x = Dict(:volume => 0.0)
 | | | x′ = Dict(:volume => 0.0)
-| | | C(x, u, ω) = 15000.0
+| | | C(x, u, ω) = 7500.0
 | Backward pass
 | | Visiting node 3
 | | | Skipping node because the cost-to-go is 0
@@ -412,10 +412,10 @@
 | | | x′ = Dict(:volume => 150.0)
 | | | C(x, u, ω) = 0.0
 | | Visiting node 2
-| | | ω = 50.0
+| | | ω = 0.0
 | | | x = Dict(:volume => 150.0)
 | | | x′ = Dict(:volume => 100.0)
-| | | C(x, u, ω) = 5000.0
+| | | C(x, u, ω) = 10000.0
 | | Visiting node 3
 | | | ω = 100.0
 | | | x = Dict(:volume => 100.0)
@@ -451,34 +451,34 @@
 Starting iteration 3
 | Forward Pass
 | | Visiting node 1
-| | | ω = 100.0
+| | | ω = 50.0
 | | | x = Dict(:volume => 200.0)
 | | | x′ = Dict(:volume => 200.0)
-| | | C(x, u, ω) = 2499.9999999999986
+| | | C(x, u, ω) = 4999.999999999998
 | | Visiting node 2
-| | | ω = 0.0
+| | | ω = 100.0
 | | | x = Dict(:volume => 200.0)
-| | | x′ = Dict(:volume => 124.99999999999997)
-| | | C(x, u, ω) = 7499.999999999998
+| | | x′ = Dict(:volume => 150.0)
+| | | C(x, u, ω) = 0.0
 | | Visiting node 3
-| | | ω = 50.0
-| | | x = Dict(:volume => 124.99999999999997)
-| | | x′ = Dict(:volume => 24.99999999999997)
+| | | ω = 100.0
+| | | x = Dict(:volume => 150.0)
+| | | x′ = Dict(:volume => 100.0)
 | | | C(x, u, ω) = 0.0
 | Backward pass
 | | Visiting node 3
 | | | Skipping node because the cost-to-go is 0
 | | Visiting node 2
 | | | Solving φ = 0.0
-| | | | V = 3750.000000000004
-| | | | dVdx′ = Dict(:volume => -150.0)
+| | | | V = 0.0
+| | | | dVdx′ = Dict(:volume => 0.0)
 | | | Solving φ = 50.0
 | | | | V = 0.0
 | | | | dVdx′ = Dict(:volume => 0.0)
 | | | Solving φ = 100.0
 | | | | V = 0.0
 | | | | dVdx′ = Dict(:volume => 0.0)
-| | | Adding cut : 50 volume_out + cost_to_go ≥ 7500
+| | | Adding cut : cost_to_go ≥ 0
 | | Visiting node 1
 | | | Solving φ = 0.0
 | | | | V = 7500.0
@@ -488,12 +488,12 @@
 | | | | dVdx′ = Dict(:volume => -100.0)
 | | | Solving φ = 100.0
 | | | | V = 0.0
-| | | | dVdx′ = Dict(:volume => -50.0)
-| | | Adding cut : 83.33333333333331 volume_out + cost_to_go ≥ 19999.999999999996
+| | | | dVdx′ = Dict(:volume => 0.0)
+| | | Adding cut : 66.66666666666666 volume_out + cost_to_go ≥ 16666.666666666664
 | Finished iteration
 | | lower_bound = 8333.333333333332
 Termination status: iteration limit
-Upper bound = 8250.0 ± 961.2602208149531

Success! We trained a policy for a finite horizon multistage stochastic program using stochastic dual dynamic programming.

Implementation: evaluating the policy

A final step is the ability to evaluate the policy at a given point.

function evaluate_policy(
+Upper bound = 10112.5 ± 836.5408256805538

Success! We trained a policy for a finite horizon multistage stochastic program using stochastic dual dynamic programming.

Implementation: evaluating the policy

A final step is the ability to evaluate the policy at a given point.

function evaluate_policy(
     model::PolicyGraph;
     node::Int,
     incoming_state::Dict{Symbol,Float64},
@@ -547,10 +547,10 @@
 | | | x′ = Dict(:volume => 0.0)
 | | | C(x, u, ω) = 5000.0
 | | Visiting node 3
-| | | ω = 50.0
+| | | ω = 0.0
 | | | x = Dict(:volume => 0.0)
 | | | x′ = Dict(:volume => 0.0)
-| | | C(x, u, ω) = 15000.0
+| | | C(x, u, ω) = 22500.0
 | | Visiting node 2
 | | | ω = 100.0
 | | | x = Dict(:volume => 0.0)
@@ -571,6 +571,16 @@
 | | | x = Dict(:volume => 0.0)
 | | | x′ = Dict(:volume => 0.0)
 | | | C(x, u, ω) = 22500.0
+| | Visiting node 2
+| | | ω = 0.0
+| | | x = Dict(:volume => 0.0)
+| | | x′ = Dict(:volume => 0.0)
+| | | C(x, u, ω) = 15000.0
+| | Visiting node 3
+| | | ω = 0.0
+| | | x = Dict(:volume => 0.0)
+| | | x′ = Dict(:volume => 0.0)
+| | | C(x, u, ω) = 22500.0
 | Backward pass
 | | Visiting node 3
 | | | Solving φ = 0.0
@@ -638,67 +648,6 @@
 | | | | V = 25625.0
 | | | | dVdx′ = Dict(:volume => -150.0)
 | | | Adding cut : 150 volume_out + cost_to_go ≥ 33125
-| | Visiting node 1
-| | | Solving φ = 0.0
-| | | | V = 40625.0
-| | | | dVdx′ = Dict(:volume => -150.0)
-| | | Solving φ = 50.0
-| | | | V = 33125.0
-| | | | dVdx′ = Dict(:volume => -150.0)
-| | | Solving φ = 100.0
-| | | | V = 25625.0
-| | | | dVdx′ = Dict(:volume => -150.0)
-| | | Adding cut : 150 volume_out + cost_to_go ≥ 40625
-| Finished iteration
-| | lower_bound = 15625.0
-Starting iteration 2
-| Forward Pass
-| | Visiting node 1
-| | | ω = 50.0
-| | | x = Dict(:volume => 200.0)
-| | | x′ = Dict(:volume => 200.0)
-| | | C(x, u, ω) = 5000.0
-| | Visiting node 2
-| | | ω = 50.0
-| | | x = Dict(:volume => 200.0)
-| | | x′ = Dict(:volume => 200.0)
-| | | C(x, u, ω) = 10000.0
-| | Visiting node 3
-| | | ω = 50.0
-| | | x = Dict(:volume => 200.0)
-| | | x′ = Dict(:volume => 100.0)
-| | | C(x, u, ω) = 0.0
-| | Visiting node 2
-| | | ω = 50.0
-| | | x = Dict(:volume => 100.0)
-| | | x′ = Dict(:volume => 150.0)
-| | | C(x, u, ω) = 15000.0
-| | Visiting node 3
-| | | ω = 50.0
-| | | x = Dict(:volume => 150.0)
-| | | x′ = Dict(:volume => 50.0)
-| | | C(x, u, ω) = 0.0
-| | Visiting node 2
-| | | ω = 0.0
-| | | x = Dict(:volume => 50.0)
-| | | x′ = Dict(:volume => 50.0)
-| | | C(x, u, ω) = 15000.0
-| | Visiting node 3
-| | | ω = 50.0
-| | | x = Dict(:volume => 50.0)
-| | | x′ = Dict(:volume => 0.0)
-| | | C(x, u, ω) = 7500.0
-| | Visiting node 2
-| | | ω = 0.0
-| | | x = Dict(:volume => 0.0)
-| | | x′ = Dict(:volume => -0.0)
-| | | C(x, u, ω) = 15000.0
-| | Visiting node 3
-| | | ω = 50.0
-| | | x = Dict(:volume => -0.0)
-| | | x′ = Dict(:volume => 0.0)
-| | | C(x, u, ω) = 15000.0
-| Backward pass
 | | Visiting node 3
 | | | Solving φ = 0.0
 | | | | V = 48125.0
@@ -721,140 +670,159 @@
 | | | | V = 27812.5
 | | | | dVdx′ = Dict(:volume => -150.0)
 | | | Adding cut : 150 volume_out + cost_to_go ≥ 35312.5
-| | Visiting node 3
+| | Visiting node 1
 | | | Solving φ = 0.0
-| | | | V = 50312.5
-| | | | dVdx′ = Dict(:volume => -150.0)
-| | | Solving φ = 50.0
 | | | | V = 42812.5
 | | | | dVdx′ = Dict(:volume => -150.0)
-| | | Solving φ = 100.0
-| | | | V = 35312.5
-| | | | dVdx′ = Dict(:volume => -150.0)
-| | | Adding cut : 75 volume_out + cost_to_go ≥ 21406.25
-| | Visiting node 2
-| | | Solving φ = 0.0
-| | | | V = 36406.25
-| | | | dVdx′ = Dict(:volume => -150.0)
 | | | Solving φ = 50.0
-| | | | V = 28906.25
+| | | | V = 35312.5
 | | | | dVdx′ = Dict(:volume => -150.0)
 | | | Solving φ = 100.0
-| | | | V = 21406.25
+| | | | V = 27812.5
 | | | | dVdx′ = Dict(:volume => -150.0)
-| | | Adding cut : 150 volume_out + cost_to_go ≥ 36406.25
+| | | Adding cut : 150 volume_out + cost_to_go ≥ 42812.5
+| Finished iteration
+| | lower_bound = 17812.5
+Starting iteration 2
+| Forward Pass
+| | Visiting node 1
+| | | ω = 50.0
+| | | x = Dict(:volume => 200.0)
+| | | x′ = Dict(:volume => 200.0)
+| | | C(x, u, ω) = 4999.999999999998
+| | Visiting node 2
+| | | ω = 50.0
+| | | x = Dict(:volume => 200.0)
+| | | x′ = Dict(:volume => 200.0)
+| | | C(x, u, ω) = 10000.0
+| | Visiting node 3
+| | | ω = 100.0
+| | | x = Dict(:volume => 200.0)
+| | | x′ = Dict(:volume => 150.0)
+| | | C(x, u, ω) = 0.0
+| Backward pass
 | | Visiting node 3
 | | | Solving φ = 0.0
-| | | | V = 43906.25
+| | | | V = 27812.5
 | | | | dVdx′ = Dict(:volume => -150.0)
 | | | Solving φ = 50.0
-| | | | V = 36406.25
+| | | | V = 20312.5
 | | | | dVdx′ = Dict(:volume => -150.0)
 | | | Solving φ = 100.0
-| | | | V = 28906.25
-| | | | dVdx′ = Dict(:volume => -150.0)
-| | | Adding cut : 75 volume_out + cost_to_go ≥ 21953.124999999996
-| | Visiting node 2
-| | | Solving φ = 0.0
-| | | | V = 21953.124999999996
-| | | | dVdx′ = Dict(:volume => -150.0)
-| | | Solving φ = 50.0
-| | | | V = 18203.124999999996
-| | | | dVdx′ = Dict(:volume => -75.0)
-| | | Solving φ = 100.0
-| | | | V = 14453.124999999996
-| | | | dVdx′ = Dict(:volume => -75.0)
-| | | Adding cut : 100 volume_out + cost_to_go ≥ 33203.125
-| | Visiting node 3
-| | | Solving φ = 0.0
-| | | | V = 38203.125
-| | | | dVdx′ = Dict(:volume => -100.0)
-| | | Solving φ = 50.0
-| | | | V = 33203.125
-| | | | dVdx′ = Dict(:volume => -100.0)
-| | | Solving φ = 100.0
-| | | | V = 28203.125
+| | | | V = 15312.5
 | | | | dVdx′ = Dict(:volume => -100.0)
-| | | Adding cut : 49.99999999999999 volume_out + cost_to_go ≥ 21601.5625
+| | | Adding cut : 66.66666666666666 volume_out + cost_to_go ≥ 20572.916666666664
 | | Visiting node 2
 | | | Solving φ = 0.0
-| | | | V = 19101.5625
-| | | | dVdx′ = Dict(:volume => -49.99999999999999)
+| | | | V = 17239.583333333332
+| | | | dVdx′ = Dict(:volume => -66.66666666666666)
 | | | Solving φ = 50.0
-| | | | V = 16601.5625
-| | | | dVdx′ = Dict(:volume => -49.99999999999999)
+| | | | V = 13906.25
+| | | | dVdx′ = Dict(:volume => -66.66666666666666)
 | | | Solving φ = 100.0
-| | | | V = 14101.5625
-| | | | dVdx′ = Dict(:volume => -49.99999999999999)
-| | | Adding cut : 49.99999999999999 volume_out + cost_to_go ≥ 26601.5625
+| | | | V = 10572.916666666664
+| | | | dVdx′ = Dict(:volume => -66.66666666666666)
+| | | Adding cut : 66.66666666666666 volume_out + cost_to_go ≥ 27239.58333333333
 | | Visiting node 1
 | | | Solving φ = 0.0
-| | | | V = 28203.125
+| | | | V = 25468.749999999996
 | | | | dVdx′ = Dict(:volume => -100.0)
 | | | Solving φ = 50.0
-| | | | V = 23203.125
-| | | | dVdx′ = Dict(:volume => -100.0)
+| | | | V = 20572.916666666664
+| | | | dVdx′ = Dict(:volume => -66.66666666666666)
 | | | Solving φ = 100.0
-| | | | V = 19101.562499999996
-| | | | dVdx′ = Dict(:volume => -49.999999999999986)
-| | | Adding cut : 83.33333333333331 volume_out + cost_to_go ≥ 40169.27083333333
+| | | | V = 17239.583333333332
+| | | | dVdx′ = Dict(:volume => -66.66666666666666)
+| | | Adding cut : 77.77777777777776 volume_out + cost_to_go ≥ 36649.30555555555
 | Finished iteration
-| | lower_bound = 28502.604166666664
+| | lower_bound = 26093.749999999996
 Starting iteration 3
 | Forward Pass
 | | Visiting node 1
-| | | ω = 50.0
+| | | ω = 100.0
 | | | x = Dict(:volume => 200.0)
 | | | x′ = Dict(:volume => 200.0)
-| | | C(x, u, ω) = 5000.0
+| | | C(x, u, ω) = 2500.0
 | | Visiting node 2
-| | | ω = 0.0
+| | | ω = 50.0
 | | | x = Dict(:volume => 200.0)
-| | | x′ = Dict(:volume => 132.03125)
-| | | C(x, u, ω) = 8203.125
+| | | x′ = Dict(:volume => 100.00000000000001)
+| | | C(x, u, ω) = 0.0
 | | Visiting node 3
 | | | ω = 50.0
-| | | x = Dict(:volume => 132.03125)
-| | | x′ = Dict(:volume => 32.03125)
+| | | x = Dict(:volume => 100.00000000000001)
+| | | x′ = Dict(:volume => -0.0)
+| | | C(x, u, ω) = 0.0
+| | Visiting node 2
+| | | ω = 100.0
+| | | x = Dict(:volume => -0.0)
+| | | x′ = Dict(:volume => 96.87500000000004)
+| | | C(x, u, ω) = 14687.500000000007
+| | Visiting node 3
+| | | ω = 100.0
+| | | x = Dict(:volume => 96.87500000000004)
+| | | x′ = Dict(:volume => 46.87500000000006)
 | | | C(x, u, ω) = 0.0
 | Backward pass
 | | Visiting node 3
 | | | Solving φ = 0.0
-| | | | V = 46601.5625
-| | | | dVdx′ = Dict(:volume => -150.00000000000003)
+| | | | V = 43281.249999999985
+| | | | dVdx′ = Dict(:volume => -150.0)
 | | | Solving φ = 50.0
-| | | | V = 40000.0
-| | | | dVdx′ = Dict(:volume => -100.0)
+| | | | V = 35781.24999999999
+| | | | dVdx′ = Dict(:volume => -150.0)
 | | | Solving φ = 100.0
-| | | | V = 35000.0
+| | | | V = 30781.249999999993
 | | | | dVdx′ = Dict(:volume => -100.0)
-| | | Adding cut : 58.333333333333336 volume_out + cost_to_go ≥ 22135.416666666668
+| | | Adding cut : 66.66666666666666 volume_out + cost_to_go ≥ 21432.291666666664
 | | Visiting node 2
 | | | Solving φ = 0.0
-| | | | V = 24830.729166666668
+| | | | V = 29401.041666666657
 | | | | dVdx′ = Dict(:volume => -150.0)
 | | | Solving φ = 50.0
-| | | | V = 20266.927083333336
-| | | | dVdx′ = Dict(:volume => -58.333333333333336)
+| | | | V = 21901.041666666657
+| | | | dVdx′ = Dict(:volume => -150.0)
 | | | Solving φ = 100.0
-| | | | V = 17500.0
-| | | | dVdx′ = Dict(:volume => -49.99999999999999)
-| | | Adding cut : 86.11111111111111 volume_out + cost_to_go ≥ 32235.243055555555
+| | | | V = 18307.29166666666
+| | | | dVdx′ = Dict(:volume => -66.66666666666666)
+| | | Adding cut : 122.22222222222221 volume_out + cost_to_go ≥ 35043.402777777774
+| | Visiting node 3
+| | | Solving φ = 0.0
+| | | | V = 50312.499999999985
+| | | | dVdx′ = Dict(:volume => -149.99999999999997)
+| | | Solving φ = 50.0
+| | | | V = 43932.29166666667
+| | | | dVdx′ = Dict(:volume => -122.22222222222219)
+| | | Solving φ = 100.0
+| | | | V = 37821.18055555556
+| | | | dVdx′ = Dict(:volume => -122.22222222222219)
+| | | Adding cut : 65.74074074074072 volume_out + cost_to_go ≥ 22010.99537037037
+| | Visiting node 2
+| | | Solving φ = 0.0
+| | | | V = 29510.995370370365
+| | | | dVdx′ = Dict(:volume => -150.0)
+| | | Solving φ = 50.0
+| | | | V = 22010.99537037037
+| | | | dVdx′ = Dict(:volume => -150.0)
+| | | Solving φ = 100.0
+| | | | V = 18723.958333333332
+| | | | dVdx′ = Dict(:volume => -65.74074074074072)
+| | | Adding cut : 121.91358024691357 volume_out + cost_to_go ≥ 35606.674382716046
 | | Visiting node 1
 | | | Solving φ = 0.0
-| | | | V = 28203.125
+| | | | V = 27287.884078212286
 | | | | dVdx′ = Dict(:volume => -100.0)
 | | | Solving φ = 50.0
-| | | | V = 23624.131944444467
-| | | | dVdx′ = Dict(:volume => -86.11111111111111)
+| | | | V = 22287.88407821229
+| | | | dVdx′ = Dict(:volume => -100.0)
 | | | Solving φ = 100.0
-| | | | V = 19318.576388888905
-| | | | dVdx′ = Dict(:volume => -86.11111111111111)
-| | | Adding cut : 90.74074074074073 volume_out + cost_to_go ≥ 41863.425925925934
+| | | | V = 17287.884078212286
+| | | | dVdx′ = Dict(:volume => -100.0)
+| | | Adding cut : 99.99999999999999 volume_out + cost_to_go ≥ 42287.884078212286
 | Finished iteration
-| | lower_bound = 28715.27777777779
+| | lower_bound = 27287.884078212286
 Termination status: iteration limit
-Upper bound = 33530.46875000001 ± 6749.804392785007

Success! We trained a policy for an infinite horizon multistage stochastic program using stochastic dual dynamic programming. Note how some of the forward passes are different lengths!

evaluate_policy(
+Upper bound = 30909.04329608938 ± 6597.858161502896

Success! We trained a policy for an infinite horizon multistage stochastic program using stochastic dual dynamic programming. Note how some of the forward passes are different lengths!
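
As a sanity check, the cuts printed in the log can be reproduced by hand. Here is a short sketch for the first cut added at node 3 above, assuming (as the printed numbers suggest) that the three noise realizations φ ∈ {0, 50, 100} are equally likely and that the arc closing the cycle has probability 0.5:

V    = [43281.25, 35781.25, 30781.25]  # objective values for φ = 0, 50, 100
dVdx = [-150.0, -150.0, -100.0]        # subgradients with respect to volume_out
x    = 46.875                          # volume_out visited on the forward pass
slope     = 0.5 * sum(dVdx) / 3           # ≈ -66.67
intercept = 0.5 * sum(V) / 3 - slope * x  # ≈ 21432.29
# The cut is cost_to_go ≥ intercept + slope * volume_out, which rearranges to
# 66.67 volume_out + cost_to_go ≥ 21432.29, matching the log above.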

evaluate_policy(
     model;
     node = 3,
     incoming_state = Dict(:volume => 100.0),
@@ -867,4 +835,4 @@
   :volume_in          => 100.0
   :thermal_generation => 40.0
   :hydro_generation   => 110.0
-  :cost_to_go         => 22135.4

This page was generated using Literate.jl.

+ :cost_to_go => 22011.0

This page was generated using Literate.jl.

diff --git a/dev/guides/access_previous_variables/index.html b/dev/guides/access_previous_variables/index.html index c2a73c85c..5339192be 100644 --- a/dev/guides/access_previous_variables/index.html +++ b/dev/guides/access_previous_variables/index.html @@ -59,4 +59,4 @@ # Maximize quantity of sold items. @stageobjective(sp, sell) endA policy graph with 10 nodes. - Node indices: 1, ..., 10
Warning

You must initialize the same number of state variables in every stage, even if they are not used in that stage.

+ Node indices: 1, ..., 10
Warning

You must initialize the same number of state variables in every stage, even if they are not used in that stage.
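For example, the following sketch (with a hypothetical inventory state) declares the state variable in both stages, even though it only appears in the objective of stage 2:

using SDDP, HiGHS

model = SDDP.LinearPolicyGraph(
    stages = 2,
    lower_bound = 0.0,
    optimizer = HiGHS.Optimizer,
) do sp, t
    # Declare `inventory` in every stage, even though stage 1 does not use it.
    @variable(sp, inventory >= 0, SDDP.State, initial_value = 0.0)
    if t == 2
        @stageobjective(sp, inventory.in)
    else
        @stageobjective(sp, 0.0)
    end
end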

diff --git a/dev/guides/add_a_multidimensional_state_variable/index.html b/dev/guides/add_a_multidimensional_state_variable/index.html index 56dad89a5..fdd4c9093 100644 --- a/dev/guides/add_a_multidimensional_state_variable/index.html +++ b/dev/guides/add_a_multidimensional_state_variable/index.html @@ -15,4 +15,4 @@ end; Lower bound of outgoing x is: 0.0 Lower bound of outgoing y[1] is: 1.0 -Lower bound of outgoing z[3, :B] is: 3.0 +Lower bound of outgoing z[3, :B] is: 3.0 diff --git a/dev/guides/add_a_risk_measure/index.html b/dev/guides/add_a_risk_measure/index.html index 50723da33..3385f01a7 100644 --- a/dev/guides/add_a_risk_measure/index.html +++ b/dev/guides/add_a_risk_measure/index.html @@ -36,7 +36,7 @@ 0.0 0.0 0.0 - 0.0

Expectation

SDDP.ExpectationType
Expectation()

The Expectation risk measure. Identical to taking the expectation with respect to the nominal distribution.

source
julia> using SDDP
julia> SDDP.adjust_probability( + 0.0

Expectation

SDDP.ExpectationType
Expectation()

The Expectation risk measure. Identical to taking the expectation with respect to the nominal distribution.

source
julia> using SDDP
julia> SDDP.adjust_probability( SDDP.Expectation(), risk_adjusted_probability, nominal_probability, @@ -47,7 +47,7 @@ 0.1 0.2 0.3 - 0.4

SDDP.Expectation is the default risk measure in SDDP.jl.

Worst-case

SDDP.WorstCaseType
WorstCase()

The worst-case risk measure. Places all of the probability weight on the worst outcome.

source
julia> SDDP.adjust_probability(
+ 0.4

SDDP.Expectation is the default risk measure in SDDP.jl.

Worst-case

SDDP.WorstCaseType
WorstCase()

The worst-case risk measure. Places all of the probability weight on the worst outcome.

source
julia> SDDP.adjust_probability(
            SDDP.WorstCase(),
            risk_adjusted_probability,
            nominal_probability,
@@ -58,7 +58,7 @@
  0.0
  0.0
  1.0
- 0.0

Average value at risk (AV@R)

SDDP.AVaRType
AVaR(β)

The average value at risk (AV@R) risk measure.

Computes the expectation of the β fraction of worst outcomes. β must be in [0, 1]. When β=1, this is equivalent to the Expectation risk measure. When β=0, this is equivalent to the WorstCase risk measure.

AV@R is also known as the conditional value at risk (CV@R) or expected shortfall.

source
julia> SDDP.adjust_probability(
+ 0.0

Average value at risk (AV@R)

SDDP.AVaRType
AVaR(β)

The average value at risk (AV@R) risk measure.

Computes the expectation of the β fraction of worst outcomes. β must be in [0, 1]. When β=1, this is equivalent to the Expectation risk measure. When β=0, this is equivalent to the WorstCase risk measure.

AV@R is also known as the conditional value at risk (CV@R) or expected shortfall.

source
julia> SDDP.adjust_probability(
            SDDP.AVaR(0.5),
            risk_adjusted_probability,
            nominal_probability,
@@ -80,10 +80,10 @@
  0.05
  0.1
  0.65
- 0.2

As a special case, the SDDP.EAVaR risk-measure is a convex combination of SDDP.Expectation and SDDP.AVaR:

julia> SDDP.EAVaR(beta=0.25, lambda=0.4)A convex combination of 0.4 * SDDP.Expectation() + 0.6 * SDDP.AVaR(0.25)
SDDP.EAVaRFunction
EAVaR(;lambda=1.0, beta=1.0)

A risk measure that is a convex combination of Expectation and Average Value @ Risk (also called Conditional Value @ Risk).

    λ * E[x] + (1 - λ) * AV@R(β)[x]

Keyword Arguments

  • lambda: Convex weight on the expectation (the (1-lambda) weight is put on the AV@R component). Increasing values of lambda are less risk averse (more weight on expectation).

  • beta: The quantile at which to calculate the Average Value @ Risk. Increasing values of beta are less risk averse. If beta=0, then the AV@R component is the worst case risk measure.

source

Distributionally robust

SDDP.jl supports two types of distributionally robust risk measures: the modified Χ² method of Philpott et al. (2018), and a method based on the Wasserstein distance metric.

Modified Chi-squared

SDDP.ModifiedChiSquaredType
ModifiedChiSquared(radius::Float64; minimum_std=1e-5)

The distributionally robust SDDP risk measure of Philpott, A., de Matos, V., Kapelevich, L. Distributionally robust SDDP. Computational Management Science (2018) 165:431-454.

Explanation

In a Distributionally Robust Optimization (DRO) approach, we modify the probabilities we associate with all future scenarios so that the resulting probability distribution is the "worst case" probability distribution, in some sense.

In each backward pass we will compute a worst case probability distribution vector p. We compute p so that:

p ∈ argmax p'z
+ 0.2

As a special case, the SDDP.EAVaR risk-measure is a convex combination of SDDP.Expectation and SDDP.AVaR:

julia> SDDP.EAVaR(beta=0.25, lambda=0.4)A convex combination of 0.4 * SDDP.Expectation() + 0.6 * SDDP.AVaR(0.25)
SDDP.EAVaRFunction
EAVaR(;lambda=1.0, beta=1.0)

A risk measure that is a convex combination of Expectation and Average Value @ Risk (also called Conditional Value @ Risk).

    λ * E[x] + (1 - λ) * AV@R(β)[x]

Keyword Arguments

  • lambda: Convex weight on the expectation (the (1-lambda) weight is put on the AV@R component). Increasing values of lambda are less risk averse (more weight on expectation).

  • beta: The quantile at which to calculate the Average Value @ Risk. Increasing values of beta are less risk averse. If beta=0, then the AV@R component is the worst case risk measure.

source
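
In practice, you pass the chosen risk measure to SDDP.train via the risk_measure keyword. A minimal sketch, assuming model is an existing policy graph:

SDDP.train(model, risk_measure = SDDP.EAVaR(lambda = 0.5, beta = 0.1), iteration_limit = 10)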

Distributionally robust

SDDP.jl supports two types of distributionally robust risk measures: the modified Χ² method of Philpott et al. (2018), and a method based on the Wasserstein distance metric.

Modified Chi-squared

SDDP.ModifiedChiSquaredType
ModifiedChiSquared(radius::Float64; minimum_std=1e-5)

The distributionally robust SDDP risk measure of Philpott, A., de Matos, V., Kapelevich, L. Distributionally robust SDDP. Computational Management Science (2018) 165:431-454.

Explanation

In a Distributionally Robust Optimization (DRO) approach, we modify the probabilities we associate with all future scenarios so that the resulting probability distribution is the "worst case" probability distribution, in some sense.

In each backward pass we will compute a worst case probability distribution vector p. We compute p so that:

p ∈ argmax p'z
       s.t. [r; p - a] in SecondOrderCone()
            sum(p) == 1
-           p >= 0

where

  1. z is a vector of future costs. We assume that our aim is to minimize future cost p'z. If we maximize reward, we would have p ∈ argmin{p'z}.
  2. a is the uniform distribution
  3. r is a user-specified radius - the larger the radius, the more conservative the policy.

Notes

The largest radius that will work with S scenarios is sqrt((S-1)/S).

If the uncorrected standard deviation of the objective realizations is less than minimum_std, then the risk-measure will default to Expectation().

This code was contributed by Lea Kapelevich.

source
julia> SDDP.adjust_probability(
+           p >= 0

where

  1. z is a vector of future costs. We assume that our aim is to minimize future cost p'z. If we maximize reward, we would have p ∈ argmin{p'z}.
  2. a is the uniform distribution
  3. r is a user-specified radius - the larger the radius, the more conservative the policy.

Notes

The largest radius that will work with S scenarios is sqrt((S-1)/S).

If the uncorrected standard deviation of the objective realizations is less than minimum_std, then the risk-measure will default to Expectation().

This code was contributed by Lea Kapelevich.

source
julia> SDDP.adjust_probability(
            SDDP.ModifiedChiSquared(0.5),
            risk_adjusted_probability,
            [0.25, 0.25, 0.25, 0.25],
@@ -94,7 +94,7 @@
  0.3333333333333333
  0.044658198738520394
  0.6220084679281462
- 0.0

Wasserstein

SDDP.WassersteinType
Wasserstein(norm::Function, solver_factory; alpha::Float64)

A distributionally-robust risk measure based on the Wasserstein distance.

As alpha increases, the measure becomes more risk-averse. When alpha=0, the measure is equivalent to the expectation operator. As alpha increases, the measure approaches the Worst-case risk measure.

source
julia> import HiGHS
julia> SDDP.adjust_probability( + 0.0

Wasserstein

SDDP.WassersteinType
Wasserstein(norm::Function, solver_factory; alpha::Float64)

A distributionally-robust risk measure based on the Wasserstein distance.

As alpha increases, the measure becomes more risk-averse. When alpha=0, the measure is equivalent to the expectation operator. As alpha increases, the measure approaches the Worst-case risk measure.

source
julia> import HiGHS
julia> SDDP.adjust_probability( SDDP.Wasserstein(HiGHS.Optimizer; alpha=0.5) do x, y return abs(x - y) end, @@ -109,7 +109,7 @@ 0.7999999999999999 -0.0

Entropic

SDDP.EntropicType
Entropic(γ::Float64)

The entropic risk measure as described by:

Dowson, O., Morton, D.P. & Pagnoncelli, B.K. Incorporating convex risk
 measures into multistage stochastic programming algorithms. Annals of
-Operations Research (2022). [doi](https://doi.org/10.1007/s10479-022-04977-w).

As γ increases, the measure becomes more risk-averse.

source
julia> SDDP.adjust_probability(
+Operations Research (2022). [doi](https://doi.org/10.1007/s10479-022-04977-w).

As γ increases, the measure becomes more risk-averse.

source
julia> SDDP.adjust_probability(
            SDDP.Entropic(0.1),
            risk_adjusted_probability,
            nominal_probability,
@@ -120,4 +120,4 @@
  0.1100296362588547
  0.19911786395979578
  0.3648046623591841
- 0.3260478374221655
+ 0.3260478374221655 diff --git a/dev/guides/add_integrality/index.html b/dev/guides/add_integrality/index.html index f8bf9a54f..238efb794 100644 --- a/dev/guides/add_integrality/index.html +++ b/dev/guides/add_integrality/index.html @@ -21,4 +21,4 @@ \max\limits_{\lambda}\min\limits_{\bar{x}, x^\prime, u} \;\; & C_i(\bar{x}, u, \omega) + \mathbb{E}_{j \in i^+, \varphi \in \Omega_j}[V_j(x^\prime, \varphi)] - \lambda^\top(\bar{x} - x)\\ & x^\prime = T_i(\bar{x}, u, \omega) \\ & u \in U_i(\bar{x}, \omega) -\end{aligned}\]

You can use Lagrangian duality in SDDP.jl by passing SDDP.LagrangianDuality to the duality_handler argument of SDDP.train.

Compared with linear programming duality, the Lagrangian problem is difficult to solve because it requires the solution of many mixed-integer programs instead of a single linear program. This is one reason why "SDDiP" has poor performance.

Convergence

The second part of SDDiP is a very tightly scoped claim: if all of the state variables are binary and the algorithm uses Lagrangian duality to compute a subgradient, then it will converge to an optimal policy.

In many cases, papers claim to "do SDDiP," but they have state variables which are not binary. In these cases, the algorithm is not guaranteed to converge to a globally optimal policy.

One work-around that has been suggested is to discretize the state variables into a set of binary state variables. However, this leads to a large number of binary state variables, which is another reason why "SDDiP" has poor performance.

In general, we recommend that you introduce integer variables into your model without fear of the consequences, and that you treat the resulting policy as a good heuristic, rather than an attempt to find a globally optimal policy.

+\end{aligned}\]

You can use Lagrangian duality in SDDP.jl by passing SDDP.LagrangianDuality to the duality_handler argument of SDDP.train.
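For example, a minimal sketch, assuming model is an existing policy graph that contains integer variables:

SDDP.train(model, duality_handler = SDDP.LagrangianDuality(), iteration_limit = 10)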

Compared with linear programming duality, the Lagrangian problem is difficult to solve because it requires the solution of many mixed-integer programs instead of a single linear program. This is one reason why "SDDiP" has poor performance.

Convergence

The second part of SDDiP is a very tightly scoped claim: if all of the state variables are binary and the algorithm uses Lagrangian duality to compute a subgradient, then it will converge to an optimal policy.

In many cases, papers claim to "do SDDiP," but they have state variables which are not binary. In these cases, the algorithm is not guaranteed to converge to a globally optimal policy.

One work-around that has been suggested is to discretize the state variables into a set of binary state variables. However, this leads to a large number of binary state variables, which is another reason why "SDDiP" has poor performance.

In general, we recommend that you introduce integer variables into your model without fear of the consequences, and that you treat the resulting policy as a good heuristic, rather than an attempt to find a globally optimal policy.

diff --git a/dev/guides/add_multidimensional_noise/index.html b/dev/guides/add_multidimensional_noise/index.html index 94a088ec9..44c407e24 100644 --- a/dev/guides/add_multidimensional_noise/index.html +++ b/dev/guides/add_multidimensional_noise/index.html @@ -77,4 +77,4 @@ julia> SDDP.simulate(model, 1); ω is: [54, 38, 19] ω is: [43, 3, 13] -ω is: [43, 4, 17] +ω is: [43, 4, 17] diff --git a/dev/guides/add_noise_in_the_constraint_matrix/index.html b/dev/guides/add_noise_in_the_constraint_matrix/index.html index aca6ec66c..884f1d9ce 100644 --- a/dev/guides/add_noise_in_the_constraint_matrix/index.html +++ b/dev/guides/add_noise_in_the_constraint_matrix/index.html @@ -16,4 +16,4 @@ julia> SDDP.simulate(model, 1); emissions : x_out <= 1 emissions : 0.2 x_out <= 1 -emissions : 0.5 x_out <= 1
Note

JuMP will normalize constraints by moving all variables to the left-hand side. Thus, @constraint(model, 0 <= 1 - x.out) becomes x.out <= 1. JuMP.set_normalized_coefficient sets the coefficient on the normalized constraint.

+emissions : 0.5 x_out <= 1
Note

JuMP will normalize constraints by moving all variables to the left-hand side. Thus, @constraint(model, 0 <= 1 - x.out) becomes x.out <= 1. JuMP.set_normalized_coefficient sets the coefficient on the normalized constraint.
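For example, here is a small standalone JuMP sketch of the same normalization behavior:

using JuMP

model = Model()
@variable(model, x_out)
# JuMP stores this constraint in normalized form as: x_out <= 1
@constraint(model, emissions, 0 <= 1 - x_out)
# Update the coefficient on the normalized constraint: 0.2 x_out <= 1
set_normalized_coefficient(emissions, x_out, 0.2)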

diff --git a/dev/guides/choose_a_stopping_rule/index.html b/dev/guides/choose_a_stopping_rule/index.html index 3a773eee0..378f785c6 100644 --- a/dev/guides/choose_a_stopping_rule/index.html +++ b/dev/guides/choose_a_stopping_rule/index.html @@ -1,2 +1,2 @@ -Choose a stopping rule · SDDP.jl

Choose a stopping rule

The theory of SDDP tells us that the algorithm converges to an optimal policy almost surely in a finite number of iterations. In practice, this number is very large. Therefore, we need some way of pre-emptively terminating SDDP when the solution is “good enough.” We call heuristics for pre-emptively terminating SDDP stopping rules.

Basic limits

The training of an SDDP policy can be terminated after a fixed number of iterations using the iteration_limit keyword.

SDDP.train(model, iteration_limit = 10)

The training of an SDDP policy can be terminated after a fixed number of seconds using the time_limit keyword.

SDDP.train(model, time_limit = 2.0)

Stopping rules

In addition to the limits provided as keyword arguments, a variety of other stopping rules are available. These can be passed to SDDP.train as a vector to the stopping_rules keyword. For example:

SDDP.train(model, stopping_rules = [SDDP.BoundStalling(10, 1e-4)])

See Stopping rules for a list of stopping rules supported by SDDP.jl.

+Choose a stopping rule · SDDP.jl

Choose a stopping rule

The theory of SDDP tells us that the algorithm converges to an optimal policy almost surely in a finite number of iterations. In practice, this number is very large. Therefore, we need some way of pre-emptively terminating SDDP when the solution is “good enough.” We call heuristics for pre-emptively terminating SDDP stopping rules.

Basic limits

The training of an SDDP policy can be terminated after a fixed number of iterations using the iteration_limit keyword.

SDDP.train(model, iteration_limit = 10)

The training of an SDDP policy can be terminated after a fixed number of seconds using the time_limit keyword.

SDDP.train(model, time_limit = 2.0)

Stopping rules

In addition to the limits provided as keyword arguments, a variety of other stopping rules are available. These can be passed to SDDP.train as a vector to the stopping_rules keyword. For example:

SDDP.train(model, stopping_rules = [SDDP.BoundStalling(10, 1e-4)])

See Stopping rules for a list of stopping rules supported by SDDP.jl.
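Limits and stopping rules can be combined in a single call; training terminates as soon as any of them is triggered. A sketch:

SDDP.train(model, iteration_limit = 100, time_limit = 60.0, stopping_rules = [SDDP.BoundStalling(10, 1e-4)])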

diff --git a/dev/guides/create_a_belief_state/index.html b/dev/guides/create_a_belief_state/index.html index f84908b3b..ebb5ea99c 100644 --- a/dev/guides/create_a_belief_state/index.html +++ b/dev/guides/create_a_belief_state/index.html @@ -30,4 +30,4 @@ (1, 2) => (2, 2) w.p. 0.2 Partitions {(1, 1), (1, 2)} - {(2, 1), (2, 2)} + {(2, 1), (2, 2)} diff --git a/dev/guides/create_a_general_policy_graph/index.html b/dev/guides/create_a_general_policy_graph/index.html index 230e3d14d..14c641730 100644 --- a/dev/guides/create_a_general_policy_graph/index.html +++ b/dev/guides/create_a_general_policy_graph/index.html @@ -106,4 +106,4 @@ @variable(subproblem, x >= 0, SDDP.State, initial_value = 1) @constraint(subproblem, x.out <= x.in) @stageobjective(subproblem, price * x.out) -end +end diff --git a/dev/guides/debug_a_model/index.html b/dev/guides/debug_a_model/index.html index 04870952e..4c4131f06 100644 --- a/dev/guides/debug_a_model/index.html +++ b/dev/guides/debug_a_model/index.html @@ -63,4 +63,4 @@ julia> optimize!(det_equiv) julia> objective_value(det_equiv) --5.472500000000001
Warning

The deterministic equivalent scales poorly with problem size. Only use this on small problems!

+-5.472500000000001
Warning

The deterministic equivalent scales poorly with problem size. Only use this on small problems!

diff --git a/dev/guides/improve_computational_performance/index.html b/dev/guides/improve_computational_performance/index.html index f3420de04..f2e4f7f6f 100644 --- a/dev/guides/improve_computational_performance/index.html +++ b/dev/guides/improve_computational_performance/index.html @@ -41,4 +41,4 @@ env = Gurobi.Env() set_optimizer(m, () -> Gurobi.Optimizer(env)) end, -) +) diff --git a/dev/guides/simulate_using_a_different_sampling_scheme/index.html b/dev/guides/simulate_using_a_different_sampling_scheme/index.html index 0be311a44..b1b76a159 100644 --- a/dev/guides/simulate_using_a_different_sampling_scheme/index.html +++ b/dev/guides/simulate_using_a_different_sampling_scheme/index.html @@ -166,4 +166,4 @@ ], [0.3, 0.7], ) -A Historical sampler with 2 scenarios sampled probabilistically.
Tip

Your sample space doesn't have to be a NamedTuple. It can be any Julia type! Use a Vector if that is easier, or define your own struct.

+A Historical sampler with 2 scenarios sampled probabilistically.
Tip

Your sample space doesn't have to be a NamedTuple. It can be any Julia type! Use a Vector if that is easier, or define your own struct.
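For example, a minimal sketch (with a hypothetical Inflow type) of a historical scenario whose noise terms are a custom struct:

struct Inflow
    value::Float64
end

# The model's SDDP.parameterize function must accept `Inflow` values as ω.
scenario = [(1, Inflow(0.0)), (2, Inflow(50.0)), (3, Inflow(100.0))]
sampling_scheme = SDDP.Historical([scenario])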

diff --git a/dev/index.html b/dev/index.html index 63f6c14de..99d83c7e0 100644 --- a/dev/index.html +++ b/dev/index.html @@ -43,4 +43,4 @@ journal = {Annals of Operations Research}, author = {Dowson, O. and Morton, D.P. and Pagnoncelli, B.K.}, year = {2022}, -}

Here is an earlier preprint.

+}

Here is an earlier preprint.

diff --git a/dev/release_notes/index.html b/dev/release_notes/index.html index bc79b50c6..5a857b6b9 100644 --- a/dev/release_notes/index.html +++ b/dev/release_notes/index.html @@ -1,2 +1,2 @@ -Release notes · SDDP.jl

Release notes

The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.

v1.6.2 (August 24, 2023)

Fixed

  • MSPFormat now detects and exploits stagewise independent lattices (#653)
  • Fixed set_optimizer for models read from file (#654)

Other

  • Fixed typo in pglib_opf.jl (#647)
  • Fixed documentation build and added color (#652)

v1.6.1 (July 20, 2023)

Fixed

  • Fixed bugs in MSPFormat reader (#638) (#639)

Other

  • Clarified OutOfSampleMonteCarlo docstring (#643)

v1.6.0 (July 3, 2023)

Added

Other

v1.5.1 (June 30, 2023)

This release contains a number of minor code changes, but it has a large impact on the content that is printed to screen. In particular, we now log periodically, instead of each iteration, and a "good" stopping rule is used as the default if none are specified. Try using SDDP.train(model) to see the difference.

Other

  • Fixed various typos in the documentation (#617)
  • Fixed printing test after changes in JuMP (#618)
  • Set SimulationStoppingRule as the default stopping rule (#619)
  • Changed the default logging frequency. Pass log_every_seconds = 0.0 to train to revert to the old behavior. (#620)
  • Added example usage with Distributions.jl (@slwu89) (#622)
  • Removed the numerical issue @warn (#627)
  • Improved the quality of docstrings (#630)

v1.5.0 (May 14, 2023)

Added

  • Added the ability to use a different model for the forward pass. This is a novel feature that lets you train better policies when the model is non-convex or does not have a well-defined dual. See the Alternative forward models tutorial in which we train convex and non-convex formulations of the optimal power flow problem. (#611)

Other

  • Updated missing changelog entries (#608)
  • Removed global variables (#610)
  • Converted the Options struct to keyword arguments. This struct was a private implementation detail, but the change is breaking if you developed an extension to SDDP that touched these internals. (#612)
  • Fixed some typos (#613)

v1.4.0 (May 8, 2023)

Added

Fixed

  • Fixed parsing of some MSPFormat files (#602) (#604)
  • Fixed printing in header (#605)

v1.3.0 (May 3, 2023)

Added

  • Added experimental support for SDDP.MSPFormat.read_from_file (#593)

Other

  • Updated to StochOptFormat v0.3 (#600)

v1.2.1 (May 1, 2023)

Fixed

  • Fixed log_every_seconds (#597)

v1.2.0 (May 1, 2023)

Added

Other

  • Tweaked how the log is printed (#588)
  • Updated to StochOptFormat v0.2 (#592)

v1.1.4 (April 10, 2023)

Fixed

  • Logs are now flushed every iteration (#584)

Other

  • Added docstrings to various functions (#581)
  • Minor documentation updates (#580)
  • Clarified integrality documentation (#582)
  • Updated the README (#585)
  • Number of numerical issues is now printed to the log (#586)

v1.1.3 (April 2, 2023)

Other

v1.1.2 (March 18, 2023)

Other

v1.1.1 (March 16, 2023)

Other

  • Fixed email in Project.toml
  • Added notebook to documentation tutorials (#571)

v1.1.0 (January 12, 2023)

Added

v1.0.0 (January 3, 2023)

Although we're bumping MAJOR version, this is a non-breaking release. Going forward:

  • New features will bump the MINOR version
  • Bug fixes, maintenance, and documentation updates will bump the PATCH version
  • We will support only the Long Term Support (currently v1.6.7) and the latest patch (currently v1.8.4) releases of Julia. Updates to the LTS version will bump the MINOR version
  • Updates to the compat bounds of package dependencies will bump the PATCH version.

We do not intend any breaking changes to the public API, which would require a new MAJOR release. The public API is everything defined in the documentation. Anything not in the documentation is considered private and may change in any PATCH release.

Added

Other

v0.4.9 (January 3, 2023)

Added

Other

  • Added tutorial on Markov Decision Processes (#556)
  • Added two-stage newsvendor tutorial (#557)
  • Refactored the layout of the documentation (#554) (#555)
  • Updated copyright to 2023 (#558)
  • Fixed errors in the documentation (#561)

v0.4.8 (December 19, 2022)

Added

Fixed

  • Reverted then fixed (#531) because it failed to account for problems with integer variables (#546) (#551)

v0.4.7 (December 17, 2022)

Added

  • Added initial_node support to InSampleMonteCarlo and OutOfSampleMonteCarlo (#535)

Fixed

  • Rethrow InterruptException when solver is interrupted (#534)
  • Fixed numerical recovery when we need dual solutions (#531) (Thanks @bfpc)
  • Fixed re-using the dashboard = true option between solves (#538)
  • Fixed bug when no @stageobjective is set (now defaults to 0.0) (#539)
  • Fixed errors thrown when invalid inputs are provided to add_objective_state (#540)

Other

  • Drop support for Julia versions prior to 1.6 (#533)
  • Updated versions of dependencies (#522) (#533)
  • Switched to HiGHS in the documentation and tests (#533)
  • Added license headers (#519)
  • Fixed link in air conditioning example (#521) (Thanks @conema)
  • Clarified variable naming in deterministic equivalent (#525) (Thanks @lucasprocessi)
  • Added this change log (#536)
  • Cuts are now written to model.cuts.json when numerical instability is discovered. This can aid debugging because it allows you to reload the cuts as of the iteration that caused the numerical issue (#537)

v0.4.6 (March 25, 2022)

Other

  • Updated to JuMP v1.0 (#517)

v0.4.5 (March 9, 2022)

Fixed

  • Fixed issue with set_silent in a subproblem (#510)

Other

v0.4.4 (December 11, 2021)

Added

  • Added BanditDuality (#471)
  • Added benchmark scripts (#475) (#476) (#490)
  • write_cuts_to_file now saves visited states (#468)

Fixed

  • Fixed BoundStalling in a deterministic policy (#470) (#474)
  • Fixed magnitude warning with zero coefficients (#483)

Other

  • Improvements to LagrangianDuality (#481) (#482) (#487)
  • Improvements to StrengthenedConicDuality (#486)
  • Switch to functional form for the tests (#478)
  • Fixed typos (#472) (Thanks @vfdev-5)
  • Update to JuMP v0.22 (#498)

v0.4.3 (August 31, 2021)

Added

  • Added biobjective solver (#462)
  • Added forward_pass_callback (#466)

Other

  • Update tutorials and documentation (#459) (#465)
  • Organize how paper materials are stored (#464)

v0.4.2 (August 24, 2021)

Fixed

  • Fixed a bug in Lagrangian duality (#457)

v0.4.1 (August 23, 2021)

Other

  • Minor changes to our implementation of LagrangianDuality (#454) (#455)

v0.4.0 (August 17, 2021)

Breaking

Other

v0.3.17 (July 6, 2021)

Added

Other

  • Display more model attributes (#438)
  • Documentation improvements (#433) (#437) (#439)

v0.3.16 (June 17, 2021)

Added

Other

  • Update risk measure docstrings (#418)

v0.3.15 (June 1, 2021)

Added

Fixed

Other

  • Add JuliaFormatter (#412)
  • Documentation improvements (#406) (#408)

v0.3.14 (March 30, 2021)

Fixed

  • Fixed O(N^2) behavior in get_same_children (#393)

v0.3.13 (March 27, 2021)

Fixed

  • Fixed bug in print.jl
  • Fixed compat of Reexport (#388)

v0.3.12 (March 22, 2021)

Added

  • Added problem statistics to header (#385) (#386)

Fixed

  • Fixed subtypes in visualization (#384)

v0.3.11 (March 22, 2021)

Fixed

  • Fixed constructor in direct mode (#383)

Other

  • Fix documentation (#379)

v0.3.10 (February 23, 2021)

Fixed

  • Fixed seriescolor in publication plot (#376)

v0.3.9 (February 20, 2021)

Added

  • Add option to simulate with different incoming state (#372)
  • Added warning for cuts with high dynamic range (#373)

Fixed

  • Fixed seriesalpha in publication plot (#375)

v0.3.8 (January 19, 2021)

Other

v0.3.7 (January 8, 2021)

Other

v0.3.6 (December 17, 2020)

Other

  • Fix typos (#358)
  • Collapse navigation bar in docs (#359)
  • Update TagBot.yml (#361)

v0.3.5 (November 18, 2020)

Other

  • Update citations (#348)
  • Switch to GitHub actions (#355)

v0.3.4 (August 25, 2020)

Added

  • Added non-uniform distributionally robust risk measure (#328)
  • Added numerical recovery functions (#330)
  • Added experimental StochOptFormat (#332) (#336) (#337) (#341) (#343) (#344)
  • Added entropic risk measure (#347)

Other

v0.3.3 (June 19, 2020)

Added

  • Added asynchronous support for price and belief states (#325)
  • Added ForwardPass plug-in system (#320)

Fixed

  • Fix check for probabilities in Markovian graph (#322)

v0.3.2 (April 6, 2020)

Added

Other

  • Improve error message in deterministic equivalent (#312)
  • Update to RecipesBase 1.0 (#313)

v0.3.1 (February 26, 2020)

Fixed

  • Fixed filename in integrality_handlers.jl (#304)

v0.3.0 (February 20, 2020)

Breaking

  • Breaking changes to update to JuMP v0.21 (#300).

v0.2.4 (February 7, 2020)

Added

  • Added a counter for the number of total subproblem solves (#301)

Other

  • Update formatter (#298)
  • Added tests (#299)

v0.2.3 (January 24, 2020)

Added

  • Added support for convex risk measures (#294)

Fixed

  • Fixed bug when subproblem is infeasible (#296)
  • Fixed bug in deterministic equivalent (#297)

Other

  • Added example from IJOC paper (#293)

v0.2.2 (January 10, 2020)

Fixed

  • Fixed flakey time limit in tests (#291)

Other

  • Removed MathOptFormat.jl (#289)
  • Update copyright (#290)

v0.2.1 (December 19, 2019)

Added

  • Added support for approximating a Markov lattice (#282) (#285)
  • Add tools for visualizing the value function (#272) (#286)
  • Write .mof.json files on error (#284)

Other

  • Improve documentation (#281) (#283)
  • Update tests for Julia 1.3 (#287)

v0.2.0 (December 16, 2019)

This version added the asynchronous parallel implementation with a few minor breaking changes in how we iterated internally. It didn't break basic user-facing models, only code that implemented some of the extension features. It probably could have been a v1.1 release.

Added

  • Added asynchronous parallel implementation (#277)
  • Added roll-out algorithm for cyclic graphs (#279)

Other

  • Improved error messages in PolicyGraph (#271)
  • Added JuliaFormatter (#273) (#276)
  • Fixed compat bounds (#274) (#278)
  • Added documentation for simulating non-standard graphs (#280)

v0.1.0 (October 17, 2019)

A complete rewrite of SDDP.jl based on the policy graph framework. This was essentially a new package. It has minimal code in common with the previous implementation.

Development started on September 28, 2018 in Kokako.jl, and the code was merged into SDDP.jl on March 14, 2019.

The pull request SDDP.jl#180 lists the 29 issues that the rewrite closed.

v0.0.1 (April 18, 2018)

Initial release. Development had been underway since January 22, 2016 in the StochDualDynamicProgram.jl repository. The last development commit there was April 5, 2017. Work then continued in this repository for a year before the first tagged release.

+Release notes · SDDP.jl

Release notes

The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.

v1.6.2 (August 24, 2023)

Fixed

  • MSPFormat now detects and exploits stagewise independent lattices (#653)
  • Fixed set_optimizer for models read from file (#654)

Other

  • Fixed typo in pglib_opf.jl (#647)
  • Fixed documentation build and added color (#652)

v1.6.1 (July 20, 2023)

Fixed

  • Fixed bugs in MSPFormat reader (#638) (#639)

Other

  • Clarified OutOfSampleMonteCarlo docstring (#643)

v1.6.0 (July 3, 2023)

Added

Other

v1.5.1 (June 30, 2023)

This release contains a number of minor code changes, but it has a large impact on the content that is printed to screen. In particular, we now log periodically, instead of each iteration, and a "good" stopping rule is used as the default if none are specified. Try using SDDP.train(model) to see the difference.

Other

  • Fixed various typos in the documentation (#617)
  • Fixed printing test after changes in JuMP (#618)
  • Set SimulationStoppingRule as the default stopping rule (#619)
  • Changed the default logging frequency. Pass log_every_seconds = 0.0 to train to revert to the old behavior. (#620)
  • Added example usage with Distributions.jl (@slwu89) (#622)
  • Removed the numerical issue @warn (#627)
  • Improved the quality of docstrings (#630)

v1.5.0 (May 14, 2023)

Added

  • Added the ability to use a different model for the forward pass. This is a novel feature that lets you train better policies when the model is non-convex or does not have a well-defined dual. See the Alternative forward models tutorial in which we train convex and non-convex formulations of the optimal power flow problem. (#611)

Other

  • Updated missing changelog entries (#608)
  • Removed global variables (#610)
  • Converted the Options struct to keyword arguments. This struct was a private implementation detail, but the change is breaking if you developed an extension to SDDP that touched these internals. (#612)
  • Fixed some typos (#613)

v1.4.0 (May 8, 2023)

Added

Fixed

  • Fixed parsing of some MSPFormat files (#602) (#604)
  • Fixed printing in header (#605)

v1.3.0 (May 3, 2023)

Added

  • Added experimental support for SDDP.MSPFormat.read_from_file (#593)

Other

  • Updated to StochOptFormat v0.3 (#600)

v1.2.1 (May 1, 2023)

Fixed

  • Fixed log_every_seconds (#597)

v1.2.0 (May 1, 2023)

Added

Other

  • Tweaked how the log is printed (#588)
  • Updated to StochOptFormat v0.2 (#592)

v1.1.4 (April 10, 2023)

Fixed

  • Logs are now flushed every iteration (#584)

Other

  • Added docstrings to various functions (#581)
  • Minor documentation updates (#580)
  • Clarified integrality documentation (#582)
  • Updated the README (#585)
  • Number of numerical issues is now printed to the log (#586)

v1.1.3 (April 2, 2023)

Other

v1.1.2 (March 18, 2023)

Other

v1.1.1 (March 16, 2023)

Other

  • Fixed email in Project.toml
  • Added notebook to documentation tutorials (#571)

v1.1.0 (January 12, 2023)

Added

v1.0.0 (January 3, 2023)

Although we're bumping MAJOR version, this is a non-breaking release. Going forward:

  • New features will bump the MINOR version
  • Bug fixes, maintenance, and documentation updates will bump the PATCH version
  • We will support only the Long Term Support (currently v1.6.7) and the latest patch (currently v1.8.4) releases of Julia. Updates to the LTS version will bump the MINOR version
  • Updates to the compat bounds of package dependencies will bump the PATCH version.

We do not intend any breaking changes to the public API, which would require a new MAJOR release. The public API is everything defined in the documentation. Anything not in the documentation is considered private and may change in any PATCH release.

Added

Other

v0.4.9 (January 3, 2023)

Added

Other

  • Added tutorial on Markov Decision Processes (#556)
  • Added two-stage newsvendor tutorial (#557)
  • Refactored the layout of the documentation (#554) (#555)
  • Updated copyright to 2023 (#558)
  • Fixed errors in the documentation (#561)

v0.4.8 (December 19, 2022)

Added

Fixed

  • Reverted then fixed (#531) because it failed to account for problems with integer variables (#546) (#551)

v0.4.7 (December 17, 2022)

Added

  • Added initial_node support to InSampleMonteCarlo and OutOfSampleMonteCarlo (#535)

Fixed

  • Rethrow InterruptException when solver is interrupted (#534)
  • Fixed numerical recovery when we need dual solutions (#531) (Thanks @bfpc)
  • Fixed re-using the dashboard = true option between solves (#538)
  • Fixed bug when no @stageobjective is set (now defaults to 0.0) (#539)
  • Fixed errors thrown when invalid inputs are provided to add_objective_state (#540)

Other

  • Drop support for Julia versions prior to 1.6 (#533)
  • Updated versions of dependencies (#522) (#533)
  • Switched to HiGHS in the documentation and tests (#533)
  • Added license headers (#519)
  • Fixed link in air conditioning example (#521) (Thanks @conema)
  • Clarified variable naming in deterministic equivalent (#525) (Thanks @lucasprocessi)
  • Added this change log (#536)
  • Cuts are now written to model.cuts.json when numerical instability is discovered. This can aid debugging because it allows you to reload the cuts as of the iteration that caused the numerical issue (#537)

v0.4.6 (March 25, 2022)

Other

  • Updated to JuMP v1.0 (#517)

v0.4.5 (March 9, 2022)

Fixed

  • Fixed issue with set_silent in a subproblem (#510)

Other

v0.4.4 (December 11, 2021)

Added

  • Added BanditDuality (#471)
  • Added benchmark scripts (#475) (#476) (#490)
  • write_cuts_to_file now saves visited states (#468)

Fixed

  • Fixed BoundStalling in a deterministic policy (#470) (#474)
  • Fixed magnitude warning with zero coefficients (#483)

Other

  • Improvements to LagrangianDuality (#481) (#482) (#487)
  • Improvements to StrengthenedConicDuality (#486)
  • Switch to functional form for the tests (#478)
  • Fixed typos (#472) (Thanks @vfdev-5)
  • Update to JuMP v0.22 (#498)

v0.4.3 (August 31, 2021)

Added

  • Added biobjective solver (#462)
  • Added forward_pass_callback (#466)

Other

  • Update tutorials and documentation (#459) (#465)
  • Organize how paper materials are stored (#464)

v0.4.2 (August 24, 2021)

Fixed

  • Fixed a bug in Lagrangian duality (#457)

v0.4.1 (August 23, 2021)

Other

  • Minor changes to our implementation of LagrangianDuality (#454) (#455)

v0.4.0 (August 17, 2021)

Breaking

Other

v0.3.17 (July 6, 2021)

Added

Other

  • Display more model attributes (#438)
  • Documentation improvements (#433) (#437) (#439)

v0.3.16 (June 17, 2021)

Added

Other

  • Update risk measure docstrings (#418)

v0.3.15 (June 1, 2021)

Added

Fixed

Other

  • Add JuliaFormatter (#412)
  • Documentation improvements (#406) (#408)

v0.3.14 (March 30, 2021)

Fixed

  • Fixed O(N^2) behavior in get_same_children (#393)

v0.3.13 (March 27, 2021)

Fixed

  • Fixed bug in print.jl
  • Fixed compat of Reexport (#388)

v0.3.12 (March 22, 2021)

Added

  • Added problem statistics to header (#385) (#386)

Fixed

  • Fixed subtypes in visualization (#384)

v0.3.11 (March 22, 2021)

Fixed

  • Fixed constructor in direct mode (#383)

Other

  • Fix documentation (#379)

v0.3.10 (February 23, 2021)

Fixed

  • Fixed seriescolor in publication plot (#376)

v0.3.9 (February 20, 2021)

Added

  • Add option to simulate with different incoming state (#372)
  • Added warning for cuts with high dynamic range (#373)

Fixed

  • Fixed seriesalpha in publication plot (#375)

v0.3.8 (January 19, 2021)

Other

v0.3.7 (January 8, 2021)

Other

v0.3.6 (December 17, 2020)

Other

  • Fix typos (#358)
  • Collapse navigation bar in docs (#359)
  • Update TagBot.yml (#361)

v0.3.5 (November 18, 2020)

Other

  • Update citations (#348)
  • Switch to GitHub actions (#355)

v0.3.4 (August 25, 2020)

Added

  • Added non-uniform distributionally robust risk measure (#328)
  • Added numerical recovery functions (#330)
  • Added experimental StochOptFormat (#332) (#336) (#337) (#341) (#343) (#344)
  • Added entropic risk measure (#347)

Other

v0.3.3 (June 19, 2020)

Added

  • Added asynchronous support for price and belief states (#325)
  • Added ForwardPass plug-in system (#320)

Fixed

  • Fix check for probabilities in Markovian graph (#322)

v0.3.2 (April 6, 2020)

Added

Other

  • Improve error message in deterministic equivalent (#312)
  • Update to RecipesBase 1.0 (#313)

v0.3.1 (February 26, 2020)

Fixed

  • Fixed filename in integrality_handlers.jl (#304)

v0.3.0 (February 20, 2020)

Breaking

  • Breaking changes to update to JuMP v0.21 (#300).

v0.2.4 (February 7, 2020)

Added

  • Added a counter for the number of total subproblem solves (#301)

Other

  • Update formatter (#298)
  • Added tests (#299)

v0.2.3 (January 24, 2020)

Added

  • Added support for convex risk measures (#294)

Fixed

  • Fixed bug when subproblem is infeasible (#296)
  • Fixed bug in deterministic equivalent (#297)

Other

  • Added example from IJOC paper (#293)

v0.2.2 (January 10, 2020)

Fixed

  • Fixed flakey time limit in tests (#291)

Other

  • Removed MathOptFormat.jl (#289)
  • Update copyright (#290)

v0.2.1 (December 19, 2019)

Added

  • Added support for approximating a Markov lattice (#282) (#285)
  • Add tools for visualizing the value function (#272) (#286)
  • Write .mof.json files on error (#284)

Other

  • Improve documentation (#281) (#283)
  • Update tests for Julia 1.3 (#287)

v0.2.0 (December 16, 2019)

This version added the asynchronous parallel implementation with a few minor breaking changes in how we iterated internally. It didn't break basic user-facing models, only code that implemented some of the extension features. It probably could have been a v1.1 release.

Added

  • Added asynchronous parallel implementation (#277)
  • Added roll-out algorithm for cyclic graphs (#279)

Other

  • Improved error messages in PolicyGraph (#271)
  • Added JuliaFormatter (#273) (#276)
  • Fixed compat bounds (#274) (#278)
  • Added documentation for simulating non-standard graphs (#280)

v0.1.0 (October 17, 2019)

A complete rewrite of SDDP.jl based on the policy graph framework. This was essentially a new package. It has minimal code in common with the previous implementation.

Development started on September 28, 2018 in Kokako.jl, and the code was merged into SDDP.jl on March 14, 2019.

The pull request SDDP.jl#180 lists the 29 issues that the rewrite closed.

v0.0.1 (April 18, 2018)

Initial release. Development had been underway since January 22, 2016 in the StochDualDynamicProgram.jl repository. The last development commit there was April 5, 2017. Work then continued in this repository for a year before the first tagged release.

diff --git a/dev/search/index.html b/dev/search/index.html index 0d5d68f89..d50a35432 100644 --- a/dev/search/index.html +++ b/dev/search/index.html @@ -1,2 +1,2 @@ -Search · SDDP.jl

Loading search...

    +Search · SDDP.jl

    Loading search...

      diff --git a/dev/search_index.js b/dev/search_index.js index bae3004fc..f1c5c16f0 100644 --- a/dev/search_index.js +++ b/dev/search_index.js @@ -1,3 +1,3 @@ var documenterSearchIndex = {"docs": -[{"location":"guides/create_a_general_policy_graph/#Create-a-general-policy-graph","page":"Create a general policy graph","title":"Create a general policy graph","text":"","category":"section"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"DocTestSetup = quote\n using SDDP, HiGHS\nend","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"SDDP.jl uses the concept of a policy graph to formulate multistage stochastic programming problems. For more details, read An introduction to SDDP.jl or the paper Dowson, O., (2020). The policy graph decomposition of multistage stochastic optimization problems. Networks, 76(1), 3-23. doi.","category":"page"},{"location":"guides/create_a_general_policy_graph/#Creating-a-[SDDP.Graph](@ref)","page":"Create a general policy graph","title":"Creating a SDDP.Graph","text":"","category":"section"},{"location":"guides/create_a_general_policy_graph/#Linear-graphs","page":"Create a general policy graph","title":"Linear graphs","text":"","category":"section"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"Linear policy graphs can be created using the SDDP.LinearGraph function.","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"julia> graph = SDDP.LinearGraph(3)\nRoot\n 0\nNodes\n 1\n 2\n 3\nArcs\n 0 => 1 w.p. 1.0\n 1 => 2 w.p. 1.0\n 2 => 3 w.p. 1.0","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"We can add nodes to a graph using SDDP.add_node and edges using SDDP.add_edge.","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"julia> SDDP.add_node(graph, 4)\n\njulia> SDDP.add_edge(graph, 3 => 4, 1.0)\n\njulia> SDDP.add_edge(graph, 4 => 1, 0.9)\n\njulia> graph\nRoot\n 0\nNodes\n 1\n 2\n 3\n 4\nArcs\n 0 => 1 w.p. 1.0\n 1 => 2 w.p. 1.0\n 2 => 3 w.p. 1.0\n 3 => 4 w.p. 1.0\n 4 => 1 w.p. 0.9","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"Look! We just made a cyclic graph! SDDP.jl can solve infinite horizon problems. 
The probability on the arc that completes a cycle should be interpreted as a discount factor.","category":"page"},{"location":"guides/create_a_general_policy_graph/#guide_unicyclic_policy_graph","page":"Create a general policy graph","title":"Unicyclic policy graphs","text":"","category":"section"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"Linear policy graphs with a single infinite-horizon cycle can be created using the SDDP.UnicyclicGraph function.","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"julia> SDDP.UnicyclicGraph(0.95; num_nodes = 2)\nRoot\n 0\nNodes\n 1\n 2\nArcs\n 0 => 1 w.p. 1.0\n 1 => 2 w.p. 1.0\n 2 => 1 w.p. 0.95","category":"page"},{"location":"guides/create_a_general_policy_graph/#guide_markovian_policy_graph","page":"Create a general policy graph","title":"Markovian policy graphs","text":"","category":"section"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"Markovian policy graphs can be created using the SDDP.MarkovianGraph function.","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"julia> SDDP.MarkovianGraph(Matrix{Float64}[[1.0]', [0.4 0.6]])\nRoot\n (0, 1)\nNodes\n (1, 1)\n (2, 1)\n (2, 2)\nArcs\n (0, 1) => (1, 1) w.p. 1.0\n (1, 1) => (2, 1) w.p. 0.4\n (1, 1) => (2, 2) w.p. 0.6","category":"page"},{"location":"guides/create_a_general_policy_graph/#General-graphs","page":"Create a general policy graph","title":"General graphs","text":"","category":"section"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"Arbitrarily complicated graphs can be constructed using SDDP.Graph, SDDP.add_node and SDDP.add_edge. For example","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"julia> graph = SDDP.Graph(:root_node)\nRoot\n root_node\nNodes\n {}\nArcs\n {}\n\njulia> SDDP.add_node(graph, :decision_node)\n\njulia> SDDP.add_edge(graph, :root_node => :decision_node, 1.0)\n\njulia> SDDP.add_edge(graph, :decision_node => :decision_node, 0.9)\n\njulia> graph\nRoot\n root_node\nNodes\n decision_node\nArcs\n root_node => decision_node w.p. 1.0\n decision_node => decision_node w.p. 
0.9","category":"page"},{"location":"guides/create_a_general_policy_graph/#Creating-a-policy-graph","page":"Create a general policy graph","title":"Creating a policy graph","text":"","category":"section"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"Once you have constructed an instance of SDDP.Graph, you can create a policy graph by passing the graph as the first argument.","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"julia> graph = SDDP.Graph(\n :root_node,\n [:decision_node],\n [\n (:root_node => :decision_node, 1.0),\n (:decision_node => :decision_node, 0.9)\n ]);\n\njulia> model = SDDP.PolicyGraph(\n graph,\n lower_bound = 0,\n optimizer = HiGHS.Optimizer) do subproblem, node\n println(\"Called from node: \", node)\n end;\nCalled from node: decision_node","category":"page"},{"location":"guides/create_a_general_policy_graph/#Special-cases","page":"Create a general policy graph","title":"Special cases","text":"","category":"section"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"There are two special cases which cover the majority of models in the literature.","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"SDDP.LinearPolicyGraph is a special case where a SDDP.LinearGraph is passed as the first argument.\nSDDP.MarkovianPolicyGraph is a special case where a SDDP.MarkovianGraph is passed as the first argument.","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"Note that the type of the names of all nodes (including the root node) must be the same. In this case, they are Symbols.","category":"page"},{"location":"guides/create_a_general_policy_graph/#Simulating-non-standard-policy-graphs","page":"Create a general policy graph","title":"Simulating non-standard policy graphs","text":"","category":"section"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"If you simulate a policy graph with a node that has outgoing arcs that sum to less than one, you will end up with simulations of different lengths. 
(The most common case is an infinite horizon stochastic program, aka a linear policy graph with a single cycle.)","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"To simulate a fixed number of stages, use:","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"simulations = SDDP.simulate(\n model,\n 1,\n sampling_scheme = SDDP.InSampleMonteCarlo(\n max_depth = 10,\n terminate_on_dummy_leaf = false\n )\n)","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"Here, max_depth controls the number of stages, and terminate_on_dummy_leaf = false stops us from terminating early.","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"See also Simulate using a different sampling scheme.","category":"page"},{"location":"guides/create_a_general_policy_graph/#Creating-a-Markovian-graph-automatically","page":"Create a general policy graph","title":"Creating a Markovian graph automatically","text":"","category":"section"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"SDDP.jl can create a Markovian graph by automatically discretizing a one-dimensional stochastic process and fitting a Markov chain.","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"To access this functionality, pass a function that takes no arguments and returns a Vector{Float64} to SDDP.MarkovianGraph. To keyword arguments also need to be provided: budget is the total number of nodes in the Markovian graph, and scenarios is the number of realizations of the simulator function used to approximate the graph.","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"In some cases, scenarios may be too small to provide a reasonable fit of the stochastic process. If so, SDDP.jl will automatically try to re-fit the Markov chain using more scenarios.","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"function simulator()\n scenario = zeros(5)\n for i = 2:5\n scenario[i] = scenario[i - 1] + rand() - 0.5\n end\n return scenario\nend\n\nmodel = SDDP.PolicyGraph(\n SDDP.MarkovianGraph(simulator; budget = 10, scenarios = 100),\n sense = :Max,\n upper_bound = 1e3\n) do subproblem, node\n (stage, price) = node\n @variable(subproblem, x >= 0, SDDP.State, initial_value = 1)\n @constraint(subproblem, x.out <= x.in)\n @stageobjective(subproblem, price * x.out)\nend","category":"page"},{"location":"guides/debug_a_model/#Debug-a-model","page":"Debug a model","title":"Debug a model","text":"","category":"section"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"Building multistage stochastic programming models is hard. There are a lot of different pieces that need to be put together, and we typically have no idea of the optimal policy, so it can be hard (impossible?) 
to validate the solution.","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"That said, here are a few tips to verify and validate models built using SDDP.jl.","category":"page"},{"location":"guides/debug_a_model/#Writing-subproblems-to-file","page":"Debug a model","title":"Writing subproblems to file","text":"","category":"section"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"The first step to debug a model is to write out the subproblems to a file in order to check that you are actually building what you think you are building.","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"This can be achieved with the help of two functions: SDDP.parameterize and SDDP.write_subproblem_to_file. The first lets you parameterize a node given a noise, and the second writes out the subproblem to a file.","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"Here is an example model:","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"using SDDP, HiGHS\n\nmodel = SDDP.LinearPolicyGraph(\n stages = 2,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n ) do subproblem, t\n @variable(subproblem, x, SDDP.State, initial_value = 1)\n @variable(subproblem, y)\n @constraint(subproblem, balance, x.in == x.out + y)\n SDDP.parameterize(subproblem, [1.1, 2.2]) do ω\n @stageobjective(subproblem, ω * x.out)\n JuMP.fix(y, ω)\n end\nend\n\n# output\n\nA policy graph with 2 nodes.\n Node indices: 1, 2","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"Initially, model hasn't been parameterized with a concrete realizations of ω. Let's do so now by parameterizing the first subproblem with ω=1.1.","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"julia> SDDP.parameterize(model[1], 1.1)","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"Easy! To parameterize the second stage problem, we would have used model[2].","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"Now to write out the problem to a file. We'll get a few warnings because some variables and constraints don't have names. 
They don't matter, so ignore them.","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"julia> SDDP.write_subproblem_to_file(model[1], \"subproblem.lp\")\n\njulia> read(\"subproblem.lp\") |> String |> print\nminimize\nobj: 1.1 x_out + 1 x2\nsubject to\nbalance: -1 y + 1 x_in - 1 x_out = 0\nBounds\ny = 1.1\nx2 >= 0\nx_in free\nx_out free\nEnd","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"It is easy to see that ω has been set in the objective, and as the fixed value for y.","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"It is also possible to parameterize the subproblems using values for ω that are not in the original problem formulation.","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"julia> SDDP.parameterize(model[1], 3.3)\n\njulia> SDDP.write_subproblem_to_file(model[1], \"subproblem.lp\")\n\njulia> read(\"subproblem.lp\") |> String |> print\nminimize\nobj: 3.3 x_out + 1 x2\nsubject to\nbalance: -1 y + 1 x_in - 1 x_out = 0\nBounds\ny = 3.3\nx2 >= 0\nx_in free\nx_out free\nEnd\n\njulia> rm(\"subproblem.lp\") # Clean up.","category":"page"},{"location":"guides/debug_a_model/#Solve-the-deterministic-equivalent","page":"Debug a model","title":"Solve the deterministic equivalent","text":"","category":"section"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"Sometimes, it can be helpful to solve the deterministic equivalent of a problem in order to obtain an exact solution to the problem. To obtain a JuMP model that represents the deterministic equivalent, use SDDP.deterministic_equivalent. The returned model is just a normal JuMP model. Use JuMP to optimize it and query the solution.","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"julia> det_equiv = SDDP.deterministic_equivalent(model, HiGHS.Optimizer)\nA JuMP Model\nFeasibility problem with:\nVariables: 24\n`AffExpr`-in-`MathOptInterface.EqualTo{Float64}`: 10 constraints\n`VariableRef`-in-`MathOptInterface.EqualTo{Float64}`: 8 constraints\n`VariableRef`-in-`MathOptInterface.GreaterThan{Float64}`: 6 constraints\n`VariableRef`-in-`MathOptInterface.LessThan{Float64}`: 4 constraints\nModel mode: AUTOMATIC\nCachingOptimizer state: EMPTY_OPTIMIZER\nSolver name: HiGHS\n\njulia> set_silent(det_equiv)\n\njulia> optimize!(det_equiv)\n\njulia> objective_value(det_equiv)\n-5.472500000000001","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"warning: Warning\nThe deterministic equivalent scales poorly with problem size. 
Only use this on small problems!","category":"page"},{"location":"guides/add_multidimensional_noise/#Add-multi-dimensional-noise-terms","page":"Add multi-dimensional noise terms","title":"Add multi-dimensional noise terms","text":"","category":"section"},{"location":"guides/add_multidimensional_noise/","page":"Add multi-dimensional noise terms","title":"Add multi-dimensional noise terms","text":"DocTestSetup = quote\n using SDDP, HiGHS\nend","category":"page"},{"location":"guides/add_multidimensional_noise/","page":"Add multi-dimensional noise terms","title":"Add multi-dimensional noise terms","text":"Multi-dimensional stagewise-independent random variables can be created by forming the Cartesian product of the random variables.","category":"page"},{"location":"guides/add_multidimensional_noise/#A-simple-example","page":"Add multi-dimensional noise terms","title":"A simple example","text":"","category":"section"},{"location":"guides/add_multidimensional_noise/","page":"Add multi-dimensional noise terms","title":"Add multi-dimensional noise terms","text":"If the sample space and probabilities are given as vectors for each marginal distribution, do:","category":"page"},{"location":"guides/add_multidimensional_noise/","page":"Add multi-dimensional noise terms","title":"Add multi-dimensional noise terms","text":"julia> model = SDDP.LinearPolicyGraph(\n stages = 3,\n lower_bound = 0,\n optimizer = HiGHS.Optimizer,\n ) do subproblem, t\n @variable(subproblem, x, SDDP.State, initial_value = 0.0)\n Ω = [(value = v, coefficient = c) for v in [1, 2] for c in [3, 4, 5]]\n P = [v * c for v in [0.5, 0.5] for c in [0.3, 0.5, 0.2]]\n SDDP.parameterize(subproblem, Ω, P) do ω\n JuMP.fix(x.out, ω.value)\n @stageobjective(subproblem, ω.coefficient * x.out)\n println(\"ω is: \", ω)\n end\n end;\n\njulia> SDDP.simulate(model, 1);\nω is: (value = 1, coefficient = 4)\nω is: (value = 1, coefficient = 3)\nω is: (value = 2, coefficient = 4)","category":"page"},{"location":"guides/add_multidimensional_noise/#Using-Distributions.jl","page":"Add multi-dimensional noise terms","title":"Using Distributions.jl","text":"","category":"section"},{"location":"guides/add_multidimensional_noise/","page":"Add multi-dimensional noise terms","title":"Add multi-dimensional noise terms","text":"For sampling multidimensional random variates, it can be useful to use the Product type from Distributions.jl.","category":"page"},{"location":"guides/add_multidimensional_noise/#Finite-discrete-distributions","page":"Add multi-dimensional noise terms","title":"Finite discrete distributions","text":"","category":"section"},{"location":"guides/add_multidimensional_noise/","page":"Add multi-dimensional noise terms","title":"Add multi-dimensional noise terms","text":"There are several ways to go about this. If the sample space is finite, and small enough that it makes sense to enumerate each element, we can use Base.product and Distributions.support to generate the entire sample space Ω from each of the marginal distributions. 
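If Base.product is unfamiliar, here is a tiny standalone sketch (plain Julia, independent of SDDP.jl; the two supports are made up for illustration) of how it enumerates every combination of the marginal supports, which is the same pattern used in the example that follows:

```julia
# Enumerate the Cartesian product of two small marginal supports.
supports = ([1, 2], [10, 20, 30])
Ω = vec([collect(ω) for ω in Base.product(supports...)])
# Ω == [[1, 10], [2, 10], [1, 20], [2, 20], [1, 30], [2, 30]]
length(Ω)  # 6 == 2 * 3
```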
","category":"page"},{"location":"guides/add_multidimensional_noise/","page":"Add multi-dimensional noise terms","title":"Add multi-dimensional noise terms","text":"We can then evaluate the density function of the product distribution on each element of this space to get the vector of corresponding probabilities, P.","category":"page"},{"location":"guides/add_multidimensional_noise/","page":"Add multi-dimensional noise terms","title":"Add multi-dimensional noise terms","text":"julia> import Distributions\n\njulia> distributions = [\n Distributions.Binomial(10, 0.5),\n Distributions.Bernoulli(0.5),\n Distributions.truncated(Distributions.Poisson(5), 2, 8)\n ];\n\njulia> supports = Distributions.support.(distributions);\n\njulia> Ω = vec([collect(ω) for ω in Base.product(supports...)]);\n\njulia> P = [Distributions.pdf(Distributions.Product(distributions), ω) for ω in Ω];\n\njulia> model = SDDP.LinearPolicyGraph(\n stages = 3,\n lower_bound = 0,\n optimizer = HiGHS.Optimizer,\n ) do subproblem, t\n @variable(subproblem, x, SDDP.State, initial_value = 0.0)\n SDDP.parameterize(subproblem, Ω, P) do ω\n JuMP.fix(x.out, ω[1])\n @stageobjective(subproblem, ω[2] * x.out + ω[3])\n println(\"ω is: \", ω)\n end\n end;\n\njulia> SDDP.simulate(model, 1);\nω is: [10, 0, 3]\nω is: [0, 1, 6]\nω is: [6, 0, 5]","category":"page"},{"location":"guides/add_multidimensional_noise/#Sampling","page":"Add multi-dimensional noise terms","title":"Sampling","text":"","category":"section"},{"location":"guides/add_multidimensional_noise/","page":"Add multi-dimensional noise terms","title":"Add multi-dimensional noise terms","text":"For sample spaces that are too large to explicitly represent, we can instead approximate the distribution by a sample of N points. Now Ω is a sample from the full sample space, and P is the uniform distribution over those points. 
Points with higher density in the full sample space will appear more frequently in Ω.","category":"page"},{"location":"guides/add_multidimensional_noise/","page":"Add multi-dimensional noise terms","title":"Add multi-dimensional noise terms","text":"julia> import Distributions\n\njulia> distributions = Distributions.Product([\n Distributions.Binomial(100, 0.5),\n Distributions.Geometric(1 / 20),\n Distributions.Poisson(20),\n ]);\n\njulia> N = 100;\n\njulia> Ω = [rand(distributions) for _ in 1:N];\n\njulia> P = fill(1 / N, N);\n\njulia> model = SDDP.LinearPolicyGraph(\n stages = 3,\n lower_bound = 0,\n optimizer = HiGHS.Optimizer,\n ) do subproblem, t\n @variable(subproblem, x, SDDP.State, initial_value = 0.0)\n SDDP.parameterize(subproblem, Ω, P) do ω\n JuMP.fix(x.out, ω[1])\n @stageobjective(subproblem, ω[2] * x.out + ω[3])\n println(\"ω is: \", ω)\n end\n end;\n\njulia> SDDP.simulate(model, 1);\nω is: [54, 38, 19]\nω is: [43, 3, 13]\nω is: [43, 4, 17]","category":"page"},{"location":"examples/booking_management/","page":"Booking management","title":"Booking management","text":"EditURL = \"booking_management.jl\"","category":"page"},{"location":"examples/booking_management/#Booking-management","page":"Booking management","title":"Booking management","text":"","category":"section"},{"location":"examples/booking_management/","page":"Booking management","title":"Booking management","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/booking_management/","page":"Booking management","title":"Booking management","text":"This example concerns the acceptance of booking requests for rooms in a hotel in the lead up to a large event.","category":"page"},{"location":"examples/booking_management/","page":"Booking management","title":"Booking management","text":"Each stage, we receive a booking request and can choose to accept or decline it. 
Once accepted, bookings cannot be terminated.","category":"page"},{"location":"examples/booking_management/","page":"Booking management","title":"Booking management","text":"using SDDP, HiGHS, Test\n\nfunction booking_management_model(num_days, num_rooms, num_requests)\n # maximum revenue that could be accrued.\n max_revenue = (num_rooms + num_requests) * num_days * num_rooms\n # booking_requests is a vector of {0,1} arrays of size\n # (num_days x num_rooms) if the room is requested.\n booking_requests = Array{Int,2}[]\n for room in 1:num_rooms\n for day in 1:num_days\n # note: length_of_stay is 0 indexed to avoid unnecessary +/- 1\n # on the indexing\n for length_of_stay in 0:(num_days-day)\n req = zeros(Int, (num_rooms, num_days))\n req[room:room, day.+(0:length_of_stay)] .= 1\n push!(booking_requests, req)\n end\n end\n end\n\n return model = SDDP.LinearPolicyGraph(\n stages = num_requests,\n upper_bound = max_revenue,\n sense = :Max,\n optimizer = HiGHS.Optimizer,\n ) do sp, stage\n @variable(\n sp,\n 0 <= vacancy[room = 1:num_rooms, day = 1:num_days] <= 1,\n SDDP.State,\n Bin,\n initial_value = 1\n )\n @variables(\n sp,\n begin\n # Accept request for booking of room for length of time.\n 0 <= accept_request <= 1, Bin\n # Accept a booking for an individual room on an individual day.\n 0 <= room_request_accepted[1:num_rooms, 1:num_days] <= 1, Bin\n # Helper for JuMP.fix\n req[1:num_rooms, 1:num_days]\n end\n )\n for room in 1:num_rooms, day in 1:num_days\n @constraints(\n sp,\n begin\n # Update vacancy if we accept a room request\n vacancy[room, day].out ==\n vacancy[room, day].in - room_request_accepted[room, day]\n # Can't accept a request of a filled room\n room_request_accepted[room, day] <= vacancy[room, day].in\n # Can't accept invididual room request if entire request is declined\n room_request_accepted[room, day] <= accept_request\n # Can't accept request if room not requested\n room_request_accepted[room, day] <= req[room, day]\n # Accept all individual rooms is entire request is accepted\n room_request_accepted[room, day] + (1 - accept_request) >= req[room, day]\n end\n )\n end\n SDDP.parameterize(sp, booking_requests) do request\n return JuMP.fix.(req, request)\n end\n @stageobjective(\n sp,\n sum(\n (room + stage - 1) * room_request_accepted[room, day] for\n room in 1:num_rooms for day in 1:num_days\n )\n )\n end\nend\n\nfunction booking_management(duality_handler)\n m_1_2_5 = booking_management_model(1, 2, 5)\n SDDP.train(m_1_2_5; log_frequency = 5, duality_handler = duality_handler)\n if duality_handler == SDDP.ContinuousConicDuality()\n @test SDDP.calculate_bound(m_1_2_5) >= 7.25 - 1e-4\n else\n @test isapprox(SDDP.calculate_bound(m_1_2_5), 7.25, atol = 0.02)\n end\n\n m_2_2_3 = booking_management_model(2, 2, 3)\n SDDP.train(m_2_2_3; log_frequency = 10, duality_handler = duality_handler)\n if duality_handler == SDDP.ContinuousConicDuality()\n @test SDDP.calculate_bound(m_1_2_5) > 6.13\n else\n @test isapprox(SDDP.calculate_bound(m_2_2_3), 6.13, atol = 0.02)\n end\nend\n\nbooking_management(SDDP.ContinuousConicDuality())","category":"page"},{"location":"examples/booking_management/","page":"Booking management","title":"Booking management","text":"New version of HiGHS stalls booking_management(SDDP.LagrangianDuality())","category":"page"},{"location":"examples/booking_management/","page":"Booking management","title":"Booking management","text":"","category":"page"},{"location":"examples/booking_management/","page":"Booking management","title":"Booking 
management","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/no_strong_duality/","page":"No strong duality","title":"No strong duality","text":"EditURL = \"no_strong_duality.jl\"","category":"page"},{"location":"examples/no_strong_duality/#No-strong-duality","page":"No strong duality","title":"No strong duality","text":"","category":"section"},{"location":"examples/no_strong_duality/","page":"No strong duality","title":"No strong duality","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/no_strong_duality/","page":"No strong duality","title":"No strong duality","text":"This example is interesting, because strong duality doesn't hold for the extensive form (see if you can show why!), but we still converge.","category":"page"},{"location":"examples/no_strong_duality/","page":"No strong duality","title":"No strong duality","text":"using SDDP, HiGHS, Test\n\nfunction no_strong_duality()\n model = SDDP.PolicyGraph(\n SDDP.Graph(\n :root,\n [:node],\n [(:root => :node, 1.0), (:node => :node, 0.5)],\n ),\n optimizer = HiGHS.Optimizer,\n lower_bound = 0.0,\n ) do sp, t\n @variable(sp, x, SDDP.State, initial_value = 1.0)\n @stageobjective(sp, x.out)\n @constraint(sp, x.in == x.out)\n end\n SDDP.train(model)\n @test SDDP.calculate_bound(model) ≈ 2.0 atol = 1e-5\n return\nend\n\nno_strong_duality()","category":"page"},{"location":"examples/no_strong_duality/","page":"No strong duality","title":"No strong duality","text":"","category":"page"},{"location":"examples/no_strong_duality/","page":"No strong duality","title":"No strong duality","text":"This page was generated using Literate.jl.","category":"page"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"CurrentModule = SDDP","category":"page"},{"location":"guides/add_integrality/#Integrality","page":"Integrality","title":"Integrality","text":"","category":"section"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"There's nothing special about binary and integer variables in SDDP.jl. Your models may contain a mix of binary, integer, or continuous state and control variables. 
Use the standard JuMP syntax to add binary or integer variables.","category":"page"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"For example:","category":"page"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"using SDDP, HiGHS\nmodel = SDDP.LinearPolicyGraph(\n stages = 3,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n) do sp, t\n @variable(sp, 0 <= x <= 100, Int, SDDP.State, initial_value = 0)\n @variable(sp, 0 <= u <= 200, integer = true)\n @variable(sp, v >= 0)\n @constraint(sp, x.out == x.in + u + v - 150)\n @stageobjective(sp, 2u + 6v + x.out)\nend","category":"page"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"If you want finer control over how SDDP.jl computes subgradients in the backward pass, you can pass an SDDP.AbstractDualityHandler to the duality_handler argument of SDDP.train.","category":"page"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"See Duality handlers for the list of handlers you can pass.","category":"page"},{"location":"guides/add_integrality/#Convergence","page":"Integrality","title":"Convergence","text":"","category":"section"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"SDDP.jl cannot guarantee that it will find a globally optimal policy when some of the variables are discrete. However, in most cases we find that it can still find an integer feasible policy that performs well in simulation.","category":"page"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"Moreover, when the number of nodes in the graph is large, or there is uncertainty, we are not aware of another algorithm that can claim to find a globally optimal policy.","category":"page"},{"location":"guides/add_integrality/#Does-SDDP.jl-implement-the-SDDiP-algorithm?","page":"Integrality","title":"Does SDDP.jl implement the SDDiP algorithm?","text":"","category":"section"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"Most discussions of SDDiP in the literature confuse two unrelated things.","category":"page"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"First, how to compute dual variables\nSecond, when the algorithm will converge to a globally optimal policy.","category":"page"},{"location":"guides/add_integrality/#Computing-dual-variables","page":"Integrality","title":"Computing dual variables","text":"","category":"section"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"The stochastic dual dynamic programming algorithm requires a subgradient of the objective with respect to the incoming state variable. 
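As a usage note, the two ways of computing this subgradient that are discussed below correspond to the two duality handlers in the following sketch; model is assumed to be a policy graph built as in the example above:

```julia
# Relax integrality and use linear programming / conic duality.
SDDP.train(model; duality_handler = SDDP.ContinuousConicDuality())

# Keep the integrality constraints and solve a Lagrangian dual instead
# (typically much more expensive; see the discussion below).
SDDP.train(model; duality_handler = SDDP.LagrangianDuality())
```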
","category":"page"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"One way to obtain a valid subgradient is to compute an optimal value of the dual variable lambda in the following subproblem:","category":"page"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"beginaligned\nV_i(x omega) = minlimits_barx x^prime u C_i(barx u omega) + mathbbE_j in i^+ varphi in Omega_jV_j(x^prime varphi)\n x^prime = T_i(barx u omega) \n u in U_i(barx omega) \n barx = x quad lambda\nendaligned","category":"page"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"The easiest option is to relax integrality of the discrete variables to form a linear program and then use linear programming duality to obtain the dual. But we could also use Lagrangian duality without needing to relax the integrality constraints.","category":"page"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"To compute the Lagrangian dual lambda, we penalize lambda^top(barx - x) in the objective instead of enforcing the constraint:","category":"page"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"beginaligned\nmaxlimits_lambdaminlimits_barx x^prime u C_i(barx u omega) + mathbbE_j in i^+ varphi in Omega_jV_j(x^prime varphi) - lambda^top(barx - x)\n x^prime = T_i(barx u omega) \n u in U_i(barx omega)\nendaligned","category":"page"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"You can use Lagrangian duality in SDDP.jl by passing SDDP.LagrangianDuality to the duality_handler argument of SDDP.train.","category":"page"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"Compared with linear programming duality, the Lagrangian problem is difficult to solve because it requires the solution of many mixed-integer programs instead of a single linear program. This is one reason why \"SDDiP\" has poor performance.","category":"page"},{"location":"guides/add_integrality/#Convergence-2","page":"Integrality","title":"Convergence","text":"","category":"section"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"The second part to SDDiP is a very tightly scoped claim: if all of the state variables are binary and the algorithm uses Lagrangian duality to compute a subgradient, then it will converge to an optimal policy.","category":"page"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"In many cases, papers claim to \"do SDDiP,\" but they have state variables which are not binary. In these cases, the algorithm is not guaranteed to converge to a globally optimal policy.","category":"page"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"One work-around that has been suggested is to discretize the state variables into a set of binary state variables. 
However, this leads to a large number of binary state variables, which is another reason why \"SDDiP\" has poor performance.","category":"page"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"In general, we recommend that you introduce integer variables into your model without fear of the consequences, and that you treat the resulting policy as a good heuristic, rather than an attempt to find a globally optimal policy.","category":"page"},{"location":"examples/StructDualDynProg.jl_prob5.2_2stages/","page":"StructDualDynProg: Problem 5.2, 2 stages","title":"StructDualDynProg: Problem 5.2, 2 stages","text":"EditURL = \"StructDualDynProg.jl_prob5.2_2stages.jl\"","category":"page"},{"location":"examples/StructDualDynProg.jl_prob5.2_2stages/#StructDualDynProg:-Problem-5.2,-2-stages","page":"StructDualDynProg: Problem 5.2, 2 stages","title":"StructDualDynProg: Problem 5.2, 2 stages","text":"","category":"section"},{"location":"examples/StructDualDynProg.jl_prob5.2_2stages/","page":"StructDualDynProg: Problem 5.2, 2 stages","title":"StructDualDynProg: Problem 5.2, 2 stages","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/StructDualDynProg.jl_prob5.2_2stages/","page":"StructDualDynProg: Problem 5.2, 2 stages","title":"StructDualDynProg: Problem 5.2, 2 stages","text":"This example comes from StochasticDualDynamicProgramming.jl","category":"page"},{"location":"examples/StructDualDynProg.jl_prob5.2_2stages/","page":"StructDualDynProg: Problem 5.2, 2 stages","title":"StructDualDynProg: Problem 5.2, 2 stages","text":"using SDDP, HiGHS, Test\n\nfunction test_prob52_2stages()\n model = SDDP.LinearPolicyGraph(\n stages = 2,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n ) do subproblem, stage\n # ========== Problem data ==========\n n = 4\n m = 3\n i_c = [16, 5, 32, 2]\n C = [25, 80, 6.5, 160]\n T = [8760, 7000, 1500] / 8760\n D2 = [diff([0, 3919, 7329, 10315]) diff([0, 7086, 9004, 11169])]\n p2 = [0.9, 0.1]\n # ========== State Variables ==========\n @variable(subproblem, x[i = 1:n] >= 0, SDDP.State, initial_value = 0.0)\n # ========== Variables ==========\n @variables(subproblem, begin\n y[1:n, 1:m] >= 0\n v[1:n] >= 0\n penalty >= 0\n rhs_noise[1:m] # Dummy variable for RHS noise term.\n end)\n # ========== Constraints ==========\n @constraints(\n subproblem,\n begin\n [i = 1:n], x[i].out == x[i].in + v[i]\n [i = 1:n], sum(y[i, :]) <= x[i].in\n [j = 1:m], sum(y[:, j]) + penalty >= rhs_noise[j]\n end\n )\n if stage == 2\n # No investment in last stage.\n @constraint(subproblem, sum(v) == 0)\n end\n # ========== Uncertainty ==========\n if stage != 1 # no uncertainty in first stage\n SDDP.parameterize(subproblem, 1:size(D2, 2), p2) do ω\n for j in 1:m\n JuMP.fix(rhs_noise[j], D2[j, ω])\n end\n end\n end\n # ========== Stage objective ==========\n @stageobjective(subproblem, i_c' * v + C' * y * T + 1e6 * penalty)\n return\n end\n SDDP.train(model; log_frequency = 10)\n @test SDDP.calculate_bound(model) ≈ 340315.52 atol = 0.1\n return\nend\n\ntest_prob52_2stages()","category":"page"},{"location":"examples/StructDualDynProg.jl_prob5.2_2stages/","page":"StructDualDynProg: Problem 5.2, 2 stages","title":"StructDualDynProg: Problem 5.2, 2 stages","text":"","category":"page"},{"location":"examples/StructDualDynProg.jl_prob5.2_2stages/","page":"StructDualDynProg: Problem 5.2, 2 stages","title":"StructDualDynProg: Problem 5.2, 2 stages","text":"This page was generated using 
Literate.jl.","category":"page"},{"location":"examples/stochastic_all_blacks/","page":"Stochastic All Blacks","title":"Stochastic All Blacks","text":"EditURL = \"stochastic_all_blacks.jl\"","category":"page"},{"location":"examples/stochastic_all_blacks/#Stochastic-All-Blacks","page":"Stochastic All Blacks","title":"Stochastic All Blacks","text":"","category":"section"},{"location":"examples/stochastic_all_blacks/","page":"Stochastic All Blacks","title":"Stochastic All Blacks","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/stochastic_all_blacks/","page":"Stochastic All Blacks","title":"Stochastic All Blacks","text":"using SDDP, HiGHS, Test\n\nfunction stochastic_all_blacks()\n # Number of time periods\n T = 3\n # Number of seats\n N = 2\n # R_ij = price of seat i at time j\n R = [3 3 6; 3 3 6]\n # Number of noises\n s = 3\n offers = [\n [[1, 1], [0, 0], [1, 1]],\n [[1, 0], [0, 0], [0, 0]],\n [[0, 1], [1, 0], [1, 1]],\n ]\n\n model = SDDP.LinearPolicyGraph(\n stages = T,\n sense = :Max,\n upper_bound = 100.0,\n optimizer = HiGHS.Optimizer,\n ) do sp, stage\n # Seat remaining?\n @variable(sp, 0 <= x[1:N] <= 1, SDDP.State, Bin, initial_value = 1)\n # Action: accept offer, or don't accept offer\n # We are allowed to accept some of the seats offered but not others\n @variable(sp, accept_offer[1:N], Bin)\n @variable(sp, offers_made[1:N])\n # Balance on seats\n @constraint(\n sp,\n balance[i in 1:N],\n x[i].in - x[i].out == accept_offer[i]\n )\n @stageobjective(sp, sum(R[i, stage] * accept_offer[i] for i in 1:N))\n SDDP.parameterize(sp, offers[stage]) do o\n return JuMP.fix.(offers_made, o)\n end\n @constraint(sp, accept_offer .<= offers_made)\n end\n\n SDDP.train(model; duality_handler = SDDP.LagrangianDuality())\n @test SDDP.calculate_bound(model) ≈ 8.0\n return\nend\n\nstochastic_all_blacks()","category":"page"},{"location":"examples/stochastic_all_blacks/","page":"Stochastic All Blacks","title":"Stochastic All Blacks","text":"","category":"page"},{"location":"examples/stochastic_all_blacks/","page":"Stochastic All Blacks","title":"Stochastic All Blacks","text":"This page was generated using Literate.jl.","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"EditURL = \"example_milk_producer.jl\"","category":"page"},{"location":"tutorial/example_milk_producer/#Example:-the-milk-producer","page":"Example: the milk producer","title":"Example: the milk producer","text":"","category":"section"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"(Image: ) (Image: )","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"The purpose of this tutorial is to demonstrate how to fit a Markovian policy graph to a univariate stochastic process.","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"This tutorial uses the following packages:","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"using SDDP\nimport HiGHS\nimport Plots","category":"page"},{"location":"tutorial/example_milk_producer/#Background","page":"Example: the milk 
producer","title":"Background","text":"","category":"section"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"A company produces milk for sale on a spot market each month. The quantity of milk they produce is uncertain, and so too is the price on the spot market.","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"The company can store the milk, but, over time, some milk spoils and must be discarded. Eventually the milk expires and is worthless.","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"The spot price is determined by an auction system, and so varies from month to month, but demonstrates serial correlation. In each auction, there is sufficient demand that the milk producer finds a buyer for all their widgets, regardless of the quantity they supply. Furthermore, the spot price is independent of the milk producer (they are a small player in the market).","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"The spot price is highly volatile, and is the result of a process that is out of the control of the company. To counteract their price risk, the company engages in a forward contracting programme.","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"The forward contracting programme is a deal for physical milk at a future date in time, up to four months in the future.","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"The futures price is the current spot price, plus some forward contango (the buyers gain certainty that they will receive the milk in the future).","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"In general, the milk company should forward contract (since they reduce their price risk), however they also have production risk. Therefore, it may be the case that they forward contract a fixed amount, but find that they do not produce enough milk to meet the fixed demand. 
They are then forced to buy additional milk on the spot market.","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"The goal of the milk company is to choose the extent to which they forward contract in order to maximise (risk-adjusted) revenues, whilst managing their production risk.","category":"page"},{"location":"tutorial/example_milk_producer/#A-stochastic-process-for-price","page":"Example: the milk producer","title":"A stochastic process for price","text":"","category":"section"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"It is outside the scope of this tutorial, but assume that we have gone away and analysed historical data to fit a stochastic process to the sequence of monthly auction spot prices.","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"One plausible model is a multiplicative auto-regressive model of order one, where the white noise term is modeled by a finite distribution of empirical residuals. We can simulate this stochastic process as follows:","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"function simulator()\n residuals = [0.0987, 0.199, 0.303, 0.412, 0.530, 0.661, 0.814, 1.010, 1.290]\n residuals = 0.1 * vcat(-residuals, 0.0, residuals)\n scenario = zeros(12)\n y, μ, α = 4.5, 6.0, 0.05\n for t in 1:12\n y = exp((1 - α) * log(y) + α * log(μ) + rand(residuals))\n scenario[t] = clamp(y, 3.0, 9.0)\n end\n return scenario\nend\n\nsimulator()","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"It may be helpful to visualize a number of simulations of the price process:","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"plot = Plots.plot(\n [simulator() for _ in 1:500];\n color = \"gray\",\n opacity = 0.2,\n legend = false,\n xlabel = \"Month\",\n ylabel = \"Price [\\$/kg]\",\n xlims = (1, 12),\n ylims = (3, 9),\n)","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"The prices gradually revert to the mean of $6/kg, and there is high volatility.","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"We can't incorporate this price process directly into SDDP.jl, but we can fit a SDDP.MarkovianGraph directly from the simulator:","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"graph = SDDP.MarkovianGraph(simulator; budget = 60, scenarios = 10_000);\nnothing # hide","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"Here budget is the number of nodes in the policy graph, and scenarios is the number of simulations to use when estimating the transition probabilities.","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"The graph contains too many nodes to 
be show, but we can plot it:","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"for ((t, price), edges) in graph.nodes\n for ((t′, price′), probability) in edges\n Plots.plot!(\n plot,\n [t, t′],\n [price, price′];\n color = \"red\",\n width = 3 * probability,\n )\n end\nend\n\nplot","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"That looks okay. Try changing budget and scenarios to see how different Markovian policy graphs can be created.","category":"page"},{"location":"tutorial/example_milk_producer/#Model","page":"Example: the milk producer","title":"Model","text":"","category":"section"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"Now that we have a Markovian graph, we can build the model. See if you can work out how we arrived at this formulation by reading the background description. Do all the variables and constraints make sense?","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"model = SDDP.PolicyGraph(\n graph;\n sense = :Max,\n upper_bound = 1e4,\n optimizer = HiGHS.Optimizer,\n) do sp, node\n t, price = node\n c_contango = [1.0, 1.025, 1.05, 1.075]\n c_transaction, c_perish_factor, c_buy_premium = 0.01, 0.95, 1.5\n F, P = length(c_contango), 5\n @variable(sp, 0 <= x_forward[1:F], SDDP.State, initial_value = 0)\n @variable(sp, 0 <= x_stock[p = 1:P], SDDP.State, initial_value = 0)\n @variable(sp, 0 <= u_spot_sell[1:P])\n @variable(sp, 0 <= u_spot_buy)\n @variable(sp, 0 <= u_forward_deliver[1:P])\n @variable(sp, 0 <= u_forward_sell[1:F] <= 10)\n @variable(sp, 0 <= u_production)\n for f in 1:F\n if t + f >= 12\n fix(u_forward_sell[f], 0.0; force = true)\n end\n end\n @constraint(\n sp,\n [i = 1:F-1],\n x_forward[i].out == x_forward[i+1].in + u_forward_sell[i],\n )\n @constraint(sp, x_forward[F].out == u_forward_sell[F])\n @constraint(sp, x_forward[1].in == sum(u_forward_deliver))\n @constraint(\n sp,\n x_stock[1].out ==\n u_production - u_forward_deliver[1] - u_spot_sell[1] + u_spot_buy\n )\n @constraint(\n sp,\n [i = 2:P],\n x_stock[i].out ==\n c_perish_factor * x_stock[i-1].in - u_forward_deliver[i] -\n u_spot_sell[i]\n )\n Ω = [(price, (production = p,)) for p in range(0.1, 0.2; length = 5)]\n SDDP.parameterize(sp, Ω) do (price, ω)\n set_upper_bound(u_production, ω.production)\n @stageobjective(\n sp,\n price * sum(u_spot_sell) +\n price * sum(c_contango[f] * u_forward_sell[f] for f in 1:F) -\n price * c_buy_premium * u_spot_buy -\n c_transaction * sum(u_forward_sell)\n )\n end\nend","category":"page"},{"location":"tutorial/example_milk_producer/#Training-a-policy","page":"Example: the milk producer","title":"Training a policy","text":"","category":"section"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"Now we have a model, we train a policy. The SDDP.SimulatorSamplingScheme is used in the forward pass. It generates an out-of-sample sequence of prices using simulator and traverses the closest sequence of nodes in the policy graph. 
When calling SDDP.parameterize for each subproblem, it uses the new out-of-sample price instead of the price associated with the Markov node.","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"SDDP.train(\n model;\n time_limit = 20,\n risk_measure = SDDP.EAVaR(; lambda = 0.5, beta = 0.25),\n sampling_scheme = SDDP.SimulatorSamplingScheme(simulator),\n)","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"warning: Warning\nWe're intentionally terminating the training early so that the documentation doesn't take too long to build. If you run this example locally, increase the time limit.","category":"page"},{"location":"tutorial/example_milk_producer/#Simulating-the-policy","page":"Example: the milk producer","title":"Simulating the policy","text":"","category":"section"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"When simulating the policy, we can also use the SDDP.SimulatorSamplingScheme.","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"simulations = SDDP.simulate(\n model,\n 200,\n Symbol[:x_forward, :x_stock, :u_spot_sell, :u_spot_buy];\n sampling_scheme = SDDP.SimulatorSamplingScheme(simulator),\n);\nnothing # hide","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"To show how the sampling scheme uses the new out-of-sample price instead of the price associated with the Markov node, compare the index of the Markov state visited in stage 12 of the first simulation:","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"simulations[1][12][:node_index]","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"to the realization of the noise (price, ω) passed to SDDP.parameterize:","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"simulations[1][12][:noise_term]","category":"page"},{"location":"tutorial/example_milk_producer/#Visualizing-the-policy","page":"Example: the milk producer","title":"Visualizing the policy","text":"","category":"section"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"Finally, we can plot the policy to gain insight (although note that we terminated the training early, so we should re-train the policy for more iterations before making too many judgements).","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"Plots.plot(\n SDDP.publication_plot(simulations; title = \"sum(x_stock.out)\") do data\n return sum(y.out for y in data[:x_stock])\n end,\n SDDP.publication_plot(simulations; title = \"sum(x_forward.out)\") do data\n return sum(y.out for y in data[:x_forward])\n end,\n SDDP.publication_plot(simulations; title = \"u_spot_buy\") do data\n return data[:u_spot_buy]\n end,\n SDDP.publication_plot(simulations; title = \"sum(u_spot_sell)\") do data\n return 
sum(data[:u_spot_sell])\n end;\n layout = (2, 2),\n)","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/FAST_production_management/","page":"FAST: the production management problem","title":"FAST: the production management problem","text":"EditURL = \"FAST_production_management.jl\"","category":"page"},{"location":"examples/FAST_production_management/#FAST:-the-production-management-problem","page":"FAST: the production management problem","title":"FAST: the production management problem","text":"","category":"section"},{"location":"examples/FAST_production_management/","page":"FAST: the production management problem","title":"FAST: the production management problem","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/FAST_production_management/","page":"FAST: the production management problem","title":"FAST: the production management problem","text":"An implementation of the Production Management example from FAST","category":"page"},{"location":"examples/FAST_production_management/","page":"FAST: the production management problem","title":"FAST: the production management problem","text":"using SDDP, HiGHS, Test\n\nfunction fast_production_management(; cut_type)\n DEMAND = [2, 10]\n H = 3\n N = 2\n C = [0.2, 0.7]\n S = 2 .+ [0.33, 0.54]\n model = SDDP.LinearPolicyGraph(;\n stages = H,\n lower_bound = -50.0,\n optimizer = HiGHS.Optimizer,\n ) do sp, t\n @variable(sp, x[1:N] >= 0, SDDP.State, initial_value = 0.0)\n @variables(sp, begin\n s[i = 1:N] >= 0\n d\n end)\n @constraints(sp, begin\n [i = 1:N], s[i] <= x[i].in\n sum(s) <= d\n end)\n SDDP.parameterize(sp, t == 1 ? 
[0] : DEMAND) do ω\n return JuMP.fix(d, ω)\n end\n @stageobjective(sp, sum(C[i] * x[i].out for i in 1:N) - S's)\n end\n SDDP.train(model; cut_type = cut_type, print_level = 2, log_frequency = 5)\n @test SDDP.calculate_bound(model) ≈ -23.96 atol = 1e-2\nend\n\nfast_production_management(cut_type = SDDP.SINGLE_CUT)\nfast_production_management(cut_type = SDDP.MULTI_CUT)","category":"page"},{"location":"examples/FAST_production_management/","page":"FAST: the production management problem","title":"FAST: the production management problem","text":"","category":"page"},{"location":"examples/FAST_production_management/","page":"FAST: the production management problem","title":"FAST: the production management problem","text":"This page was generated using Literate.jl.","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"EditURL = \"example_reservoir.jl\"","category":"page"},{"location":"tutorial/example_reservoir/#Example:-deterministic-to-stochastic","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"","category":"section"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"(Image: ) (Image: )","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"This tutorial was written by Oscar Dowson and Andy Philpott for the 2023 Winter School \"Planning under Uncertainty in Energy Markets,\" held March 26 to 31 in Geilo, Norway.","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Download this tutorial as a notebook.","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"The purpose of this tutorial is to explain how we can go from a deterministic time-staged optimal control model in JuMP to a multistage stochastic optimization model in SDDP.jl. 
As a motivating problem, we consider the hydro-thermal problem with a single reservoir.","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"For variations on this problem, see the examples and tutorials:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"An introduction to SDDP.jl\nHydro-thermal scheduling\nHydro valleys\nInfinite horizon hydro-thermal","category":"page"},{"location":"tutorial/example_reservoir/#Packages","page":"Example: deterministic to stochastic","title":"Packages","text":"","category":"section"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"This tutorial requires the following packages:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"using JuMP\nusing SDDP\nimport CSV\nimport DataFrames\nimport HiGHS\nimport Plots","category":"page"},{"location":"tutorial/example_reservoir/#Data","page":"Example: deterministic to stochastic","title":"Data","text":"","category":"section"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"The data for this tutorial is contained in the example_reservoir.csv file in the SDDP.jl repository. To run locally, download the CSV file, then change filename to point to the location where you downloaded it to.","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"filename = joinpath(@__DIR__, \"example_reservoir.csv\")\ndata = CSV.read(filename, DataFrames.DataFrame)","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"It's easier to visualize the data if we plot it:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Plots.plot(\n Plots.plot(data[!, :inflow], ylabel = \"Inflow\"),\n Plots.plot(data[!, :demand], ylabel = \"Demand\"),\n Plots.plot(data[!, :cost], ylabel = \"Cost\", xlabel = \"Week\");\n layout = (3, 1),\n legend = false,\n)","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"The number of weeks will be useful later:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"T = size(data, 1)","category":"page"},{"location":"tutorial/example_reservoir/#Deterministic-JuMP-model","page":"Example: deterministic to stochastic","title":"Deterministic JuMP model","text":"","category":"section"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"To start, we construct a deterministic model in pure JuMP.","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Create a JuMP model, using HiGHS as 
the optimizer:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"model = Model(HiGHS.Optimizer)\nset_silent(model)","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"x[t]: the amount of water in the reservoir at the start of stage t:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"reservoir_max = 320.0\n@variable(model, 0 <= x[1:T+1] <= reservoir_max)","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"We need an initial condition for x[1]. Fix it to 300 units:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"reservoir_initial = 300\nfix(x[1], reservoir_initial; force = true)","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"u[t]: the amount of water to flow through the turbine in stage t:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"flow_max = 12\n@variable(model, 0 <= u[i = 1:T] <= flow_max)","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"s[t]: the amount of water to spill from the reservoir in stage t, bypassing the turbine:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"@variable(model, 0 <= s[1:T])","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"r[t]: the amount of thermal generation in stage t:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"@variable(model, 0 <= r[1:T])","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"i[t]: the amount of inflow to the reservoir in stage t:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"@variable(model, i[1:T])","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"For this model, our inflow is fixed, so we fix it to the data we have:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"for t in 1:T\n fix(i[t], data[t, :inflow])\nend","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"The water balance constraint says that the water in the reservoir at the start of 
stage t+1 is the water in the reservoir at the start of stage t, less the amount flowed through the turbine, u[t], less the amount spilled, s[t], plus the amount of inflow, i[t], into the reservoir:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"@constraint(model, [t = 1:T], x[t+1] == x[t] - u[t] - s[t] + i[t])","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"We also need a supply = demand constraint. In practice, the units of this would be in MWh, and there would be a conversion factor between the amount of water flowing through the turbine and the power output. To simplify, we assume that power and water have the same units, so that one \"unit\" of demand is equal to one \"unit\" of the reservoir x[t]:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"@constraint(model, [t = 1:T], u[t] + r[t] == data[t, :demand])","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Our objective is to minimize the cost of thermal generation:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"@objective(model, Min, sum(data[t, :cost] * r[t] for t in 1:T))","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Let's optimize and check the solution","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"optimize!(model)\nsolution_summary(model)","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"The total cost is:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"objective_value(model)","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Here's a plot of demand and generation:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Plots.plot(data[!, :demand]; label = \"Demand\", xlabel = \"Week\")\nPlots.plot!(value.(r), label = \"Thermal\")\nPlots.plot!(value.(u), label = \"Hydro\")","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"And here's the storage over time:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Plots.plot(value.(x); label = \"Storage\", xlabel = \"Week\")","category":"page"},{"location":"tutorial/example_reservoir/#Deterministic-SDDP-model","page":"Example: deterministic to stochastic","title":"Deterministic SDDP 
model","text":"","category":"section"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"For the next step, we show how to decompose our JuMP model into SDDP.jl. It should obtain the same solution!","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"model = SDDP.LinearPolicyGraph(\n stages = T,\n sense = :Min,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n) do subproblem, t\n @variable(\n subproblem,\n 0 <= x <= reservoir_max,\n SDDP.State,\n initial_value = reservoir_initial,\n )\n @variable(subproblem, 0 <= u <= flow_max)\n @variable(subproblem, 0 <= r)\n @variable(subproblem, 0 <= s)\n @variable(subproblem, i)\n fix(i, data[t, :inflow])\n @constraint(subproblem, x.out == x.in - u - s + i)\n @constraint(subproblem, u + r == data[t, :demand])\n @stageobjective(subproblem, data[t, :cost] * r)\nend","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Can you see how the JuMP model maps to this syntax? We have created a SDDP.LinearPolicyGraph with T stages, we're minimizing, and we're using HiGHS.Optimizer as the optimizer.","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"A few bits might be non-obvious:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"We need to provide a lower bound for the objective function. Since our costs are always positive, a valid lower bound for the total cost is 0.0.\nWe define x as a state variable using SDDP.State. A state variable is any variable that flows through time, and for which we need to know the value of it in stage t-1 to compute the best action in stage t. The state variable x is actually two decision variables, x.in and x.out, which represent x[t] and x[t+1] respectively.\nWe need to use @stageobjective instead of @objective.","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Instead of calling JuMP.optimize!, SDDP.jl uses a train method. With our machine learning hat on, you can think of SDDP.jl as training a function for each stage that accepts the current reservoir state as input and returns the optimal actions as output. It is also an iterative algorithm, so we need to specify when it should terminate:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"SDDP.train(model; iteration_limit = 10)","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"As a quick sanity check, did we get the same cost as our JuMP model?","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"SDDP.calculate_bound(model)","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"That's good. 
Next, to check the value of the decision variables. This isn't as straight forward as our JuMP model. Instead, we need to simulate the policy, and then extract the values of the decision variables from the results of the simulation.","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Since our model is deterministic, we need only 1 replication of the simulation, and we want to record the values of the x, u, and r variables:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"n_replications = 1\nsimulations = SDDP.simulate(model, n_replications, [:x, :u, :r]);\n","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"The simulations vector is too big to show. But it contains one element for each replication, and each replication contains one dictionary for each stage.","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"For example, the data corresponding to the tenth stage in the first replication is:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"simulations[1][10]","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Let's grab the trace of the r and u variables in the first replication, and then plot them:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"r_sim = [sim[:r] for sim in simulations[1]]\nu_sim = [sim[:u] for sim in simulations[1]]\n\nPlots.plot(data[!, :demand]; label = \"Demand\", xlabel = \"Week\")\nPlots.plot!(r_sim, label = \"Thermal\")\nPlots.plot!(u_sim, label = \"Hydro\")","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Perfect. That's the same as we got before.","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Now let's look at x. This is a little more complicated, because we need to grab the outgoing value of the state variable in each stage:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"x_sim = [sim[:x].out for sim in simulations[1]]\n\nPlots.plot(x_sim; label = \"Storage\", xlabel = \"Week\")","category":"page"},{"location":"tutorial/example_reservoir/#Stochastic-SDDP-model","page":"Example: deterministic to stochastic","title":"Stochastic SDDP model","text":"","category":"section"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Now we add some randomness to our model. 
In each stage, we assume that the inflow could be: 2 units lower, with 30% probability; the same as before, with 40% probability; or 5 units higher, with 30% probability.","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"model = SDDP.LinearPolicyGraph(\n stages = T,\n sense = :Min,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n) do sp, t\n @variable(\n sp,\n 0 <= x <= reservoir_max,\n SDDP.State,\n initial_value = reservoir_initial,\n )\n @variable(sp, 0 <= u <= flow_max)\n @variable(sp, 0 <= r)\n @variable(sp, 0 <= s)\n @variable(sp, i)\n # <--- This bit is new\n Ω = [data[t, :inflow] - 2, data[t, :inflow], data[t, :inflow] + 5]\n P = [0.3, 0.4, 0.3]\n SDDP.parameterize(sp, Ω, P) do ω\n fix(i, ω)\n return\n end\n # --->\n @constraint(sp, x.out == x.in - u - s + i)\n @constraint(sp, u + r == data[t, :demand])\n @stageobjective(sp, data[t, :cost] * r)\n return\nend","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Can you see the differences?","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Let's train our new model. We need more iterations because of the stochasticity:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"SDDP.train(model; iteration_limit = 100)","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Now simulate the policy. This time we do 100 replications because the policy is now stochastic instead of deterministic:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"n_replications = 100\nsimulations = SDDP.simulate(model, n_replications, [:x, :u, :r, :i]);\n","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"And let's plot the use of thermal generation in each replication:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"plot = Plots.plot(data[!, :demand]; label = \"Demand\", xlabel = \"Week\")\nfor simulation in simulations\n Plots.plot!(plot, [sim[:r] for sim in simulation], label = \"\")\nend\nplot","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Viewing and interpreting static plots like this is difficult, particularly as the number of simulations grows. 
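One simple alternative is to summarize the replications numerically. The following sketch is not part of the original tutorial: it assumes the simulations vector created above, uses the Statistics standard library, and relies on the fact that SDDP.jl records the cost of each stage under the :stage_objective key. It reports the mean and the 95% quantile of the total cost across replications:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"import Statistics\n# Total cost of each replication is the sum of its stage objectives.\nobjectives = [sum(stage[:stage_objective] for stage in replication) for replication in simulations]\nStatistics.mean(objectives), Statistics.quantile(objectives, 0.95)","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"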
SDDP.jl includes an interactive SpaghettiPlot that makes things easier:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"plot = SDDP.SpaghettiPlot(simulations)\nSDDP.add_spaghetti(plot; title = \"Storage\") do sim\n return sim[:x].out\nend\nSDDP.add_spaghetti(plot; title = \"Hydro\") do sim\n return sim[:u]\nend\nSDDP.add_spaghetti(plot; title = \"Inflow\") do sim\n return sim[:i]\nend\nSDDP.plot(\n plot,\n \"spaghetti_plot.html\";\n # We need this to build the documentation. Set to true if running locally.\n open = false,\n)","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"info: Info\nIf you have trouble viewing the plot, you can open it in a new window.","category":"page"},{"location":"tutorial/example_reservoir/#Next-steps","page":"Example: deterministic to stochastic","title":"Next steps","text":"","category":"section"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Take this model and modify it following the suggestions below. The SDDP.jl documentation has a range of similar examples and hints for how to achieve them.","category":"page"},{"location":"tutorial/example_reservoir/#Terminal-value-functions","page":"Example: deterministic to stochastic","title":"Terminal value functions","text":"","category":"section"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"The model ends with an empty reservoir. That isn't ideal for the following year. Can you modify the objective in the final stage to encourage ending the year with a full reservoir?","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"You might write some variation of:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"if t == 52\n @variable(sp, terminal_cost_function >= 0)\n @constraint(sp, terminal_cost_function >= reservoir_initial - x.out)\n @stageobjective(sp, data[t, :cost] * r + terminal_cost_function)\nelse\n @stageobjective(sp, data[t, :cost] * r)\nend","category":"page"},{"location":"tutorial/example_reservoir/#Higher-fidelity-modeling","page":"Example: deterministic to stochastic","title":"Higher fidelity modeling","text":"","category":"section"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Our model is very basic. There are many aspects that we could improve:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Instead of hard-coding a terminal value function, can you solve an infinite horizon model? 
What are the differences?\nCan you add a second reservoir to make a river chain?\nCan you modify the problem and data to use proper units, including a conversion between the volume of water flowing through the turbine and the electrical power output?\nCan you add random demand or cost data as well as inflows?","category":"page"},{"location":"tutorial/example_reservoir/#Algorithmic-considerations","page":"Example: deterministic to stochastic","title":"Algorithmic considerations","text":"","category":"section"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"The algorithm implemented by SDDP.jl has a number of tuneable parameters:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Try using a different lower bound. What happens if it is too low, or too high?\nWas our stopping rule correct? What happens if we use fewer or more iterations? What other stopping rules could you try?\nCan you add a risk measure to make the policy risk-averse?","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"This page was generated using Literate.jl.","category":"page"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"CurrentModule = SDDP","category":"page"},{"location":"changelog/#Release-notes","page":"Release notes","title":"Release notes","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.","category":"page"},{"location":"changelog/#v1.6.2-(August-24,-2023)","page":"Release notes","title":"v1.6.2 (August 24, 2023)","text":"","category":"section"},{"location":"changelog/#Fixed","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"MSPFormat now detects and exploits stagewise independent lattices (#653)\nFixed set_optimizer for models read from file (#654)","category":"page"},{"location":"changelog/#Other","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed typo in pglib_opf.jl (#647)\nFixed documentation build and added color (#652)","category":"page"},{"location":"changelog/#v1.6.1-(July-20,-2023)","page":"Release notes","title":"v1.6.1 (July 20, 2023)","text":"","category":"section"},{"location":"changelog/#Fixed-2","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed bugs in MSPFormat reader (#638) (#639)","category":"page"},{"location":"changelog/#Other-2","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Clarified OutOfSampleMonteCarlo docstring (#643)","category":"page"},{"location":"changelog/#v1.6.0-(July-3,-2023)","page":"Release notes","title":"v1.6.0 (July 3, 2023)","text":"","category":"section"},{"location":"changelog/#Added","page":"Release 
notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added RegularizedForwardPass (#624)\nAdded FirstStageStoppingRule (#634)","category":"page"},{"location":"changelog/#Other-3","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Removed an unbound type parameter (#632)\nFixed typo in docstring (#633)\nAdded Here-and-now and hazard-decision tutorial (#635)","category":"page"},{"location":"changelog/#v1.5.1-(June-30,-2023)","page":"Release notes","title":"v1.5.1 (June 30, 2023)","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"This release contains a number of minor code changes, but it has a large impact on the content that is printed to screen. In particular, we now log periodically, instead of each iteration, and a \"good\" stopping rule is used as the default if none are specified. Try using SDDP.train(model) to see the difference.","category":"page"},{"location":"changelog/#Other-4","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed various typos in the documentation (#617)\nFixed printing test after changes in JuMP (#618)\nSet SimulationStoppingRule as the default stopping rule (#619)\nChanged the default logging frequency. Pass log_every_seconds = 0.0 to train to revert to the old behavior. (#620)\nAdded example usage with Distributions.jl (@slwu89) (#622)\nRemoved the numerical issue @warn (#627)\nImproved the quality of docstrings (#630)","category":"page"},{"location":"changelog/#v1.5.0-(May-14,-2023)","page":"Release notes","title":"v1.5.0 (May 14, 2023)","text":"","category":"section"},{"location":"changelog/#Added-2","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added the ability to use a different model for the forward pass. This is a novel feature that lets you train better policies when the model is non-convex or does not have a well-defined dual. See the Alternative forward models tutorial in which we train convex and non-convex formulations of the optimal power flow problem. (#611)","category":"page"},{"location":"changelog/#Other-5","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Updated missing changelog entries (#608)\nRemoved global variables (#610)\nConverted the Options struct to keyword arguments. This struct was a private implementation detail, but the change is breaking if you developed an extension to SDDP that touched these internals. 
(#612)\nFixed some typos (#613)","category":"page"},{"location":"changelog/#v1.4.0-(May-8,-2023)","page":"Release notes","title":"v1.4.0 (May 8, 2023)","text":"","category":"section"},{"location":"changelog/#Added-3","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added SDDP.SimulationStoppingRule (#598)\nAdded sampling_scheme argument to SDDP.write_to_file (#607)","category":"page"},{"location":"changelog/#Fixed-3","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed parsing of some MSPFormat files (#602) (#604)\nFixed printing in header (#605)","category":"page"},{"location":"changelog/#v1.3.0-(May-3,-2023)","page":"Release notes","title":"v1.3.0 (May 3, 2023)","text":"","category":"section"},{"location":"changelog/#Added-4","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added experimental support for SDDP.MSPFormat.read_from_file (#593)","category":"page"},{"location":"changelog/#Other-6","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Updated to StochOptFormat v0.3 (#600)","category":"page"},{"location":"changelog/#v1.2.1-(May-1,-2023)","page":"Release notes","title":"v1.2.1 (May 1, 2023)","text":"","category":"section"},{"location":"changelog/#Fixed-4","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed log_every_seconds (#597)","category":"page"},{"location":"changelog/#v1.2.0-(May-1,-2023)","page":"Release notes","title":"v1.2.0 (May 1, 2023)","text":"","category":"section"},{"location":"changelog/#Added-5","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added SDDP.SimulatorSamplingScheme (#594)\nAdded log_every_seconds argument to SDDP.train (#595)","category":"page"},{"location":"changelog/#Other-7","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Tweaked how the log is printed (#588)\nUpdated to StochOptFormat v0.2 (#592)","category":"page"},{"location":"changelog/#v1.1.4-(April-10,-2023)","page":"Release notes","title":"v1.1.4 (April 10, 2023)","text":"","category":"section"},{"location":"changelog/#Fixed-5","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Logs are now flushed every iteration (#584)","category":"page"},{"location":"changelog/#Other-8","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added docstrings to various functions (#581)\nMinor documentation updates (#580)\nClarified integrality documentation (#582)\nUpdated the README (#585)\nNumber of numerical issues is now printed to the log (#586)","category":"page"},{"location":"changelog/#v1.1.3-(April-2,-2023)","page":"Release notes","title":"v1.1.3 (April 2, 2023)","text":"","category":"section"},{"location":"changelog/#Other-9","page":"Release 
notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed typo in Example: deterministic to stochastic tutorial (#578)\nFixed typo in documentation of SDDP.simulate (#577)","category":"page"},{"location":"changelog/#v1.1.2-(March-18,-2023)","page":"Release notes","title":"v1.1.2 (March 18, 2023)","text":"","category":"section"},{"location":"changelog/#Other-10","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added Example: deterministic to stochastic tutorial (#572)","category":"page"},{"location":"changelog/#v1.1.1-(March-16,-2023)","page":"Release notes","title":"v1.1.1 (March 16, 2023)","text":"","category":"section"},{"location":"changelog/#Other-11","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed email in Project.toml\nAdded notebook to documentation tutorials (#571)","category":"page"},{"location":"changelog/#v1.1.0-(January-12,-2023)","page":"Release notes","title":"v1.1.0 (January 12, 2023)","text":"","category":"section"},{"location":"changelog/#Added-6","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added the node_name_parser argument to SDDP.write_cuts_to_file and added the option to skip nodes in SDDP.read_cuts_from_file (#565)","category":"page"},{"location":"changelog/#v1.0.0-(January-3,-2023)","page":"Release notes","title":"v1.0.0 (January 3, 2023)","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Although we're bumping MAJOR version, this is a non-breaking release. Going forward:","category":"page"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"New features will bump the MINOR version\nBug fixes, maintenance, and documentation updates will bump the PATCH version\nWe will support only the Long Term Support (currently v1.6.7) and the latest patch (currently v1.8.4) releases of Julia. Updates to the LTS version will bump the MINOR version\nUpdates to the compat bounds of package dependencies will bump the PATCH version.","category":"page"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"We do not intend any breaking changes to the public API, which would require a new MAJOR release. The public API is everything defined in the documentation. 
Anything not in the documentation is considered private and may change in any PATCH release.","category":"page"},{"location":"changelog/#Added-7","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added num_nodes argument to SDDP.UnicyclicGraph (#562)\nAdded support for passing an optimizer to SDDP.Asynchronous (#545)","category":"page"},{"location":"changelog/#Other-12","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Updated Plotting tools to use live plots (#563)\nAdded vale as a linter (#565)\nImproved documentation for initializing a parallel scheme (#566)","category":"page"},{"location":"changelog/#v0.4.9-(January-3,-2023)","page":"Release notes","title":"v0.4.9 (January 3, 2023)","text":"","category":"section"},{"location":"changelog/#Added-8","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added SDDP.UnicyclicGraph (#556)","category":"page"},{"location":"changelog/#Other-13","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added tutorial on Markov Decision Processes (#556)\nAdded two-stage newsvendor tutorial (#557)\nRefactored the layout of the documentation (#554) (#555)\nUpdated copyright to 2023 (#558)\nFixed errors in the documentation (#561)","category":"page"},{"location":"changelog/#v0.4.8-(December-19,-2022)","page":"Release notes","title":"v0.4.8 (December 19, 2022)","text":"","category":"section"},{"location":"changelog/#Added-9","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added terminate_on_cycle option to SDDP.Historical (#549)\nAdded include_last_node option to SDDP.DefaultForwardPass (#547)","category":"page"},{"location":"changelog/#Fixed-6","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Reverted then fixed (#531) because it failed to account for problems with integer variables (#546) (#551)","category":"page"},{"location":"changelog/#v0.4.7-(December-17,-2022)","page":"Release notes","title":"v0.4.7 (December 17, 2022)","text":"","category":"section"},{"location":"changelog/#Added-10","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added initial_node support to InSampleMonteCarlo and OutOfSampleMonteCarlo (#535)","category":"page"},{"location":"changelog/#Fixed-7","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Rethrow InterruptException when solver is interrupted (#534)\nFixed numerical recovery when we need dual solutions (#531) (Thanks @bfpc)\nFixed re-using the dashboard = true option between solves (#538)\nFixed bug when no @stageobjective is set (now defaults to 0.0) (#539)\nFixed errors thrown when invalid inputs are provided to add_objective_state (#540)","category":"page"},{"location":"changelog/#Other-14","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release 
notes","text":"Drop support for Julia versions prior to 1.6 (#533)\nUpdated versions of dependencies (#522) (#533)\nSwitched to HiGHS in the documentation and tests (#533)\nAdded license headers (#519)\nFixed link in air conditioning example (#521) (Thanks @conema)\nClarified variable naming in deterministic equivalent (#525) (Thanks @lucasprocessi)\nAdded this change log (#536)\nCuts are now written to model.cuts.json when numerical instability is discovered. This can aid debugging because it allows to you reload the cuts as of the iteration that caused the numerical issue (#537)","category":"page"},{"location":"changelog/#v0.4.6-(March-25,-2022)","page":"Release notes","title":"v0.4.6 (March 25, 2022)","text":"","category":"section"},{"location":"changelog/#Other-15","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Updated to JuMP v1.0 (#517)","category":"page"},{"location":"changelog/#v0.4.5-(March-9,-2022)","page":"Release notes","title":"v0.4.5 (March 9, 2022)","text":"","category":"section"},{"location":"changelog/#Fixed-8","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed issue with set_silent in a subproblem (#510)","category":"page"},{"location":"changelog/#Other-16","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed many typos (#500) (#501) (#506) (#511) (Thanks @bfpc)\nUpdate to JuMP v0.23 (#514)\nAdded auto-regressive tutorial (#507)","category":"page"},{"location":"changelog/#v0.4.4-(December-11,-2021)","page":"Release notes","title":"v0.4.4 (December 11, 2021)","text":"","category":"section"},{"location":"changelog/#Added-11","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added BanditDuality (#471)\nAdded benchmark scripts (#475) (#476) (#490)\nwrite_cuts_to_file now saves visited states (#468)","category":"page"},{"location":"changelog/#Fixed-9","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed BoundStalling in a deterministic policy (#470) (#474)\nFixed magnitude warning with zero coefficients (#483)","category":"page"},{"location":"changelog/#Other-17","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Improvements to LagrangianDuality (#481) (#482) (#487)\nImprovements to StrengthenedConicDuality (#486)\nSwitch to functional form for the tests (#478)\nFixed typos (#472) (Thanks @vfdev-5)\nUpdate to JuMP v0.22 (#498)","category":"page"},{"location":"changelog/#v0.4.3-(August-31,-2021)","page":"Release notes","title":"v0.4.3 (August 31, 2021)","text":"","category":"section"},{"location":"changelog/#Added-12","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added biobjective solver (#462)\nAdded forward_pass_callback (#466)","category":"page"},{"location":"changelog/#Other-18","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Update tutorials and documentation 
(#459) (#465)\nOrganize how paper materials are stored (#464)","category":"page"},{"location":"changelog/#v0.4.2-(August-24,-2021)","page":"Release notes","title":"v0.4.2 (August 24, 2021)","text":"","category":"section"},{"location":"changelog/#Fixed-10","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed a bug in Lagrangian duality (#457)","category":"page"},{"location":"changelog/#v0.4.1-(August-23,-2021)","page":"Release notes","title":"v0.4.1 (August 23, 2021)","text":"","category":"section"},{"location":"changelog/#Other-19","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Minor changes to our implementation of LagrangianDuality (#454) (#455)","category":"page"},{"location":"changelog/#v0.4.0-(August-17,-2021)","page":"Release notes","title":"v0.4.0 (August 17, 2021)","text":"","category":"section"},{"location":"changelog/#Breaking","page":"Release notes","title":"Breaking","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"A large refactoring for how we handle stochastic integer programs. This added support for things like SDDP.ContinuousConicDuality and SDDP.LagrangianDuality. It was breaking because we removed the integrality_handler argument to PolicyGraph. (#499) (#453)","category":"page"},{"location":"changelog/#Other-20","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Documentation improvements (#447) (#448) (#450)","category":"page"},{"location":"changelog/#v0.3.17-(July-6,-2021)","page":"Release notes","title":"v0.3.17 (July 6, 2021)","text":"","category":"section"},{"location":"changelog/#Added-13","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added SDDP.PSRSamplingScheme (#426)","category":"page"},{"location":"changelog/#Other-21","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Display more model attributes (#438)\nDocumentation improvements (#433) (#437) (#439)","category":"page"},{"location":"changelog/#v0.3.16-(June-17,-2021)","page":"Release notes","title":"v0.3.16 (June 17, 2021)","text":"","category":"section"},{"location":"changelog/#Added-14","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added SDDP.RiskAdjustedForwardPass (#413)\nAllow SDDP.Historical to sample sequentially (#420)","category":"page"},{"location":"changelog/#Other-22","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Update risk measure docstrings (#418)","category":"page"},{"location":"changelog/#v0.3.15-(June-1,-2021)","page":"Release notes","title":"v0.3.15 (June 1, 2021)","text":"","category":"section"},{"location":"changelog/#Added-15","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added SDDP.StoppingChain","category":"page"},{"location":"changelog/#Fixed-11","page":"Release 
notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed scoping bug in SDDP.@stageobjective (#407)\nFixed a bug when the initial point is infeasible (#411)\nSet subproblems to silent by default (#409)","category":"page"},{"location":"changelog/#Other-23","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Add JuliaFormatter (#412)\nDocumentation improvements (#406) (#408)","category":"page"},{"location":"changelog/#v0.3.14-(March-30,-2021)","page":"Release notes","title":"v0.3.14 (March 30, 2021)","text":"","category":"section"},{"location":"changelog/#Fixed-12","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed O(N^2) behavior in get_same_children (#393)","category":"page"},{"location":"changelog/#v0.3.13-(March-27,-2021)","page":"Release notes","title":"v0.3.13 (March 27, 2021)","text":"","category":"section"},{"location":"changelog/#Fixed-13","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed bug in print.jl\nFixed compat of Reexport (#388)","category":"page"},{"location":"changelog/#v0.3.12-(March-22,-2021)","page":"Release notes","title":"v0.3.12 (March 22, 2021)","text":"","category":"section"},{"location":"changelog/#Added-16","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added problem statistics to header (#385) (#386)","category":"page"},{"location":"changelog/#Fixed-14","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed subtypes in visualization (#384)","category":"page"},{"location":"changelog/#v0.3.11-(March-22,-2021)","page":"Release notes","title":"v0.3.11 (March 22, 2021)","text":"","category":"section"},{"location":"changelog/#Fixed-15","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed constructor in direct mode (#383)","category":"page"},{"location":"changelog/#Other-24","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fix documentation (#379)","category":"page"},{"location":"changelog/#v0.3.10-(February-23,-2021)","page":"Release notes","title":"v0.3.10 (February 23, 2021)","text":"","category":"section"},{"location":"changelog/#Fixed-16","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed seriescolor in publication plot (#376)","category":"page"},{"location":"changelog/#v0.3.9-(February-20,-2021)","page":"Release notes","title":"v0.3.9 (February 20, 2021)","text":"","category":"section"},{"location":"changelog/#Added-17","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Add option to simulate with different incoming state (#372)\nAdded warning for cuts with high dynamic range (#373)","category":"page"},{"location":"changelog/#Fixed-17","page":"Release 
notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed seriesalpha in publication plot (#375)","category":"page"},{"location":"changelog/#v0.3.8-(January-19,-2021)","page":"Release notes","title":"v0.3.8 (January 19, 2021)","text":"","category":"section"},{"location":"changelog/#Other-25","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Documentation improvements (#367) (#369) (#370)","category":"page"},{"location":"changelog/#v0.3.7-(January-8,-2021)","page":"Release notes","title":"v0.3.7 (January 8, 2021)","text":"","category":"section"},{"location":"changelog/#Other-26","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Documentation improvements (#362) (#363) (#365) (#366)\nBump copyright (#364)","category":"page"},{"location":"changelog/#v0.3.6-(December-17,-2020)","page":"Release notes","title":"v0.3.6 (December 17, 2020)","text":"","category":"section"},{"location":"changelog/#Other-27","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fix typos (#358)\nCollapse navigation bar in docs (#359)\nUpdate TagBot.yml (#361)","category":"page"},{"location":"changelog/#v0.3.5-(November-18,-2020)","page":"Release notes","title":"v0.3.5 (November 18, 2020)","text":"","category":"section"},{"location":"changelog/#Other-28","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Update citations (#348)\nSwitch to GitHub actions (#355)","category":"page"},{"location":"changelog/#v0.3.4-(August-25,-2020)","page":"Release notes","title":"v0.3.4 (August 25, 2020)","text":"","category":"section"},{"location":"changelog/#Added-18","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added non-uniform distributionally robust risk measure (#328)\nAdded numerical recovery functions (#330)\nAdded experimental StochOptFormat (#332) (#336) (#337) (#341) (#343) (#344)\nAdded entropic risk measure (#347)","category":"page"},{"location":"changelog/#Other-29","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Documentation improvements (#327) (#333) (#339) (#340)","category":"page"},{"location":"changelog/#v0.3.3-(June-19,-2020)","page":"Release notes","title":"v0.3.3 (June 19, 2020)","text":"","category":"section"},{"location":"changelog/#Added-19","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added asynchronous support for price and belief states (#325)\nAdded ForwardPass plug-in system (#320)","category":"page"},{"location":"changelog/#Fixed-18","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fix check for probabilities in Markovian graph (#322)","category":"page"},{"location":"changelog/#v0.3.2-(April-6,-2020)","page":"Release notes","title":"v0.3.2 (April 6, 
2020)","text":"","category":"section"},{"location":"changelog/#Added-20","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added log_frequency argument to SDDP.train (#307)","category":"page"},{"location":"changelog/#Other-30","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Improve error message in deterministic equivalent (#312)\nUpdate to RecipesBase 1.0 (#313)","category":"page"},{"location":"changelog/#v0.3.1-(February-26,-2020)","page":"Release notes","title":"v0.3.1 (February 26, 2020)","text":"","category":"section"},{"location":"changelog/#Fixed-19","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed filename in integrality_handlers.jl (#304)","category":"page"},{"location":"changelog/#v0.3.0-(February-20,-2020)","page":"Release notes","title":"v0.3.0 (February 20, 2020)","text":"","category":"section"},{"location":"changelog/#Breaking-2","page":"Release notes","title":"Breaking","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Breaking changes to update to JuMP v0.21 (#300).","category":"page"},{"location":"changelog/#v0.2.4-(February-7,-2020)","page":"Release notes","title":"v0.2.4 (February 7, 2020)","text":"","category":"section"},{"location":"changelog/#Added-21","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added a counter for the number of total subproblem solves (#301)","category":"page"},{"location":"changelog/#Other-31","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Update formatter (#298)\nAdded tests (#299)","category":"page"},{"location":"changelog/#v0.2.3-(January-24,-2020)","page":"Release notes","title":"v0.2.3 (January 24, 2020)","text":"","category":"section"},{"location":"changelog/#Added-22","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added support for convex risk measures (#294)","category":"page"},{"location":"changelog/#Fixed-20","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed bug when subproblem is infeasible (#296)\nFixed bug in deterministic equivalent (#297)","category":"page"},{"location":"changelog/#Other-32","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added example from IJOC paper (#293)","category":"page"},{"location":"changelog/#v0.2.2-(January-10,-2020)","page":"Release notes","title":"v0.2.2 (January 10, 2020)","text":"","category":"section"},{"location":"changelog/#Fixed-21","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed flakey time limit in tests (#291)","category":"page"},{"location":"changelog/#Other-33","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release 
notes","text":"Removed MathOptFormat.jl (#289)\nUpdate copyright (#290)","category":"page"},{"location":"changelog/#v0.2.1-(December-19,-2019)","page":"Release notes","title":"v0.2.1 (December 19, 2019)","text":"","category":"section"},{"location":"changelog/#Added-23","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added support for approximating a Markov lattice (#282) (#285)\nAdd tools for visualizing the value function (#272) (#286)\nWrite .mof.json files on error (#284)","category":"page"},{"location":"changelog/#Other-34","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Improve documentation (#281) (#283)\nUpdate tests for Julia 1.3 (#287)","category":"page"},{"location":"changelog/#v0.2.0-(December-16,-2019)","page":"Release notes","title":"v0.2.0 (December 16, 2019)","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"This version added the asynchronous parallel implementation with a few minor breaking changes in how we iterated internally. It didn't break basic user-facing models, only implementations that implemented some of the extension features. It probably could have been a v1.1 release.","category":"page"},{"location":"changelog/#Added-24","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added asynchronous parallel implementation (#277)\nAdded roll-out algorithm for cyclic graphs (#279)","category":"page"},{"location":"changelog/#Other-35","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Improved error messages in PolicyGraph (#271)\nAdded JuliaFormatter (#273) (#276)\nFixed compat bounds (#274) (#278)\nAdded documentation for simulating non-standard graphs (#280)","category":"page"},{"location":"changelog/#v0.1.0-(October-17,-2019)","page":"Release notes","title":"v0.1.0 (October 17, 2019)","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"A complete rewrite of SDDP.jl based on the policy graph framework. This was essentially a new package. It has minimal code in common with the previous implementation.","category":"page"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Development started on September 28, 2018 in Kokako.jl, and the code was merged into SDDP.jl on March 14, 2019.","category":"page"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"The pull request SDDP.jl#180 lists the 29 issues that the rewrite closed.","category":"page"},{"location":"changelog/#v0.0.1-(April-18,-2018)","page":"Release notes","title":"v0.0.1 (April 18, 2018)","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Initial release. Development had been underway since January 22, 2016 in the StochDualDynamicProgram.jl repository. The last development commit there was April 5, 2017. 
Work then continued in this repository for a year before the first tagged release.","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"EditURL = \"example_newsvendor.jl\"","category":"page"},{"location":"tutorial/example_newsvendor/#Example:-Two-stage-Newsvendor","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"","category":"section"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"(Image: ) (Image: )","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"This example is based on the classical newsvendor problem.","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"using SDDP\nimport HiGHS\nimport Plots","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"First, we need some discretized distribution of demand. For simplicity, we're going to sample 10 points from the uniform [0, 1) distribution three times and add them together. This is a rough approximation of a normal distribution.","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"Ω = rand(10) .+ rand(10) .+ rand(10)","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"We also need some price data. We assume the agent can buy a newspaper for $1 and sell it for $1.20.","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"buy_price, sales_price = 1.0, 1.2","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"Now we can formulate and train a policy for the two-stage newsvendor problem:","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"model = SDDP.LinearPolicyGraph(\n stages = 2,\n sense = :Max,\n upper_bound = maximum(Ω) * sales_price,\n optimizer = HiGHS.Optimizer,\n) do subproblem, stage\n @variable(subproblem, inventory >= 0, SDDP.State, initial_value = 0)\n if stage == 1\n @variable(subproblem, buy >= 0)\n @constraint(subproblem, inventory.out == inventory.in + buy)\n @stageobjective(subproblem, -buy_price * buy)\n else\n @variable(subproblem, sell >= 0)\n @constraint(subproblem, sell <= inventory.in)\n SDDP.parameterize(subproblem, Ω) do ω\n return JuMP.set_upper_bound(sell, ω)\n end\n @stageobjective(subproblem, sales_price * sell)\n end\nend\n\nSDDP.train(model)","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"To check the first-stage buy decision, we need to obtain a decision rule for the first-stage node 1:","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"first_stage_rule = SDDP.DecisionRule(model, node = 
1)","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"Then we can evaluate it, passing in a starting point for the incoming state:","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"solution = SDDP.evaluate(\n first_stage_rule;\n incoming_state = Dict(:inventory => 0.0),\n controls_to_record = [:buy],\n)","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"The optimal value of the buy variable is stored here:","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"solution.controls[:buy]","category":"page"},{"location":"tutorial/example_newsvendor/#Introducing-risk","page":"Example: Two-stage Newsvendor","title":"Introducing risk","text":"","category":"section"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"The solution of a single newsvendor problem offers little insight about how a decision-maker should act. In particular, they may be averse to bad outcomes, such as when they purchase a larger number of newspapers only for there to be little demand.","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"We can explore how the optimal decision changes with risk by creating a function:","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"function solve_risk_averse_newsvendor(Ω, risk_measure)\n model = SDDP.LinearPolicyGraph(\n stages = 2,\n sense = :Max,\n upper_bound = maximum(Ω) * sales_price,\n optimizer = HiGHS.Optimizer,\n ) do subproblem, stage\n @variable(subproblem, inventory >= 0, SDDP.State, initial_value = 0)\n if stage == 1\n @variable(subproblem, buy >= 0)\n @constraint(subproblem, inventory.out == inventory.in + buy)\n @stageobjective(subproblem, -buy_price * buy)\n else\n @variable(subproblem, sell >= 0)\n @constraint(subproblem, sell <= inventory.in)\n SDDP.parameterize(subproblem, Ω) do ω\n return JuMP.set_upper_bound(sell, ω)\n end\n @stageobjective(subproblem, sales_price * sell)\n end\n end\n SDDP.train(model; risk_measure = risk_measure, print_level = 0)\n first_stage_rule = SDDP.DecisionRule(model, node = 1)\n solution = SDDP.evaluate(\n first_stage_rule;\n incoming_state = Dict(:inventory => 0.0),\n controls_to_record = [:buy],\n )\n return solution.controls[:buy]\nend","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"Now we can see how many units a risk-neutral decision maker would order:","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"solve_risk_averse_newsvendor(Ω, SDDP.Expectation())","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"as well as a decision-maker who cares only about the worst-case outcome:","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage 
Newsvendor","title":"Example: Two-stage Newsvendor","text":"solve_risk_averse_newsvendor(Ω, SDDP.WorstCase())","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"In general, the decision-maker will be somewhere between the two extremes. The SDDP.Entropic risk measure is a risk measure that has a single parameter that lets us explore the space of policies between the two extremes. When the parameter is small, the measure acts like SDDP.Expectation, and when it is large, it acts like SDDP.WorstCase.","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"Here is what we get if we solve our problem multiple times for different values of the risk aversion parameter gamma:","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"Γ = [10^i for i in -3:0.2:3]\nbuy = [solve_risk_averse_newsvendor(Ω, SDDP.Entropic(γ)) for γ in Γ]\nPlots.plot(\n Γ,\n buy;\n xaxis = :log,\n xlabel = \"Risk aversion parameter γ\",\n ylabel = \"First-stage buy decision\",\n legend = false,\n)","category":"page"},{"location":"tutorial/example_newsvendor/#Things-to-try","page":"Example: Two-stage Newsvendor","title":"Things to try","text":"","category":"section"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"There are a number of things you can try next:","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"Experiment with different buy and sales prices\nExperiment with different distributions of demand\nExplore how the optimal policy changes if you use a different risk measure\nWhat happens if you can only buy and sell integer numbers of newspapers? Try this by adding Int to the variable definitions: @variable(subproblem, buy >= 0, Int)","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"This page was generated using Literate.jl.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"EditURL = \"theory_intro.jl\"","category":"page"},{"location":"explanation/theory_intro/#Introductory-theory","page":"Introductory theory","title":"Introductory theory","text":"","category":"section"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"(Image: ) (Image: )","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"note: Note\nThis tutorial is aimed at advanced undergraduates or early-stage graduate students. You don't need prior exposure to stochastic programming! (Indeed, it may be better if you don't, because our approach is non-standard in the literature.)This tutorial is also a living document. 
If parts are unclear, please open an issue so it can be improved!","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"This tutorial will teach you how the stochastic dual dynamic programming algorithm works by implementing a simplified version of the algorithm.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Our implementation is very much a \"vanilla\" version of SDDP; it doesn't have (m)any fancy computational tricks (e.g., the ones included in SDDP.jl) that you need to code a performant or stable version that will work on realistic instances. However, our simplified implementation will work on arbitrary policy graphs, including those with cycles such as infinite horizon problems!","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Packages","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"This tutorial uses the following packages. For clarity, we call import PackageName so that we must prefix PackageName. to all functions and structs provided by that package. Everything not prefixed is either part of base Julia, or we wrote it.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"import ForwardDiff\nimport HiGHS\nimport JuMP\nimport Statistics","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"tip: Tip\nYou can follow along by installing the above packages, and copy-pasting the code we will write into a Julia REPL. Alternatively, you can download the Julia .jl file which created this tutorial from GitHub.","category":"page"},{"location":"explanation/theory_intro/#Preliminaries:-background-theory","page":"Introductory theory","title":"Preliminaries: background theory","text":"","category":"section"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Start this tutorial by reading An introduction to SDDP.jl, which introduces the necessary notation and vocabulary that we need for this tutorial.","category":"page"},{"location":"explanation/theory_intro/#Preliminaries:-Kelley's-cutting-plane-algorithm","page":"Introductory theory","title":"Preliminaries: Kelley's cutting plane algorithm","text":"","category":"section"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Kelley's cutting plane algorithm is an iterative method for minimizing convex functions. 
Given a convex function f(x), Kelley's algorithm constructs an under-approximation of the function at the minimum by a set of first-order Taylor series approximations (called cuts) constructed at a set of points k = 1ldotsK:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"beginaligned\nf^K = minlimits_theta in mathbbR x in mathbbR^N theta\n theta ge f(x_k) + fracddxf(x_k)^top (x - x_k)quad k=1ldotsK\n theta ge M\nendaligned","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"where M is a sufficiently large negative number that is a lower bound for f over the domain of x.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Kelley's cutting plane algorithm is a structured way of choosing points x_k to visit, so that as more cuts are added:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"lim_K rightarrow infty f^K = minlimits_x in mathbbR^N f(x)","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"However, before we introduce the algorithm, we need to introduce some bounds.","category":"page"},{"location":"explanation/theory_intro/#Bounds","page":"Introductory theory","title":"Bounds","text":"","category":"section"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"By convexity, f^K le f(x) for all x. Thus, if x^* is a minimizer of f, then at any point in time we can construct a lower bound for f(x^*) by solving f^K.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Moreover, we can use the primal solutions x_k^* returned by solving f^k to evaluate f(x_k^*) to generate an upper bound.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Therefore, f^K le f(x^*) le minlimits_k=1ldotsK f(x_k^*).","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"When the lower bound is sufficiently close to the upper bound, we can terminate the algorithm and declare that we have found a solution that is close to optimal.","category":"page"},{"location":"explanation/theory_intro/#Implementation","page":"Introductory theory","title":"Implementation","text":"","category":"section"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Here is pseudo-code for the Kelley algorithm:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Take as input a convex function f(x) and an iteration limit K_max. Set K = 0, and initialize f^K. 
Set lb = -infty and ub = infty.\nSolve f^K to obtain a candidate solution x_K+1.\nUpdate lb = f^K and ub = minub f(x_K+1).\nAdd a cut theta ge f(x_K+1) + fracddxfleft(x_K+1right)^top (x - x_K+1) to form f^K+1.\nIncrement K.\nIf K = K_max or ub - lb epsilon, STOP, otherwise, go to step 2.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"And here's a complete implementation:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"function kelleys_cutting_plane(\n # The function to be minimized.\n f::Function,\n # The gradient of `f`. By default, we use automatic differentiation to\n # compute the gradient of f so the user doesn't have to!\n dfdx::Function = x -> ForwardDiff.gradient(f, x);\n # The number of arguments to `f`.\n input_dimension::Int,\n # A lower bound for the function `f` over its domain.\n lower_bound::Float64,\n # The number of iterations to run Kelley's algorithm for before stopping.\n iteration_limit::Int,\n # The absolute tolerance ϵ to use for convergence.\n tolerance::Float64 = 1e-6,\n)\n # Step (1):\n K = 0\n model = JuMP.Model(HiGHS.Optimizer)\n JuMP.set_silent(model)\n JuMP.@variable(model, θ >= lower_bound)\n JuMP.@variable(model, x[1:input_dimension])\n JuMP.@objective(model, Min, θ)\n x_k = fill(NaN, input_dimension)\n lower_bound, upper_bound = -Inf, Inf\n while true\n # Step (2):\n JuMP.optimize!(model)\n x_k .= JuMP.value.(x)\n # Step (3):\n lower_bound = JuMP.objective_value(model)\n upper_bound = min(upper_bound, f(x_k))\n println(\"K = $K : $(lower_bound) <= f(x*) <= $(upper_bound)\")\n # Step (4):\n JuMP.@constraint(model, θ >= f(x_k) + dfdx(x_k)' * (x .- x_k))\n # Step (5):\n K = K + 1\n # Step (6):\n if K == iteration_limit\n println(\"-- Termination status: iteration limit --\")\n break\n elseif abs(upper_bound - lower_bound) < tolerance\n println(\"-- Termination status: converged --\")\n break\n end\n end\n println(\"Found solution: x_K = \", x_k)\n return\nend","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Let's run our algorithm to see what happens:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"kelleys_cutting_plane(\n input_dimension = 2,\n lower_bound = 0.0,\n iteration_limit = 20,\n) do x\n return (x[1] - 1)^2 + (x[2] + 2)^2 + 1.0\nend","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"warning: Warning\nIt's hard to choose a valid lower bound! If you choose one too loose, the algorithm can take a long time to converge. However, if you choose one so tight that M f(x^*), then you can obtain a suboptimal solution. 
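","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"You can observe the second failure mode with a small experiment of our own (not part of the original tutorial): re-run the example above with a lower bound that is larger than the true minimum of 1.0 and compare the reported bounds. The exact iterates will vary, but the reported lower bound can never drop below the value you supply:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"# A deliberately invalid choice: lower_bound = 10.0 although f(x*) = 1.0.\nkelleys_cutting_plane(\n input_dimension = 2,\n lower_bound = 10.0,\n iteration_limit = 20,\n) do x\n return (x[1] - 1)^2 + (x[2] + 2)^2 + 1.0\nend","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"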
For a deeper discussion of the implications for SDDP.jl, see Choosing an initial bound.","category":"page"},{"location":"explanation/theory_intro/#Preliminaries:-approximating-the-cost-to-go-term","page":"Introductory theory","title":"Preliminaries: approximating the cost-to-go term","text":"","category":"section"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"In the background theory section, we discussed how you could formulate an optimal policy to a multistage stochastic program using the dynamic programming recursion:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"beginaligned\nV_i(x omega) = minlimits_barx x^prime u C_i(barx u omega) + mathbbE_j in i^+ varphi in Omega_jV_j(x^prime varphi)\n x^prime = T_i(barx u omega) \n u in U_i(barx omega) \n barx = x\nendaligned","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"where our decision rule, pi_i(x omega), solves this optimization problem and returns a u^* corresponding to an optimal solution. Moreover, we alluded to the fact that the cost-to-go term (the nasty recursive expectation) makes this problem intractable to solve.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"However, if, excluding the cost-to-go term (i.e., the SP formulation), V_i(x omega) can be formulated as a linear program (this also works for convex programs, but the math is more involved), then we can make some progress by noticing that x only appears as a right-hand side term of the fishing constraint barx = x.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Therefore, V_i(x cdot) is convex with respect to x for fixed omega. Moreover, if we implement the constraint barx = x by setting the lower- and upper bounds of barx to x, then the reduced cost of the decision variable barx is a subgradient of the function V_i with respect to x! (This is the algorithmic simplification that leads us to add barx and the fishing constraint barx = x.)","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"tip: Tip\nThe subproblem can have binary and integer variables, but you'll need to use Lagrangian duality to compute a subgradient!","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Stochastic dual dynamic programming converts this problem into a tractable form by applying Kelley's cutting plane algorithm to the V_j functions in the cost-to-go term:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"beginaligned\nV_i^K(x omega) = minlimits_barx x^prime u C_i(barx u omega) + theta\n x^prime = T_i(barx u omega) \n u in U_i(barx omega) \n barx = x \n theta ge mathbbE_j in i^+ varphi in Omega_jleftV_j^k(x^prime_k varphi) + fracddx^primeV_j^k(x^prime_k varphi)^top (x^prime - x^prime_k)rightquad k=1ldotsK \n theta ge M\nendaligned","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"All we need now is a way of generating these cutting planes in an iterative manner. 
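","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"As an aside, it is worth isolating the fishing-constraint trick described above, because it is where every cut gradient will come from. The following is a minimal sketch of our own (a made-up two-variable linear program, not part of the original tutorial): we fix a variable to the incoming value, solve, and read off the reduced cost of the fixed variable as a subgradient of the optimal value with respect to that value:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"import HiGHS\nimport JuMP\nfunction value_and_subgradient(x_value::Float64)\n model = JuMP.Model(HiGHS.Optimizer)\n JuMP.set_silent(model)\n JuMP.@variable(model, x_in)\n JuMP.@variable(model, u >= 0)\n # The fishing constraint: fix the incoming value via the variable bounds.\n JuMP.fix(x_in, x_value)\n JuMP.@constraint(model, u >= 3 - x_in)\n JuMP.@objective(model, Min, 2 * u)\n JuMP.optimize!(model)\n # The reduced cost of the fixed variable is a subgradient of the optimal\n # value with respect to `x_value`.\n return JuMP.objective_value(model), JuMP.reduced_cost(x_in)\nend\n\n# For this toy problem V(x) = 2 * max(3 - x, 0), so we expect roughly (4.0, -2.0):\nvalue_and_subgradient(1.0)","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"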
Before we get to that though, let's start writing some code.","category":"page"},{"location":"explanation/theory_intro/#Implementation:-modeling","page":"Introductory theory","title":"Implementation: modeling","text":"","category":"section"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Let's make a start by defining the problem structure. Like SDDP.jl, we need a few things:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"A description of the structure of the policy graph: how many nodes there are, and the arcs linking the nodes together with their corresponding probabilities.\nA JuMP model for each node in the policy graph.\nA way to identify the incoming and outgoing state variables of each node.\nA description of the random variable, as well as a function that we can call that will modify the JuMP model to reflect the realization of the random variable.\nA decision variable to act as the approximated cost-to-go term.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"warning: Warning\nIn the interests of brevity, there is minimal error checking. Think about all the different ways you could break the code!","category":"page"},{"location":"explanation/theory_intro/#Structs","page":"Introductory theory","title":"Structs","text":"","category":"section"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"The first struct we are going to use is a State struct that will wrap an incoming and outgoing state variable:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"struct State\n in::JuMP.VariableRef\n out::JuMP.VariableRef\nend","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Next, we need a struct to wrap all of the uncertainty within a node:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"struct Uncertainty\n parameterize::Function\n Ω::Vector{Any}\n P::Vector{Float64}\nend","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"parameterize is a function which takes a realization of the random variable omegainOmega and updates the subproblem accordingly. The finite discrete random variable is defined by the vectors Ω and P, so that the random variable takes the value Ω[i] with probability P[i]. As such, P should sum to 1. 
(We don't check this here, but we should; we do in SDDP.jl.)","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Now we have two building blocks, we can declare the structure of each node:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"struct Node\n subproblem::JuMP.Model\n states::Dict{Symbol,State}\n uncertainty::Uncertainty\n cost_to_go::JuMP.VariableRef\nend","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"subproblem is going to be the JuMP model that we build at each node.\nstates is a dictionary that maps a symbolic name of a state variable to a State object wrapping the incoming and outgoing state variables in subproblem.\nuncertainty is an Uncertainty object described above.\ncost_to_go is a JuMP variable that approximates the cost-to-go term.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Finally, we define a simplified policy graph as follows:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"struct PolicyGraph\n nodes::Vector{Node}\n arcs::Vector{Dict{Int,Float64}}\nend","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"There is a vector of nodes, as well as a data structure for the arcs. arcs is a vector of dictionaries, where arcs[i][j] gives the probability of transitioning from node i to node j, if an arc exists.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"To simplify things, we will assume that the root node transitions to node 1 with probability 1, and there are no other incoming arcs to node 1. Notably, we can still define cyclic graphs though!","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"We also define a nice show method so that we don't accidentally print a large amount of information to the screen when creating a model:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"function Base.show(io::IO, model::PolicyGraph)\n println(io, \"A policy graph with $(length(model.nodes)) nodes\")\n println(io, \"Arcs:\")\n for (from, arcs) in enumerate(model.arcs)\n for (to, probability) in arcs\n println(io, \" $(from) => $(to) w.p. $(probability)\")\n end\n end\n return\nend","category":"page"},{"location":"explanation/theory_intro/#Functions","page":"Introductory theory","title":"Functions","text":"","category":"section"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Now we have some basic types, let's implement some functions so that the user can create a model.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"First, we need an example of a function that the user will provide. Like SDDP.jl, this takes an empty subproblem, and a node index, in this case t::Int. 
You could change this function to change the model, or define a new one later in the code.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"We're going to copy the example from An introduction to SDDP.jl, with some minor adjustments for the fact we don't have many of the bells and whistles of SDDP.jl. You can probably see how some of the SDDP.jl functionality like @stageobjective and SDDP.parameterize help smooth some of the usability issues like needing to construct both the incoming and outgoing state variables, or needing to explicitly declare return states, uncertainty.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"function subproblem_builder(subproblem::JuMP.Model, t::Int)\n # Define the state variables. Note how we fix the incoming state to the\n # initial state variable regardless of `t`! This isn't strictly necessary;\n # it only matters that we do it for the first node.\n JuMP.@variable(subproblem, volume_in == 200)\n JuMP.@variable(subproblem, 0 <= volume_out <= 200)\n states = Dict(:volume => State(volume_in, volume_out))\n # Define the control variables.\n JuMP.@variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n inflow\n end)\n # Define the constraints\n JuMP.@constraints(\n subproblem,\n begin\n volume_out == volume_in + inflow - hydro_generation - hydro_spill\n demand_constraint, thermal_generation + hydro_generation == 150.0\n end\n )\n # Define the objective for each stage `t`. Note that we can use `t` as an\n # index for t = 1, 2, 3.\n fuel_cost = [50.0, 100.0, 150.0]\n JuMP.@objective(subproblem, Min, fuel_cost[t] * thermal_generation)\n # Finally, we define the uncertainty object. Because this is a simplified\n # implementation of SDDP, we shall politely ask the user to only modify the\n # constraints, and not the objective function! (Not that it changes the\n # algorithm, we just have to add more information to keep track of things.)\n uncertainty = Uncertainty([0.0, 50.0, 100.0], [1 / 3, 1 / 3, 1 / 3]) do ω\n return JuMP.fix(inflow, ω)\n end\n return states, uncertainty\nend","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"The next function we need to define is the analog of SDDP.PolicyGraph. It should be pretty readable.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"function PolicyGraph(\n subproblem_builder::Function;\n graph::Vector{Dict{Int,Float64}},\n lower_bound::Float64,\n optimizer,\n)\n nodes = Node[]\n for t in 1:length(graph)\n # Create a model.\n model = JuMP.Model(optimizer)\n JuMP.set_silent(model)\n # Use the provided function to build out each subproblem. 
The user's\n # function returns a dictionary mapping `Symbol`s to `State` objects,\n # and an `Uncertainty` object.\n states, uncertainty = subproblem_builder(model, t)\n # Now add the cost-to-go terms:\n JuMP.@variable(model, cost_to_go >= lower_bound)\n obj = JuMP.objective_function(model)\n JuMP.@objective(model, Min, obj + cost_to_go)\n # If there are no outgoing arcs, the cost-to-go is 0.0.\n if length(graph[t]) == 0\n JuMP.fix(cost_to_go, 0.0; force = true)\n end\n push!(nodes, Node(model, states, uncertainty, cost_to_go))\n end\n return PolicyGraph(nodes, graph)\nend","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Then, we can create a model using the subproblem_builder function we defined earlier:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"model = PolicyGraph(\n subproblem_builder;\n graph = [Dict(2 => 1.0), Dict(3 => 1.0), Dict{Int,Float64}()],\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n)","category":"page"},{"location":"explanation/theory_intro/#Implementation:-helpful-samplers","page":"Introductory theory","title":"Implementation: helpful samplers","text":"","category":"section"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Before we get properly coding the solution algorithm, it's also going to be useful to have a function that samples a realization of the random variable defined by Ω and P.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"function sample_uncertainty(uncertainty::Uncertainty)\n r = rand()\n for (p, ω) in zip(uncertainty.P, uncertainty.Ω)\n r -= p\n if r < 0.0\n return ω\n end\n end\n return error(\"We should never get here because P should sum to 1.0.\")\nend","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"note: Note\nrand() samples a uniform random variable in [0, 1).","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"For example:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"for i in 1:3\n println(\"ω = \", sample_uncertainty(model.nodes[1].uncertainty))\nend","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"It's also going to be useful to define a function that generates a random walk through the nodes of the graph:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"function sample_next_node(model::PolicyGraph, current::Int)\n if length(model.arcs[current]) == 0\n # No outgoing arcs!\n return nothing\n else\n r = rand()\n for (to, probability) in model.arcs[current]\n r -= probability\n if r < 0.0\n return to\n end\n end\n # We looped through the outgoing arcs and still have probability left\n # over! 
This means we've hit an implicit \"zero\" node.\n return nothing\n end\nend","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"For example:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"for i in 1:3\n # We use `repr` to print the next node, because `sample_next_node` can\n # return `nothing`.\n println(\"Next node from $(i) = \", repr(sample_next_node(model, i)))\nend","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"This is a little boring, because our graph is simple. However, more complicated graphs will generate more interesting trajectories!","category":"page"},{"location":"explanation/theory_intro/#Implementation:-the-forward-pass","page":"Introductory theory","title":"Implementation: the forward pass","text":"","category":"section"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Recall that, after approximating the cost-to-go term, we need a way of generating the cuts. As the first step, we need a way of generating candidate solutions x_k^prime. However, unlike the Kelley's example, our functions V_j^k(x^prime varphi) need two inputs: an outgoing state variable and a realization of the random variable.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"One way of getting these inputs is just to pick a random (feasible) value. However, in doing so, we might pick outgoing state variables that we will never see in practice, or we might infrequently pick outgoing state variables that we will often see in practice. Therefore, a better way of generating the inputs is to use a simulation of the policy, which we call the forward pass.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"The forward pass walks the policy graph from start to end, transitioning randomly along the arcs. At each node, it observes a realization of the random variable and solves the approximated subproblem to generate a candidate outgoing state variable x_k^prime. 
The outgoing state variable is passed as the incoming state variable to the next node in the trajectory.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"function forward_pass(model::PolicyGraph, io::IO = stdout)\n println(io, \"| Forward Pass\")\n # First, get the value of the state at the root node (e.g., x_R).\n incoming_state =\n Dict(k => JuMP.fix_value(v.in) for (k, v) in model.nodes[1].states)\n # `simulation_cost` is an accumlator that is going to sum the stage-costs\n # incurred over the forward pass.\n simulation_cost = 0.0\n # We also need to record the nodes visited and resultant outgoing state\n # variables so we can pass them to the backward pass.\n trajectory = Tuple{Int,Dict{Symbol,Float64}}[]\n # Now's the meat of the forward pass: beginning at the first node:\n t = 1\n while t !== nothing\n node = model.nodes[t]\n println(io, \"| | Visiting node $(t)\")\n # Sample the uncertainty:\n ω = sample_uncertainty(node.uncertainty)\n println(io, \"| | | ω = \", ω)\n # Parameterizing the subproblem using the user-provided function:\n node.uncertainty.parameterize(ω)\n println(io, \"| | | x = \", incoming_state)\n # Update the incoming state variable:\n for (k, v) in incoming_state\n JuMP.fix(node.states[k].in, v; force = true)\n end\n # Now solve the subproblem and check we found an optimal solution:\n JuMP.optimize!(node.subproblem)\n if JuMP.termination_status(node.subproblem) != JuMP.MOI.OPTIMAL\n error(\"Something went terribly wrong!\")\n end\n # Compute the outgoing state variables:\n outgoing_state = Dict(k => JuMP.value(v.out) for (k, v) in node.states)\n println(io, \"| | | x′ = \", outgoing_state)\n # We also need to compute the stage cost to add to our\n # `simulation_cost` accumulator:\n stage_cost =\n JuMP.objective_value(node.subproblem) - JuMP.value(node.cost_to_go)\n simulation_cost += stage_cost\n println(io, \"| | | C(x, u, ω) = \", stage_cost)\n # As a penultimate step, set the outgoing state of stage t and the\n # incoming state of stage t + 1, and add the node to the trajectory.\n incoming_state = outgoing_state\n push!(trajectory, (t, outgoing_state))\n # Finally, sample a new node to step to. If `t === nothing`, the\n # `while` loop will break.\n t = sample_next_node(model, t)\n end\n return trajectory, simulation_cost\nend","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Let's take a look at one forward pass:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"trajectory, simulation_cost = forward_pass(model);\n","category":"page"},{"location":"explanation/theory_intro/#Implementation:-the-backward-pass","page":"Introductory theory","title":"Implementation: the backward pass","text":"","category":"section"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"From the forward pass, we obtained a vector of nodes visited and their corresponding outgoing state variables. Now we need to refine the approximation for each node at the candidate solution for the outgoing state variable. 
That is, we need to add a new cut:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"theta ge mathbbE_j in i^+ varphi in Omega_jleftV_j^k(x^prime_k varphi) + fracddx^primeV_j^k(x^prime_k varphi)^top (x^prime - x^prime_k)right","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"or alternatively:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"theta ge sumlimits_j in i^+ sumlimits_varphi in Omega_j p_ij p_varphileftV_j^k(x^prime_k varphi) + fracddx^primeV_j^k(x^prime_k varphi)^top (x^prime - x^prime_k)right","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"It doesn't matter what order we visit the nodes to generate these cuts for. For example, we could compute them all in parallel, using the current approximations of V^K_i.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"However, we can be smarter than that.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"If we traverse the list of nodes visited in the forward pass in reverse, then we come to refine the i^th node in the trajectory, we will already have improved the approximation of the (i+1)^th node in the trajectory as well! Therefore, our refinement of the i^th node will be better than if we improved node i first, and then refined node (i+1).","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Because we walk the nodes in reverse, we call this the backward pass.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"info: Info\nIf you're into deep learning, you could view this as the equivalent of back-propagation: the forward pass pushes primal information through the graph (outgoing state variables), and the backward pass pulls dual information (cuts) back through the graph to improve our decisions on the next forward pass.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"function backward_pass(\n model::PolicyGraph,\n trajectory::Vector{Tuple{Int,Dict{Symbol,Float64}}},\n io::IO = stdout,\n)\n println(io, \"| Backward pass\")\n # For the backward pass, we walk back up the nodes.\n for i in reverse(1:length(trajectory))\n index, outgoing_states = trajectory[i]\n node = model.nodes[index]\n println(io, \"| | Visiting node $(index)\")\n if length(model.arcs[index]) == 0\n # If there are no children, the cost-to-go is 0.\n println(io, \"| | | Skipping node because the cost-to-go is 0\")\n continue\n end\n # Create an empty affine expression that we will use to build up the\n # right-hand side of the cut expression.\n cut_expression = JuMP.AffExpr(0.0)\n # For each node j ∈ i⁺\n for (j, P_ij) in model.arcs[index]\n next_node = model.nodes[j]\n # Set the incoming state variables of node j to the outgoing state\n # variables of node i\n for (k, v) in outgoing_states\n JuMP.fix(next_node.states[k].in, v; force = true)\n end\n # Then for each realization of φ ∈ Ωⱼ\n for (pφ, φ) in zip(next_node.uncertainty.P, next_node.uncertainty.Ω)\n # Setup and solve for the 
realization of φ\n println(io, \"| | | Solving φ = \", φ)\n next_node.uncertainty.parameterize(φ)\n JuMP.optimize!(next_node.subproblem)\n # Then prepare the cut `P_ij * pφ * [V + dVdxᵀ(x - x_k)]``\n V = JuMP.objective_value(next_node.subproblem)\n println(io, \"| | | | V = \", V)\n dVdx = Dict(\n k => JuMP.reduced_cost(v.in) for (k, v) in next_node.states\n )\n println(io, \"| | | | dVdx′ = \", dVdx)\n cut_expression += JuMP.@expression(\n node.subproblem,\n P_ij *\n pφ *\n (\n V + sum(\n dVdx[k] * (x.out - outgoing_states[k]) for\n (k, x) in node.states\n )\n ),\n )\n end\n end\n # And then refine the cost-to-go variable by adding the cut:\n c = JuMP.@constraint(node.subproblem, node.cost_to_go >= cut_expression)\n println(io, \"| | | Adding cut : \", c)\n end\n return nothing\nend","category":"page"},{"location":"explanation/theory_intro/#Implementation:-bounds","page":"Introductory theory","title":"Implementation: bounds","text":"","category":"section"},{"location":"explanation/theory_intro/#Lower-bounds","page":"Introductory theory","title":"Lower bounds","text":"","category":"section"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Recall from Kelley's that we can obtain a lower bound for f(x^*) be evaluating f^K. The analogous lower bound for a multistage stochastic program is:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"mathbbE_i in R^+ omega in Omega_iV_i^K(x_R omega) le min_pi mathbbE_i in R^+ omega in Omega_iV_i^pi(x_R omega)","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Here's how we compute the lower bound:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"function lower_bound(model::PolicyGraph)\n node = model.nodes[1]\n bound = 0.0\n for (p, ω) in zip(node.uncertainty.P, node.uncertainty.Ω)\n node.uncertainty.parameterize(ω)\n JuMP.optimize!(node.subproblem)\n bound += p * JuMP.objective_value(node.subproblem)\n end\n return bound\nend","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"note: Note\nThe implementation is simplified because we assumed that there is only one arc from the root node, and that it pointed to the first node in the vector.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Because we haven't trained a policy yet, the lower bound is going to be very bad:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"lower_bound(model)","category":"page"},{"location":"explanation/theory_intro/#Upper-bounds","page":"Introductory theory","title":"Upper bounds","text":"","category":"section"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"With Kelley's algorithm, we could easily construct an upper bound by evaluating f(x_K). However, it is almost always intractable to evaluate an upper bound for multistage stochastic programs due to the large number of nodes and the nested expectations. 
Instead, we can perform a Monte Carlo simulation of the policy to build a statistical estimate for the value of mathbbE_i in R^+ omega in Omega_iV_i^pi(x_R omega), where pi is the policy defined by the current approximations V^K_i.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"function upper_bound(model::PolicyGraph; replications::Int)\n # Pipe the output to `devnull` so we don't print too much!\n simulations = [forward_pass(model, devnull) for i in 1:replications]\n z = [s[2] for s in simulations]\n μ = Statistics.mean(z)\n tσ = 1.96 * Statistics.std(z) / sqrt(replications)\n return μ, tσ\nend","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"note: Note\nThe width of the confidence interval is incorrect if there are cycles in the graph, because the distribution of simulation costs z is not symmetric. The mean is correct, however.","category":"page"},{"location":"explanation/theory_intro/#Termination-criteria","page":"Introductory theory","title":"Termination criteria","text":"","category":"section"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"In Kelley's algorithm, the upper bound was deterministic. Therefore, we could terminate the algorithm when the lower bound was sufficiently close to the upper bound. However, our upper bound for SDDP is not deterministic; it is a confidence interval!","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Some people suggest terminating SDDP when the lower bound is contained within the confidence interval. However, this is a poor choice because it is too easy to generate a false positive. For example, if we use a small number of replications then the width of the confidence will be large, and we are more likely to terminate!","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"In a future tutorial (not yet written...) we will discuss termination criteria in more depth. 
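","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"In the meantime, the following sketch of our own (with made-up simulation costs, not part of the original tutorial) shows why a small number of replications makes such a test unreliable: the half-width of the 95% confidence interval shrinks only like one over the square root of the number of replications:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"import Statistics\nhalf_width(z) = 1.96 * Statistics.std(z) / sqrt(length(z))\nz = 10_000 .+ 2_000 .* rand(1_000) # made-up simulation costs\n# The interval from 10 replications is roughly ten times wider than the\n# interval from 1,000 replications:\nhalf_width(z[1:10]), half_width(z)","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"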
For now, pick a large number of iterations and train for as long as possible.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"tip: Tip\nFor a rule of thumb, pick a large number of iterations to train the policy for (e.g., 10 times mathcalN times maxlimits_iinmathcalN Omega_i)","category":"page"},{"location":"explanation/theory_intro/#Implementation:-the-training-loop","page":"Introductory theory","title":"Implementation: the training loop","text":"","category":"section"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"The train loop of SDDP just applies the forward and backward passes iteratively, followed by a final simulation to compute the upper bound confidence interval:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"function train(\n model::PolicyGraph;\n iteration_limit::Int,\n replications::Int,\n io::IO = stdout,\n)\n for i in 1:iteration_limit\n println(io, \"Starting iteration $(i)\")\n outgoing_states, _ = forward_pass(model, io)\n backward_pass(model, outgoing_states, io)\n println(io, \"| Finished iteration\")\n println(io, \"| | lower_bound = \", lower_bound(model))\n end\n println(io, \"Termination status: iteration limit\")\n μ, tσ = upper_bound(model; replications = replications)\n println(io, \"Upper bound = $(μ) ± $(tσ)\")\n return\nend","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Using our model we defined earlier, we can go:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"train(model; iteration_limit = 3, replications = 100)","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Success! We trained a policy for a finite horizon multistage stochastic program using stochastic dual dynamic programming.","category":"page"},{"location":"explanation/theory_intro/#Implementation:-evaluating-the-policy","page":"Introductory theory","title":"Implementation: evaluating the policy","text":"","category":"section"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"A final step is the ability to evaluate the policy at a given point.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"function evaluate_policy(\n model::PolicyGraph;\n node::Int,\n incoming_state::Dict{Symbol,Float64},\n random_variable,\n)\n the_node = model.nodes[node]\n the_node.uncertainty.parameterize(random_variable)\n for (k, v) in incoming_state\n JuMP.fix(the_node.states[k].in, v; force = true)\n end\n JuMP.optimize!(the_node.subproblem)\n return Dict(\n k => JuMP.value.(v) for\n (k, v) in JuMP.object_dictionary(the_node.subproblem)\n )\nend\n\nevaluate_policy(\n model;\n node = 1,\n incoming_state = Dict(:volume => 150.0),\n random_variable = 75,\n)","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"note: Note\nThe random variable can be out-of-sample, i.e., it doesn't have to be in the vector Omega we created when defining the model! 
This is a notable difference to other multistage stochastic solution methods like progressive hedging or using the deterministic equivalent.","category":"page"},{"location":"explanation/theory_intro/#Example:-infinite-horizon","page":"Introductory theory","title":"Example: infinite horizon","text":"","category":"section"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"As promised earlier, our implementation is actually pretty general. It can solve any multistage stochastic (linear) program defined by a policy graph, including infinite horizon problems!","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Here's an example, where we have extended our earlier problem with an arc from node 3 to node 2 with probability 0.5. You can interpret the 0.5 as a discount factor.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"model = PolicyGraph(\n subproblem_builder;\n graph = [Dict(2 => 1.0), Dict(3 => 1.0), Dict(2 => 0.5)],\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n)","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Then, train a policy:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"train(model; iteration_limit = 3, replications = 100)","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Success! We trained a policy for an infinite horizon multistage stochastic program using stochastic dual dynamic programming. 
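","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Because the arc from node 3 back to node 2 has probability 0.5, each forward pass terminates with probability one, but its length is now random. Here is a small sketch of our own (not part of the original tutorial) that uses the sample_next_node function from earlier to estimate the average trajectory length, which should be close to 5 (the three nodes of the chain plus, in expectation, one extra pass through nodes 2 and 3):","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"function average_trajectory_length(model::PolicyGraph; replications::Int = 1_000)\n total = 0\n for _ in 1:replications\n t = 1\n while t !== nothing\n total += 1\n t = sample_next_node(model, t)\n end\n end\n return total / replications\nend\n\n# We expect a value near 5.0:\naverage_trajectory_length(model)","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"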
Note how some of the forward passes are different lengths!","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"evaluate_policy(\n model;\n node = 3,\n incoming_state = Dict(:volume => 100.0),\n random_variable = 10.0,\n)","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/generation_expansion/","page":"Generation expansion","title":"Generation expansion","text":"EditURL = \"generation_expansion.jl\"","category":"page"},{"location":"examples/generation_expansion/#Generation-expansion","page":"Generation expansion","title":"Generation expansion","text":"","category":"section"},{"location":"examples/generation_expansion/","page":"Generation expansion","title":"Generation expansion","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/generation_expansion/","page":"Generation expansion","title":"Generation expansion","text":"using SDDP\nimport HiGHS\nimport Test\n\nfunction generation_expansion(duality_handler)\n build_cost = 1e4\n use_cost = 4\n num_units = 5\n capacities = ones(num_units)\n demand_vals =\n 0.5 * [\n 5 5 5 5 5 5 5 5\n 4 3 1 3 0 9 8 17\n 0 9 4 2 19 19 13 7\n 25 11 4 14 4 6 15 12\n 6 7 5 3 8 4 17 13\n ]\n # Cost of unmet demand\n penalty = 5e5\n # Discounting rate\n rho = 0.99\n model = SDDP.LinearPolicyGraph(\n stages = 5,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n ) do sp, stage\n @variable(\n sp,\n 0 <= invested[1:num_units] <= 1,\n SDDP.State,\n Int,\n initial_value = 0\n )\n @variables(sp, begin\n generation >= 0\n unmet >= 0\n demand\n end)\n\n @constraints(\n sp,\n begin\n # Can't un-invest\n investment[i in 1:num_units], invested[i].out >= invested[i].in\n # Generation capacity\n sum(capacities[i] * invested[i].out for i in 1:num_units) >=\n generation\n # Meet demand or pay a penalty\n unmet >= demand - sum(generation)\n # For fewer iterations order the units to break symmetry, units are identical (tougher numerically)\n [j in 1:(num_units-1)], invested[j].out <= invested[j+1].out\n end\n )\n # Demand is uncertain\n SDDP.parameterize(ω -> JuMP.fix(demand, ω), sp, demand_vals[stage, :])\n\n @expression(\n sp,\n investment_cost,\n build_cost *\n sum(invested[i].out - invested[i].in for i in 1:num_units)\n )\n @stageobjective(\n sp,\n (investment_cost + generation * use_cost) * rho^(stage - 1) +\n penalty * unmet\n )\n end\n if get(ARGS, 1, \"\") == \"--write\"\n # Run `$ julia generation_expansion.jl --write` to update the benchmark\n # model directory\n model_dir = joinpath(@__DIR__, \"..\", \"..\", \"..\", \"benchmarks\", \"models\")\n SDDP.write_to_file(\n model,\n joinpath(model_dir, \"generation_expansion.sof.json.gz\");\n test_scenarios = 100,\n )\n exit(0)\n end\n SDDP.train(model; log_frequency = 10, duality_handler = duality_handler)\n Test.@test SDDP.calculate_bound(model) ≈ 2.078860e6 atol = 1e3\n return\nend\n\ngeneration_expansion(SDDP.ContinuousConicDuality())\ngeneration_expansion(SDDP.LagrangianDuality())","category":"page"},{"location":"examples/generation_expansion/","page":"Generation expansion","title":"Generation expansion","text":"","category":"page"},{"location":"examples/generation_expansion/","page":"Generation expansion","title":"Generation 
expansion","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/biobjective_hydro/","page":"Biobjective hydro-thermal","title":"Biobjective hydro-thermal","text":"EditURL = \"biobjective_hydro.jl\"","category":"page"},{"location":"examples/biobjective_hydro/#Biobjective-hydro-thermal","page":"Biobjective hydro-thermal","title":"Biobjective hydro-thermal","text":"","category":"section"},{"location":"examples/biobjective_hydro/","page":"Biobjective hydro-thermal","title":"Biobjective hydro-thermal","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/biobjective_hydro/","page":"Biobjective hydro-thermal","title":"Biobjective hydro-thermal","text":"using SDDP, HiGHS, Statistics, Test\n\nfunction biobjective_example()\n model = SDDP.LinearPolicyGraph(\n stages = 3,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n ) do subproblem, _\n @variable(subproblem, 0 <= v <= 200, SDDP.State, initial_value = 50)\n @variables(subproblem, begin\n 0 <= g[i = 1:2] <= 100\n 0 <= u <= 150\n s >= 0\n shortage_cost >= 0\n end)\n @expressions(subproblem, begin\n objective_1, g[1] + 10 * g[2]\n objective_2, shortage_cost\n end)\n @constraints(subproblem, begin\n inflow_constraint, v.out == v.in - u - s\n g[1] + g[2] + u == 150\n shortage_cost >= 40 - v.out\n shortage_cost >= 60 - 2 * v.out\n shortage_cost >= 80 - 4 * v.out\n end)\n # You must call this for a biobjective problem!\n SDDP.initialize_biobjective_subproblem(subproblem)\n SDDP.parameterize(subproblem, 0.0:5:50.0) do ω\n JuMP.set_normalized_rhs(inflow_constraint, ω)\n # You must call `set_biobjective_functions` from within\n # `SDDP.parameterize`.\n return SDDP.set_biobjective_functions(\n subproblem,\n objective_1,\n objective_2,\n )\n end\n end\n pareto_weights =\n SDDP.train_biobjective(model, solution_limit = 10, iteration_limit = 10)\n solutions = [(k, v) for (k, v) in pareto_weights]\n sort!(solutions; by = x -> x[1])\n @test length(solutions) == 10\n # Test for convexity! The gradient must be decreasing as we move from left\n # to right.\n gradient(a, b) = (b[2] - a[2]) / (b[1] - a[1])\n grad = Inf\n for i in 1:9\n new_grad = gradient(solutions[i], solutions[i+1])\n @test new_grad < grad\n grad = new_grad\n end\n return\nend\n\nbiobjective_example()","category":"page"},{"location":"examples/biobjective_hydro/","page":"Biobjective hydro-thermal","title":"Biobjective hydro-thermal","text":"","category":"page"},{"location":"examples/biobjective_hydro/","page":"Biobjective hydro-thermal","title":"Biobjective hydro-thermal","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/asset_management_simple/","page":"Asset management","title":"Asset management","text":"EditURL = \"asset_management_simple.jl\"","category":"page"},{"location":"examples/asset_management_simple/#Asset-management","page":"Asset management","title":"Asset management","text":"","category":"section"},{"location":"examples/asset_management_simple/","page":"Asset management","title":"Asset management","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/asset_management_simple/","page":"Asset management","title":"Asset management","text":"Taken from the book J.R. Birge, F. 
Louveaux, Introduction to Stochastic Programming, Springer Series in Operations Research and Financial Engineering, Springer New York, New York, NY, 2011","category":"page"},{"location":"examples/asset_management_simple/","page":"Asset management","title":"Asset management","text":"using SDDP, HiGHS, Test\n\nfunction asset_management_simple()\n model = SDDP.PolicyGraph(\n SDDP.MarkovianGraph(\n Array{Float64,2}[\n [1.0]',\n [0.5 0.5],\n [0.5 0.5; 0.5 0.5],\n [0.5 0.5; 0.5 0.5],\n ],\n ),\n lower_bound = -1_000.0,\n optimizer = HiGHS.Optimizer,\n ) do subproblem, index\n (stage, markov_state) = index\n r_stock = [1.25, 1.06]\n r_bonds = [1.14, 1.12]\n @variable(subproblem, stocks >= 0, SDDP.State, initial_value = 0.0)\n @variable(subproblem, bonds >= 0, SDDP.State, initial_value = 0.0)\n if stage == 1\n @constraint(subproblem, stocks.out + bonds.out == 55)\n @stageobjective(subproblem, 0)\n elseif 1 < stage < 4\n @constraint(\n subproblem,\n r_stock[markov_state] * stocks.in +\n r_bonds[markov_state] * bonds.in == stocks.out + bonds.out\n )\n @stageobjective(subproblem, 0)\n else\n @variable(subproblem, over >= 0)\n @variable(subproblem, short >= 0)\n @constraint(\n subproblem,\n r_stock[markov_state] * stocks.in +\n r_bonds[markov_state] * bonds.in - over + short == 80\n )\n @stageobjective(subproblem, -over + 4 * short)\n end\n end\n SDDP.train(model; log_frequency = 5)\n @test SDDP.calculate_bound(model) ≈ 1.514 atol = 1e-4\n return\nend\n\nasset_management_simple()","category":"page"},{"location":"examples/asset_management_simple/","page":"Asset management","title":"Asset management","text":"","category":"page"},{"location":"examples/asset_management_simple/","page":"Asset management","title":"Asset management","text":"This page was generated using Literate.jl.","category":"page"},{"location":"guides/access_previous_variables/#Access-variables-from-a-previous-stage","page":"Access variables from a previous stage","title":"Access variables from a previous stage","text":"","category":"section"},{"location":"guides/access_previous_variables/","page":"Access variables from a previous stage","title":"Access variables from a previous stage","text":"A common question is \"how do I use a variable from a previous stage in a constraint?\"","category":"page"},{"location":"guides/access_previous_variables/","page":"Access variables from a previous stage","title":"Access variables from a previous stage","text":"info: Info\nIf you want to use a variable from a previous stage, it must be a state variable.","category":"page"},{"location":"guides/access_previous_variables/","page":"Access variables from a previous stage","title":"Access variables from a previous stage","text":"Here are some examples:","category":"page"},{"location":"guides/access_previous_variables/#Access-a-first-stage-decision-in-a-future-stage","page":"Access variables from a previous stage","title":"Access a first-stage decision in a future stage","text":"","category":"section"},{"location":"guides/access_previous_variables/","page":"Access variables from a previous stage","title":"Access variables from a previous stage","text":"This is often useful if your first-stage decisions are capacity-expansion type decisions (e.g., you choose first how much capacity to add, but because it takes time to build, it only shows up in some future stage).","category":"page"},{"location":"guides/access_previous_variables/","page":"Access variables from a previous stage","title":"Access variables from a previous stage","text":"using SDDP, 
HiGHS\nSDDP.LinearPolicyGraph(\n stages = 10,\n sense = :Max,\n upper_bound = 100.0,\n optimizer = HiGHS.Optimizer,\n) do sp, t\n # Capacity of the generator. Decided in the first stage.\n @variable(sp, capacity >= 0, SDDP.State, initial_value = 0)\n # Quantity of water stored.\n @variable(sp, reservoir >= 0, SDDP.State, initial_value = 0)\n # Quantity of water to use for electricity generation in current stage.\n @variable(sp, generation >= 0)\n if t == 1\n # There are no constraints in the first stage, but we need to push the\n # initial value of the reservoir to the next stage.\n @constraint(sp, reservoir.out == reservoir.in)\n # Since we're maximizing profit, subtract cost of capacity.\n @stageobjective(sp, -capacity.out)\n else\n # Water balance constraint.\n @constraint(sp, balance, reservoir.out - reservoir.in + generation == 0)\n # Generation limit.\n @constraint(sp, generation <= capacity.in)\n # Push capacity to the next stage.\n @constraint(sp, capacity.out == capacity.in)\n # Maximize generation.\n @stageobjective(sp, generation)\n # Random inflow in balance constraint.\n SDDP.parameterize(sp, rand(4)) do w\n set_normalized_rhs(balance, w)\n end\n end\nend","category":"page"},{"location":"guides/access_previous_variables/#Access-a-decision-from-N-stages-ago","page":"Access variables from a previous stage","title":"Access a decision from N stages ago","text":"","category":"section"},{"location":"guides/access_previous_variables/","page":"Access variables from a previous stage","title":"Access variables from a previous stage","text":"This is often useful if have some inventory problem with a lead-time on orders.","category":"page"},{"location":"guides/access_previous_variables/","page":"Access variables from a previous stage","title":"Access variables from a previous stage","text":"using SDDP, HiGHS\nSDDP.LinearPolicyGraph(\n stages = 10,\n sense = :Max,\n upper_bound = 100,\n optimizer = HiGHS.Optimizer,\n) do sp, t\n # Current inventory on hand.\n @variable(sp, inventory >= 0, SDDP.State, initial_value = 0)\n # Inventory pipeline.\n # pipeline[1].out are orders placed today.\n # pipeline[5].in are orders that arrive today and can be added to the\n # current inventory.\n # Stock moves up one slot in the pipeline each stage.\n @variable(sp, pipeline[1:5], SDDP.State, initial_value = 0)\n # The number of units to order today.\n @variable(sp, 0 <= buy <= 10)\n # The number of units to sell today.\n @variable(sp, sell >= 0)\n # Buy orders get placed in the pipeline.\n @constraint(sp, pipeline[1].out == buy)\n # Stock moves up one slot in the pipeline each stage.\n @constraint(sp, [i=2:5], pipeline[i].out == pipeline[i-1].in)\n # Stock balance constraint.\n @constraint(sp, inventory.out == inventory.in - sell + pipeline[5].in)\n # Maximize quantity of sold items.\n @stageobjective(sp, sell)\nend","category":"page"},{"location":"guides/access_previous_variables/","page":"Access variables from a previous stage","title":"Access variables from a previous stage","text":"warning: Warning\nYou must initialize the same number of state variables in every stage, even if they are not used in that stage.","category":"page"},{"location":"guides/create_a_belief_state/","page":"Create a belief state","title":"Create a belief state","text":"DocTestSetup = quote\n using SDDP\nend","category":"page"},{"location":"guides/create_a_belief_state/#Create-a-belief-state","page":"Create a belief state","title":"Create a belief 
state","text":"","category":"section"},{"location":"guides/create_a_belief_state/","page":"Create a belief state","title":"Create a belief state","text":"SDDP.jl includes an implementation of the algorithm described in Dowson, O., Morton, D.P., & Pagnoncelli, B.K. (2020). Partially observable multistage stochastic optimization. Operations Research Letters, 48(4), 505–512.","category":"page"},{"location":"guides/create_a_belief_state/","page":"Create a belief state","title":"Create a belief state","text":"Given a SDDP.Graph object (see Create a general policy graph for details), we can define the ambiguity partition using SDDP.add_ambiguity_set.","category":"page"},{"location":"guides/create_a_belief_state/","page":"Create a belief state","title":"Create a belief state","text":"For example, first we create a Markovian graph:","category":"page"},{"location":"guides/create_a_belief_state/","page":"Create a belief state","title":"Create a belief state","text":"using SDDP\nG = SDDP.MarkovianGraph([[0.5 0.5], [0.2 0.8; 0.8 0.2]])","category":"page"},{"location":"guides/create_a_belief_state/","page":"Create a belief state","title":"Create a belief state","text":"Then we add an ambiguity set over the nodes in the each stage:","category":"page"},{"location":"guides/create_a_belief_state/","page":"Create a belief state","title":"Create a belief state","text":"for t in 1:2\n SDDP.add_ambiguity_set(G, [(t, 1), (t, 2)])\nend","category":"page"},{"location":"guides/create_a_belief_state/","page":"Create a belief state","title":"Create a belief state","text":"This results in the graph:","category":"page"},{"location":"guides/create_a_belief_state/","page":"Create a belief state","title":"Create a belief state","text":"G","category":"page"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"CurrentModule = SDDP","category":"page"},{"location":"release_notes/#Release-notes","page":"Release notes","title":"Release notes","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.","category":"page"},{"location":"release_notes/#[v1.6.2](https://github.com/odow/SDDP.jl/releases/tag/v1.6.2)-(August-24,-2023)","page":"Release notes","title":"v1.6.2 (August 24, 2023)","text":"","category":"section"},{"location":"release_notes/#Fixed","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"MSPFormat now detect and exploit stagewise independent lattices (#653)\nFixed set_optimizer for models read from file (#654)","category":"page"},{"location":"release_notes/#Other","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed typo in pglib_opf.jl (#647)\nFixed documentation build and added color (#652)","category":"page"},{"location":"release_notes/#[v1.6.1](https://github.com/odow/SDDP.jl/releases/tag/v1.6.1)-(July-20,-2023)","page":"Release notes","title":"v1.6.1 (July 20, 2023)","text":"","category":"section"},{"location":"release_notes/#Fixed-2","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed bugs in MSPFormat reader (#638) (#639)","category":"page"},{"location":"release_notes/#Other-2","page":"Release 
notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Clarified OutOfSampleMonteCarlo docstring (#643)","category":"page"},{"location":"release_notes/#[v1.6.0](https://github.com/odow/SDDP.jl/releases/tag/v1.6.0)-(July-3,-2023)","page":"Release notes","title":"v1.6.0 (July 3, 2023)","text":"","category":"section"},{"location":"release_notes/#Added","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added RegularizedForwardPass (#624)\nAdded FirstStageStoppingRule (#634)","category":"page"},{"location":"release_notes/#Other-3","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Removed an unbound type parameter (#632)\nFixed typo in docstring (#633)\nAdded Here-and-now and hazard-decision tutorial (#635)","category":"page"},{"location":"release_notes/#[v1.5.1](https://github.com/odow/SDDP.jl/releases/tag/v1.5.1)-(June-30,-2023)","page":"Release notes","title":"v1.5.1 (June 30, 2023)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"This release contains a number of minor code changes, but it has a large impact on the content that is printed to screen. In particular, we now log periodically, instead of each iteration, and a \"good\" stopping rule is used as the default if none are specified. Try using SDDP.train(model) to see the difference.","category":"page"},{"location":"release_notes/#Other-4","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed various typos in the documentation (#617)\nFixed printing test after changes in JuMP (#618)\nSet SimulationStoppingRule as the default stopping rule (#619)\nChanged the default logging frequency. Pass log_every_seconds = 0.0 to train to revert to the old behavior. (#620)\nAdded example usage with Distributions.jl (@slwu89) (#622)\nRemoved the numerical issue @warn (#627)\nImproved the quality of docstrings (#630)","category":"page"},{"location":"release_notes/#[v1.5.0](https://github.com/odow/SDDP.jl/releases/tag/v1.5.0)-(May-14,-2023)","page":"Release notes","title":"v1.5.0 (May 14, 2023)","text":"","category":"section"},{"location":"release_notes/#Added-2","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added the ability to use a different model for the forward pass. This is a novel feature that lets you train better policies when the model is non-convex or does not have a well-defined dual. See the Alternative forward models tutorial in which we train convex and non-convex formulations of the optimal power flow problem. (#611)","category":"page"},{"location":"release_notes/#Other-5","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Updated missing changelog entries (#608)\nRemoved global variables (#610)\nConverted the Options struct to keyword arguments. This struct was a private implementation detail, but the change is breaking if you developed an extension to SDDP that touched these internals. 
(#612)\nFixed some typos (#613)","category":"page"},{"location":"release_notes/#[v1.4.0](https://github.com/odow/SDDP.jl/releases/tag/v1.4.0)-(May-8,-2023)","page":"Release notes","title":"v1.4.0 (May 8, 2023)","text":"","category":"section"},{"location":"release_notes/#Added-3","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added SDDP.SimulationStoppingRule (#598)\nAdded sampling_scheme argument to SDDP.write_to_file (#607)","category":"page"},{"location":"release_notes/#Fixed-3","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed parsing of some MSPFormat files (#602) (#604)\nFixed printing in header (#605)","category":"page"},{"location":"release_notes/#[v1.3.0](https://github.com/odow/SDDP.jl/releases/tag/v1.3.0)-(May-3,-2023)","page":"Release notes","title":"v1.3.0 (May 3, 2023)","text":"","category":"section"},{"location":"release_notes/#Added-4","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added experimental support for SDDP.MSPFormat.read_from_file (#593)","category":"page"},{"location":"release_notes/#Other-6","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Updated to StochOptFormat v0.3 (#600)","category":"page"},{"location":"release_notes/#[v1.2.1](https://github.com/odow/SDDP.jl/releases/tag/v1.2.1)-(May-1,-2023)","page":"Release notes","title":"v1.2.1 (May 1, 2023)","text":"","category":"section"},{"location":"release_notes/#Fixed-4","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed log_every_seconds (#597)","category":"page"},{"location":"release_notes/#[v1.2.0](https://github.com/odow/SDDP.jl/releases/tag/v1.2.0)-(May-1,-2023)","page":"Release notes","title":"v1.2.0 (May 1, 2023)","text":"","category":"section"},{"location":"release_notes/#Added-5","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added SDDP.SimulatorSamplingScheme (#594)\nAdded log_every_seconds argument to SDDP.train (#595)","category":"page"},{"location":"release_notes/#Other-7","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Tweaked how the log is printed (#588)\nUpdated to StochOptFormat v0.2 (#592)","category":"page"},{"location":"release_notes/#[v1.1.4](https://github.com/odow/SDDP.jl/releases/tag/v1.1.4)-(April-10,-2023)","page":"Release notes","title":"v1.1.4 (April 10, 2023)","text":"","category":"section"},{"location":"release_notes/#Fixed-5","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Logs are now flushed every iteration (#584)","category":"page"},{"location":"release_notes/#Other-8","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added docstrings to various functions (#581)\nMinor documentation updates (#580)\nClarified integrality 
documentation (#582)\nUpdated the README (#585)\nNumber of numerical issues is now printed to the log (#586)","category":"page"},{"location":"release_notes/#[v1.1.3](https://github.com/odow/SDDP.jl/releases/tag/v1.1.3)-(April-2,-2023)","page":"Release notes","title":"v1.1.3 (April 2, 2023)","text":"","category":"section"},{"location":"release_notes/#Other-9","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed typo in Example: deterministic to stochastic tutorial (#578)\nFixed typo in documentation of SDDP.simulate (#577)","category":"page"},{"location":"release_notes/#[v1.1.2](https://github.com/odow/SDDP.jl/releases/tag/v1.1.2)-(March-18,-2023)","page":"Release notes","title":"v1.1.2 (March 18, 2023)","text":"","category":"section"},{"location":"release_notes/#Other-10","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added Example: deterministic to stochastic tutorial (#572)","category":"page"},{"location":"release_notes/#[v1.1.1](https://github.com/odow/SDDP.jl/releases/tag/v1.1.1)-(March-16,-2023)","page":"Release notes","title":"v1.1.1 (March 16, 2023)","text":"","category":"section"},{"location":"release_notes/#Other-11","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed email in Project.toml\nAdded notebook to documentation tutorials (#571)","category":"page"},{"location":"release_notes/#[v1.1.0](https://github.com/odow/SDDP.jl/releases/tag/v1.1.0)-(January-12,-2023)","page":"Release notes","title":"v1.1.0 (January 12, 2023)","text":"","category":"section"},{"location":"release_notes/#Added-6","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added the node_name_parser argument to SDDP.write_cuts_to_file and added the option to skip nodes in SDDP.read_cuts_from_file (#565)","category":"page"},{"location":"release_notes/#[v1.0.0](https://github.com/odow/SDDP.jl/releases/tag/v1.0.0)-(January-3,-2023)","page":"Release notes","title":"v1.0.0 (January 3, 2023)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Although we're bumping MAJOR version, this is a non-breaking release. Going forward:","category":"page"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"New features will bump the MINOR version\nBug fixes, maintenance, and documentation updates will bump the PATCH version\nWe will support only the Long Term Support (currently v1.6.7) and the latest patch (currently v1.8.4) releases of Julia. Updates to the LTS version will bump the MINOR version\nUpdates to the compat bounds of package dependencies will bump the PATCH version.","category":"page"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"We do not intend any breaking changes to the public API, which would require a new MAJOR release. The public API is everything defined in the documentation. 
Anything not in the documentation is considered private and may change in any PATCH release.","category":"page"},{"location":"release_notes/#Added-7","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added num_nodes argument to SDDP.UnicyclicGraph (#562)\nAdded support for passing an optimizer to SDDP.Asynchronous (#545)","category":"page"},{"location":"release_notes/#Other-12","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Updated Plotting tools to use live plots (#563)\nAdded vale as a linter (#565)\nImproved documentation for initializing a parallel scheme (#566)","category":"page"},{"location":"release_notes/#[v0.4.9](https://github.com/odow/SDDP.jl/releases/tag/v0.4.9)-(January-3,-2023)","page":"Release notes","title":"v0.4.9 (January 3, 2023)","text":"","category":"section"},{"location":"release_notes/#Added-8","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added SDDP.UnicyclicGraph (#556)","category":"page"},{"location":"release_notes/#Other-13","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added tutorial on Markov Decision Processes (#556)\nAdded two-stage newsvendor tutorial (#557)\nRefactored the layout of the documentation (#554) (#555)\nUpdated copyright to 2023 (#558)\nFixed errors in the documentation (#561)","category":"page"},{"location":"release_notes/#[v0.4.8](https://github.com/odow/SDDP.jl/releases/tag/v0.4.8)-(December-19,-2022)","page":"Release notes","title":"v0.4.8 (December 19, 2022)","text":"","category":"section"},{"location":"release_notes/#Added-9","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added terminate_on_cycle option to SDDP.Historical (#549)\nAdded include_last_node option to SDDP.DefaultForwardPass (#547)","category":"page"},{"location":"release_notes/#Fixed-6","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Reverted then fixed (#531) because it failed to account for problems with integer variables (#546) (#551)","category":"page"},{"location":"release_notes/#[v0.4.7](https://github.com/odow/SDDP.jl/releases/tag/v0.4.7)-(December-17,-2022)","page":"Release notes","title":"v0.4.7 (December 17, 2022)","text":"","category":"section"},{"location":"release_notes/#Added-10","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added initial_node support to InSampleMonteCarlo and OutOfSampleMonteCarlo (#535)","category":"page"},{"location":"release_notes/#Fixed-7","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Rethrow InterruptException when solver is interrupted (#534)\nFixed numerical recovery when we need dual solutions (#531) (Thanks @bfpc)\nFixed re-using the dashboard = true option between solves (#538)\nFixed bug when no @stageobjective is set (now defaults to 0.0) (#539)\nFixed errors thrown when invalid inputs 
are provided to add_objective_state (#540)","category":"page"},{"location":"release_notes/#Other-14","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Drop support for Julia versions prior to 1.6 (#533)\nUpdated versions of dependencies (#522) (#533)\nSwitched to HiGHS in the documentation and tests (#533)\nAdded license headers (#519)\nFixed link in air conditioning example (#521) (Thanks @conema)\nClarified variable naming in deterministic equivalent (#525) (Thanks @lucasprocessi)\nAdded this change log (#536)\nCuts are now written to model.cuts.json when numerical instability is discovered. This can aid debugging because it allows to you reload the cuts as of the iteration that caused the numerical issue (#537)","category":"page"},{"location":"release_notes/#[v0.4.6](https://github.com/odow/SDDP.jl/releases/tag/v0.4.6)-(March-25,-2022)","page":"Release notes","title":"v0.4.6 (March 25, 2022)","text":"","category":"section"},{"location":"release_notes/#Other-15","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Updated to JuMP v1.0 (#517)","category":"page"},{"location":"release_notes/#[v0.4.5](https://github.com/odow/SDDP.jl/releases/tag/v0.4.5)-(March-9,-2022)","page":"Release notes","title":"v0.4.5 (March 9, 2022)","text":"","category":"section"},{"location":"release_notes/#Fixed-8","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed issue with set_silent in a subproblem (#510)","category":"page"},{"location":"release_notes/#Other-16","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed many typos (#500) (#501) (#506) (#511) (Thanks @bfpc)\nUpdate to JuMP v0.23 (#514)\nAdded auto-regressive tutorial (#507)","category":"page"},{"location":"release_notes/#[v0.4.4](https://github.com/odow/SDDP.jl/releases/tag/v0.4.4)-(December-11,-2021)","page":"Release notes","title":"v0.4.4 (December 11, 2021)","text":"","category":"section"},{"location":"release_notes/#Added-11","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added BanditDuality (#471)\nAdded benchmark scripts (#475) (#476) (#490)\nwrite_cuts_to_file now saves visited states (#468)","category":"page"},{"location":"release_notes/#Fixed-9","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed BoundStalling in a deterministic policy (#470) (#474)\nFixed magnitude warning with zero coefficients (#483)","category":"page"},{"location":"release_notes/#Other-17","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Improvements to LagrangianDuality (#481) (#482) (#487)\nImprovements to StrengthenedConicDuality (#486)\nSwitch to functional form for the tests (#478)\nFixed typos (#472) (Thanks @vfdev-5)\nUpdate to JuMP v0.22 (#498)","category":"page"},{"location":"release_notes/#[v0.4.3](https://github.com/odow/SDDP.jl/releases/tag/v0.4.3)-(August-31,-2021)","page":"Release notes","title":"v0.4.3 (August 31, 
2021)","text":"","category":"section"},{"location":"release_notes/#Added-12","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added biobjective solver (#462)\nAdded forward_pass_callback (#466)","category":"page"},{"location":"release_notes/#Other-18","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Update tutorials and documentation (#459) (#465)\nOrganize how paper materials are stored (#464)","category":"page"},{"location":"release_notes/#[v0.4.2](https://github.com/odow/SDDP.jl/releases/tag/v0.4.2)-(August-24,-2021)","page":"Release notes","title":"v0.4.2 (August 24, 2021)","text":"","category":"section"},{"location":"release_notes/#Fixed-10","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed a bug in Lagrangian duality (#457)","category":"page"},{"location":"release_notes/#[v0.4.1](https://github.com/odow/SDDP.jl/releases/tag/v0.4.1)-(August-23,-2021)","page":"Release notes","title":"v0.4.1 (August 23, 2021)","text":"","category":"section"},{"location":"release_notes/#Other-19","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Minor changes to our implementation of LagrangianDuality (#454) (#455)","category":"page"},{"location":"release_notes/#[v0.4.0](https://github.com/odow/SDDP.jl/releases/tag/v0.4.0)-(August-17,-2021)","page":"Release notes","title":"v0.4.0 (August 17, 2021)","text":"","category":"section"},{"location":"release_notes/#Breaking","page":"Release notes","title":"Breaking","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"A large refactoring for how we handle stochastic integer programs. This added support for things like SDDP.ContinuousConicDuality and SDDP.LagrangianDuality. It was breaking because we removed the integrality_handler argument to PolicyGraph. 
(#499) (#453)","category":"page"},{"location":"release_notes/#Other-20","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Documentation improvements (#447) (#448) (#450)","category":"page"},{"location":"release_notes/#[v0.3.17](https://github.com/odow/SDDP.jl/releases/tag/v0.3.17)-(July-6,-2021)","page":"Release notes","title":"v0.3.17 (July 6, 2021)","text":"","category":"section"},{"location":"release_notes/#Added-13","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added SDDP.PSRSamplingScheme (#426)","category":"page"},{"location":"release_notes/#Other-21","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Display more model attributes (#438)\nDocumentation improvements (#433) (#437) (#439)","category":"page"},{"location":"release_notes/#[v0.3.16](https://github.com/odow/SDDP.jl/releases/tag/v0.3.16)-(June-17,-2021)","page":"Release notes","title":"v0.3.16 (June 17, 2021)","text":"","category":"section"},{"location":"release_notes/#Added-14","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added SDDP.RiskAdjustedForwardPass (#413)\nAllow SDDP.Historical to sample sequentially (#420)","category":"page"},{"location":"release_notes/#Other-22","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Update risk measure docstrings (#418)","category":"page"},{"location":"release_notes/#[v0.3.15](https://github.com/odow/SDDP.jl/releases/tag/v0.3.15)-(June-1,-2021)","page":"Release notes","title":"v0.3.15 (June 1, 2021)","text":"","category":"section"},{"location":"release_notes/#Added-15","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added SDDP.StoppingChain","category":"page"},{"location":"release_notes/#Fixed-11","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed scoping bug in SDDP.@stageobjective (#407)\nFixed a bug when the initial point is infeasible (#411)\nSet subproblems to silent by default (#409)","category":"page"},{"location":"release_notes/#Other-23","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Add JuliaFormatter (#412)\nDocumentation improvements (#406) (#408)","category":"page"},{"location":"release_notes/#[v0.3.14](https://github.com/odow/SDDP.jl/releases/tag/v0.3.14)-(March-30,-2021)","page":"Release notes","title":"v0.3.14 (March 30, 2021)","text":"","category":"section"},{"location":"release_notes/#Fixed-12","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed O(N^2) behavior in get_same_children (#393)","category":"page"},{"location":"release_notes/#[v0.3.13](https://github.com/odow/SDDP.jl/releases/tag/v0.3.13)-(March-27,-2021)","page":"Release notes","title":"v0.3.13 (March 27, 
2021)","text":"","category":"section"},{"location":"release_notes/#Fixed-13","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed bug in print.jl\nFixed compat of Reexport (#388)","category":"page"},{"location":"release_notes/#[v0.3.12](https://github.com/odow/SDDP.jl/releases/tag/v0.3.12)-(March-22,-2021)","page":"Release notes","title":"v0.3.12 (March 22, 2021)","text":"","category":"section"},{"location":"release_notes/#Added-16","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added problem statistics to header (#385) (#386)","category":"page"},{"location":"release_notes/#Fixed-14","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed subtypes in visualization (#384)","category":"page"},{"location":"release_notes/#[v0.3.11](https://github.com/odow/SDDP.jl/releases/tag/v0.3.11)-(March-22,-2021)","page":"Release notes","title":"v0.3.11 (March 22, 2021)","text":"","category":"section"},{"location":"release_notes/#Fixed-15","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed constructor in direct mode (#383)","category":"page"},{"location":"release_notes/#Other-24","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fix documentation (#379)","category":"page"},{"location":"release_notes/#[v0.3.10](https://github.com/odow/SDDP.jl/releases/tag/v0.3.10)-(February-23,-2021)","page":"Release notes","title":"v0.3.10 (February 23, 2021)","text":"","category":"section"},{"location":"release_notes/#Fixed-16","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed seriescolor in publication plot (#376)","category":"page"},{"location":"release_notes/#[v0.3.9](https://github.com/odow/SDDP.jl/releases/tag/v0.3.9)-(February-20,-2021)","page":"Release notes","title":"v0.3.9 (February 20, 2021)","text":"","category":"section"},{"location":"release_notes/#Added-17","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Add option to simulate with different incoming state (#372)\nAdded warning for cuts with high dynamic range (#373)","category":"page"},{"location":"release_notes/#Fixed-17","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed seriesalpha in publication plot (#375)","category":"page"},{"location":"release_notes/#[v0.3.8](https://github.com/odow/SDDP.jl/releases/tag/v0.3.8)-(January-19,-2021)","page":"Release notes","title":"v0.3.8 (January 19, 2021)","text":"","category":"section"},{"location":"release_notes/#Other-25","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Documentation improvements (#367) (#369) (#370)","category":"page"},{"location":"release_notes/#[v0.3.7](https://github.com/odow/SDDP.jl/releases/tag/v0.3.7)-(January-8,-2021)","page":"Release 
notes","title":"v0.3.7 (January 8, 2021)","text":"","category":"section"},{"location":"release_notes/#Other-26","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Documentation improvements (#362) (#363) (#365) (#366)\nBump copyright (#364)","category":"page"},{"location":"release_notes/#[v0.3.6](https://github.com/odow/SDDP.jl/releases/tag/v0.3.6)-(December-17,-2020)","page":"Release notes","title":"v0.3.6 (December 17, 2020)","text":"","category":"section"},{"location":"release_notes/#Other-27","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fix typos (#358)\nCollapse navigation bar in docs (#359)\nUpdate TagBot.yml (#361)","category":"page"},{"location":"release_notes/#[v0.3.5](https://github.com/odow/SDDP.jl/releases/tag/v0.3.5)-(November-18,-2020)","page":"Release notes","title":"v0.3.5 (November 18, 2020)","text":"","category":"section"},{"location":"release_notes/#Other-28","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Update citations (#348)\nSwitch to GitHub actions (#355)","category":"page"},{"location":"release_notes/#[v0.3.4](https://github.com/odow/SDDP.jl/releases/tag/v0.3.4)-(August-25,-2020)","page":"Release notes","title":"v0.3.4 (August 25, 2020)","text":"","category":"section"},{"location":"release_notes/#Added-18","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added non-uniform distributionally robust risk measure (#328)\nAdded numerical recovery functions (#330)\nAdded experimental StochOptFormat (#332) (#336) (#337) (#341) (#343) (#344)\nAdded entropic risk measure (#347)","category":"page"},{"location":"release_notes/#Other-29","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Documentation improvements (#327) (#333) (#339) (#340)","category":"page"},{"location":"release_notes/#[v0.3.3](https://github.com/odow/SDDP.jl/releases/tag/v0.3.3)-(June-19,-2020)","page":"Release notes","title":"v0.3.3 (June 19, 2020)","text":"","category":"section"},{"location":"release_notes/#Added-19","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added asynchronous support for price and belief states (#325)\nAdded ForwardPass plug-in system (#320)","category":"page"},{"location":"release_notes/#Fixed-18","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fix check for probabilities in Markovian graph (#322)","category":"page"},{"location":"release_notes/#[v0.3.2](https://github.com/odow/SDDP.jl/releases/tag/v0.3.2)-(April-6,-2020)","page":"Release notes","title":"v0.3.2 (April 6, 2020)","text":"","category":"section"},{"location":"release_notes/#Added-20","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added log_frequency argument to SDDP.train (#307)","category":"page"},{"location":"release_notes/#Other-30","page":"Release 
notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Improve error message in deterministic equivalent (#312)\nUpdate to RecipesBase 1.0 (#313)","category":"page"},{"location":"release_notes/#[v0.3.1](https://github.com/odow/SDDP.jl/releases/tag/v0.3.1)-(February-26,-2020)","page":"Release notes","title":"v0.3.1 (February 26, 2020)","text":"","category":"section"},{"location":"release_notes/#Fixed-19","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed filename in integrality_handlers.jl (#304)","category":"page"},{"location":"release_notes/#[v0.3.0](https://github.com/odow/SDDP.jl/releases/tag/v0.3.0)-(February-20,-2020)","page":"Release notes","title":"v0.3.0 (February 20, 2020)","text":"","category":"section"},{"location":"release_notes/#Breaking-2","page":"Release notes","title":"Breaking","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Breaking changes to update to JuMP v0.21 (#300).","category":"page"},{"location":"release_notes/#[v0.2.4](https://github.com/odow/SDDP.jl/releases/tag/v0.2.4)-(February-7,-2020)","page":"Release notes","title":"v0.2.4 (February 7, 2020)","text":"","category":"section"},{"location":"release_notes/#Added-21","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added a counter for the number of total subproblem solves (#301)","category":"page"},{"location":"release_notes/#Other-31","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Update formatter (#298)\nAdded tests (#299)","category":"page"},{"location":"release_notes/#[v0.2.3](https://github.com/odow/SDDP.jl/releases/tag/v0.2.3)-(January-24,-2020)","page":"Release notes","title":"v0.2.3 (January 24, 2020)","text":"","category":"section"},{"location":"release_notes/#Added-22","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added support for convex risk measures (#294)","category":"page"},{"location":"release_notes/#Fixed-20","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed bug when subproblem is infeasible (#296)\nFixed bug in deterministic equivalent (#297)","category":"page"},{"location":"release_notes/#Other-32","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added example from IJOC paper (#293)","category":"page"},{"location":"release_notes/#[v0.2.2](https://github.com/odow/SDDP.jl/releases/tag/v0.2.2)-(January-10,-2020)","page":"Release notes","title":"v0.2.2 (January 10, 2020)","text":"","category":"section"},{"location":"release_notes/#Fixed-21","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed flakey time limit in tests (#291)","category":"page"},{"location":"release_notes/#Other-33","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release 
notes","title":"Release notes","text":"Removed MathOptFormat.jl (#289)\nUpdate copyright (#290)","category":"page"},{"location":"release_notes/#[v0.2.1](https://github.com/odow/SDDP.jl/releases/tag/v0.2.1)-(December-19,-2019)","page":"Release notes","title":"v0.2.1 (December 19, 2019)","text":"","category":"section"},{"location":"release_notes/#Added-23","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added support for approximating a Markov lattice (#282) (#285)\nAdd tools for visualizing the value function (#272) (#286)\nWrite .mof.json files on error (#284)","category":"page"},{"location":"release_notes/#Other-34","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Improve documentation (#281) (#283)\nUpdate tests for Julia 1.3 (#287)","category":"page"},{"location":"release_notes/#[v0.2.0](https://github.com/odow/SDDP.jl/releases/tag/v0.2.0)-(December-16,-2019)","page":"Release notes","title":"v0.2.0 (December 16, 2019)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"This version added the asynchronous parallel implementation with a few minor breaking changes in how we iterated internally. It didn't break basic user-facing models, only implementations that implemented some of the extension features. It probably could have been a v1.1 release.","category":"page"},{"location":"release_notes/#Added-24","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added asynchronous parallel implementation (#277)\nAdded roll-out algorithm for cyclic graphs (#279)","category":"page"},{"location":"release_notes/#Other-35","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Improved error messages in PolicyGraph (#271)\nAdded JuliaFormatter (#273) (#276)\nFixed compat bounds (#274) (#278)\nAdded documentation for simulating non-standard graphs (#280)","category":"page"},{"location":"release_notes/#[v0.1.0](https://github.com/odow/SDDP.jl/releases/tag/v0.1.0)-(October-17,-2019)","page":"Release notes","title":"v0.1.0 (October 17, 2019)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"A complete rewrite of SDDP.jl based on the policy graph framework. This was essentially a new package. It has minimal code in common with the previous implementation.","category":"page"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Development started on September 28, 2018 in Kokako.jl, and the code was merged into SDDP.jl on March 14, 2019.","category":"page"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"The pull request SDDP.jl#180 lists the 29 issues that the rewrite closed.","category":"page"},{"location":"release_notes/#[v0.0.1](https://github.com/odow/SDDP.jl/releases/tag/v0.0.1)-(April-18,-2018)","page":"Release notes","title":"v0.0.1 (April 18, 2018)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Initial release. Development had been underway since January 22, 2016 in the StochDualDynamicProgram.jl repository. 
The last development commit there was April 5, 2017. Work then continued in this repository for a year before the first tagged release.","category":"page"},{"location":"examples/asset_management_stagewise/","page":"Asset management with modifications","title":"Asset management with modifications","text":"EditURL = \"asset_management_stagewise.jl\"","category":"page"},{"location":"examples/asset_management_stagewise/#Asset-management-with-modifications","page":"Asset management with modifications","title":"Asset management with modifications","text":"","category":"section"},{"location":"examples/asset_management_stagewise/","page":"Asset management with modifications","title":"Asset management with modifications","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/asset_management_stagewise/","page":"Asset management with modifications","title":"Asset management with modifications","text":"A modified version of the Asset Management Problem Taken from the book J.R. Birge, F. Louveaux, Introduction to Stochastic Programming, Springer Series in Operations Research and Financial Engineering, Springer New York, New York, NY, 2011","category":"page"},{"location":"examples/asset_management_stagewise/","page":"Asset management with modifications","title":"Asset management with modifications","text":"using SDDP, HiGHS, Test\n\nfunction asset_management_stagewise(; cut_type)\n w_s = [1.25, 1.06]\n w_b = [1.14, 1.12]\n Phi = [-1, 5]\n Psi = [0.02, 0.0]\n\n model = SDDP.MarkovianPolicyGraph(\n sense = :Max,\n transition_matrices = Array{Float64,2}[\n [1.0]',\n [0.5 0.5],\n [0.5 0.5; 0.5 0.5],\n [0.5 0.5; 0.5 0.5],\n ],\n upper_bound = 1000.0,\n optimizer = HiGHS.Optimizer,\n ) do subproblem, node\n t, i = node\n @variable(subproblem, xs >= 0, SDDP.State, initial_value = 0)\n @variable(subproblem, xb >= 0, SDDP.State, initial_value = 0)\n if t == 1\n @constraint(subproblem, xs.out + xb.out == 55 + xs.in + xb.in)\n @stageobjective(subproblem, 0)\n elseif t == 2 || t == 3\n @variable(subproblem, phi)\n @constraint(\n subproblem,\n w_s[i] * xs.in + w_b[i] * xb.in + phi == xs.out + xb.out\n )\n SDDP.parameterize(subproblem, [1, 2], [0.6, 0.4]) do ω\n JuMP.fix(phi, Phi[ω])\n @stageobjective(subproblem, Psi[ω] * xs.out)\n end\n else\n @variable(subproblem, u >= 0)\n @variable(subproblem, v >= 0)\n @constraint(\n subproblem,\n w_s[i] * xs.in + w_b[i] * xb.in + u - v == 80,\n )\n @stageobjective(subproblem, -4u + v)\n end\n end\n SDDP.train(\n model;\n cut_type = cut_type,\n log_frequency = 10,\n risk_measure = (node) -> begin\n if node[1] != 3\n SDDP.Expectation()\n else\n SDDP.EAVaR(lambda = 0.5, beta = 0.5)\n end\n end,\n )\n @test SDDP.calculate_bound(model) ≈ 1.278 atol = 1e-3\n return\nend\n\nasset_management_stagewise(cut_type = SDDP.SINGLE_CUT)\n\nasset_management_stagewise(cut_type = SDDP.MULTI_CUT)","category":"page"},{"location":"examples/asset_management_stagewise/","page":"Asset management with modifications","title":"Asset management with modifications","text":"","category":"page"},{"location":"examples/asset_management_stagewise/","page":"Asset management with modifications","title":"Asset management with modifications","text":"This page was generated using Literate.jl.","category":"page"},{"location":"guides/choose_a_stopping_rule/#Choose-a-stopping-rule","page":"Choose a stopping rule","title":"Choose a stopping rule","text":"","category":"section"},{"location":"guides/choose_a_stopping_rule/","page":"Choose a stopping rule","title":"Choose a stopping rule","text":"The theory 
of SDDP tells us that the algorithm converges to an optimal policy almost surely in a finite number of iterations. In practice, this number is very large. Therefore, we need some way of pre-emptively terminating SDDP when the solution is “good enough.” We call heuristics for pre-emptively terminating SDDP stopping rules.","category":"page"},{"location":"guides/choose_a_stopping_rule/#Basic-limits","page":"Choose a stopping rule","title":"Basic limits","text":"","category":"section"},{"location":"guides/choose_a_stopping_rule/","page":"Choose a stopping rule","title":"Choose a stopping rule","text":"The training of an SDDP policy can be terminated after a fixed number of iterations using the iteration_limit keyword.","category":"page"},{"location":"guides/choose_a_stopping_rule/","page":"Choose a stopping rule","title":"Choose a stopping rule","text":"SDDP.train(model, iteration_limit = 10)","category":"page"},{"location":"guides/choose_a_stopping_rule/","page":"Choose a stopping rule","title":"Choose a stopping rule","text":"The training of an SDDP policy can be terminated after a fixed number of seconds using the time_limit keyword.","category":"page"},{"location":"guides/choose_a_stopping_rule/","page":"Choose a stopping rule","title":"Choose a stopping rule","text":"SDDP.train(model, time_limit = 2.0)","category":"page"},{"location":"guides/choose_a_stopping_rule/#Stopping-rules","page":"Choose a stopping rule","title":"Stopping rules","text":"","category":"section"},{"location":"guides/choose_a_stopping_rule/","page":"Choose a stopping rule","title":"Choose a stopping rule","text":"In addition to the limits provided as keyword arguments, a variety of other stopping rules are available. These can be passed to SDDP.train as a vector to the stopping_rules keyword. 
For example:","category":"page"},{"location":"guides/choose_a_stopping_rule/","page":"Choose a stopping rule","title":"Choose a stopping rule","text":"SDDP.train(model, stopping_rules = [SDDP.BoundStalling(10, 1e-4)])","category":"page"},{"location":"guides/choose_a_stopping_rule/","page":"Choose a stopping rule","title":"Choose a stopping rule","text":"See Stopping rules for a list of stopping rules supported by SDDP.jl.","category":"page"},{"location":"examples/belief/","page":"Partially observable inventory management","title":"Partially observable inventory management","text":"EditURL = \"belief.jl\"","category":"page"},{"location":"examples/belief/#Partially-observable-inventory-management","page":"Partially observable inventory management","title":"Partially observable inventory management","text":"","category":"section"},{"location":"examples/belief/","page":"Partially observable inventory management","title":"Partially observable inventory management","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/belief/","page":"Partially observable inventory management","title":"Partially observable inventory management","text":"using SDDP, HiGHS, Random, Statistics, Test\n\nfunction inventory_management_problem()\n demand_values = [1.0, 2.0]\n demand_prob = Dict(:Ah => [0.2, 0.8], :Bh => [0.8, 0.2])\n graph = SDDP.Graph(\n :root_node,\n [:Ad, :Ah, :Bd, :Bh],\n [\n (:root_node => :Ad, 0.5),\n (:root_node => :Bd, 0.5),\n (:Ad => :Ah, 1.0),\n (:Ah => :Ad, 0.8),\n (:Ah => :Bd, 0.1),\n (:Bd => :Bh, 1.0),\n (:Bh => :Bd, 0.8),\n (:Bh => :Ad, 0.1),\n ],\n )\n SDDP.add_ambiguity_set(graph, [:Ad, :Bd], 1e2)\n SDDP.add_ambiguity_set(graph, [:Ah, :Bh], 1e2)\n\n model = SDDP.PolicyGraph(\n graph,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n ) do subproblem, node\n @variables(\n subproblem,\n begin\n 0 <= inventory <= 2, (SDDP.State, initial_value = 0.0)\n buy >= 0\n demand\n end\n )\n @constraint(subproblem, demand == inventory.in - inventory.out + buy)\n if node == :Ad || node == :Bd || node == :D\n JuMP.fix(demand, 0)\n @stageobjective(subproblem, buy)\n else\n SDDP.parameterize(subproblem, demand_values, demand_prob[node]) do ω\n return JuMP.fix(demand, ω)\n end\n @stageobjective(subproblem, 2 * buy + inventory.out)\n end\n end\n # Train the policy.\n Random.seed!(123)\n SDDP.train(\n model;\n iteration_limit = 100,\n cut_type = SDDP.SINGLE_CUT,\n log_frequency = 10,\n )\n results = SDDP.simulate(model, 500)\n objectives =\n [sum(s[:stage_objective] for s in simulation) for simulation in results]\n sample_mean = round(Statistics.mean(objectives); digits = 2)\n sample_ci = round(1.96 * Statistics.std(objectives) / sqrt(500); digits = 2)\n @test SDDP.calculate_bound(model) ≈ sample_mean atol = sample_ci\n return\nend\n\ninventory_management_problem()","category":"page"},{"location":"examples/belief/","page":"Partially observable inventory management","title":"Partially observable inventory management","text":"","category":"page"},{"location":"examples/belief/","page":"Partially observable inventory management","title":"Partially observable inventory management","text":"This page was generated using Literate.jl.","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"EditURL = \"decision_hazard.jl\"","category":"page"},{"location":"tutorial/decision_hazard/#Here-and-now-and-hazard-decision","page":"Here-and-now and hazard-decision","title":"Here-and-now and 
hazard-decision","text":"","category":"section"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"(Image: ) (Image: )","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"SDDP.jl assumes that the agent gets to make a decision after observing the realization of the random variable. This is called a wait-and-see or hazard-decision model. In contrast, you might want your agent to make decisions before observing the random variable. This is called a here-and-now or decision-hazard model.","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"info: Info\nThe terms decision-hazard and hazard-decision from the French hasard, meaning chance. It could also have been translated as uncertainty-decision and decision-uncertainty, but the community seems to have settled on the transliteration hazard instead. We like the hazard-decision and decision-hazard terms because they clearly communicate the order of the decision and the uncertainty.","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"The purpose of this tutorial is to demonstrate how to model here-and-how decisions in SDDP.jl.","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"This tutorial uses the following packages:","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"using SDDP\nimport HiGHS","category":"page"},{"location":"tutorial/decision_hazard/#Hazard-decision-formulation","page":"Here-and-now and hazard-decision","title":"Hazard-decision formulation","text":"","category":"section"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"As an example, we're going to build a standard hydro-thermal scheduling model, with a single hydro-reservoir and a single thermal generation plant. 
In each of the four stages, we need to choose some mix of u_thermal and u_hydro to meet a demand of 9 units, where unmet demand is penalized at a rate of $500/unit.","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"hazard_decision = SDDP.LinearPolicyGraph(\n stages = 4,\n sense = :Min,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n) do sp, node\n @variables(sp, begin\n 0 <= x_storage <= 8, (SDDP.State, initial_value = 6)\n u_thermal >= 0\n u_hydro >= 0\n u_unmet_demand >= 0\n end)\n @constraint(sp, u_thermal + u_hydro == 9 - u_unmet_demand)\n @constraint(sp, c_balance, x_storage.out == x_storage.in - u_hydro + 0)\n SDDP.parameterize(sp, [2, 3]) do ω_inflow\n return set_normalized_rhs(c_balance, ω_inflow)\n end\n @stageobjective(sp, 500 * u_unmet_demand + 20 * u_thermal)\nend","category":"page"},{"location":"tutorial/decision_hazard/#Decision-hazard-formulation","page":"Here-and-now and hazard-decision","title":"Decision-hazard formulation","text":"","category":"section"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"In the wait-and-see formulation, we get to decide the generation variables after observing the realization of ω_inflow. However, a common modeling situation is that we need to decide the level of thermal generation u_thermal before observing the inflow.","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"SDDP.jl can model here-and-now decisions with a modeling trick: a wait-and-see decision in stage t-1 is equivalent to a here-and-now decision in stage t.","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"In other words, we need to convert the u_thermal decision from a control variable that is decided in stage t, to a state variable that is decided in stage t-1. Here's our new model, with the three lines that have changed:","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"decision_hazard = SDDP.LinearPolicyGraph(\n stages = 4,\n sense = :Min,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n) do sp, node\n @variables(sp, begin\n 0 <= x_storage <= 8, (SDDP.State, initial_value = 6)\n u_thermal >= 0, (SDDP.State, initial_value = 0) # <-- changed\n u_hydro >= 0\n u_unmet_demand >= 0\n end)\n @constraint(sp, u_thermal.in + u_hydro == 9 - u_unmet_demand) # <-- changed\n @constraint(sp, c_balance, x_storage.out == x_storage.in - u_hydro + 0)\n SDDP.parameterize(sp, [2, 3]) do ω\n return set_normalized_rhs(c_balance, ω)\n end\n @stageobjective(sp, 500 * u_unmet_demand + 20 * u_thermal.in) # <-- changed\nend","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"Can you understand the reformulation? In each stage, we now use the value of u_thermal.in instead of u_thermal, and the value of the outgoing u_thermal.out is the here-and-now decision for stage t+1.","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"(If you can spot a \"mistake\" with this model, don't worry, we'll fix it below. 
Presenting it like this simplifies the exposition.)","category":"page"},{"location":"tutorial/decision_hazard/#Comparison","page":"Here-and-now and hazard-decision","title":"Comparison","text":"","category":"section"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"Let's compare the cost of operating the two models:","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"function train_and_compute_cost(model)\n SDDP.train(model; print_level = 0)\n return println(\"Cost = \\$\", SDDP.calculate_bound(model))\nend\n\ntrain_and_compute_cost(hazard_decision)","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"train_and_compute_cost(decision_hazard)","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"This suggests that choosing the thermal generation before observing the inflow adds a cost of $250. But does this make sense?","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"If we look carefully at our decision_hazard model, the incoming value of u_thermal.in in the first stage is fixed to the initial_value of 0. Therefore, we must always meet the full demand with u_hydro, which we cannot do without incurring unmet demand.","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"To allow the model to choose an optimal level of u_thermal in the first-stage, we need to add an extra stage that is deterministic with no stage objective.","category":"page"},{"location":"tutorial/decision_hazard/#Fixing-the-decision-hazard","page":"Here-and-now and hazard-decision","title":"Fixing the decision-hazard","text":"","category":"section"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"In the following model, we now have five stages, so that stage t+1 in decision_hazard_2 corresponds to stage t in decision_hazard. We've also added an if-statement, which adds different constraints depending on the node. 
Note that we need to add an x_storage.out == x_storage.in constraint because the storage can't change in this new first-stage.","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"decision_hazard_2 = SDDP.LinearPolicyGraph(\n stages = 5, # <-- changed\n sense = :Min,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n) do sp, node\n @variables(sp, begin\n 0 <= x_storage <= 8, (SDDP.State, initial_value = 6)\n u_thermal >= 0, (SDDP.State, initial_value = 0)\n u_hydro >= 0\n u_unmet_demand >= 0\n end)\n if node == 1 # <-- new\n @constraint(sp, x_storage.out == x_storage.in) # <-- new\n @stageobjective(sp, 0) # <-- new\n else\n @constraint(sp, u_thermal.in + u_hydro == 9 - u_unmet_demand)\n @constraint(sp, c_balance, x_storage.out == x_storage.in - u_hydro + 0)\n SDDP.parameterize(sp, [2, 3]) do ω\n return set_normalized_rhs(c_balance, ω)\n end\n @stageobjective(sp, 500 * u_unmet_demand + 20 * u_thermal.in)\n end\nend\n\ntrain_and_compute_cost(decision_hazard_2)","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"Now we find that the cost of choosing the thermal generation before observing the inflow adds a much more reasonable cost of $10.","category":"page"},{"location":"tutorial/decision_hazard/#Summary","page":"Here-and-now and hazard-decision","title":"Summary","text":"","category":"section"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"To summarize, the difference between here-and-now and wait-and-see variables is a modeling choice.","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"To create a here-and-now decision, add it as a state variable to the previous stage","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"In some cases, you'll need to add an additional \"first-stage\" problem to enable the model to choose an optimal value for the here-and-now decision variable. You do not need to do this if the first stage is deterministic. 
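As an aside that is not part of the original tutorial, one way to check the reformulation is to simulate the trained decision_hazard_2 policy and read the here-and-now decision off the state variable; the replication count below is arbitrary:

```julia
# Illustrative sketch: inspect the first-stage thermal decision of
# `decision_hazard_2` after training.
simulations = SDDP.simulate(decision_hazard_2, 100, [:u_thermal])
# `u_thermal` is a state variable, so each simulation record stores an
# SDDP.State with `.in` and `.out` fields. The stage-1 `.out` value is the
# thermal generation committed before the stage-2 inflow is observed.
first_stage_thermal = [replication[1][:u_thermal].out for replication in simulations]
# The new first stage is deterministic, so this should collapse to one value.
unique(first_stage_thermal)
```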
You must make sure that the subproblem is feasible for all possible incoming values of the here-and-now decision variable.","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"This page was generated using Literate.jl.","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"EditURL = \"pglib_opf.jl\"","category":"page"},{"location":"tutorial/pglib_opf/#Alternative-forward-models","page":"Alternative forward models","title":"Alternative forward models","text":"","category":"section"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"(Image: ) (Image: )","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"This example demonstrates how to train convex and non-convex models.","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"This example uses the following packages:","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"using SDDP\nimport Ipopt\nimport PowerModels\nimport Test","category":"page"},{"location":"tutorial/pglib_opf/#Formulation","page":"Alternative forward models","title":"Formulation","text":"","category":"section"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"For our model, we build a simple optimal power flow model with a single hydro-electric generator.","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"The formulation of our optimal power flow problem depends on model_type, which must be one of the PowerModels formulations.","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"function build_model(model_type)\n filename = joinpath(@__DIR__, \"pglib_opf_case5_pjm.m\")\n data = PowerModels.parse_file(filename)\n return SDDP.PolicyGraph(\n SDDP.UnicyclicGraph(0.95);\n sense = :Min,\n lower_bound = 0.0,\n optimizer = Ipopt.Optimizer,\n ) do sp, t\n power_model = PowerModels.instantiate_model(\n data,\n model_type,\n PowerModels.build_opf;\n jump_model = sp,\n )\n # Now add hydro power models. 
Assume that generator 5 is hydro, and the\n # rest are thermal.\n pg = power_model.var[:it][:pm][:nw][0][:pg][5]\n sp[:pg] = pg\n @variable(sp, x >= 0, SDDP.State, initial_value = 10.0)\n @variable(sp, deficit >= 0)\n @constraint(sp, balance, x.out == x.in - pg + deficit)\n @stageobjective(sp, objective_function(sp) + 1e6 * deficit)\n SDDP.parameterize(sp, [0, 2, 5]) do ω\n return SDDP.set_normalized_rhs(balance, ω)\n end\n return\n end\nend","category":"page"},{"location":"tutorial/pglib_opf/#Training-a-convex-model","page":"Alternative forward models","title":"Training a convex model","text":"","category":"section"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"We can build and train a convex approximation of the optimal power flow problem.","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"The problem with the convex model is that it does not accurately simulate the true dynamics of the problem. Therefore, it under-estimates the true cost of operation.","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"convex = build_model(PowerModels.DCPPowerModel)\nSDDP.train(convex; iteration_limit = 10)","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"To more accurately simulate the dynamics of the problem, a common approach is to write the cuts representing the policy to a file, and then read them into a non-convex model:","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"SDDP.write_cuts_to_file(convex, \"convex.cuts.json\")\nnon_convex = build_model(PowerModels.ACPPowerModel)\nSDDP.read_cuts_from_file(non_convex, \"convex.cuts.json\")","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"Now we can simulate non_convex to evaluate the policy.","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"result = SDDP.simulate(non_convex, 1)","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"A problem with reading and writing the cuts to file is that the cuts have been generated from trial points of the convex model. Therefore, the policy may be arbitrarily bad at points visited by the non-convex model.","category":"page"},{"location":"tutorial/pglib_opf/#Training-a-non-convex-model","page":"Alternative forward models","title":"Training a non-convex model","text":"","category":"section"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"We can also build and train a non-convex formulation of the optimal power flow problem.","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"The problem with the non-convex model is that because it is non-convex, SDDP.jl may find a sub-optimal policy. 
Therefore, it may over-estimate the true cost of operation.","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"non_convex = build_model(PowerModels.ACPPowerModel)\nSDDP.train(non_convex; iteration_limit = 10)\nresult = SDDP.simulate(non_convex, 1)","category":"page"},{"location":"tutorial/pglib_opf/#Combining-convex-and-non-convex-models","page":"Alternative forward models","title":"Combining convex and non-convex models","text":"","category":"section"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"To summarize, training with the convex model constructs cuts at points that may never be visited by the non-convex model, and training with the non-convex model may construct arbitrarily poor cuts because a key assumption of SDDP is convexity.","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"As a compromise, we can train a policy using a combination of the convex and non-convex models; we'll use the non-convex model to generate trial points on the forward pass, and we'll use the convex model to build cuts on the backward pass.","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"convex = build_model(PowerModels.DCPPowerModel)","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"non_convex = build_model(PowerModels.ACPPowerModel)","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"To do so, we train convex using the SDDP.AlternativeForwardPass forward pass, which simulates the model using non_convex, and we use SDDP.AlternativePostIterationCallback as a post-iteration callback, which copies cuts from the convex model back into the non_convex model.","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"SDDP.train(\n convex;\n forward_pass = SDDP.AlternativeForwardPass(non_convex),\n post_iteration_callback = SDDP.AlternativePostIterationCallback(non_convex),\n iteration_limit = 10,\n)","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"In practice, if we were to simulate non_convex now, we should obtain a better policy than either of the two previous approaches.","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"This page was generated using Literate.jl.","category":"page"},{"location":"","page":"Home","title":"Home","text":"CurrentModule = SDDP","category":"page"},{"location":"","page":"Home","title":"Home","text":"\"logo\"","category":"page"},{"location":"#Introduction","page":"Home","title":"Introduction","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"Welcome to the documentation for SDDP.jl, a package for solving large multistage convex stochastic programming problems using stochastic dual dynamic programming.","category":"page"},{"location":"","page":"Home","title":"Home","text":"SDDP.jl is built on 
JuMP, so it supports a number of open-source and commercial solvers, making it a powerful and flexible tool for stochastic optimization.","category":"page"},{"location":"","page":"Home","title":"Home","text":"The implementation of the stochastic dual dynamic programming algorithm in SDDP.jl is state of the art, and it includes support for a number of advanced features not commonly found in other implementations. This includes support for:","category":"page"},{"location":"","page":"Home","title":"Home","text":"infinite horizon problems\nconvex risk measures\nmixed-integer state and control variables\npartially observable stochastic processes.","category":"page"},{"location":"#Installation","page":"Home","title":"Installation","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"Install SDDP.jl as follows:","category":"page"},{"location":"","page":"Home","title":"Home","text":"julia> import Pkg\n\njulia> Pkg.add(\"SDDP\")","category":"page"},{"location":"#Resources-for-getting-started","page":"Home","title":"Resources for getting started","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"There are a few ways to get started with SDDP.jl:","category":"page"},{"location":"","page":"Home","title":"Home","text":"Become familiar with JuMP by reading the JuMP documentation\nRead the introductory tutorial An introduction to SDDP.jl\nBrowse some of the examples, such as Example: deterministic to stochastic","category":"page"},{"location":"","page":"Home","title":"Home","text":"If you need help, please open a GitHub issue.","category":"page"},{"location":"#How-the-documentation-is-structured","page":"Home","title":"How the documentation is structured","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"Having a high-level overview of how this documentation is structured will help you know where to look for certain things.","category":"page"},{"location":"","page":"Home","title":"Home","text":"Tutorials contains step-by-step explanations of how to use SDDP.jl. Once you've got SDDP.jl installed, start by reading An introduction to SDDP.jl.\nGuides contains \"how-to\" snippets that demonstrate specific topics within SDDP.jl. A good one to get started on is Debug a model.\nExplanation contains step-by-step explanations of the theory and algorithms that underpin SDDP.jl. If you want a basic understanding of the algorithm behind SDDP.jl, start with Introductory theory.\nExamples contain worked examples of various problems solved using SDDP.jl. A good one to get started on is the Hydro-thermal scheduling problem. In particular, it shows how to solve an infinite horizon problem.\nThe API Reference contains a complete list of the functions you can use in SDDP.jl. Look here if you want to know how to use a particular function.","category":"page"},{"location":"#Citing-SDDP.jl","page":"Home","title":"Citing SDDP.jl","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"If you use SDDP.jl, we ask that you please cite the following:","category":"page"},{"location":"","page":"Home","title":"Home","text":"@article{dowson_sddp.jl,\n\ttitle = {{SDDP}.jl: a {Julia} package for stochastic dual dynamic programming},\n\tjournal = {INFORMS Journal on Computing},\n\tauthor = {Dowson, O. 
and Kapelevich, L.},\n\tdoi = {https://doi.org/10.1287/ijoc.2020.0987},\n\tyear = {2021},\n\tvolume = {33},\n\tissue = {1},\n\tpages = {27-33},\n}","category":"page"},{"location":"","page":"Home","title":"Home","text":"Here is an earlier preprint.","category":"page"},{"location":"","page":"Home","title":"Home","text":"If you use the infinite horizon functionality, we ask that you please cite the following:","category":"page"},{"location":"","page":"Home","title":"Home","text":"@article{dowson_policy_graph,\n\ttitle = {The policy graph decomposition of multistage stochastic optimization problems},\n\tdoi = {https://doi.org/10.1002/net.21932},\n\tjournal = {Networks},\n\tauthor = {Dowson, O.},\n\tvolume = {76},\n\tissue = {1},\n\tpages = {3-23},\n\tyear = {2020}\n}","category":"page"},{"location":"","page":"Home","title":"Home","text":"Here is an earlier preprint.","category":"page"},{"location":"","page":"Home","title":"Home","text":"If you use the partially observable functionality, we ask that you please cite the following:","category":"page"},{"location":"","page":"Home","title":"Home","text":"@article{dowson_pomsp,\n\ttitle = {Partially observable multistage stochastic programming},\n\tdoi = {https://doi.org/10.1016/j.orl.2020.06.005},\n\tjournal = {Operations Research Letters},\n\tauthor = {Dowson, O. and Morton, D.P. and Pagnoncelli, B.K.},\n\tvolume = {48},\n\tissue = {4},\n\tpages = {505-512},\n\tyear = {2020}\n}","category":"page"},{"location":"","page":"Home","title":"Home","text":"Here is an earlier preprint.","category":"page"},{"location":"","page":"Home","title":"Home","text":"If you use the objective state functionality, we ask that you please cite the following:","category":"page"},{"location":"","page":"Home","title":"Home","text":"@article{downward_objective,\n\ttitle = {Stochastic dual dynamic programming with stagewise-dependent objective uncertainty},\n\tdoi = {https://doi.org/10.1016/j.orl.2019.11.002},\n\tjournal = {Operations Research Letters},\n\tauthor = {Downward, A. and Dowson, O. and Baucke, R.},\n\tvolume = {48},\n\tissue = {1},\n\tpages = {33-39},\n\tyear = {2020}\n}","category":"page"},{"location":"","page":"Home","title":"Home","text":"Here is an earlier preprint.","category":"page"},{"location":"","page":"Home","title":"Home","text":"If you use the entropic risk measure, we ask that you please cite the following:","category":"page"},{"location":"","page":"Home","title":"Home","text":"@article{dowson_entropic,\n\ttitle = {Incorporating convex risk measures into multistage stochastic programming algorithms},\n\tdoi = {https://doi.org/10.1007/s10479-022-04977-w},\n\tjournal = {Annals of Operations Research},\n\tauthor = {Dowson, O. and Morton, D.P. 
and Pagnoncelli, B.K.},\n\tyear = {2022},\n}","category":"page"},{"location":"","page":"Home","title":"Home","text":"Here is an earlier preprint.","category":"page"},{"location":"examples/all_blacks/","page":"Deterministic All Blacks","title":"Deterministic All Blacks","text":"EditURL = \"all_blacks.jl\"","category":"page"},{"location":"examples/all_blacks/#Deterministic-All-Blacks","page":"Deterministic All Blacks","title":"Deterministic All Blacks","text":"","category":"section"},{"location":"examples/all_blacks/","page":"Deterministic All Blacks","title":"Deterministic All Blacks","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/all_blacks/","page":"Deterministic All Blacks","title":"Deterministic All Blacks","text":"using SDDP, HiGHS, Test\n\nfunction all_blacks()\n # Number of time periods, number of seats, R_ij = revenue from selling seat\n # i at time j, offer_ij = whether an offer for seat i will come at time j\n (T, N, R, offer) = (3, 2, [3 3 6; 3 3 6], [1 1 0; 1 0 1])\n model = SDDP.LinearPolicyGraph(\n stages = T,\n sense = :Max,\n upper_bound = 100.0,\n optimizer = HiGHS.Optimizer,\n ) do sp, stage\n # Seat remaining?\n @variable(sp, 0 <= x[1:N] <= 1, SDDP.State, Bin, initial_value = 1)\n # Action: accept offer, or don't accept offer\n @variable(sp, accept_offer, Bin)\n # Balance on seats\n @constraint(\n sp,\n [i in 1:N],\n x[i].out == x[i].in - offer[i, stage] * accept_offer\n )\n @stageobjective(\n sp,\n sum(R[i, stage] * offer[i, stage] * accept_offer for i in 1:N)\n )\n end\n SDDP.train(model; duality_handler = SDDP.LagrangianDuality())\n @test SDDP.calculate_bound(model) ≈ 9.0\n return\nend\n\nall_blacks()","category":"page"},{"location":"examples/all_blacks/","page":"Deterministic All Blacks","title":"Deterministic All Blacks","text":"","category":"page"},{"location":"examples/all_blacks/","page":"Deterministic All Blacks","title":"Deterministic All Blacks","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/sldp_example_one/","page":"SLDP: example 1","title":"SLDP: example 1","text":"EditURL = \"sldp_example_one.jl\"","category":"page"},{"location":"examples/sldp_example_one/#SLDP:-example-1","page":"SLDP: example 1","title":"SLDP: example 1","text":"","category":"section"},{"location":"examples/sldp_example_one/","page":"SLDP: example 1","title":"SLDP: example 1","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/sldp_example_one/","page":"SLDP: example 1","title":"SLDP: example 1","text":"This example is derived from Section 4.2 of the paper: Ahmed, S., Cabral, F. G., & da Costa, B. F. P. (2019). Stochastic Lipschitz Dynamic Programming. Optimization Online. 
PDF","category":"page"},{"location":"examples/sldp_example_one/","page":"SLDP: example 1","title":"SLDP: example 1","text":"using SDDP, HiGHS, Test\n\nfunction sldp_example_one()\n model = SDDP.LinearPolicyGraph(\n stages = 8,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n ) do sp, t\n @variable(sp, x, SDDP.State, initial_value = 2.0)\n @variables(sp, begin\n x⁺ >= 0\n x⁻ >= 0\n 0 <= u <= 1, Bin\n ω\n end)\n @stageobjective(sp, 0.9^(t - 1) * (x⁺ + x⁻))\n @constraints(sp, begin\n x.out == x.in + 2 * u - 1 + ω\n x⁺ >= x.out\n x⁻ >= -x.out\n end)\n points = [\n -0.3089653673606697,\n -0.2718277412744214,\n -0.09611178608243474,\n 0.24645863921577763,\n 0.5204224537256875,\n ]\n return SDDP.parameterize(φ -> JuMP.fix(ω, φ), sp, [points; -points])\n end\n SDDP.train(model; log_frequency = 10)\n @test SDDP.calculate_bound(model) <= 1.1675\n return\nend\n\nsldp_example_one()","category":"page"},{"location":"examples/sldp_example_one/","page":"SLDP: example 1","title":"SLDP: example 1","text":"","category":"page"},{"location":"examples/sldp_example_one/","page":"SLDP: example 1","title":"SLDP: example 1","text":"This page was generated using Literate.jl.","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/#Simulate-using-a-different-sampling-scheme","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"","category":"section"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"DocTestSetup = quote\n using SDDP, HiGHS\nend","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"By default, SDDP.simulate will simulate the policy using the distributions of noise terms that were defined when the model was created. We call these in-sample simulations. However, in general the in-sample distributions are an approximation of some underlying probability model which we term the true process. 
Therefore, SDDP.jl makes it easy to simulate the policy using different probability distributions.","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"To demonstrate the different ways of simulating the policy, we're going to use the model from the tutorial Markovian policy graphs.","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"using SDDP, HiGHS\n\nΩ = [\n (inflow = 0.0, fuel_multiplier = 1.5),\n (inflow = 50.0, fuel_multiplier = 1.0),\n (inflow = 100.0, fuel_multiplier = 0.75)\n]\n\nmodel = SDDP.MarkovianPolicyGraph(\n transition_matrices = Array{Float64, 2}[\n [ 1.0 ]',\n [ 0.75 0.25 ],\n [ 0.75 0.25 ; 0.25 0.75 ]\n ],\n sense = :Min,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer\n) do subproblem, node\n # Unpack the stage and Markov index.\n t, markov_state = node\n # Define the state variable.\n @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n # Define the control variables.\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n inflow\n end)\n # Define the constraints\n @constraints(subproblem, begin\n volume.out == volume.in + inflow - hydro_generation - hydro_spill\n thermal_generation + hydro_generation == 150.0\n end)\n # Note how we can use `markov_state` to dispatch an `if` statement.\n probability = if markov_state == 1 # wet climate state\n [1/6, 1/3, 1/2]\n else # dry climate state\n [1/2, 1/3, 1/6]\n end\n\n fuel_cost = [50.0, 100.0, 150.0]\n SDDP.parameterize(subproblem, Ω, probability) do ω\n JuMP.fix(inflow, ω.inflow)\n @stageobjective(subproblem,\n ω.fuel_multiplier * fuel_cost[t] * thermal_generation)\n end\nend\n\nSDDP.train(model; iteration_limit = 10, print_level = 0);\n\nmodel\n\n# output\n\nA policy graph with 5 nodes.\n Node indices: (1, 1), (2, 1), (2, 2), (3, 1), (3, 2)","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/#In-sample-Monte-Carlo-simulation","page":"Simulate using a different sampling scheme","title":"In-sample Monte Carlo simulation","text":"","category":"section"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"To simulate the policy using the data defined when model was created, use SDDP.InSampleMonteCarlo.","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"simulations = SDDP.simulate(\n model, 20, sampling_scheme = SDDP.InSampleMonteCarlo()\n)\n\nsort(unique(\n [node[:noise_term] for simulation in simulations for node in simulation]\n))\n\n# output\n\n3-element Array{NamedTuple{(:inflow, :fuel_multiplier),Tuple{Float64,Float64}},1}:\n (inflow = 0.0, fuel_multiplier = 1.5)\n (inflow = 50.0, fuel_multiplier = 1.0)\n (inflow = 100.0, fuel_multiplier = 0.75)","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/#Out-of-sample-Monte-Carlo-simulation","page":"Simulate using a different sampling scheme","title":"Out-of-sample Monte Carlo 
simulation","text":"","category":"section"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"Instead of using the in-sample data, we can perform an out-of-sample simulation of the policy using the SDDP.OutOfSampleMonteCarlo sampling scheme.","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"For each node, the SDDP.OutOfSampleMonteCarlo needs to define a new distribution for the transition probabilities between nodes in the policy graph, and a new distribution for the stagewise independent noise terms.","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"note: Note\nThe support of the distribution for the stagewise independent noise terms does not have to be the same as the in-sample distributions.","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"sampling_scheme = SDDP.OutOfSampleMonteCarlo(model) do node\n stage, markov_state = node\n if stage == 0\n # Called from the root node. Transition to (1, 1) with probability 1.0.\n # Only return the list of children, _not_ a list of noise terms.\n return [SDDP.Noise((1, 1), 1.0)]\n elseif stage == 3\n # Called from the final node. Return an empty list for the children,\n # and a single, deterministic realization for the noise terms.\n children = SDDP.Noise[]\n noise_terms = [SDDP.Noise((inflow = 75.0, fuel_multiplier = 1.2), 1.0)]\n return children, noise_terms\n else\n # Called from a normal node. Return the in-sample distribution for the\n # noise terms, but modify the transition probabilities so that the\n # Markov switching probability is now 50%.\n probability = markov_state == 1 ? [1/6, 1/3, 1/2] : [1/2, 1/3, 1/6]\n noise_terms = [SDDP.Noise(ω, p) for (ω, p) in zip(Ω, probability)]\n children = [\n SDDP.Noise((stage + 1, 1), 0.5), SDDP.Noise((stage + 1, 2), 0.5)\n ]\n return children, noise_terms\n end\nend\n\nsimulations = SDDP.simulate(model, 1, sampling_scheme = sampling_scheme)\n\nsimulations[1][3][:noise_term]\n\n# output\n\n(inflow = 75.0, fuel_multiplier = 1.2)","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"Alternatively, if you only want to modify the stagewise independent noise terms, pass use_insample_transition = true.","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"sampling_scheme = SDDP.OutOfSampleMonteCarlo(\n model, use_insample_transition = true\n) do node\nstage, markov_state = node\n if stage == 3\n # Called from the final node. Return a single, deterministic\n # realization for the noise terms. Don't return the children because we\n # use the in-sample data.\n return [SDDP.Noise((inflow = 65.0, fuel_multiplier = 1.1), 1.0)]\n else\n # Called from a normal node. Return the in-sample distribution for the\n # noise terms. 
Don't return the children because we use the in-sample\n # data.\n probability = markov_state == 1 ? [1/6, 1/3, 1/2] : [1/2, 1/3, 1/6]\n return [SDDP.Noise(ω, p) for (ω, p) in zip(Ω, probability)]\n end\nend\n\nsimulations = SDDP.simulate(model, 1, sampling_scheme = sampling_scheme)\n\nsimulations[1][3][:noise_term]\n\n# output\n\n(inflow = 65.0, fuel_multiplier = 1.1)","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/#Historical-simulation","page":"Simulate using a different sampling scheme","title":"Historical simulation","text":"","category":"section"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"Instead of performing a Monte Carlo simulation like the previous tutorials, we may want to simulate one particular sequence of noise realizations. This historical simulation can also be conducted by passing a SDDP.Historical sampling scheme to the sampling_scheme keyword of the SDDP.simulate function.","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"We can confirm that the historical sequence of nodes was visited by querying the :node_index key of the simulation results.","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"simulations = SDDP.simulate(\n model,\n sampling_scheme = SDDP.Historical(\n [((1, 1), Ω[1]), ((2, 2), Ω[3]), ((3, 1), Ω[2])],\n ),\n)\n\n[stage[:node_index] for stage in simulations[1]]\n\n# output\n\n3-element Vector{Tuple{Int64, Int64}}:\n (1, 1)\n (2, 2)\n (3, 1)","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"You can also pass a vector of scenarios, which are sampled sequentially:","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"julia> SDDP.Historical(\n [\n [\n (1, (inflow = 65.0, fuel_multiplier = 1.1)),\n (2, (inflow = 10.0, fuel_multiplier = 1.4)), # Can be out-of-sample\n (3, (inflow = 65.0, fuel_multiplier = 1.1)),\n ],\n [\n (1, (inflow = 65.0, fuel_multiplier = 1.1)),\n (2, (inflow = 100.0, fuel_multiplier = 0.75)),\n (3, (inflow = 0.0, fuel_multiplier = 1.5)),\n ],\n ],\n )\nA Historical sampler with 2 scenarios sampled sequentially.","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"Or a vector of scenarios and a corresponding vector of probabilities so that the historical scenarios are sampled probabilistically:","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"julia> SDDP.Historical(\n [\n [\n (1, (inflow = 65.0, fuel_multiplier = 1.1)),\n (2, (inflow = 10.0, fuel_multiplier = 1.4)), # Can be out-of-sample\n (3, (inflow = 65.0, fuel_multiplier = 1.1)),\n ],\n [\n (1, (inflow = 65.0, fuel_multiplier = 1.1)),\n (2, (inflow = 
100.0, fuel_multiplier = 0.75)),\n                   (3, (inflow = 0.0, fuel_multiplier = 1.5)),\n               ],\n           ],\n           [0.3, 0.7],\n       )\nA Historical sampler with 2 scenarios sampled probabilistically.","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"tip: Tip\nYour sample space doesn't have to be a NamedTuple. It can be any Julia type! Use a Vector if that is easier, or define your own struct.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"EditURL = \"first_steps.jl\"","category":"page"},{"location":"tutorial/first_steps/#An-introduction-to-SDDP.jl","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"(Image: ) (Image: )","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"SDDP.jl is a solver for multistage stochastic optimization problems. By multistage, we mean problems in which an agent makes a sequence of decisions over time. By stochastic, we mean that the agent is making decisions in the presence of uncertainty that is gradually revealed over the multiple stages.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"tip: Tip\nMultistage stochastic programming has a lot in common with fields like stochastic optimal control, approximate dynamic programming, Markov decision processes, and reinforcement learning. If it helps, you can think of SDDP as Q-learning in which we approximate the value function using linear programming duality.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"This tutorial is in two parts. First, it is an introduction to the background notation and theory we need, and second, it solves a simple multistage stochastic programming problem.","category":"page"},{"location":"tutorial/first_steps/#What-is-a-node?","page":"An introduction to SDDP.jl","title":"What is a node?","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"A common feature of multistage stochastic optimization problems is that they model an agent controlling a system over time. To simplify things initially, we're going to start by describing what happens at an instant in time at which the agent makes a decision. Only after this will we extend our problem to multiple stages and the notion of time.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"A node is a place at which the agent makes a decision.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"tip: Tip\nFor readers with a stochastic programming background, \"node\" is synonymous with \"stage\" in this section. 
However, for reasons that will become clear shortly, there can be more than one \"node\" per instant in time, which is why we prefer the term \"node\" over \"stage.\"","category":"page"},{"location":"tutorial/first_steps/#States,-controls,-and-random-variables","page":"An introduction to SDDP.jl","title":"States, controls, and random variables","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The system that we are modeling can be described by three types of variables.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"State variables track a property of the system over time.\nEach node has an associated incoming state variable (the value of the state at the start of the node), and an outgoing state variable (the value of the state at the end of the node).\nExamples of state variables include the volume of water in a reservoir, the number of units of inventory in a warehouse, or the spatial position of a moving vehicle.\nBecause state variables track the system over time, each node must have the same set of state variables.\nWe denote state variables by the letter x for the incoming state variable and x^prime for the outgoing state variable.\nControl variables are actions taken (implicitly or explicitly) by the agent within a node which modify the state variables.\nExamples of control variables include releases of water from the reservoir, sales or purchasing decisions, and acceleration or braking of the vehicle.\nControl variables are local to a node i, and they can differ between nodes. For example, some control variables may be available within certain nodes.\nWe denote control variables by the letter u.\nRandom variables are finite, discrete, exogenous random variables that the agent observes at the start of a node, before the control variables are decided.\nExamples of random variables include rainfall inflow into a reservoir, probabilistic perishing of inventory, and steering errors in a vehicle.\nRandom variables are local to a node i, and they can differ between nodes. For example, some nodes may have random variables, and some nodes may not.\nWe denote random variables by the Greek letter omega and the sample space from which they are drawn by Omega_i. The probability of sampling omega is denoted p_omega for simplicity.\nImportantly, the random variable associated with node i is independent of the random variables in all other nodes.","category":"page"},{"location":"tutorial/first_steps/#Dynamics","page":"An introduction to SDDP.jl","title":"Dynamics","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"In a node i, the three variables are related by a transition function, which maps the incoming state, the controls, and the random variables to the outgoing state as follows: x^prime = T_i(x u omega).","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"As a result of entering a node i with the incoming state x, observing random variable omega, and choosing control u, the agent incurs a cost C_i(x u omega). (If the agent is a maximizer, this can be a profit, or a negative cost.) 
We call C_i the stage objective.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"To choose their control variables in node i, the agent uses a decision rule u = pi_i(x omega), which is a function that maps the incoming state variable and observation of the random variable to a control u. This control must satisfy some feasibility requirements u in U_i(x omega).","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Here is a schematic which we can use to visualize a single node:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"(Image: Hazard-decision node)","category":"page"},{"location":"tutorial/first_steps/#Policy-graphs","page":"An introduction to SDDP.jl","title":"Policy graphs","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Now that we have a node, we need to connect multiple nodes together to form a multistage stochastic program. We call the graph created by connecting nodes together a policy graph.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The simplest type of policy graph is a linear policy graph. Here's a linear policy graph with three nodes:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"(Image: Linear policy graph)","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Here we have dropped the notations inside each node and replaced them by a label (1, 2, and 3) to represent nodes i=1, i=2, and i=3.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"In addition to nodes 1, 2, and 3, there is also a root node (the circle), and three arcs. Each arc has an origin node and a destination node, like 1 => 2, and a corresponding probability of transitioning from the origin to the destination. Unless specified, we assume that the arc probabilities are uniform over the number of outgoing arcs. Thus, in this picture the arc probabilities are all 1.0.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"State variables flow along the arcs of the graph. Thus, the outgoing state variable x^prime from node 1 becomes the incoming state variable x to node 2, and so on.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"We denote the set of nodes by mathcalN, the root node by R, and the probability of transitioning from node i to node j by p_ij. (If no arc exists, then p_ij = 0.) We define the set of successors of node i as i^+ = j in mathcalN p_ij 0.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Each node in the graph corresponds to a place at which the agent makes a decision, and we call moments in time at which the agent makes a decision stages. By convention, we try to draw policy graphs from left-to-right, with the stages as columns. 
There can be more than one node in a stage! Here's an example of a structure we call a Markovian policy graph:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"(Image: Markovian policy graph)","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Here each column represents a moment in time, the squiggly lines represent stochastic rainfall, and the rows represent the world in two discrete states: El Niño and La Niña. In the El Niño states, the distribution of the rainfall random variable is different to the distribution of the rainfall random variable in the La Niña states, and there is some switching probability between the two states that can be modelled by a Markov chain.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Moreover, policy graphs can have cycles! This allows them to model infinite horizon problems. Here's another example, taken from the paper Dowson (2020):","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"(Image: POWDer policy graph)","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The columns represent time, and the rows represent different states of the world. In this case, the rows represent different prices that milk can be sold for at the end of each year. The squiggly lines denote a multivariate random variable that models the weekly amount of rainfall that occurs.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"note: Note\nThe sum of probabilities on the outgoing arcs of node i can be less than 1, i.e., sumlimits_jin i^+ p_ij le 1. What does this mean? One interpretation is that the probability is a discount factor. Another interpretation is that there is an implicit \"zero\" node that we have not modeled, with p_i0 = 1 - sumlimits_jin i^+ p_ij. 
This zero node has C_0(x u omega) = 0, and 0^+ = varnothing.","category":"page"},{"location":"tutorial/first_steps/#More-notation","page":"An introduction to SDDP.jl","title":"More notation","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Recall that each node i has a decision rule u = pi_i(x omega), which is a function that maps the incoming state variable and observation of the random variable to a control u.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The set of decision rules, with one element for each node in the policy graph, is called a policy.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The goal of the agent is to find a policy that minimizes the expected cost of starting at the root node with some initial condition x_R, and proceeding from node to node along the probabilistic arcs until they reach a node with no outgoing arcs (or it reaches an implicit \"zero\" node).","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"min_pi mathbbE_i in R^+ omega in Omega_iV_i^pi(x_R omega)","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"where","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"V_i^pi(x omega) = C_i(x u omega) + mathbbE_j in i^+ varphi in Omega_jV_j(x^prime varphi)","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"where u = pi_i(x omega) in U_i(x omega), and x^prime = T_i(x u omega).","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The expectations are a bit complicated, but they are equivalent to:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"mathbbE_j in i^+ varphi in Omega_jV_j(x^prime varphi) = sumlimits_j in i^+ p_ij sumlimits_varphi in Omega_j p_varphiV_j(x^prime varphi)","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"An optimal policy is the set of decision rules that the agent can use to make decisions and achieve the smallest expected cost.","category":"page"},{"location":"tutorial/first_steps/#Assumptions","page":"An introduction to SDDP.jl","title":"Assumptions","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"warning: Warning\nThis section is important!","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The space of problems you can model with this framework is very large. Too large, in fact, for us to form tractable solution algorithms for! 
Stochastic dual dynamic programming requires the following assumptions in order to work:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Assumption 1: finite nodes","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"There is a finite number of nodes in mathcalN.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Assumption 2: finite random variables","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The sample space Omega_i is finite and discrete for each node iinmathcalN.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Assumption 3: convex problems","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Given fixed omega, C_i(x u omega) is a convex function, T_i(x u omega) is linear, and U_i(x u omega) is a non-empty, bounded convex set with respect to x and u.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Assumption 4: no infinite loops","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"For all loops in the policy graph, the product of the arc transition probabilities around the loop is strictly less than 1.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Assumption 5: relatively complete recourse","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"This is a technical but important assumption. See Relatively complete recourse for more details.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"note: Note\nSDDP.jl relaxes assumption (3) to allow for integer state and control variables, but we won't go into the details here. Assumption (4) essentially means that we obtain a discounted-cost solution for infinite-horizon problems, instead of an average-cost solution; see Dowson (2020) for details.","category":"page"},{"location":"tutorial/first_steps/#Dynamic-programming-and-subproblems","page":"An introduction to SDDP.jl","title":"Dynamic programming and subproblems","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Now that we have formulated our problem, we need some ways of computing optimal decision rules. 
One way is to just use a heuristic like \"choose a control randomly from the set of feasible controls.\" However, such a policy is unlikely to be optimal.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"A better way of obtaining an optimal policy is to use Bellman's principle of optimality, a.k.a Dynamic Programming, and define a recursive subproblem as follows:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"beginaligned\nV_i(x omega) = minlimits_barx x^prime u C_i(barx u omega) + mathbbE_j in i^+ varphi in Omega_jV_j(x^prime varphi)\n x^prime = T_i(barx u omega) \n u in U_i(barx omega) \n barx = x\nendaligned","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Our decision rule, pi_i(x omega), solves this optimization problem and returns a u^* corresponding to an optimal solution.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"note: Note\nWe add barx as a decision variable, along with the fishing constraint barx = x for two reasons: it makes it obvious that formulating a problem with x times u results in a bilinear program instead of a linear program (see Assumption 3), and it simplifies the implementation of the SDDP algorithm.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"These subproblems are very difficult to solve exactly, because they involve recursive optimization problems with lots of nested expectations.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Therefore, instead of solving them exactly, SDDP.jl works by iteratively approximating the expectation term of each subproblem, which is also called the cost-to-go term. For now, you don't need to understand the details, other than that there is a nasty cost-to-go term that we deal with behind-the-scenes.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The subproblem view of a multistage stochastic program is also important, because it provides a convenient way of communicating the different parts of the broader problem, and it is how we will communicate the problem to SDDP.jl. All we need to do is drop the cost-to-go term and fishing constraint, and define a new subproblem SP as:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"beginaligned\ntextttSP_i(x omega) minlimits_barx x^prime u C_i(barx u omega) \n x^prime = T_i(barx u omega) \n u in U_i(barx omega)\nendaligned","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"note: Note\nWhen we talk about formulating a subproblem with SDDP.jl, this is the formulation we mean.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"We've retained the transition function and uncertainty set because they help to motivate the different components of the subproblem. However, in general, the subproblem can be more general. 
A better (less restrictive) representation might be:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"beginaligned\ntextttSP_i(x omega) minlimits_barx x^prime u C_i(barx x^prime u omega) \n (barx x^prime u) in mathcalX_i(omega)\nendaligned","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Note that the outgoing state variable can appear in the objective, and we can add constraints involving the incoming and outgoing state variables. It should be obvious how to map between the two representations.","category":"page"},{"location":"tutorial/first_steps/#Example:-hydro-thermal-scheduling","page":"An introduction to SDDP.jl","title":"Example: hydro-thermal scheduling","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Hydrothermal scheduling is the most common application of stochastic dual dynamic programming. To illustrate some of the basic functionality of SDDP.jl, we implement a very simple model of the hydrothermal scheduling problem.","category":"page"},{"location":"tutorial/first_steps/#Problem-statement","page":"An introduction to SDDP.jl","title":"Problem statement","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"We consider the problem of scheduling electrical generation over three weeks in order to meet a known demand of 150 MWh in each week.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"There are two generators: a thermal generator, and a hydro generator. In each week, the agent needs to decide how much energy to generate from thermal, and how much energy to generate from hydro.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The thermal generator has a short-run marginal cost of $50/MWh in the first stage, $100/MWh in the second stage, and $150/MWh in the third stage.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The hydro generator has a short-run marginal cost of $0/MWh.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The hydro generator draws water from a reservoir which has a maximum capacity of 200 MWh. (Although water is usually measured in m³, we measure it in the energy-equivalent MWh to simplify things. In practice, there is a conversion function between m³ flowing through the turbine and MWh.) At the start of the first time period, the reservoir is full.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"In addition to the ability to generate electricity by passing water through the hydroelectric turbine, the hydro generator can also spill water down a spillway (bypassing the turbine) in order to prevent the water from over-topping the dam. 
We assume that there is no cost of spillage.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"In addition to water leaving the reservoir, water that flows into the reservoir through rainfall or rivers is referred to as inflows. These inflows are uncertain, and are the cause of the main trade-off in hydro-thermal scheduling: the desire to use water now to generate cheap electricity, against the risk that future inflows will be low, leading to blackouts or expensive thermal generation.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"For our simple model, we assume that the inflows can be modelled by a discrete distribution with the three outcomes given in the following table:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"ω 0 50 100\nP(ω) 1/3 1/3 1/3","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The value of the noise (the random variable) is observed by the agent at the start of each stage. This makes the problem a wait-and-see or hazard-decision formulation.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The goal of the agent is to minimize the expected cost of generation over the three weeks.","category":"page"},{"location":"tutorial/first_steps/#Formulating-the-problem","page":"An introduction to SDDP.jl","title":"Formulating the problem","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Before going further, we need to load SDDP.jl:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"using SDDP","category":"page"},{"location":"tutorial/first_steps/#Graph-structure","page":"An introduction to SDDP.jl","title":"Graph structure","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"First, we need to identify the structure of the policy graph. From the problem statement, we want to model the problem over three weeks in weekly stages. Therefore, the policy graph is a linear graph with three stages:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"graph = SDDP.LinearGraph(3)","category":"page"},{"location":"tutorial/first_steps/#Building-the-subproblem","page":"An introduction to SDDP.jl","title":"Building the subproblem","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Next, we need to construct the associated subproblem for each node in graph. To do so, we need to provide SDDP.jl a function which takes two arguments. The first is subproblem::Model, which is an empty JuMP model. The second is node, which is the name of each node in the policy graph. If the graph is linear, SDDP defaults to naming the nodes using the integers in 1:T. 
Here's an example that we are going to flesh out over the next few paragraphs:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"function subproblem_builder(subproblem::Model, node::Int)\n    # ... stuff to go here ...\n    return subproblem\nend","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"warning: Warning\nIf you use a different type of graph, node may be a type different to Int. For example, in SDDP.MarkovianGraph, node is a Tuple{Int,Int}.","category":"page"},{"location":"tutorial/first_steps/#State-variables","page":"An introduction to SDDP.jl","title":"State variables","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The first part of the subproblem we need to identify is the state variables. Since we only have one reservoir, there is only one state variable, volume, the volume of water in the reservoir [MWh].","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The volume had bounds of [0, 200], and the reservoir was full at the start of time, so x_R = 200.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"We add state variables to our subproblem using JuMP's @variable macro. However, in addition to the usual syntax, we also pass SDDP.State, and we need to provide the initial value (x_R) using the initial_value keyword.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"function subproblem_builder(subproblem::Model, node::Int)\n    # State variables\n    @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n    return subproblem\nend","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The syntax for adding a state variable is a little obtuse, because volume is not a single JuMP variable. Instead, volume is a struct with two fields, .in and .out, corresponding to the incoming and outgoing state variables respectively.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"note: Note\nWe don't need to add the fishing constraint barx = x; SDDP.jl does this automatically.","category":"page"},{"location":"tutorial/first_steps/#Control-variables","page":"An introduction to SDDP.jl","title":"Control variables","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The next part of the subproblem we need to identify is the control variables. 
The control variables for our problem are:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"thermal_generation: the quantity of energy generated from thermal [MWh/week]\nhydro_generation: the quantity of energy generated from hydro [MWh/week]\nhydro_spill: the volume of water spilled from the reservoir in each week [MWh/week]","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Each of these variables is non-negative.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"We add control variables to our subproblem as normal JuMP variables, using @variable or @variables:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"function subproblem_builder(subproblem::Model, node::Int)\n # State variables\n @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n # Control variables\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n end)\n return subproblem\nend","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"tip: Tip\nModeling is an art, and a tricky part of that art is figuring out which variables are state variables, and which are control variables. A good rule is: if you need a value of a control variable in some future node to make a decision, it is a state variable instead.","category":"page"},{"location":"tutorial/first_steps/#Random-variables","page":"An introduction to SDDP.jl","title":"Random variables","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The next step is to identify any random variables. In our example, we had","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"inflow: the quantity of water that flows into the reservoir each week [MWh/week]","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"To add an uncertain variable to the model, we create a new JuMP variable inflow, and then call the function SDDP.parameterize. 
The SDDP.parameterize function takes three arguments: the subproblem, a vector of realizations, and a corresponding vector of probabilities.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"function subproblem_builder(subproblem::Model, node::Int)\n # State variables\n @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n # Control variables\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n end)\n # Random variables\n @variable(subproblem, inflow)\n Ω = [0.0, 50.0, 100.0]\n P = [1 / 3, 1 / 3, 1 / 3]\n SDDP.parameterize(subproblem, Ω, P) do ω\n return JuMP.fix(inflow, ω)\n end\n return subproblem\nend","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Note how we use the JuMP function JuMP.fix to set the value of the inflow variable to ω.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"warning: Warning\nSDDP.parameterize can only be called once in each subproblem definition! If your random variable is multi-variate, read Add multi-dimensional noise terms.","category":"page"},{"location":"tutorial/first_steps/#Transition-function-and-constraints","page":"An introduction to SDDP.jl","title":"Transition function and constraints","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Now that we've identified our variables, we can define the transition function and the constraints.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"For our problem, the state variable is the volume of water in the reservoir. The volume of water decreases in response to water being used for hydro generation and spillage. So the transition function is: volume.out = volume.in - hydro_generation - hydro_spill + inflow. 
(Note how we use volume.in and volume.out to refer to the incoming and outgoing state variables.)","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"There is also a constraint that the total generation must sum to 150 MWh.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Both the transition function and any additional constraint are added using JuMP's @constraint and @constraints macro.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"function subproblem_builder(subproblem::Model, node::Int)\n # State variables\n @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n # Control variables\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n end)\n # Random variables\n @variable(subproblem, inflow)\n Ω = [0.0, 50.0, 100.0]\n P = [1 / 3, 1 / 3, 1 / 3]\n SDDP.parameterize(subproblem, Ω, P) do ω\n return JuMP.fix(inflow, ω)\n end\n # Transition function and constraints\n @constraints(\n subproblem,\n begin\n volume.out == volume.in - hydro_generation - hydro_spill + inflow\n demand_constraint, hydro_generation + thermal_generation == 150\n end\n )\n return subproblem\nend","category":"page"},{"location":"tutorial/first_steps/#Objective-function","page":"An introduction to SDDP.jl","title":"Objective function","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Finally, we need to add an objective function using @stageobjective. The objective of the agent is to minimize the cost of thermal generation. 
This is complicated by a fuel cost that depends on the node.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"One possibility is to use an if statement on node to define the correct objective:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"function subproblem_builder(subproblem::Model, node::Int)\n # State variables\n @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n # Control variables\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n end)\n # Random variables\n @variable(subproblem, inflow)\n Ω = [0.0, 50.0, 100.0]\n P = [1 / 3, 1 / 3, 1 / 3]\n SDDP.parameterize(subproblem, Ω, P) do ω\n return JuMP.fix(inflow, ω)\n end\n # Transition function and constraints\n @constraints(\n subproblem,\n begin\n volume.out == volume.in - hydro_generation - hydro_spill + inflow\n demand_constraint, hydro_generation + thermal_generation == 150\n end\n )\n # Stage-objective\n if node == 1\n @stageobjective(subproblem, 50 * thermal_generation)\n elseif node == 2\n @stageobjective(subproblem, 100 * thermal_generation)\n else\n @assert node == 3\n @stageobjective(subproblem, 150 * thermal_generation)\n end\n return subproblem\nend","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"A second possibility is to use an array of fuel costs, and use node to index the correct value:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"function subproblem_builder(subproblem::Model, node::Int)\n # State variables\n @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n # Control variables\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n end)\n # Random variables\n @variable(subproblem, inflow)\n Ω = [0.0, 50.0, 100.0]\n P = [1 / 3, 1 / 3, 1 / 3]\n SDDP.parameterize(subproblem, Ω, P) do ω\n return JuMP.fix(inflow, ω)\n end\n # Transition function and constraints\n @constraints(\n subproblem,\n begin\n volume.out == volume.in - hydro_generation - hydro_spill + inflow\n demand_constraint, hydro_generation + thermal_generation == 150\n end\n )\n # Stage-objective\n fuel_cost = [50, 100, 150]\n @stageobjective(subproblem, fuel_cost[node] * thermal_generation)\n return subproblem\nend","category":"page"},{"location":"tutorial/first_steps/#Constructing-the-model","page":"An introduction to SDDP.jl","title":"Constructing the model","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Now that we've written our subproblem, we need to construct the full model. For that, we're going to need a linear solver. Let's choose HiGHS:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"using HiGHS","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"warning: Warning\nIn larger problems, you should use a more robust commercial LP solver like Gurobi. 
Read Words of warning for more details.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Then, we can create a full model using SDDP.PolicyGraph, passing our subproblem_builder function as the first argument, and our graph as the second:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"model = SDDP.PolicyGraph(\n subproblem_builder,\n graph;\n sense = :Min,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n)","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"sense: the optimization sense. Must be :Min or :Max.\nlower_bound: you must supply a valid bound on the objective. For our problem, we know that we cannot incur a negative cost so $0 is a valid lower bound.\noptimizer: This is borrowed directly from JuMP's Model constructor: Model(HiGHS.Optimizer)","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Because linear policy graphs are the most commonly used structure, we can use SDDP.LinearPolicyGraph instead of passing SDDP.LinearGraph(3) to SDDP.PolicyGraph.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"model = SDDP.LinearPolicyGraph(\n subproblem_builder;\n stages = 3,\n sense = :Min,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n)","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"There is also the option is to use Julia's do syntax to avoid needing to define a subproblem_builder function separately:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"model = SDDP.LinearPolicyGraph(\n stages = 3,\n sense = :Min,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n) do subproblem, node\n # State variables\n @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n # Control variables\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n end)\n # Random variables\n @variable(subproblem, inflow)\n Ω = [0.0, 50.0, 100.0]\n P = [1 / 3, 1 / 3, 1 / 3]\n SDDP.parameterize(subproblem, Ω, P) do ω\n return JuMP.fix(inflow, ω)\n end\n # Transition function and constraints\n @constraints(\n subproblem,\n begin\n volume.out == volume.in - hydro_generation - hydro_spill + inflow\n demand_constraint, hydro_generation + thermal_generation == 150\n end\n )\n # Stage-objective\n if node == 1\n @stageobjective(subproblem, 50 * thermal_generation)\n elseif node == 2\n @stageobjective(subproblem, 100 * thermal_generation)\n else\n @assert node == 3\n @stageobjective(subproblem, 150 * thermal_generation)\n end\nend","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"info: Info\nJulia's do syntax is just a different way of passing an anonymous function inner to some function outer which takes inner as the first argument. 
For example, given:outer(inner::Function, x, y) = inner(x, y)thenouter(1, 2) do x, y\n return x^2 + y^2\nendis equivalent to:outer((x, y) -> x^2 + y^2, 1, 2)For our purpose, inner is subproblem_builder, and outer is SDDP.PolicyGraph.","category":"page"},{"location":"tutorial/first_steps/#Training-a-policy","page":"An introduction to SDDP.jl","title":"Training a policy","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Now we have a model, which is a description of the policy graph, we need to train a policy. Models can be trained using the SDDP.train function. It accepts a number of keyword arguments. iteration_limit terminates the training after the provided number of iterations.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"SDDP.train(model; iteration_limit = 10)","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"There's a lot going on in this printout! Let's break it down.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The first section, \"problem,\" gives some problem statistics. In this example there are 3 nodes, 1 state variable, and 27 scenarios (3^3). We haven't solved this problem before so there are no existing cuts.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The \"options\" section lists some options we are using to solve the problem. For more information on the numerical stability report, read the Numerical stability report section.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The \"subproblem structure\" section also needs explaining. This looks at all of the nodes in the policy graph and reports the minimum and maximum number of variables and each constraint type in the corresponding subproblem. In this case each subproblem has 7 variables and various numbers of different constraint types. Note that the exact numbers may not correspond to the formulation as you wrote it, because SDDP.jl adds some extra variables for the cost-to-go function.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Then comes the iteration log, which is the main part of the printout. It has the following columns:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"iteration: the SDDP iteration\nsimulation: the cost of the single forward pass simulation for that iteration. This value is stochastic and is not guaranteed to improve over time. However, it's useful to check that the units are reasonable, and that it is not deterministic if you intended for the problem to be stochastic, etc.\nbound: this is a lower bound (upper if maximizing) for the value of the optimal policy. This should be monotonically improving (increasing if minimizing, decreasing if maximizing).\ntime (s): the total number of seconds spent solving so far\nsolves: the total number of subproblem solves to date. This can be very large!\npid: the ID of the processor used to solve that iteration. 
This should be 1 unless you are using parallel computation.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"In addition, if the first character of a line is †, then SDDP.jl experienced numerical issues during the solve, but successfully recovered.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The printout finishes with some summary statistics:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"status: why did the solver stop?\ntotal time (s), best bound, and total solves are the values from the last iteration of the solve.\nsimulation ci: a confidence interval that estimates the quality of the policy from the Simulation column.\nnumeric issues: the number of iterations that experienced numerical issues.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"warning: Warning\nThe simulation ci result can be misleading if you run a small number of iterations, or if the initial simulations are very bad. On a more technical note, it is an in-sample simulation, which may not reflect the true performance of the policy. See Obtaining bounds for more details.","category":"page"},{"location":"tutorial/first_steps/#Obtaining-the-decision-rule","page":"An introduction to SDDP.jl","title":"Obtaining the decision rule","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"After training a policy, we can create a decision rule using SDDP.DecisionRule:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"rule = SDDP.DecisionRule(model; node = 1)","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Then, to evaluate the decision rule, we use SDDP.evaluate:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"solution = SDDP.evaluate(\n rule;\n incoming_state = Dict(:volume => 150.0),\n noise = 50.0,\n controls_to_record = [:hydro_generation, :thermal_generation],\n)","category":"page"},{"location":"tutorial/first_steps/#Simulating-the-policy","page":"An introduction to SDDP.jl","title":"Simulating the policy","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Once you have a trained policy, you can also simulate it using SDDP.simulate. The return value from simulate is a vector with one element for each replication. Each element is itself a vector, with one element for each stage. 
Each element, corresponding to a particular stage in a particular replication, is a dictionary that records information from the simulation.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"simulations = SDDP.simulate(\n # The trained model to simulate.\n model,\n # The number of replications.\n 100,\n # A list of names to record the values of.\n [:volume, :thermal_generation, :hydro_generation, :hydro_spill],\n)\n\nreplication = 1\nstage = 2\nsimulations[replication][stage]","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Ignore many of the entries for now; they will be relevant later.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"One element of interest is :volume.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"outgoing_volume = map(simulations[1]) do node\n return node[:volume].out\nend","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Another is :thermal_generation.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"thermal_generation = map(simulations[1]) do node\n return node[:thermal_generation]\nend","category":"page"},{"location":"tutorial/first_steps/#Obtaining-bounds","page":"An introduction to SDDP.jl","title":"Obtaining bounds","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Because the optimal policy is stochastic, one common approach to quantify the quality of the policy is to construct a confidence interval for the expected cost by summing the stage objectives along each simulation.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"objectives = map(simulations) do simulation\n return sum(stage[:stage_objective] for stage in simulation)\nend\n\nμ, ci = SDDP.confidence_interval(objectives)\nprintln(\"Confidence interval: \", μ, \" ± \", ci)","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"This confidence interval is an estimate for an upper bound of the policy's quality. We can calculate the lower bound using SDDP.calculate_bound.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"println(\"Lower bound: \", SDDP.calculate_bound(model))","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"tip: Tip\nThe upper- and lower-bounds are reversed if maximizing, i.e., SDDP.calculate_bound. returns an upper bound.","category":"page"},{"location":"tutorial/first_steps/#Custom-recorders","page":"An introduction to SDDP.jl","title":"Custom recorders","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"In addition to simulating the primal values of variables, we can also pass custom recorder functions. 
Each of these functions takes one argument, the JuMP subproblem corresponding to each node. This function gets called after we have solved each node as we traverse the policy graph in the simulation.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"For example, the dual of the demand constraint (which we named demand_constraint) corresponds to the price we should charge for electricity, since it represents the cost of each additional unit of demand. To calculate this, we can go:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"simulations = SDDP.simulate(\n model,\n 1, ## Perform a single simulation\n custom_recorders = Dict{Symbol,Function}(\n :price => (sp::JuMP.Model) -> JuMP.dual(sp[:demand_constraint]),\n ),\n)\n\nprices = map(simulations[1]) do node\n return node[:price]\nend","category":"page"},{"location":"tutorial/first_steps/#Extracting-the-marginal-water-values","page":"An introduction to SDDP.jl","title":"Extracting the marginal water values","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Finally, we can use SDDP.ValueFunction and SDDP.evaluate to obtain and evaluate the value function at different points in the state-space.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"note: Note\nBy \"value function\" we mean mathbbE_j in i^+ varphi in Omega_jV_j(x^prime varphi), note the function V_i(x omega).","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"First, we construct a value function from the first subproblem:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"V = SDDP.ValueFunction(model; node = 1)","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Then we can evaluate V at a point:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"cost, price = SDDP.evaluate(V, Dict(\"volume\" => 10))","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"This returns the cost-to-go (cost), and the gradient of the cost-to-go function with respect to each state variable. 
Note that since we are minimizing, the price has a negative sign: each additional unit of water leads to a decrease in the expected long-run cost.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/StochDynamicProgramming.jl_stock/","page":"StochDynamicProgramming: the stock problem","title":"StochDynamicProgramming: the stock problem","text":"EditURL = \"StochDynamicProgramming.jl_stock.jl\"","category":"page"},{"location":"examples/StochDynamicProgramming.jl_stock/#StochDynamicProgramming:-the-stock-problem","page":"StochDynamicProgramming: the stock problem","title":"StochDynamicProgramming: the stock problem","text":"","category":"section"},{"location":"examples/StochDynamicProgramming.jl_stock/","page":"StochDynamicProgramming: the stock problem","title":"StochDynamicProgramming: the stock problem","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/StochDynamicProgramming.jl_stock/","page":"StochDynamicProgramming: the stock problem","title":"StochDynamicProgramming: the stock problem","text":"This example comes from StochDynamicProgramming.jl.","category":"page"},{"location":"examples/StochDynamicProgramming.jl_stock/","page":"StochDynamicProgramming: the stock problem","title":"StochDynamicProgramming: the stock problem","text":"using SDDP, HiGHS, Test\n\nfunction stock_example()\n model = SDDP.PolicyGraph(\n SDDP.LinearGraph(5),\n lower_bound = -2,\n optimizer = HiGHS.Optimizer,\n ) do sp, stage\n @variable(sp, 0 <= state <= 1, SDDP.State, initial_value = 0.5)\n @variable(sp, 0 <= control <= 0.5)\n @variable(sp, ξ)\n @constraint(sp, state.out == state.in - control + ξ)\n SDDP.parameterize(sp, 0.0:1/30:0.3) do ω\n return JuMP.fix(ξ, ω)\n end\n @stageobjective(sp, (sin(3 * stage) - 1) * control)\n end\n SDDP.train(model; log_frequency = 10)\n @test SDDP.calculate_bound(model) ≈ -1.471 atol = 0.001\n simulation_results = SDDP.simulate(model, 1_000)\n @test length(simulation_results) == 1_000\n μ = SDDP.Statistics.mean(\n sum(data[:stage_objective] for data in simulation) for\n simulation in simulation_results\n )\n @test μ ≈ -1.471 atol = 0.05\n return\nend\n\nstock_example()","category":"page"},{"location":"examples/StochDynamicProgramming.jl_stock/","page":"StochDynamicProgramming: the stock problem","title":"StochDynamicProgramming: the stock problem","text":"","category":"page"},{"location":"examples/StochDynamicProgramming.jl_stock/","page":"StochDynamicProgramming: the stock problem","title":"StochDynamicProgramming: the stock problem","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/agriculture_mccardle_farm/","page":"The farm planning problem","title":"The farm planning problem","text":"EditURL = \"agriculture_mccardle_farm.jl\"","category":"page"},{"location":"examples/agriculture_mccardle_farm/#The-farm-planning-problem","page":"The farm planning problem","title":"The farm planning problem","text":"","category":"section"},{"location":"examples/agriculture_mccardle_farm/","page":"The farm planning problem","title":"The farm planning problem","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/agriculture_mccardle_farm/","page":"The farm planning problem","title":"The farm planning 
problem","text":"There are four stages. The first stage is a deterministic planning stage. The next three are wait-and-see operational stages. The uncertainty in the three operational stages is a Markov chain for weather. There are three Markov states: dry, normal, and wet.","category":"page"},{"location":"examples/agriculture_mccardle_farm/","page":"The farm planning problem","title":"The farm planning problem","text":"Inspired by R. McCardle, Farm management optimization. Masters thesis, University of Louisville, Louisville, Kentucky, United States of America (2009).","category":"page"},{"location":"examples/agriculture_mccardle_farm/","page":"The farm planning problem","title":"The farm planning problem","text":"All data, including short variable names, is taken from that thesis.","category":"page"},{"location":"examples/agriculture_mccardle_farm/","page":"The farm planning problem","title":"The farm planning problem","text":"using SDDP, HiGHS, Test\n\nfunction test_mccardle_farm_model()\n S = [ # cutting, stage\n 0 1 2\n 0 0 1\n 0 0 0\n ]\n t = [60, 60, 245] # days in period\n D = [210, 210, 858] # demand\n q = [ # selling price per bale\n [4.5 4.5 4.5; 4.5 4.5 4.5; 4.5 4.5 4.5],\n [5.5 5.5 5.5; 5.5 5.5 5.5; 5.5 5.5 5.5],\n [6.5 6.5 6.5; 6.5 6.5 6.5; 6.5 6.5 6.5],\n ]\n b = [ # predicted yield (bales/acres) from cutting i in weather j.\n 30 75 37.5\n 15 37.5 18.25\n 7.5 18.75 9.325\n ]\n w = 3000 # max storage\n C = [50 50 50; 50 50 50; 50 50 50] # cost to grow hay\n r = [ # Cost per bale of hay from cutting i during weather condition j.\n [5 5 5; 5 5 5; 5 5 5],\n [6 6 6; 6 6 6; 6 6 6],\n [7 7 7; 7 7 7; 7 7 7],\n ]\n M = 60.0 # max acreage for planting\n H = 0.0 # initial inventory\n V = [0.05, 0.05, 0.05] # inventory cost\n L = 3000.0 # max demand for hay\n\n graph = SDDP.MarkovianGraph([\n ones(Float64, 1, 1),\n [0.14 0.69 0.17],\n [0.14 0.69 0.17; 0.14 0.69 0.17; 0.14 0.69 0.17],\n [0.14 0.69 0.17; 0.14 0.69 0.17; 0.14 0.69 0.17],\n ])\n\n model = SDDP.PolicyGraph(\n graph,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n ) do subproblem, index\n stage, weather = index\n # ===================== State Variables =====================\n # Area planted.\n @variable(subproblem, 0 <= acres <= M, SDDP.State, initial_value = M)\n @variable(\n subproblem,\n bales[i = 1:3] >= 0,\n SDDP.State,\n initial_value = (i == 1 ? 
H : 0)\n )\n # ===================== Variables =====================\n @variables(subproblem, begin\n buy[1:3] >= 0 # Quantity of bales to buy from each cutting.\n sell[1:3] >= 0 # Quantity of bales to sell from each cutting.\n eat[1:3] >= 0 # Quantity of bales to eat from each cutting.\n pen_p[1:3] >= 0 # Penalties\n pen_n[1:3] >= 0 # Penalties\n end)\n # ===================== Constraints =====================\n if stage == 1\n @constraint(subproblem, acres.out <= acres.in)\n @constraint(subproblem, [i = 1:3], bales[i].in == bales[i].out)\n else\n @expression(\n subproblem,\n cut_ex[c = 1:3],\n bales[c].in + buy[c] - eat[c] - sell[c] + pen_p[c] - pen_n[c]\n )\n @constraints(\n subproblem,\n begin\n # Cannot plant more land than previously cropped.\n acres.out <= acres.in\n # In each stage we need to meet demand.\n sum(eat) >= D[stage-1]\n # We can buy and sell other cuttings.\n bales[stage-1].out ==\n cut_ex[stage-1] + acres.in * b[stage-1, weather]\n [c = 1:3; c != stage - 1], bales[c].out == cut_ex[c]\n # There is some maximum storage.\n sum(bales[i].out for i in 1:3) <= w\n # We can only sell what is in storage.\n [c = 1:3], sell[c] <= bales[c].in\n # Maximum sales quantity.\n sum(sell) <= L\n end\n )\n end\n # ===================== Stage objective =====================\n if stage == 1\n @stageobjective(subproblem, 0.0)\n else\n @stageobjective(\n subproblem,\n 1000 * (sum(pen_p) + sum(pen_n)) +\n # cost of growing\n C[stage-1, weather] * acres.in +\n sum(\n # inventory cost\n V[stage-1] * bales[cutting].in * t[stage-1] +\n # purchase cost\n r[cutting][stage-1, weather] * buy[cutting] +\n # feed cost\n S[cutting, stage-1] * eat[cutting] -\n # sell reward\n q[cutting][stage-1, weather] * sell[cutting] for\n cutting in 1:3\n )\n )\n end\n return\n end\n SDDP.train(model)\n @test SDDP.termination_status(model) == :simulation_stopping\n @test SDDP.calculate_bound(model) ≈ 4074.1391 atol = 1e-5\nend\n\ntest_mccardle_farm_model()","category":"page"},{"location":"examples/agriculture_mccardle_farm/","page":"The farm planning problem","title":"The farm planning problem","text":"","category":"page"},{"location":"examples/agriculture_mccardle_farm/","page":"The farm planning problem","title":"The farm planning problem","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/vehicle_location/","page":"Vehicle location","title":"Vehicle location","text":"EditURL = \"vehicle_location.jl\"","category":"page"},{"location":"examples/vehicle_location/#Vehicle-location","page":"Vehicle location","title":"Vehicle location","text":"","category":"section"},{"location":"examples/vehicle_location/","page":"Vehicle location","title":"Vehicle location","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/vehicle_location/","page":"Vehicle location","title":"Vehicle location","text":"This problem is a version of the Ambulance dispatch problem. A hospital is located at 0 on the number line that stretches from 0 to 100. Ambulance bases are located at points 20, 40, 60, 80, and 100. When not responding to a call, Ambulances must be located at a base, or the hospital. 
In this example there are three ambulances.","category":"page"},{"location":"examples/vehicle_location/","page":"Vehicle location","title":"Vehicle location","text":"Example location:","category":"page"},{"location":"examples/vehicle_location/","page":"Vehicle location","title":"Vehicle location","text":"H B B B B B\n0 ---- 20 ---- 40 ---- 60 ---- 80 ---- 100","category":"page"},{"location":"examples/vehicle_location/","page":"Vehicle location","title":"Vehicle location","text":"Each stage, a call comes in from somewhere on the number line. The agent must decide which ambulance to dispatch. They pay the cost of twice the driving distance. If an ambulance is not dispatched in a stage, the ambulance can be relocated to a different base in preparation for future calls. This incurs a cost of the driving distance.","category":"page"},{"location":"examples/vehicle_location/","page":"Vehicle location","title":"Vehicle location","text":"using SDDP\nimport HiGHS\nimport Test\n\nfunction vehicle_location_model(duality_handler)\n hospital_location = 0\n bases = vcat(hospital_location, [20, 40, 60, 80, 100])\n vehicles = [1, 2, 3]\n requests = 0:10:100\n shift_cost(src, dest) = abs(src - dest)\n function dispatch_cost(base, request)\n return 2 * (abs(request - hospital_location) + abs(request - base))\n end\n # Initial state of emergency vehicles at bases. All ambulances start at the\n # hospital.\n initial_state(b, v) = b == hospital_location ? 1.0 : 0.0\n model = SDDP.LinearPolicyGraph(\n stages = 10,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n ) do sp, t\n # Current location of each vehicle at each base.\n @variable(\n sp,\n 0 <= location[b = bases, v = vehicles] <= 1,\n SDDP.State,\n initial_value = initial_state(b, v)\n )\n @variables(sp, begin\n # Which vehicle is dispatched?\n 0 <= dispatch[bases, vehicles] <= 1, Bin\n # Shifting vehicles between bases: [src, dest, vehicle]\n 0 <= shift[bases, bases, vehicles] <= 1, Bin\n end)\n # Flow of vehicles in and out of bases:\n @expression(\n sp,\n base_balance[b in bases, v in vehicles],\n location[b, v].in - dispatch[b, v] - sum(shift[b, :, v]) +\n sum(shift[:, b, v])\n )\n @constraints(\n sp,\n begin\n # Only one vehicle dispatched to call.\n sum(dispatch) == 1\n # Can only dispatch vehicle from base if vehicle is at that base.\n [b in bases, v in vehicles],\n dispatch[b, v] <= location[b, v].in\n # Can only shift vehicle if vehicle is at that src base.\n [b in bases, v in vehicles],\n sum(shift[b, :, v]) <= location[b, v].in\n # Can only shift vehicle if vehicle is not being dispatched.\n [b in bases, v in vehicles],\n sum(shift[b, :, v]) + dispatch[b, v] <= 1\n # Can't shift to same base.\n [b in bases, v in vehicles], shift[b, b, v] == 0\n # Update states for non-home/non-hospital bases.\n [b in bases[2:end], v in vehicles],\n location[b, v].out == base_balance[b, v]\n # Update states for home/hospital bases.\n [v in vehicles],\n location[hospital_location, v].out ==\n base_balance[hospital_location, v] + sum(dispatch[:, v])\n end\n )\n SDDP.parameterize(sp, requests) do request\n @stageobjective(\n sp,\n sum(\n # Distance to travel from base to emergency and then to hospital.\n dispatch[b, v] * dispatch_cost(b, request) +\n # Distance travelled by vehicles relocating bases.\n sum(\n shift_cost(b, dest) * shift[b, dest, v] for\n dest in bases\n ) for b in bases, v in vehicles\n )\n )\n end\n end\n if get(ARGS, 1, \"\") == \"--write\"\n # Run `$ julia vehicle_location.jl --write` to update the benchmark\n # model directory\n model_dir = 
joinpath(@__DIR__, \"..\", \"..\", \"..\", \"benchmarks\", \"models\")\n SDDP.write_to_file(\n model,\n joinpath(model_dir, \"vehicle_location.sof.json.gz\");\n test_scenarios = 100,\n )\n exit(0)\n end\n SDDP.train(\n model;\n iteration_limit = 20,\n log_frequency = 10,\n cut_deletion_minimum = 100,\n duality_handler = duality_handler,\n )\n Test.@test SDDP.calculate_bound(model) >= 1000\n return\nend\n\n# TODO(odow): find out why this fails\n# vehicle_location_model(SDDP.ContinuousConicDuality())","category":"page"},{"location":"examples/vehicle_location/","page":"Vehicle location","title":"Vehicle location","text":"","category":"page"},{"location":"examples/vehicle_location/","page":"Vehicle location","title":"Vehicle location","text":"This page was generated using Literate.jl.","category":"page"},{"location":"guides/improve_computational_performance/#Improve-computational-performance","page":"Improve computational performance","title":"Improve computational performance","text":"","category":"section"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"SDDP is a computationally intensive algorithm. Here are some suggestions for how the computational performance can be improved.","category":"page"},{"location":"guides/improve_computational_performance/#Numerical-stability-(again)","page":"Improve computational performance","title":"Numerical stability (again)","text":"","category":"section"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"We've already discussed this in the Numerical stability section of Words of warning. But, it's so important that we're going to say it again: improving the problem scaling is one of the best ways to improve the numerical performance of your models.","category":"page"},{"location":"guides/improve_computational_performance/#Solver-selection","page":"Improve computational performance","title":"Solver selection","text":"","category":"section"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"The majority of the solution time is spent inside the low-level solvers. Choosing which solver (and the associated settings) correctly can lead to big speed-ups.","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"Choose a commercial solver.\nOptions include CPLEX, Gurobi, and Xpress. Using free solvers such as CLP and HiGHS isn't a viable approach for large problems.\nTry different solvers.","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"Even commercial solvers can have wildly different solution times. We've seen models on which CPLEX was 50% fast than Gurobi, and vice versa.","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"Experiment with different solver options.\nUsing the default settings is usually a good option. However, sometimes it can pay to change these. 
In particular, forcing solvers to use the dual simplex algorithm (e.g., Method=1 in Gurobi ) is usually a performance win.","category":"page"},{"location":"guides/improve_computational_performance/#Single-cut-vs.-multi-cut","page":"Improve computational performance","title":"Single-cut vs. multi-cut","text":"","category":"section"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"There are two competing ways that cuts can be created in SDDP: single-cut and multi-cut. By default, SDDP.jl uses the single-cut version of SDDP.","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"The performance of each method is problem-dependent. We recommend that you try both in order to see which one performs better. In general, the single-cut method works better when the number of realizations of the stagewise-independent random variable is large, whereas the multi-cut method works better on small problems. However, the multi-cut method can cause numerical stability problems, particularly if used in conjunction with objective or belief state variables.","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"You can switch between the methods by passing the relevant flag to cut_type in SDDP.train.","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"SDDP.train(model; cut_type = SDDP.SINGLE_CUT)\nSDDP.train(model; cut_type = SDDP.MULTI_CUT)","category":"page"},{"location":"guides/improve_computational_performance/#Parallelism","page":"Improve computational performance","title":"Parallelism","text":"","category":"section"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"SDDP.jl can take advantage of the parallel nature of modern computers to solve problems across multiple cores.","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"info: Info\nWe highly recommend that you read the Julia manual's section on parallel computing.","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"You can start Julia from a command line with N processors using the -p flag:","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"julia -p N","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"Alternatively, you can use the Distributed.jl package:","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"using Distributed\nDistributed.addprocs(N)","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational 
performance","text":"warning: Warning\nWorkers DON'T inherit their parent's Pkg environment. Therefore, if you started Julia with --project=/path/to/environment (or if you activated an environment from the REPL), you will need to put the following at the top of your script:using Distributed\n@everywhere begin\n import Pkg\n Pkg.activate(\"/path/to/environment\")\nend","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"Currently SDDP.jl supports to parallel schemes, SDDP.Serial and SDDP.Asynchronous. Instances of these parallel schemes should be passed to the parallel_scheme argument of SDDP.train and SDDP.simulate.","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"using SDDP, HiGHS\nmodel = SDDP.LinearPolicyGraph(\n stages = 2, lower_bound = 0, optimizer = HiGHS.Optimizer\n) do sp, t\n @variable(sp, x >= 0, SDDP.State, initial_value = 1)\n @stageobjective(sp, x.out)\nend\nSDDP.train(model; iteration_limit = 10, parallel_scheme = SDDP.Asynchronous())\nSDDP.simulate(model, 10; parallel_scheme = SDDP.Asynchronous())","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"There is a large overhead for using the asynchronous solver. Even if you choose asynchronous mode, SDDP.jl will start in serial mode while the initialization takes place. Therefore, in the log you will see that the initial iterations take place on the master thread (Proc. ID = 1), and it is only after while that the solve switches to full parallelism.","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"info: Info\nBecause of the large data communication requirements (all cuts have to be shared with all other cores), the solution time will not scale linearly with the number of cores.","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"info: Info\nGiven the same number of iterations, the policy obtained from asynchronous mode will be worse than the policy obtained from serial mode. However, the asynchronous solver can take significantly less time to compute the same number of iterations.","category":"page"},{"location":"guides/improve_computational_performance/#Data-movement","page":"Improve computational performance","title":"Data movement","text":"","category":"section"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"By default, data defined on the master process is not made available to the workers. 
Therefore, a model like the following:","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"data = 1\nmodel = SDDP.LinearPolicyGraph(stages = 2, lower_bound = 0) do sp, t\n @variable(sp, x >= 0, SDDP.State, initial_value = data)\n @stageobjective(sp, x.out)\nend","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"will result in an UndefVarError error like UndefVarError: data not defined.","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"There are three solutions for this problem.","category":"page"},{"location":"guides/improve_computational_performance/#Option-1:-declare-data-inside-the-build-function","page":"Improve computational performance","title":"Option 1: declare data inside the build function","text":"","category":"section"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"model = SDDP.LinearPolicyGraph(stages = 2) do sp, t\n data = 1\n @variable(sp, x >= 0, SDDP.State, initial_value = 1)\n @stageobjective(sp, x)\nend","category":"page"},{"location":"guides/improve_computational_performance/#Option-2:-use-@everywhere","page":"Improve computational performance","title":"Option 2: use @everywhere","text":"","category":"section"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"@everywhere begin\n data = 1\nend\nmodel = SDDP.LinearPolicyGraph(stages = 2) do sp, t\n @variable(sp, x >= 0, SDDP.State, initial_value = 1)\n @stageobjective(sp, x)\nend","category":"page"},{"location":"guides/improve_computational_performance/#Option-3:-build-the-model-in-a-function","page":"Improve computational performance","title":"Option 3: build the model in a function","text":"","category":"section"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"function build_model()\n data = 1\n return SDDP.LinearPolicyGraph(stages = 2) do sp, t\n @variable(sp, x >= 0, SDDP.State, initial_value = 1)\n @stageobjective(sp, x)\n end\nend\n\nmodel = build_model()","category":"page"},{"location":"guides/improve_computational_performance/#Initialization-hooks","page":"Improve computational performance","title":"Initialization hooks","text":"","category":"section"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"warning: Warning\nThis is important if you use Gurobi!","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"SDDP.Asynchronous accepts a pre-processing hook that is run on each worker process before the model is solved. The most useful situation is for solvers than need an initialization step. A good example is Gurobi, which can share an environment amongst all models on a worker. 
Notably, this environment cannot be shared amongst workers, so defining one environment at the top of a script will fail!","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"To initialize a new environment on each worker, use the following:","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"SDDP.train(\n model;\n parallel_scheme = SDDP.Asynchronous() do m::SDDP.PolicyGraph\n env = Gurobi.Env()\n set_optimizer(m, () -> Gurobi.Optimizer(env))\n end,\n)","category":"page"},{"location":"examples/FAST_quickstart/","page":"FAST: the quickstart problem","title":"FAST: the quickstart problem","text":"EditURL = \"FAST_quickstart.jl\"","category":"page"},{"location":"examples/FAST_quickstart/#FAST:-the-quickstart-problem","page":"FAST: the quickstart problem","title":"FAST: the quickstart problem","text":"","category":"section"},{"location":"examples/FAST_quickstart/","page":"FAST: the quickstart problem","title":"FAST: the quickstart problem","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/FAST_quickstart/","page":"FAST: the quickstart problem","title":"FAST: the quickstart problem","text":"An implementation of the QuickStart example from FAST","category":"page"},{"location":"examples/FAST_quickstart/","page":"FAST: the quickstart problem","title":"FAST: the quickstart problem","text":"using SDDP, HiGHS, Test\n\nfunction fast_quickstart()\n model = SDDP.PolicyGraph(\n SDDP.LinearGraph(2),\n lower_bound = -5,\n optimizer = HiGHS.Optimizer,\n ) do sp, t\n @variable(sp, x >= 0, SDDP.State, initial_value = 0.0)\n if t == 1\n @stageobjective(sp, x.out)\n else\n @variable(sp, s >= 0)\n @constraint(sp, s <= x.in)\n SDDP.parameterize(sp, [2, 3]) do ω\n return JuMP.set_upper_bound(s, ω)\n end\n @stageobjective(sp, -2s)\n end\n end\n\n det = SDDP.deterministic_equivalent(model, HiGHS.Optimizer)\n set_silent(det)\n JuMP.optimize!(det)\n @test JuMP.objective_value(det) == -2\n\n SDDP.train(model)\n @test SDDP.calculate_bound(model) == -2\nend\n\nfast_quickstart()","category":"page"},{"location":"examples/FAST_quickstart/","page":"FAST: the quickstart problem","title":"FAST: the quickstart problem","text":"","category":"page"},{"location":"examples/FAST_quickstart/","page":"FAST: the quickstart problem","title":"FAST: the quickstart problem","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/StructDualDynProg.jl_prob5.2_3stages/","page":"StructDualDynProg: Problem 5.2, 3 stages","title":"StructDualDynProg: Problem 5.2, 3 stages","text":"EditURL = \"StructDualDynProg.jl_prob5.2_3stages.jl\"","category":"page"},{"location":"examples/StructDualDynProg.jl_prob5.2_3stages/#StructDualDynProg:-Problem-5.2,-3-stages","page":"StructDualDynProg: Problem 5.2, 3 stages","title":"StructDualDynProg: Problem 5.2, 3 stages","text":"","category":"section"},{"location":"examples/StructDualDynProg.jl_prob5.2_3stages/","page":"StructDualDynProg: Problem 5.2, 3 stages","title":"StructDualDynProg: Problem 5.2, 3 stages","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/StructDualDynProg.jl_prob5.2_3stages/","page":"StructDualDynProg: Problem 5.2, 3 stages","title":"StructDualDynProg: Problem 5.2, 3 stages","text":"This example comes from 
StochasticDualDynamicProgramming.jl.","category":"page"},{"location":"examples/StructDualDynProg.jl_prob5.2_3stages/","page":"StructDualDynProg: Problem 5.2, 3 stages","title":"StructDualDynProg: Problem 5.2, 3 stages","text":"using SDDP, HiGHS, Test\n\nfunction test_prob52_3stages()\n model = SDDP.LinearPolicyGraph(\n stages = 3,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n ) do sp, t\n n = 4\n m = 3\n i_c = [16, 5, 32, 2]\n C = [25, 80, 6.5, 160]\n T = [8760, 7000, 1500] / 8760\n D2 = [diff([0, 3919, 7329, 10315]) diff([0, 7086, 9004, 11169])]\n p2 = [0.9, 0.1]\n @variable(sp, x[i = 1:n] >= 0, SDDP.State, initial_value = 0.0)\n @variables(sp, begin\n y[1:n, 1:m] >= 0\n v[1:n] >= 0\n penalty >= 0\n ξ[j = 1:m]\n end)\n @constraints(sp, begin\n [i = 1:n], x[i].out == x[i].in + v[i]\n [i = 1:n], sum(y[i, :]) <= x[i].in\n [j = 1:m], sum(y[:, j]) + penalty >= ξ[j]\n end)\n @stageobjective(sp, i_c'v + C' * y * T + 1e5 * penalty)\n if t != 1 # no uncertainty in first stage\n SDDP.parameterize(sp, 1:size(D2, 2), p2) do ω\n for j in 1:m\n JuMP.fix(ξ[j], D2[j, ω])\n end\n end\n end\n if t == 3\n @constraint(sp, sum(v) == 0)\n end\n end\n\n det = SDDP.deterministic_equivalent(model, HiGHS.Optimizer)\n set_silent(det)\n JuMP.optimize!(det)\n @test JuMP.objective_value(det) ≈ 406712.49 atol = 0.1\n\n SDDP.train(model; log_frequency = 10)\n @test SDDP.calculate_bound(model) ≈ 406712.49 atol = 0.1\n return\nend\n\ntest_prob52_3stages()","category":"page"},{"location":"examples/StructDualDynProg.jl_prob5.2_3stages/","page":"StructDualDynProg: Problem 5.2, 3 stages","title":"StructDualDynProg: Problem 5.2, 3 stages","text":"","category":"page"},{"location":"examples/StructDualDynProg.jl_prob5.2_3stages/","page":"StructDualDynProg: Problem 5.2, 3 stages","title":"StructDualDynProg: Problem 5.2, 3 stages","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/infinite_horizon_hydro_thermal/","page":"Infinite horizon hydro-thermal","title":"Infinite horizon hydro-thermal","text":"EditURL = \"infinite_horizon_hydro_thermal.jl\"","category":"page"},{"location":"examples/infinite_horizon_hydro_thermal/#Infinite-horizon-hydro-thermal","page":"Infinite horizon hydro-thermal","title":"Infinite horizon hydro-thermal","text":"","category":"section"},{"location":"examples/infinite_horizon_hydro_thermal/","page":"Infinite horizon hydro-thermal","title":"Infinite horizon hydro-thermal","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/infinite_horizon_hydro_thermal/","page":"Infinite horizon hydro-thermal","title":"Infinite horizon hydro-thermal","text":"using SDDP, HiGHS, Test, Statistics\n\nfunction infinite_hydro_thermal(; cut_type)\n Ω = [\n (inflow = 0.0, demand = 7.5),\n (inflow = 5.0, demand = 5),\n (inflow = 10.0, demand = 2.5),\n ]\n graph = SDDP.Graph(\n :root_node,\n [:week],\n [(:root_node => :week, 1.0), (:week => :week, 0.9)],\n )\n model = SDDP.PolicyGraph(\n graph;\n lower_bound = 0,\n optimizer = HiGHS.Optimizer,\n ) do subproblem, node\n @variable(\n subproblem,\n 5.0 <= reservoir <= 15.0,\n SDDP.State,\n initial_value = 10.0\n )\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n spill >= 0\n inflow\n demand\n end)\n @constraints(\n subproblem,\n begin\n reservoir.out == reservoir.in - hydro_generation - spill + inflow\n hydro_generation + thermal_generation == demand\n end\n )\n @stageobjective(subproblem, 10 * spill + thermal_generation)\n SDDP.parameterize(subproblem, Ω) do ω\n JuMP.fix(inflow, 
ω.inflow)\n return JuMP.fix(demand, ω.demand)\n end\n end\n SDDP.train(\n model;\n cut_type = cut_type,\n log_frequency = 100,\n sampling_scheme = SDDP.InSampleMonteCarlo(terminate_on_cycle = true),\n cycle_discretization_delta = 0.1,\n )\n @test SDDP.calculate_bound(model) ≈ 119.167 atol = 0.1\n\n results = SDDP.simulate(model, 500)\n objectives =\n [sum(s[:stage_objective] for s in simulation) for simulation in results]\n sample_mean = round(Statistics.mean(objectives); digits = 2)\n sample_ci = round(1.96 * Statistics.std(objectives) / sqrt(500); digits = 2)\n println(\"Confidence_interval = $(sample_mean) ± $(sample_ci)\")\n @test sample_mean - sample_ci <= 119.167 <= sample_mean + sample_ci\n return\nend\n\ninfinite_hydro_thermal(cut_type = SDDP.SINGLE_CUT)\ninfinite_hydro_thermal(cut_type = SDDP.MULTI_CUT)","category":"page"},{"location":"examples/infinite_horizon_hydro_thermal/","page":"Infinite horizon hydro-thermal","title":"Infinite horizon hydro-thermal","text":"","category":"page"},{"location":"examples/infinite_horizon_hydro_thermal/","page":"Infinite horizon hydro-thermal","title":"Infinite horizon hydro-thermal","text":"This page was generated using Literate.jl.","category":"page"},{"location":"apireference/#api_reference_list","page":"API Reference","title":"API Reference","text":"","category":"section"},{"location":"apireference/#Policy-graphs","page":"API Reference","title":"Policy graphs","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"SDDP.Graph\nSDDP.add_node\nSDDP.add_edge\nSDDP.add_ambiguity_set\nSDDP.LinearGraph\nSDDP.MarkovianGraph\nSDDP.UnicyclicGraph\nSDDP.LinearPolicyGraph\nSDDP.MarkovianPolicyGraph\nSDDP.PolicyGraph","category":"page"},{"location":"apireference/#SDDP.Graph","page":"API Reference","title":"SDDP.Graph","text":"Graph(root_node::T) where T\n\nCreate an empty graph struture with the root node root_node.\n\nExample\n\njulia> graph = SDDP.Graph(0)\nRoot\n 0\nNodes\n {}\nArcs\n {}\n\njulia> graph = SDDP.Graph(:root)\nRoot\n root\nNodes\n {}\nArcs\n {}\n\njulia> graph = SDDP.Graph((0, 0))\nRoot\n (0, 0)\nNodes\n {}\nArcs\n {}\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.add_node","page":"API Reference","title":"SDDP.add_node","text":"add_node(graph::Graph{T}, node::T) where {T}\n\nAdd a node to the graph graph.\n\nExamples\n\njulia> graph = SDDP.Graph(:root);\n\njulia> SDDP.add_node(graph, :A)\n\njulia> graph\nRoot\n root\nNodes\n A\nArcs\n {}\n\njulia> graph = SDDP.Graph(0);\n\njulia> SDDP.add_node(graph, 2)\n\njulia> graph\nRoot\n 0\nNodes\n 2\nArcs\n {}\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.add_edge","page":"API Reference","title":"SDDP.add_edge","text":"add_edge(graph::Graph{T}, edge::Pair{T, T}, probability::Float64) where {T}\n\nAdd an edge to the graph graph.\n\nExamples\n\njulia> graph = SDDP.Graph(0);\n\njulia> SDDP.add_node(graph, 1)\n\njulia> SDDP.add_edge(graph, 0 => 1, 0.9)\n\njulia> graph\nRoot\n 0\nNodes\n 1\nArcs\n 0 => 1 w.p. 0.9\n\njulia> graph = SDDP.Graph(:root);\n\njulia> SDDP.add_node(graph, :A)\n\njulia> SDDP.add_edge(graph, :root => :A, 1.0)\n\njulia> graph\nRoot\n root\nNodes\n A\nArcs\n root => A w.p. 
1.0\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.add_ambiguity_set","page":"API Reference","title":"SDDP.add_ambiguity_set","text":"add_ambiguity_set(\n graph::Graph{T},\n set::Vector{T},\n lipschitz::Vector{Float64},\n) where {T}\n\nAdd set to the belief partition of graph.\n\nlipschitz is a vector of Lipschitz constants, with one element for each node in set. The Lipschitz constant is the maximum slope of the cost-to-go function with respect to the belief state associated with each node at any point in the state-space.\n\nExamples\n\njulia> graph = SDDP.LinearGraph(3)\nRoot\n 0\nNodes\n 1\n 2\n 3\nArcs\n 0 => 1 w.p. 1.0\n 1 => 2 w.p. 1.0\n 2 => 3 w.p. 1.0\n\njulia> SDDP.add_ambiguity_set(graph, [1, 2], [1e3, 1e2])\n\njulia> SDDP.add_ambiguity_set(graph, [3], [1e5])\n\njulia> graph\nRoot\n 0\nNodes\n 1\n 2\n 3\nArcs\n 0 => 1 w.p. 1.0\n 1 => 2 w.p. 1.0\n 2 => 3 w.p. 1.0\nPartitions\n {1, 2}\n {3}\n\n\n\n\n\nadd_ambiguity_set(graph::Graph{T}, set::Vector{T}, lipschitz::Float64)\n\nAdd set to the belief partition of graph.\n\nlipschitz is a Lipschitz constant for each node in set. The Lipschitz constant is the maximum slope of the cost-to-go function with respect to the belief state associated with each node at any point in the state-space.\n\nExamples\n\njulia> graph = SDDP.LinearGraph(3);\n\njulia> SDDP.add_ambiguity_set(graph, [1, 2], 1e3)\n\njulia> SDDP.add_ambiguity_set(graph, [3], 1e5)\n\njulia> graph\nRoot\n 0\nNodes\n 1\n 2\n 3\nArcs\n 0 => 1 w.p. 1.0\n 1 => 2 w.p. 1.0\n 2 => 3 w.p. 1.0\nPartitions\n {1, 2}\n {3}\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.LinearGraph","page":"API Reference","title":"SDDP.LinearGraph","text":"LinearGraph(stages::Int)\n\nCreate a linear graph with stages number of nodes.\n\nExamples\n\njulia> graph = SDDP.LinearGraph(3)\nRoot\n 0\nNodes\n 1\n 2\n 3\nArcs\n 0 => 1 w.p. 1.0\n 1 => 2 w.p. 1.0\n 2 => 3 w.p. 1.0\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.MarkovianGraph","page":"API Reference","title":"SDDP.MarkovianGraph","text":"MarkovianGraph(transition_matrices::Vector{Matrix{Float64}})\n\nConstruct a Markovian graph from the vector of transition matrices.\n\ntransition_matrices[t][i, j] gives the probability of transitioning from Markov state i in stage t - 1 to Markov state j in stage t.\n\nThe dimension of the first transition matrix should be (1, N), and transition_matrics[1][1, i] is the probability of transitioning from the root node to the Markov state i.\n\nExamples\n\njulia> graph = SDDP.MarkovianGraph([ones(1, 1), [0.5 0.5], [0.8 0.2; 0.2 0.8]])\nRoot\n (0, 1)\nNodes\n (1, 1)\n (2, 1)\n (2, 2)\n (3, 1)\n (3, 2)\nArcs\n (0, 1) => (1, 1) w.p. 1.0\n (1, 1) => (2, 1) w.p. 0.5\n (1, 1) => (2, 2) w.p. 0.5\n (2, 1) => (3, 1) w.p. 0.8\n (2, 1) => (3, 2) w.p. 0.2\n (2, 2) => (3, 1) w.p. 0.2\n (2, 2) => (3, 2) w.p. 
0.8\n\n\n\n\n\nMarkovianGraph(;\n stages::Int,\n transition_matrix::Matrix{Float64},\n root_node_transition::Vector{Float64},\n)\n\nConstruct a Markovian graph object with stages number of stages and time-independent Markov transition probabilities.\n\ntransition_matrix must be a square matrix, and the probability of transitioning from Markov state i in stage t to Markov state j in stage t + 1 is given by transition_matrix[i, j].\n\nroot_node_transition[i] is the probability of transitioning from the root node to Markov state i in the first stage.\n\nExamples\n\njulia> graph = SDDP.MarkovianGraph(;\n stages = 3,\n transition_matrix = [0.8 0.2; 0.2 0.8],\n root_node_transition = [0.5, 0.5],\n )\nRoot\n (0, 1)\nNodes\n (1, 1)\n (1, 2)\n (2, 1)\n (2, 2)\n (3, 1)\n (3, 2)\nArcs\n (0, 1) => (1, 1) w.p. 0.5\n (0, 1) => (1, 2) w.p. 0.5\n (1, 1) => (2, 1) w.p. 0.8\n (1, 1) => (2, 2) w.p. 0.2\n (1, 2) => (2, 1) w.p. 0.2\n (1, 2) => (2, 2) w.p. 0.8\n (2, 1) => (3, 1) w.p. 0.8\n (2, 1) => (3, 2) w.p. 0.2\n (2, 2) => (3, 1) w.p. 0.2\n (2, 2) => (3, 2) w.p. 0.8\n\n\n\n\n\nMarkovianGraph(\n simulator::Function;\n budget::Union{Int,Vector{Int}},\n scenarios::Int = 1000,\n)\n\nConstruct a Markovian graph by fitting Markov chain to scenarios generated by simulator().\n\nbudget is the total number of nodes in the resulting Markov chain. This can either be specified as a single Int, in which case we will attempt to intelligently distributed the nodes between stages. Alternatively, budget can be a Vector{Int}, which details the number of Markov state to have in each stage.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.UnicyclicGraph","page":"API Reference","title":"SDDP.UnicyclicGraph","text":"UnicyclicGraph(discount_factor::Float64; num_nodes::Int = 1)\n\nConstruct a graph composed of num_nodes nodes that form a single cycle, with a probability of discount_factor of continuing the cycle.\n\nExamples\n\njulia> graph = SDDP.UnicyclicGraph(0.9; num_nodes = 2)\nRoot\n 0\nNodes\n 1\n 2\nArcs\n 0 => 1 w.p. 1.0\n 1 => 2 w.p. 1.0\n 2 => 1 w.p. 0.9\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.LinearPolicyGraph","page":"API Reference","title":"SDDP.LinearPolicyGraph","text":"LinearPolicyGraph(builder::Function; stages::Int, kwargs...)\n\nCreate a linear policy graph with stages number of stages.\n\nKeyword arguments\n\nstages: the number of stages in the graph\nkwargs: other keyword arguments are passed to SDDP.PolicyGraph.\n\nExamples\n\njulia> SDDP.LinearPolicyGraph(; stages = 2, lower_bound = 0.0) do sp, t\n # ... build model ...\nend\nA policy graph with 2 nodes.\nNode indices: 1, 2\n\nis equivalent to\n\njulia> graph = SDDP.LinearGraph(2);\n\njulia> SDDP.PolicyGraph(graph; lower_bound = 0.0) do sp, t\n # ... build model ...\nend\nA policy graph with 2 nodes.\nNode indices: 1, 2\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.MarkovianPolicyGraph","page":"API Reference","title":"SDDP.MarkovianPolicyGraph","text":"MarkovianPolicyGraph(\n builder::Function;\n transition_matrices::Vector{Array{Float64,2}},\n kwargs...\n)\n\nCreate a Markovian policy graph based on the transition matrices given in transition_matrices.\n\nKeyword arguments\n\ntransition_matrices[t][i, j] gives the probability of transitioning from Markov state i in stage t - 1 to Markov state j in stage t. 
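The simulator-based MarkovianGraph method described above has no example in this reference, so the following is a minimal sketch. The simulator function, the budget of 30 nodes, and the 500 scenarios are illustrative assumptions rather than values taken from the package.

```julia
using SDDP

# An illustrative scenario generator: any zero-argument function that returns
# a Vector{Float64} trajectory can play the role of the simulator.
simulator() = cumsum(rand(12) .- 0.5)

# Let SDDP.jl distribute 30 Markov states across the 12 stages ...
graph_auto = SDDP.MarkovianGraph(simulator; budget = 30, scenarios = 500)

# ... or request a fixed number of Markov states in each stage.
graph_fixed = SDDP.MarkovianGraph(simulator; budget = fill(3, 12), scenarios = 500)
```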
The dimension of the first transition matrix should be (1, N), and transition_matrics[1][1, i] is the probability of transitioning from the root node to the Markov state i.\nkwargs: other keyword arguments are passed to SDDP.PolicyGraph.\n\nSee also\n\nSee SDDP.MarkovianGraph for other ways of specifying a Markovian policy graph.\n\nSee SDDP.PolicyGraph for the other keyword arguments.\n\nExamples\n\njulia> SDDP.MarkovianPolicyGraph(;\n transition_matrices = [ones(1, 1), [0.5 0.5], [0.8 0.2; 0.2 0.8]],\n lower_bound = 0.0,\n ) do sp, node\n # ... build model ...\n end\nA policy graph with 5 nodes.\n Node indices: (1, 1), (2, 1), (2, 2), (3, 1), (3, 2)\n\nis equivalent to\n\njulia> graph = SDDP.MarkovianGraph([ones(1, 1), [0.5 0.5], [0.8 0.2; 0.2 0.8]]);\n\njulia> SDDP.PolicyGraph(graph; lower_bound = 0.0) do sp, t\n # ... build model ...\nend\nA policy graph with 5 nodes.\n Node indices: (1, 1), (2, 1), (2, 2), (3, 1), (3, 2)\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.PolicyGraph","page":"API Reference","title":"SDDP.PolicyGraph","text":"PolicyGraph(\n builder::Function,\n graph::Graph{T};\n sense::Symbol = :Min,\n lower_bound = -Inf,\n upper_bound = Inf,\n optimizer = nothing,\n) where {T}\n\nConstruct a policy graph based on the graph structure of graph. (See SDDP.Graph for details.)\n\nKeyword arguments\n\nsense: whether we are minimizing (:Min) or maximizing (:Max).\nlower_bound: if mimimizing, a valid lower bound for the cost to go in all subproblems.\nupper_bound: if maximizing, a valid upper bound for the value to go in all subproblems.\noptimizer: the optimizer to use for each of the subproblems\n\nExamples\n\nfunction builder(subproblem::JuMP.Model, index)\n # ... subproblem definition ...\nend\n\nmodel = PolicyGraph(\n builder,\n graph;\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n)\n\nOr, using the Julia do ... end syntax:\n\nmodel = PolicyGraph(\n graph;\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n) do subproblem, index\n # ... subproblem definitions ...\nend\n\n\n\n\n\n","category":"type"},{"location":"apireference/#Subproblem-definition","page":"API Reference","title":"Subproblem definition","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"@stageobjective\nSDDP.parameterize\nSDDP.add_objective_state\nSDDP.objective_state\nSDDP.Noise","category":"page"},{"location":"apireference/#SDDP.@stageobjective","page":"API Reference","title":"SDDP.@stageobjective","text":"@stageobjective(subproblem, expr)\n\nSet the stage-objective of subproblem to expr.\n\nExamples\n\n@stageobjective(subproblem, 2x + y)\n\n\n\n\n\n","category":"macro"},{"location":"apireference/#SDDP.parameterize","page":"API Reference","title":"SDDP.parameterize","text":"parameterize(\n modify::Function,\n subproblem::JuMP.Model,\n realizations::Vector{T},\n probability::Vector{Float64} = fill(1.0 / length(realizations))\n) where {T}\n\nAdd a parameterization function modify to subproblem. 
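As a hedged, self-contained sketch of how @stageobjective and SDDP.parameterize sit inside a policy-graph builder, the toy two-stage model below is illustrative only; the variable names, data, and bounds are assumptions.

```julia
using SDDP, HiGHS

model = SDDP.LinearPolicyGraph(;
    stages = 2,
    lower_bound = 0.0,
    optimizer = HiGHS.Optimizer,
) do sp, t
    # A single state variable carried between stages.
    @variable(sp, x >= 0, SDDP.State, initial_value = 5.0)
    @variable(sp, u >= 0)
    @constraint(sp, x.out == x.in - u)
    # The noise ω re-parameterizes the subproblem each time the node is visited.
    SDDP.parameterize(sp, [1.0, 2.0, 3.0], [0.3, 0.4, 0.3]) do ω
        return JuMP.set_upper_bound(u, ω)
    end
    @stageobjective(sp, 2.0 * u + 0.1 * x.out)
end
```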
The modify function takes one argument and modifies subproblem based on the realization of the noise sampled from realizations with corresponding probabilities probability.\n\nIn order to conduct an out-of-sample simulation, modify should accept arguments that are not in realizations (but still of type T).\n\nExamples\n\nSDDP.parameterize(subproblem, [1, 2, 3], [0.4, 0.3, 0.3]) do ω\n JuMP.set_upper_bound(x, ω)\nend\n\n\n\n\n\nparameterize(node::Node, noise)\n\nParameterize node node with the noise noise.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.add_objective_state","page":"API Reference","title":"SDDP.add_objective_state","text":"add_objective_state(update::Function, subproblem::JuMP.Model; kwargs...)\n\nAdd an objective state variable to subproblem.\n\nRequired kwargs are:\n\ninitial_value: The initial value of the objective state variable at the root node.\nlipschitz: The lipschitz constant of the objective state variable.\n\nSetting a tight value for the lipschitz constant can significantly improve the speed of convergence.\n\nOptional kwargs are:\n\nlower_bound: A valid lower bound for the objective state variable. Can be -Inf.\nupper_bound: A valid upper bound for the objective state variable. Can be +Inf.\n\nSetting tight values for these optional variables can significantly improve the speed of convergence.\n\nIf the objective state is N-dimensional, each keyword argument must be an NTuple{N,Float64}. For example, initial_value = (0.0, 1.0).\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.objective_state","page":"API Reference","title":"SDDP.objective_state","text":"objective_state(subproblem::JuMP.Model)\n\nReturn the current objective state of the problem.\n\nCan only be called from SDDP.parameterize.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.Noise","page":"API Reference","title":"SDDP.Noise","text":"Noise(support, probability)\n\nAn atom of a discrete random variable at the point of support support and associated probability probability.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#Training-the-policy","page":"API Reference","title":"Training the policy","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"SDDP.numerical_stability_report\nSDDP.train\nSDDP.termination_status\nSDDP.write_cuts_to_file\nSDDP.read_cuts_from_file\nSDDP.write_log_to_csv","category":"page"},{"location":"apireference/#SDDP.numerical_stability_report","page":"API Reference","title":"SDDP.numerical_stability_report","text":"numerical_stability_report(\n [io::IO = stdout,]\n model::PolicyGraph;\n by_node::Bool = false,\n print::Bool = true,\n warn::Bool = true,\n)\n\nPrint a report identifying possible numeric stability issues.\n\nKeyword arguments\n\nIf by_node, print a report for each node in the graph.\nIf print, print to io.\nIf warn, warn if the coefficients may cause numerical issues.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.train","page":"API Reference","title":"SDDP.train","text":"SDDP.train(model::PolicyGraph; kwargs...)\n\nTrain the policy for model.\n\nKeyword arguments\n\niteration_limit::Int: number of iterations to conduct before termination.\ntime_limit::Float64: number of seconds to train before termination.\nstoping_rules: a vector of SDDP.AbstractStoppingRules. Defaults to SimulationStoppingRule.\nprint_level::Int: control the level of printing to the screen. Defaults to 1. 
Set to 0 to disable all printing.\nlog_file::String: filepath at which to write a log of the training progress. Defaults to SDDP.log.\nlog_frequency::Int: control the frequency with which the logging is outputted (iterations/log). Defaults to 1.\nlog_every_seconds::Float64: control the frequency with which the logging is outputted (seconds/log). Defaults to 0.0.\nrun_numerical_stability_report::Bool: generate (and print) a numerical stability report prior to solve. Defaults to true.\nrefine_at_similar_nodes::Bool: if SDDP can detect that two nodes have the same children, it can cheaply add a cut discovered at one to the other. In almost all cases this should be set to true.\ncut_deletion_minimum::Int: the minimum number of cuts to cache before deleting cuts from the subproblem. The impact on performance is solver specific; however, smaller values result in smaller subproblems (and therefore quicker solves), at the expense of more time spent performing cut selection.\nrisk_measure: the risk measure to use at each node. Defaults to Expectation.\nsampling_scheme: a sampling scheme to use on the forward pass of the algorithm. Defaults to InSampleMonteCarlo.\nbackward_sampling_scheme: a backward pass sampling scheme to use on the backward pass of the algorithm. Defaults to CompleteSampler.\ncut_type: choose between SDDP.SINGLE_CUT and SDDP.MULTI_CUT versions of SDDP.\ndashboard::Bool: open a visualization of the training over time. Defaults to false.\nparallel_scheme::AbstractParallelScheme: specify a scheme for solving in parallel. Defaults to Serial().\nforward_pass::AbstractForwardPass: specify a scheme to use for the forward passes.\nforward_pass_resampling_probability::Union{Nothing,Float64}: set to a value in (0, 1) to enable RiskAdjustedForwardPass. Defaults to nothing (disabled).\nadd_to_existing_cuts::Bool: set to true to allow training a model that was previously trained. Defaults to false.\nduality_handler::AbstractDualityHandler: specify a duality handler to use when creating cuts.\npost_iteration_callback::Function: a callback with the signature post_iteration_callback(::IterationResult) that is evaluated after each iteration of the algorithm.\n\nThere is also a special option for infinite horizon problems\n\ncycle_discretization_delta: the maximum distance between states allowed on the forward pass. This is for advanced users only and needs to be used in conjunction with a different sampling_scheme.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.termination_status","page":"API Reference","title":"SDDP.termination_status","text":"termination_status(model::PolicyGraph)::Symbol\n\nQuery the reason why the training stopped.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.write_cuts_to_file","page":"API Reference","title":"SDDP.write_cuts_to_file","text":"write_cuts_to_file(\n model::PolicyGraph{T},\n filename::String;\n node_name_parser::Function = string,\n) where {T}\n\nWrite the cuts that form the policy in model to filename in JSON format.\n\nnode_name_parser is a function which converts the name of each node into a string representation. 
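A minimal sketch of a training call that combines several of the keyword arguments listed above; model is assumed to be an existing SDDP.PolicyGraph, and the particular limits are illustrative.

```julia
SDDP.train(
    model;
    iteration_limit = 200,
    time_limit = 600.0,
    log_frequency = 10,
    risk_measure = SDDP.Expectation(),
    cut_type = SDDP.SINGLE_CUT,
    parallel_scheme = SDDP.Serial(),
)
```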
It has the signature: node_name_parser(::T)::String.\n\nSee also SDDP.read_cuts_from_file.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.read_cuts_from_file","page":"API Reference","title":"SDDP.read_cuts_from_file","text":"read_cuts_from_file(\n model::PolicyGraph{T},\n filename::String;\n node_name_parser::Function = _node_name_parser,\n) where {T}\n\nRead cuts (saved using SDDP.write_cuts_to_file) from filename into model.\n\nSince T can be an arbitrary Julia type, the conversion to JSON is lossy. When reading, read_cuts_from_file only supports T=Int, T=NTuple{N, Int}, and T=Symbol. If you have manually created a policy graph with a different node type T, provide a function node_name_parser with the signature node_name_parser(T, name::String)::T where {T} that returns the name of each node given the string name name.\n\nIf node_name_parser returns nothing, those cuts are skipped.\n\nSee also SDDP.write_cuts_to_file.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.write_log_to_csv","page":"API Reference","title":"SDDP.write_log_to_csv","text":"write_log_to_csv(model::PolicyGraph, filename::String)\n\nWrite the log of the most recent training to a CSV file for post-analysis.\n\nAssumes that the model has been trained via SDDP.train.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#api_stopping_rules","page":"API Reference","title":"Stopping rules","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"SDDP.AbstractStoppingRule\nSDDP.stopping_rule_status\nSDDP.convergence_test\nSDDP.IterationLimit\nSDDP.TimeLimit\nSDDP.Statistical\nSDDP.BoundStalling\nSDDP.StoppingChain\nSDDP.SimulationStoppingRule\nSDDP.FirstStageStoppingRule","category":"page"},{"location":"apireference/#SDDP.AbstractStoppingRule","page":"API Reference","title":"SDDP.AbstractStoppingRule","text":"AbstractStoppingRule\n\nThe abstract type for the stopping-rule interface.\n\nYou need to define the following methods:\n\nSDDP.stopping_rule_status\nSDDP.convergence_test\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.stopping_rule_status","page":"API Reference","title":"SDDP.stopping_rule_status","text":"stopping_rule_status(::AbstractStoppingRule)::Symbol\n\nReturn a symbol describing the stopping rule.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.convergence_test","page":"API Reference","title":"SDDP.convergence_test","text":"convergence_test(\n model::PolicyGraph,\n log::Vector{Log},\n ::AbstractStoppingRule,\n)::Bool\n\nReturn a Bool indicating if the algorithm should terminate the training.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.IterationLimit","page":"API Reference","title":"SDDP.IterationLimit","text":"IterationLimit(limit::Int)\n\nTerminate the algorithm after limit iterations.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.TimeLimit","page":"API Reference","title":"SDDP.TimeLimit","text":"TimeLimit(limit::Float64)\n\nTerminate the algorithm after limit seconds of computation.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.Statistical","page":"API Reference","title":"SDDP.Statistical","text":"Statistical(;\n num_replications,\n iteration_period = 1,\n z_score = 1.96,\n verbose = true,\n)\n\nPerform an in-sample Monte Carlo simulation of the policy with num_replications replications every iteration_period iterations. 
Terminate if the deterministic bound (lower if minimizing) falls within the confidence interval for the mean of the simulated cost. If verbose = true, print the confidence interval.\n\nNote that this test assumes that the simulated values are normally distributed. In infinite horizon models, this is almost never the case. The distribution is usually closer to exponential or log-normal.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.BoundStalling","page":"API Reference","title":"SDDP.BoundStalling","text":"BoundStalling(num_previous_iterations::Int, tolerance::Float64)\n\nTerminate the algorithm once the deterministic bound (lower if minimizing, upper if maximizing) fails to improve by more than tolerance in absolute terms for more than num_previous_iterations consecutive iterations, provided it has improved relative to the bound after the first iteration.\n\nChecking for an improvement relative to the first iteration avoids early termination in a situation where the bound fails to improve for the first N iterations. This frequently happens in models with a large number of stages, where it takes time for the cuts to propagate backward enough to modify the bound of the root node.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.StoppingChain","page":"API Reference","title":"SDDP.StoppingChain","text":"StoppingChain(rules::AbstractStoppingRule...)\n\nTerminate once all of the rules are satisfied.\n\nThis stopping rule short-circuits, so subsequent rules are only tested if the previous rules pass.\n\nExamples\n\nA stopping rule that runs 100 iterations, then checks for the bound stalling:\n\nStoppingChain(IterationLimit(100), BoundStalling(5, 0.1))\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.SimulationStoppingRule","page":"API Reference","title":"SDDP.SimulationStoppingRule","text":"SimulationStoppingRule(;\n sampling_scheme::AbstractSamplingScheme = SDDP.InSampleMonteCarlo(),\n replications::Int = -1,\n period::Int = -1,\n distance_tol::Float64 = 1e-2,\n bound_tol::Float64 = 1e-4,\n)\n\nTerminate the algorithm using a mix of heuristics. Unless you know otherwise, this is typically a good default.\n\nTermination criteria\n\nFirst, we check that the deterministic bound has stabilized. That is, over the last five iterations, the deterministic bound has changed by less than an absolute or relative tolerance of bound_tol.\n\nThen, if we have not performed a simulation in the last period iterations, we perform a primal simulation of the policy using replications out-of-sample realizations from sampling_scheme. The realizations are stored and re-used in each simulation. From each simulation, we record the value of the stage objective. We terminate the algorithm if each of the trajectories in two consecutive simulations differs by less than distance_tol.\n\nBy default, replications and period are -1, and SDDP.jl will guess good values for these. 
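The stopping rules documented above can be combined in a single training call, as in the sketch below; training stops as soon as any one rule in stopping_rules is satisfied. The model and the limits are illustrative assumptions.

```julia
SDDP.train(
    model;
    stopping_rules = [
        SDDP.TimeLimit(3600.0),
        SDDP.StoppingChain(SDDP.IterationLimit(100), SDDP.BoundStalling(5, 1e-4)),
        SDDP.SimulationStoppingRule(),
    ],
)
```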
Over-ride the default behavior by setting an appropriate value.\n\nExample\n\nSDDP.train(model; stopping_rules = [SimulationStoppingRule()])\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.FirstStageStoppingRule","page":"API Reference","title":"SDDP.FirstStageStoppingRule","text":"FirstStageStoppingRule(; atol::Float64 = 1e-3, iterations::Int = 50)\n\nTerminate the algorithm when the outgoing values of the first-stage state variables have not changed by more than atol for iterations number of consecutive iterations.\n\nExample\n\nSDDP.train(model; stopping_rules = [FirstStageStoppingRule()])\n\n\n\n\n\n","category":"type"},{"location":"apireference/#Sampling-schemes","page":"API Reference","title":"Sampling schemes","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"SDDP.AbstractSamplingScheme\nSDDP.sample_scenario\nSDDP.InSampleMonteCarlo\nSDDP.OutOfSampleMonteCarlo\nSDDP.Historical\nSDDP.PSRSamplingScheme\nSDDP.SimulatorSamplingScheme","category":"page"},{"location":"apireference/#SDDP.AbstractSamplingScheme","page":"API Reference","title":"SDDP.AbstractSamplingScheme","text":"AbstractSamplingScheme\n\nThe abstract type for the sampling-scheme interface.\n\nYou need to define the following methods:\n\nSDDP.sample_scenario\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.sample_scenario","page":"API Reference","title":"SDDP.sample_scenario","text":"sample_scenario(graph::PolicyGraph{T}, ::AbstractSamplingScheme) where {T}\n\nSample a scenario from the policy graph graph based on the sampling scheme.\n\nReturns ::Tuple{Vector{Tuple{T, <:Any}}, Bool}, where the first element is the scenario, and the second element is a Boolean flag indicating if the scenario was terminated due to the detection of a cycle.\n\nThe scenario is a list of tuples (type Vector{Tuple{T, <:Any}}) where the first component of each tuple is the index of the node, and the second component is the stagewise-independent noise term observed in that node.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.InSampleMonteCarlo","page":"API Reference","title":"SDDP.InSampleMonteCarlo","text":"InSampleMonteCarlo(;\n max_depth::Int = 0,\n terminate_on_cycle::Function = false,\n terminate_on_dummy_leaf::Function = true,\n rollout_limit::Function = (i::Int) -> typemax(Int),\n initial_node::Any = nothing,\n)\n\nA Monte Carlo sampling scheme using the in-sample data from the policy graph definition.\n\nIf terminate_on_cycle, terminate the forward pass once a cycle is detected. If max_depth > 0, return once max_depth nodes have been sampled. If terminate_on_dummy_leaf, terminate the forward pass with 1 - probability of sampling a child node.\n\nNote that if terminate_on_cycle = false and terminate_on_dummy_leaf = false then max_depth must be set > 0.\n\nControl which node the trajectories start from using initial_node. If it is left as nothing, the root node is used as the starting node.\n\nYou can use rollout_limit to set iteration specific depth limits. 
For example:\n\nInSampleMonteCarlo(rollout_limit = i -> 2 * i)\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.OutOfSampleMonteCarlo","page":"API Reference","title":"SDDP.OutOfSampleMonteCarlo","text":"OutOfSampleMonteCarlo(\n f::Function,\n graph::PolicyGraph;\n use_insample_transition::Bool = false,\n max_depth::Int = 0,\n terminate_on_cycle::Bool = false,\n terminate_on_dummy_leaf::Bool = true,\n rollout_limit::Function = i -> typemax(Int),\n initial_node = nothing,\n)\n\nCreate a Monte Carlo sampler using out-of-sample probabilities and/or supports for the stagewise-independent noise terms, and out-of-sample probabilities for the node-transition matrix.\n\nf is a function that takes the name of a node and returns a tuple containing a vector of new SDDP.Noise terms for the children of that node, and a vector of new SDDP.Noise terms for the stagewise-independent noise.\n\nIf f is called with the name of the root node (e.g., 0 in a linear policy graph, (0, 1) in a Markovian Policy Graph), then return a vector of SDDP.Noise for the children of the root node.\n\nIf use_insample_transition, the in-sample transition probabilities will be used. Therefore, f should only return a vector of the stagewise-independent noise terms, and f will not be called for the root node.\n\nIf terminate_on_cycle, terminate the forward pass once a cycle is detected. If max_depth > 0, return once max_depth nodes have been sampled. If terminate_on_dummy_leaf, terminate the forward pass with 1 - probability of sampling a child node.\n\nNote that if terminate_on_cycle = false and terminate_on_dummy_leaf = false then max_depth must be set > 0.\n\nControl which node the trajectories start from using initial_node. If it is left as nothing, the root node is used as the starting node.\n\nIf a node is deterministic, pass [SDDP.Noise(nothing, 1.0)] as the vector of noise terms.\n\nYou can use rollout_limit to set iteration specific depth limits. For example:\n\nOutOfSampleMonteCarlo(rollout_limit = i -> 2 * i)\n\nExamples\n\nGiven linear policy graph graph with T stages:\n\nsampler = OutOfSampleMonteCarlo(graph) do node\n if node == 0\n return [SDDP.Noise(1, 1.0)]\n else\n noise_terms = [SDDP.Noise(node, 0.3), SDDP.Noise(node + 1, 0.7)]\n children = node < T ? 
[SDDP.Noise(node + 1, 0.9)] : SDDP.Noise{Int}[]\n return children, noise_terms\n end\nend\n\nGiven linear policy graph graph with T stages:\n\nsampler = OutOfSampleMonteCarlo(graph, use_insample_transition=true) do node\n return [SDDP.Noise(node, 0.3), SDDP.Noise(node + 1, 0.7)]\nend\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.Historical","page":"API Reference","title":"SDDP.Historical","text":"Historical(\n scenarios::Vector{Vector{Tuple{T,S}}},\n probability::Vector{Float64};\n terminate_on_cycle::Bool = false,\n) where {T,S}\n\nA sampling scheme that samples a scenario from the vector of scenarios scenarios according to probability.\n\nExamples\n\nHistorical(\n [\n [(1, 0.5), (2, 1.0), (3, 0.5)],\n [(1, 0.5), (2, 0.0), (3, 1.0)],\n [(1, 1.0), (2, 0.0), (3, 0.0)]\n ],\n [0.2, 0.5, 0.3],\n)\n\n\n\n\n\nHistorical(\n scenarios::Vector{Vector{Tuple{T,S}}};\n terminate_on_cycle::Bool = false,\n) where {T,S}\n\nA deterministic sampling scheme that iterates through the vector of provided scenarios.\n\nExamples\n\nHistorical([\n [(1, 0.5), (2, 1.0), (3, 0.5)],\n [(1, 0.5), (2, 0.0), (3, 1.0)],\n [(1, 1.0), (2, 0.0), (3, 0.0)],\n])\n\n\n\n\n\nHistorical(\n scenario::Vector{Tuple{T,S}};\n terminate_on_cycle::Bool = false,\n) where {T,S}\n\nA deterministic sampling scheme that always samples scenario.\n\nExamples\n\nHistorical([(1, 0.5), (2, 1.5), (3, 0.75)])\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.PSRSamplingScheme","page":"API Reference","title":"SDDP.PSRSamplingScheme","text":"PSRSamplingScheme(N::Int; sampling_scheme = InSampleMonteCarlo())\n\nA sampling scheme with N scenarios, similar to how PSR does it.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.SimulatorSamplingScheme","page":"API Reference","title":"SDDP.SimulatorSamplingScheme","text":"SimulatorSamplingScheme(simulator::Function)\n\nCreate a sampling scheme based on a univariate scenario generator simulator, which returns a Vector{Float64} when called with no arguments like simulator().\n\nThis sampling scheme must be used with a Markovian graph constructed from the same simulator.\n\nThe sample space for SDDP.parameterize must be a tuple in which the first element is the Markov state.\n\nThis sampling scheme generates a new scenario by calling simulator(), and then picking the sequence of nodes in the Markovian graph that is closest to the new trajectory.\n\nExample\n\njulia> using SDDP\n\njulia> import HiGHS\n\njulia> simulator() = cumsum(rand(10))\nsimulator (generic function with 1 method)\n\njulia> model = SDDP.PolicyGraph(\n SDDP.MarkovianGraph(simulator; budget = 20, scenarios = 100);\n sense = :Max,\n upper_bound = 12,\n optimizer = HiGHS.Optimizer,\n ) do sp, node\n t, markov_state = node\n @variable(sp, x >= 0, SDDP.State, initial_value = 1)\n @variable(sp, u >= 0)\n @constraint(sp, x.out == x.in - u)\n # Elements of Ω must be a tuple in which `markov_state` is the first\n # element.\n Ω = [(markov_state, (u = u_max,)) for u_max in (0.0, 0.5)]\n SDDP.parameterize(sp, Ω) do (markov_state, ω)\n set_upper_bound(u, ω.u)\n @stageobjective(sp, markov_state * u)\n end\n end;\n\njulia> SDDP.train(\n model;\n print_level = 0,\n iteration_limit = 10,\n sampling_scheme = SDDP.SimulatorSamplingScheme(simulator),\n )\n\n\n\n\n\n\n","category":"type"},{"location":"apireference/#Parallel-schemes","page":"API Reference","title":"Parallel schemes","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API 
Reference","text":"SDDP.AbstractParallelScheme\nSDDP.Serial\nSDDP.Asynchronous","category":"page"},{"location":"apireference/#SDDP.AbstractParallelScheme","page":"API Reference","title":"SDDP.AbstractParallelScheme","text":"AbstractParallelScheme\n\nAbstract type for different parallelism schemes.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.Serial","page":"API Reference","title":"SDDP.Serial","text":"Serial()\n\nRun SDDP in serial mode.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.Asynchronous","page":"API Reference","title":"SDDP.Asynchronous","text":"Asynchronous(\n [init_callback::Function,]\n slave_pids::Vector{Int} = workers();\n use_master::Bool = true,\n)\n\nRun SDDP in asynchronous mode workers with pid's slave_pids.\n\nAfter initializing the models on each worker, call init_callback(model). Note that init_callback is run locally on the worker and not on the master thread.\n\nIf use_master is true, iterations are also conducted on the master process.\n\n\n\n\n\nAsynchronous(\n solver::Any,\n slave_pids::Vector{Int} = workers();\n use_master::Bool = true,\n)\n\nRun SDDP in asynchronous mode workers with pid's slave_pids.\n\nSet the optimizer on each worker by calling JuMP.set_optimizer(model, solver).\n\n\n\n\n\n","category":"type"},{"location":"apireference/#Forward-passes","page":"API Reference","title":"Forward passes","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"SDDP.AbstractForwardPass\nSDDP.DefaultForwardPass\nSDDP.RevisitingForwardPass\nSDDP.RiskAdjustedForwardPass\nSDDP.AlternativeForwardPass\nSDDP.AlternativePostIterationCallback\nSDDP.RegularizedForwardPass","category":"page"},{"location":"apireference/#SDDP.AbstractForwardPass","page":"API Reference","title":"SDDP.AbstractForwardPass","text":"AbstractForwardPass\n\nAbstract type for different forward passes.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.DefaultForwardPass","page":"API Reference","title":"SDDP.DefaultForwardPass","text":"DefaultForwardPass(; include_last_node::Bool = true)\n\nThe default forward pass.\n\nIf include_last_node = false and the sample terminated due to a cycle, then the last node (which forms the cycle) is omitted. This can be useful option to set when training, but it comes at the cost of not knowing which node formed the cycle (if there are multiple possibilities).\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.RevisitingForwardPass","page":"API Reference","title":"SDDP.RevisitingForwardPass","text":"RevisitingForwardPass(\n period::Int = 500;\n sub_pass::AbstractForwardPass = DefaultForwardPass(),\n)\n\nA forward pass scheme that generate period new forward passes (using sub_pass), then revisits all previously explored forward passes. 
This can be useful to encourage convergence at a diversity of points in the state-space.\n\nSet period = typemax(Int) to disable.\n\nFor example, if period = 2, then the forward passes will be revisited as follows: 1, 2, 1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 1, 2, ....\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.RiskAdjustedForwardPass","page":"API Reference","title":"SDDP.RiskAdjustedForwardPass","text":"RiskAdjustedForwardPass(;\n forward_pass::AbstractForwardPass,\n risk_measure::AbstractRiskMeasure,\n resampling_probability::Float64,\n rejection_count::Int = 5,\n)\n\nA forward pass that resamples a previous forward pass with resampling_probability probability, and otherwise samples a new forward pass using forward_pass.\n\nThe forward pass to revisit is chosen based on the risk-adjusted (using risk_measure) probability of the cumulative stage objectives.\n\nNote that this objective corresponds to the first time we visited the trajectory. Subsequent visits may have improved things, but we don't have the mechanisms in-place to update it. Therefore, remove the forward pass from resampling consideration after rejection_count revisits.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.AlternativeForwardPass","page":"API Reference","title":"SDDP.AlternativeForwardPass","text":"AlternativeForwardPass(\n forward_model::SDDP.PolicyGraph{T};\n forward_pass::AbstractForwardPass = DefaultForwardPass(),\n)\n\nA forward pass that simulates using forward_model, which may be different to the model used in the backwards pass.\n\nWhen using this forward pass, you should almost always pass SDDP.AlternativePostIterationCallback to the post_iteration_callback argument of SDDP.train.\n\nThis forward pass is most useful when the forward_model is non-convex and we use a convex approximation of the model in the backward pass.\n\nFor example, in optimal power flow models, we can use an AC-OPF formulation as the forward_model and a DC-OPF formulation as the backward model.\n\nFor more details see the paper:\n\nRosemberg, A., and Street, A., and Garcia, J.D., and Valladão, D.M., and Silva, T., and Dowson, O. (2021). Assessing the cost of network simplifications in long-term hydrothermal dispatch planning models. IEEE Transactions on Sustainable Energy. 13(1), 196-206.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.AlternativePostIterationCallback","page":"API Reference","title":"SDDP.AlternativePostIterationCallback","text":"AlternativePostIterationCallback(forward_model::PolicyGraph)\n\nA post-iteration callback that should be used whenever SDDP.AlternativeForwardPass is used.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.RegularizedForwardPass","page":"API Reference","title":"SDDP.RegularizedForwardPass","text":"RegularizedForwardPass(;\n rho::Float64 = 0.05,\n forward_pass::AbstractForwardPass = DefaultForwardPass(),\n)\n\nA forward pass that regularizes the outgoing first-stage state variables with an L-infty trust-region constraint about the previous iteration's solution. Specifically, the bounds of the outgoing state variable x are updated from (l, u) to max(l, x^k - rho * (u - l)) <= x <= min(u, x^k + rho * (u - l)), where x^k is the optimal solution of x in the previous iteration. 
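To make the pairing of AlternativeForwardPass and AlternativePostIterationCallback concrete, here is a minimal sketch. It assumes two policy graphs built elsewhere over the same graph and state variables: forward_model (for example, a non-convex formulation used for simulation) and backward_model (a convex approximation used to generate cuts).

```julia
SDDP.train(
    backward_model;
    forward_pass = SDDP.AlternativeForwardPass(forward_model),
    post_iteration_callback = SDDP.AlternativePostIterationCallback(forward_model),
)
```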
On the first iteration, the value of the state at the root node is used.\n\nBy default, rho is set to 5%, which seems to work well empirically.\n\nPass a different forward_pass to control the forward pass within the regularized forward pass.\n\nThis forward pass is largely intended to be used for investment problems in which the first stage makes a series of capacity decisions that then influence the rest of the graph. An error is thrown if the first stage problem is not deterministic, and states are silently skipped if they do not have finite bounds.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#Risk-Measures","page":"API Reference","title":"Risk Measures","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"SDDP.AbstractRiskMeasure\nSDDP.adjust_probability","category":"page"},{"location":"apireference/#SDDP.AbstractRiskMeasure","page":"API Reference","title":"SDDP.AbstractRiskMeasure","text":"AbstractRiskMeasure\n\nThe abstract type for the risk measure interface.\n\nYou need to define the following methods:\n\nSDDP.adjust_probability\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.adjust_probability","page":"API Reference","title":"SDDP.adjust_probability","text":"adjust_probability(\n measure::Expectation\n risk_adjusted_probability::Vector{Float64},\n original_probability::Vector{Float64},\n noise_support::Vector{Noise{T}},\n objective_realizations::Vector{Float64},\n is_minimization::Bool,\n) where {T}\n\n\n\n\n\n","category":"function"},{"location":"apireference/#Duality-handlers","page":"API Reference","title":"Duality handlers","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"SDDP.AbstractDualityHandler\nSDDP.ContinuousConicDuality\nSDDP.LagrangianDuality\nSDDP.StrengthenedConicDuality\nSDDP.BanditDuality","category":"page"},{"location":"apireference/#SDDP.AbstractDualityHandler","page":"API Reference","title":"SDDP.AbstractDualityHandler","text":"AbstractDualityHandler\n\nThe abstract type for the duality handler interface.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.ContinuousConicDuality","page":"API Reference","title":"SDDP.ContinuousConicDuality","text":"ContinuousConicDuality()\n\nCompute dual variables in the backward pass using conic duality, relaxing any binary or integer restrictions as necessary.\n\nTheory\n\nGiven the problem\n\nmin Cᵢ(x̄, u, w) + θᵢ\n st (x̄, x′, u) in Xᵢ(w) ∩ S\n x̄ - x == 0 [λ]\n\nwhere S ⊆ ℝ×ℤ, we relax integrality and using conic duality to solve for λ in the problem:\n\nmin Cᵢ(x̄, u, w) + θᵢ\n st (x̄, x′, u) in Xᵢ(w)\n x̄ - x == 0 [λ]\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.LagrangianDuality","page":"API Reference","title":"SDDP.LagrangianDuality","text":"LagrangianDuality(;\n method::LocalImprovementSearch.AbstractSearchMethod =\n LocalImprovementSearch.BFGS(100),\n)\n\nObtain dual variables in the backward pass using Lagrangian duality.\n\nArguments\n\nmethod: the LocalImprovementSearch method for maximizing the Lagrangian dual problem.\n\nTheory\n\nGiven the problem\n\nmin Cᵢ(x̄, u, w) + θᵢ\n st (x̄, x′, u) in Xᵢ(w) ∩ S\n x̄ - x == 0 [λ]\n\nwhere S ⊆ ℝ×ℤ, we solve the problem max L(λ), where:\n\nL(λ) = min Cᵢ(x̄, u, w) + θᵢ - λ' h(x̄)\n st (x̄, x′, u) in Xᵢ(w) ∩ S\n\nand where h(x̄) = x̄ - x.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.StrengthenedConicDuality","page":"API 
Reference","title":"SDDP.StrengthenedConicDuality","text":"StrengthenedConicDuality()\n\nObtain dual variables in the backward pass using strengthened conic duality.\n\nTheory\n\nGiven the problem\n\nmin Cᵢ(x̄, u, w) + θᵢ\n st (x̄, x′, u) in Xᵢ(w) ∩ S\n x̄ - x == 0 [λ]\n\nwe first obtain an estimate for λ using ContinuousConicDuality.\n\nThen, we evaluate the Lagrangian function:\n\nL(λ) = min Cᵢ(x̄, u, w) + θᵢ - λ' (x̄ - x`)\n st (x̄, x′, u) in Xᵢ(w) ∩ S\n\nto obtain a better estimate of the intercept.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.BanditDuality","page":"API Reference","title":"SDDP.BanditDuality","text":"BanditDuality()\n\nFormulates the problem of choosing a duality handler as a multi-armed bandit problem. The arms to choose between are:\n\nContinuousConicDuality\nStrengthenedConicDuality\nLagrangianDuality\n\nOur problem isn't a typical multi-armed bandit for a two reasons:\n\nThe reward distribution is non-stationary (each arm converges to 0 as it keeps getting pulled.\nThe distribution of rewards is dependent on the history of the arms that were chosen.\n\nWe choose a very simple heuristic: pick the arm with the best mean + 1 standard deviation. That should ensure we consistently pick the arm with the best likelihood of improving the value function.\n\nIn future, we should consider discounting the rewards of earlier iterations, and focus more on the more-recent rewards.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#Simulating-the-policy","page":"API Reference","title":"Simulating the policy","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"SDDP.simulate\nSDDP.calculate_bound\nSDDP.add_all_cuts","category":"page"},{"location":"apireference/#SDDP.simulate","page":"API Reference","title":"SDDP.simulate","text":"simulate(\n model::PolicyGraph,\n number_replications::Int = 1,\n variables::Vector{Symbol} = Symbol[];\n sampling_scheme::AbstractSamplingScheme =\n InSampleMonteCarlo(),\n custom_recorders = Dict{Symbol, Function}(),\n duality_handler::Union{Nothing,AbstractDualityHandler} = nothing,\n skip_undefined_variables::Bool = false,\n parallel_scheme::AbstractParallelScheme = Serial(),\n incoming_state::Dict{String,Float64} = _initial_state(model),\n )::Vector{Vector{Dict{Symbol,Any}}}\n\nPerform a simulation of the policy model with number_replications replications.\n\nReturn data structure\n\nReturns a vector with one element for each replication. Each element is a vector with one-element for each node in the scenario that was sampled. Each element in that vector is a dictionary containing information about the subproblem that was solved.\n\nIn that dictionary there are four special keys:\n\n:node_index, which records the index of the sampled node in the policy model\n:noise_term, which records the noise observed at the node\n:stage_objective, which records the stage-objective of the subproblem\n:bellman_term, which records the cost/value-to-go of the node.\n\nThe sum of :stage_objective + :bellman_term will equal the objective value of the solved subproblem.\n\nIn addition to the special keys, the dictionary will contain the result of key => JuMP.value(subproblem[key]) for each key in variables. 
This is useful to obtain the primal value of the state and control variables.\n\nPositional arguments\n\nmodel: the model to simulate\nnumber_replications::Int = 1: the number of simulation replications to conduct, that is, the length of the simulation vector that is returned by this function. If omitted, this defaults to 1.\nvariables::Vector{Symbol} = Symbol[]: a list of the variable names to record the value of in each stage.\n\nKeyword arguments\n\nsampling_scheme: the sampling scheme used when simulating.\ncustom_recorders: see Custom recorders section below.\nduality_handler: the SDDP.AbstractDualityHandler used to compute dual variables. If you do not require dual variables (or if they are not available), pass duality_handler = nothing.\nskip_undefined_variables: If you attempt to simulate the value of a variable that is only defined in some of the stage problems, an error will be thrown. To override this (and return a NaN instead), pass skip_undefined_variables = true.\nparallel_scheme: Use parallel_scheme::AbstractParallelScheme to specify a scheme for simulating in parallel. Defaults to Serial.\nincoming_state: Use incoming_state to pass an initial value of the state variable, if it differs from that at the root node. Each key should be the string name of the state variable.\n\nCustom recorders\n\nFor more complicated data, the custom_recorders keyword argument can be used.\n\nFor example, to record the dual of a constraint named my_constraint, pass the following:\n\nsimulation_results = SDDP.simulate(model, 2;\n custom_recorders = Dict{Symbol, Function}(\n :constraint_dual => sp -> JuMP.dual(sp[:my_constraint])\n )\n)\n\nThe value of the dual in the first stage of the second replication can be accessed as:\n\nsimulation_results[2][1][:constraint_dual]\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.calculate_bound","page":"API Reference","title":"SDDP.calculate_bound","text":"SDDP.calculate_bound(\n model::PolicyGraph,\n state::Dict{Symbol,Float64} = model.initial_root_state;\n risk_measure::AbstractRiskMeasure = Expectation(),\n)\n\nCalculate the lower bound (if minimizing, otherwise upper bound) of the problem model at the point state, assuming the risk measure at the root node is risk_measure.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.add_all_cuts","page":"API Reference","title":"SDDP.add_all_cuts","text":"add_all_cuts(model::PolicyGraph)\n\nAdd all cuts that may have been deleted back into the model.\n\nExplanation\n\nDuring the solve, SDDP.jl may decide to remove cuts for a variety of reasons.\n\nThese can include cuts that define the optimal value function, particularly around the extremes of the state-space (e.g., reservoirs empty).\n\nThis function ensures that all cuts discovered are added back into the model.\n\nYou should call this after train and before simulate.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#Decision-rules","page":"API Reference","title":"Decision rules","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"SDDP.DecisionRule\nSDDP.evaluate","category":"page"},{"location":"apireference/#SDDP.DecisionRule","page":"API Reference","title":"SDDP.DecisionRule","text":"DecisionRule(model::PolicyGraph{T}; node::T)\n\nCreate a decision rule for node node in model.\n\nExample\n\nrule = SDDP.DecisionRule(model; node = 1)\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.evaluate","page":"API 
Reference","title":"SDDP.evaluate","text":"evaluate(\n rule::DecisionRule;\n incoming_state::Dict{Symbol,Float64},\n noise = nothing,\n controls_to_record = Symbol[],\n)\n\nEvaluate the decision rule rule at the point described by the incoming_state and noise.\n\nIf the node is deterministic, omit the noise argument.\n\nPass a list of symbols to controls_to_record to save the optimal primal solution corresponding to the names registered in the model.\n\n\n\n\n\nevaluate(\n V::ValueFunction,\n point::Dict{Union{Symbol,String},<:Real},\n objective_state = nothing,\n belief_state = nothing\n)\n\nEvaluate the value function V at point in the state-space.\n\nReturns a tuple containing the height of the function, and the subgradient w.r.t. the convex state-variables.\n\nExamples\n\nevaluate(V, Dict(:volume => 1.0))\n\nIf the state variable is constructed like @variable(sp, volume[1:4] >= 0, SDDP.State, initial_value = 0.0), use [i] to index the state variable:\n\nevaluate(V, Dict(Symbol(\"volume[1]\") => 1.0))\n\nYou can also use strings or symbols for the keys.\n\nevaluate(V, Dict(\"volume[1]\" => 1))\n\n\n\n\n\nevaluate(V::ValueFunction{Nothing, Nothing}; kwargs...)\n\nEvaluate the value function V at the point in the state-space specified by kwargs.\n\nExamples\n\nevaluate(V; volume = 1)\n\n\n\n\n\nevaluate(\n model::PolicyGraph{T},\n validation_scenarios::ValidationScenarios{T,S},\n) where {T,S}\n\nEvaluate the performance of the policy contained in model after a call to train on the scenarios specified by validation_scenarios.\n\nExamples\n\nmodel, validation_scenarios = read_from_file(\"my_model.sof.json\")\ntrain(model; iteration_limit = 100)\nsimulations = evaluate(model, validation_scenarios)\n\n\n\n\n\n","category":"function"},{"location":"apireference/#Visualizing-the-policy","page":"API Reference","title":"Visualizing the policy","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"SDDP.SpaghettiPlot\nSDDP.add_spaghetti\nSDDP.publication_plot\nSDDP.ValueFunction\nSDDP.evaluate(::SDDP.ValueFunction, ::Dict{Symbol,Float64})\nSDDP.plot","category":"page"},{"location":"apireference/#SDDP.SpaghettiPlot","page":"API Reference","title":"SDDP.SpaghettiPlot","text":"SDDP.SpaghettiPlot(; stages, scenarios)\n\nInitialize a new SpaghettiPlot with stages stages and scenarios number of replications.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.add_spaghetti","page":"API Reference","title":"SDDP.add_spaghetti","text":"SDDP.add_spaghetti(data_function::Function, plt::SpaghettiPlot; kwargs...)\n\nDescription\n\nAdd a new figure to the SpaghettiPlot plt, where the y-value of the scenarioth line when x = stage is given by data_function(plt.simulations[scenario][stage]).\n\nKeyword arguments\n\nxlabel: set the xaxis label\nylabel: set the yaxis label\ntitle: set the title of the plot\nymin: set the minimum y value\nymax: set the maximum y value\ncumulative: plot the additive accumulation of the value across the stages\ninterpolate: interpolation method for lines between stages.\n\nDefaults to \"linear\"; see the d3 docs for all options.\n\nExamples\n\nsimulations = simulate(model, 10)\nplt = SDDP.spaghetti_plot(simulations)\nSDDP.add_spaghetti(plt; title = \"Stage objective\") do data\n return data[:stage_objective]\nend\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.publication_plot","page":"API Reference","title":"SDDP.publication_plot","text":"SDDP.publication_plot(\n data_function, simulations;\n 
quantile = [0.0, 0.1, 0.25, 0.5, 0.75, 0.9, 1.0],\n kwargs...)\n\nCreate a Plots.jl recipe plot of the simulations.\n\nSee Plots.jl for the list of keyword arguments.\n\nExamples\n\nSDDP.publication_plot(simulations; title = \"My title\") do data\n return data[:stage_objective]\nend\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.ValueFunction","page":"API Reference","title":"SDDP.ValueFunction","text":"ValueFunction\n\nA representation of the value function. SDDP.jl uses the following unique representation of the value function that is undocumented in the literature.\n\nIt supports three types of state variables:\n\nx - convex \"resource\" states\nb - concave \"belief\" states\ny - concave \"objective\" states\n\nIn addition, we have three types of cuts:\n\nSingle-cuts (also called \"average\" cuts in the literature), which involve the risk-adjusted expectation of the cost-to-go.\nMulti-cuts, which use a different cost-to-go term for each realization w.\nRisk-cuts, which correspond to the facets of the dual interpretation of a coherent risk measure.\n\nTherefore, ValueFunction returns a JuMP model of the following form:\n\nV(x, b, y) = min: μᵀb + νᵀy + θ\n s.t. # \"Single\" / \"Average\" cuts\n μᵀb(j) + νᵀy(j) + θ >= α(j) + xᵀβ(j), ∀ j ∈ J\n # \"Multi\" cuts\n μᵀb(k) + νᵀy(k) + φ(w) >= α(k, w) + xᵀβ(k, w), ∀w ∈ Ω, k ∈ K\n # \"Risk-set\" cuts\n θ ≥ Σ{p(k, w) * φ(w)}_w - μᵀb(k) - νᵀy(k), ∀ k ∈ K\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.evaluate-Tuple{SDDP.ValueFunction, Dict{Symbol, Float64}}","page":"API Reference","title":"SDDP.evaluate","text":"evaluate(\n V::ValueFunction,\n point::Dict{Union{Symbol,String},<:Real}\n objective_state = nothing,\n belief_state = nothing\n)\n\nEvaluate the value function V at point in the state-space.\n\nReturns a tuple containing the height of the function, and the subgradient w.r.t. the convex state-variables.\n\nExamples\n\nevaluate(V, Dict(:volume => 1.0))\n\nIf the state variable is constructed like @variable(sp, volume[1:4] >= 0, SDDP.State, initial_value = 0.0), use [i] to index the state variable:\n\nevaluate(V, Dict(Symbol(\"volume[1]\") => 1.0))\n\nYou can also use strings or symbols for the keys.\n\nevaluate(V, Dict(\"volume[1]\" => 1))\n\n\n\n\n\n","category":"method"},{"location":"apireference/#SDDP.plot","page":"API Reference","title":"SDDP.plot","text":"plot(plt::SpaghettiPlot[, filename::String]; open::Bool = true)\n\nThe SpaghettiPlot plot plt to filename. If filename is not given, it will be saved to a temporary directory. If open = true, then a browser window will be opened to display the resulting HTML file.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#Debugging-the-model","page":"API Reference","title":"Debugging the model","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"SDDP.write_subproblem_to_file\nSDDP.deterministic_equivalent","category":"page"},{"location":"apireference/#SDDP.write_subproblem_to_file","page":"API Reference","title":"SDDP.write_subproblem_to_file","text":"write_subproblem_to_file(\n node::Node,\n filename::String;\n throw_error::Bool = false,\n)\n\nWrite the subproblem contained in node to the file filename.\n\nThe throw_error is an argument used internally by SDDP.jl. 
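The evaluate methods above assume that a ValueFunction object V already exists. A hedged sketch of obtaining and querying one is below; the constructor call SDDP.ValueFunction(model; node = 1) and the state name volume are assumptions for illustration, not calls confirmed by this reference.

```julia
# Build the value function approximation stored at node 1 (assumed API).
V = SDDP.ValueFunction(model; node = 1)

# Height and subgradient of the value function at volume = 10:
height, subgradient = SDDP.evaluate(V, Dict(:volume => 10.0))

# The keyword-argument form documented above:
height_2, _ = SDDP.evaluate(V; volume = 10.0)
```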
If set, an error will be thrown.\n\nExample\n\nSDDP.write_subproblem_to_file(model[1], \"subproblem_1.lp\")\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.deterministic_equivalent","page":"API Reference","title":"SDDP.deterministic_equivalent","text":"deterministic_equivalent(\n pg::PolicyGraph{T},\n optimizer = nothing;\n time_limit::Union{Real,Nothing} = 60.0,\n)\n\nForm a JuMP model that represents the deterministic equivalent of the problem.\n\nExamples\n\ndeterministic_equivalent(model)\n\ndeterministic_equivalent(model, HiGHS.Optimizer)\n\n\n\n\n\n","category":"function"},{"location":"apireference/#StochOptFormat","page":"API Reference","title":"StochOptFormat","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"SDDP.write_to_file\nSDDP.read_from_file\nBase.write(::IO, ::SDDP.PolicyGraph)\nBase.read(::IO, ::Type{SDDP.PolicyGraph})\nSDDP.evaluate(::SDDP.PolicyGraph{T}, ::SDDP.ValidationScenarios{T}) where {T}\nSDDP.ValidationScenarios\nSDDP.ValidationScenario","category":"page"},{"location":"apireference/#SDDP.write_to_file","page":"API Reference","title":"SDDP.write_to_file","text":"write_to_file(\n model::PolicyGraph,\n filename::String;\n compression::MOI.FileFormats.AbstractCompressionScheme =\n MOI.FileFormats.AutomaticCompression(),\n kwargs...\n)\n\nWrite model to filename in the StochOptFormat file format.\n\nPass an argument to compression to override the default of automatically detecting the file compression to use based on the extension of filename.\n\nSee Base.write(::IO, ::PolicyGraph) for information on the keyword arguments that can be provided.\n\nwarning: Warning\nThis function is experimental. See the full warning in Base.write(::IO, ::PolicyGraph).\n\nExamples\n\nwrite_to_file(model, \"my_model.sof.json\"; validation_scenarios = 10)\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.read_from_file","page":"API Reference","title":"SDDP.read_from_file","text":"read_from_file(\n filename::String;\n compression::MOI.FileFormats.AbstractCompressionScheme =\n MOI.FileFormats.AutomaticCompression(),\n kwargs...\n)::Tuple{PolicyGraph, ValidationScenarios}\n\nReturn a tuple containing a PolicyGraph object and a ValidationScenarios read from filename in the StochOptFormat file format.\n\nPass an argument to compression to override the default of automatically detecting the file compression to use based on the extension of filename.\n\nSee Base.read(::IO, ::Type{PolicyGraph}) for information on the keyword arguments that can be provided.\n\nwarning: Warning\nThis function is experimental. See the full warning in Base.read(::IO, ::Type{PolicyGraph}).\n\nExamples\n\nmodel, validation_scenarios = read_from_file(\"my_model.sof.json\")\n\n\n\n\n\n","category":"function"},{"location":"apireference/#Base.write-Tuple{IO, SDDP.PolicyGraph}","page":"API Reference","title":"Base.write","text":"Base.write(\n io::IO,\n model::PolicyGraph;\n validation_scenarios::Union{Nothing,Int,ValidationScenarios} = nothing,\n sampling_scheme::AbstractSamplingScheme = InSampleMonteCarlo(),\n kwargs...\n)\n\nWrite model to io in the StochOptFormat file format.\n\nPass an Int to validation_scenarios (default nothing) to specify the number of test scenarios to generate using the sampling_scheme sampling scheme. 
Alternatively, pass a ValidationScenarios object to manually specify the test scenarios to use.\n\nAny additional kwargs passed to write will be stored in the top-level of the resulting StochOptFormat file. Valid arguments include name, author, date, and description.\n\nCompatibility\n\nwarning: Warning\nTHIS FUNCTION IS EXPERIMENTAL. THINGS MAY CHANGE BETWEEN COMMITS. YOU SHOULD NOT RELY ON THIS FUNCTIONALITY AS A LONG-TERM FILE FORMAT (YET).\n\nIn addition to potential changes to the underlying format, only a subset of possible modifications are supported. These include:\n\nJuMP.fix\nJuMP.set_lower_bound\nJuMP.set_upper_bound\nJuMP.set_normalized_rhs\nChanges to the constant or affine terms in a stage objective.\n\nIf your model uses something other than this, this function will silently write an incorrect formulation of the problem.\n\nExamples\n\nopen(\"my_model.sof.json\", \"w\") do io\n write(\n io,\n model;\n validation_scenarios = 10,\n name = \"MyModel\",\n author = \"@odow\",\n date = \"2020-07-20\",\n description = \"Example problem for the SDDP.jl documentation\",\n )\nend\n\n\n\n\n\n","category":"method"},{"location":"apireference/#Base.read-Tuple{IO, Type{SDDP.PolicyGraph}}","page":"API Reference","title":"Base.read","text":"Base.read(\n io::IO,\n ::Type{PolicyGraph};\n bound::Float64 = 1e6,\n)::Tuple{PolicyGraph,ValidationScenarios}\n\nReturn a tuple containing a PolicyGraph object and a ValidationScenarios read from io in the StochOptFormat file format.\n\nSee also: evaluate.\n\nCompatibility\n\nwarning: Warning\nThis function is experimental. Things may change between commits. You should not rely on this functionality as a long-term file format (yet).\n\nIn addition to potential changes to the underlying format, only a subset of possible modifications are supported. These include:\n\nAdditive random variables in the constraints or in the objective\nMultiplicative random variables in the objective\n\nIf your model uses something other than this, this function may throw an error or silently build a non-convex model.\n\nExamples\n\nopen(\"my_model.sof.json\", \"r\") do io\n model, validation_scenarios = read(io, PolicyGraph)\nend\n\n\n\n\n\n","category":"method"},{"location":"apireference/#SDDP.evaluate-Union{Tuple{T}, Tuple{SDDP.PolicyGraph{T}, SDDP.ValidationScenarios{T}}} where T","page":"API Reference","title":"SDDP.evaluate","text":"evaluate(\n model::PolicyGraph{T},\n validation_scenarios::ValidationScenarios{T,S},\n) where {T,S}\n\nEvaluate the performance of the policy contained in model after a call to train on the scenarios specified by validation_scenarios.\n\nExamples\n\nmodel, validation_scenarios = read_from_file(\"my_model.sof.json\")\ntrain(model; iteration_limit = 100)\nsimulations = evaluate(model, validation_scenarios)\n\n\n\n\n\n","category":"method"},{"location":"apireference/#SDDP.ValidationScenarios","page":"API Reference","title":"SDDP.ValidationScenarios","text":"ValidationScenario{T,S}(scenarios::Vector{ValidationScenario{T,S}})\n\nAn AbstractSamplingScheme based on a vector of scenarios.\n\nEach scenario is a vector of Tuple{T, S} where the first element is the node to visit and the second element is the realization of the stagewise-independent noise term. 
Pass nothing if the node is deterministic.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.ValidationScenario","page":"API Reference","title":"SDDP.ValidationScenario","text":"ValidationScenario{T,S}(scenario::Vector{Tuple{T,S}})\n\nA single scenario for testing.\n\nSee also: ValidationScenarios.\n\n\n\n\n\n","category":"type"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"EditURL = \"markov_uncertainty.jl\"","category":"page"},{"location":"tutorial/markov_uncertainty/#Markovian-policy-graphs","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"","category":"section"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"(Image: ) (Image: )","category":"page"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"In our previous tutorials (An introduction to SDDP.jl and Uncertainty in the objective function), we formulated a simple hydrothermal scheduling problem with stagewise-independent random variables in the right-hand side of the constraints and in the objective function. Now, in this tutorial, we introduce some stagewise-dependent uncertainty using a Markov chain.","category":"page"},{"location":"tutorial/markov_uncertainty/#Formulating-the-problem","page":"Markovian policy graphs","title":"Formulating the problem","text":"","category":"section"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"In this tutorial we consider a Markov chain with two climate states: wet and dry. Each Markov state is associated with an integer, in this case the wet climate state is Markov state 1 and the dry climate state is Markov state 2. In the wet climate state, the probability of the high inflow increases to 50%, and the probability of the low inflow decreases to 1/6. In the dry climate state, the converse happens. There is also persistence in the climate state: the probability of remaining in the current state is 75%, and the probability of transitioning to the other climate state is 25%. We assume that the first stage starts in the wet climate state.","category":"page"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"Here is a picture of the model we're going to implement.","category":"page"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"(Image: Markovian policy graph)","category":"page"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"There are five nodes in our graph. Each node is named by a tuple (t, i), where t is the stage for t=1,2,3, and i is the Markov state for i=1,2. As before, the wavy lines denote the stagewise-independent random variable.","category":"page"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"For each stage, we need to provide a Markov transition matrix. This is an MxN matrix, where the element A[i, j] gives the probability of transitioning from Markov state i in the previous stage to Markov state j in the current stage. The first stage is special because we assume there is a \"zero'th\" stage which has one Markov state (the round node in the graph above). 
Furthermore, the number of columns in the transition matrix of a stage (i.e. the number of Markov states) must equal the number of rows in the next stage's transition matrix. For our example, the vector of Markov transition matrices is given by:","category":"page"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"T = Array{Float64,2}[[1.0]', [0.75 0.25], [0.75 0.25; 0.25 0.75]]","category":"page"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"note: Note\nMake sure to add the ' after the first transition matrix so Julia can distinguish between a vector and a matrix.","category":"page"},{"location":"tutorial/markov_uncertainty/#Creating-a-model","page":"Markovian policy graphs","title":"Creating a model","text":"","category":"section"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"using SDDP, HiGHS\n\nΩ = [\n (inflow = 0.0, fuel_multiplier = 1.5),\n (inflow = 50.0, fuel_multiplier = 1.0),\n (inflow = 100.0, fuel_multiplier = 0.75),\n]\n\nmodel = SDDP.MarkovianPolicyGraph(\n transition_matrices = Array{Float64,2}[\n [1.0]',\n [0.75 0.25],\n [0.75 0.25; 0.25 0.75],\n ],\n sense = :Min,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n) do subproblem, node\n # Unpack the stage and Markov index.\n t, markov_state = node\n # Define the state variable.\n @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n # Define the control variables.\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n inflow\n end)\n # Define the constraints\n @constraints(\n subproblem,\n begin\n volume.out == volume.in + inflow - hydro_generation - hydro_spill\n thermal_generation + hydro_generation == 150.0\n end\n )\n # Note how we can use `markov_state` to dispatch an `if` statement.\n probability = if markov_state == 1 # wet climate state\n [1 / 6, 1 / 3, 1 / 2]\n else # dry climate state\n [1 / 2, 1 / 3, 1 / 6]\n end\n\n fuel_cost = [50.0, 100.0, 150.0]\n SDDP.parameterize(subproblem, Ω, probability) do ω\n JuMP.fix(inflow, ω.inflow)\n @stageobjective(\n subproblem,\n ω.fuel_multiplier * fuel_cost[t] * thermal_generation\n )\n end\nend","category":"page"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"tip: Tip\nFor more information on SDDP.MarkovianPolicyGraphs, read Create a general policy graph.","category":"page"},{"location":"tutorial/markov_uncertainty/#Training-and-simulating-the-policy","page":"Markovian policy graphs","title":"Training and simulating the policy","text":"","category":"section"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"As in the previous three tutorials, we train the policy:","category":"page"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"SDDP.train(model)","category":"page"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"Instead of performing a Monte Carlo simulation like the previous tutorials, we may want to simulate one particular sequence of noise realizations. 
This historical simulation can also be conducted by passing a SDDP.Historical sampling scheme to the sampling_scheme keyword of the SDDP.simulate function.","category":"page"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"We can confirm that the historical sequence of nodes was visited by querying the :node_index key of the simulation results.","category":"page"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"simulations = SDDP.simulate(\n model,\n sampling_scheme = SDDP.Historical([\n ((1, 1), Ω[1]),\n ((2, 2), Ω[3]),\n ((3, 1), Ω[2]),\n ]),\n)\n\n[stage[:node_index] for stage in simulations[1]]","category":"page"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"","category":"page"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/FAST_hydro_thermal/","page":"FAST: the hydro-thermal problem","title":"FAST: the hydro-thermal problem","text":"EditURL = \"FAST_hydro_thermal.jl\"","category":"page"},{"location":"examples/FAST_hydro_thermal/#FAST:-the-hydro-thermal-problem","page":"FAST: the hydro-thermal problem","title":"FAST: the hydro-thermal problem","text":"","category":"section"},{"location":"examples/FAST_hydro_thermal/","page":"FAST: the hydro-thermal problem","title":"FAST: the hydro-thermal problem","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/FAST_hydro_thermal/","page":"FAST: the hydro-thermal problem","title":"FAST: the hydro-thermal problem","text":"An implementation of the Hydro-thermal example from FAST","category":"page"},{"location":"examples/FAST_hydro_thermal/","page":"FAST: the hydro-thermal problem","title":"FAST: the hydro-thermal problem","text":"using SDDP, HiGHS, Test\n\nfunction fast_hydro_thermal()\n model = SDDP.PolicyGraph(\n SDDP.LinearGraph(2),\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n ) do sp, t\n @variable(sp, 0 <= x <= 8, SDDP.State, initial_value = 0.0)\n @variables(sp, begin\n y >= 0\n p >= 0\n ξ\n end)\n @constraints(sp, begin\n p + y >= 6\n x.out <= x.in - y + ξ\n end)\n RAINFALL = (t == 1 ? 
[6] : [2, 10])\n SDDP.parameterize(sp, RAINFALL) do ω\n return JuMP.fix(ξ, ω)\n end\n @stageobjective(sp, 5 * p)\n end\n\n det = SDDP.deterministic_equivalent(model, HiGHS.Optimizer)\n set_silent(det)\n JuMP.optimize!(det)\n @test JuMP.objective_value(det) == 10\n SDDP.train(model)\n @test SDDP.calculate_bound(model) == 10\n return\nend\n\nfast_hydro_thermal()","category":"page"},{"location":"examples/FAST_hydro_thermal/","page":"FAST: the hydro-thermal problem","title":"FAST: the hydro-thermal problem","text":"","category":"page"},{"location":"examples/FAST_hydro_thermal/","page":"FAST: the hydro-thermal problem","title":"FAST: the hydro-thermal problem","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/StochDynamicProgramming.jl_multistock/","page":"StochDynamicProgramming: the multistock problem","title":"StochDynamicProgramming: the multistock problem","text":"EditURL = \"StochDynamicProgramming.jl_multistock.jl\"","category":"page"},{"location":"examples/StochDynamicProgramming.jl_multistock/#StochDynamicProgramming:-the-multistock-problem","page":"StochDynamicProgramming: the multistock problem","title":"StochDynamicProgramming: the multistock problem","text":"","category":"section"},{"location":"examples/StochDynamicProgramming.jl_multistock/","page":"StochDynamicProgramming: the multistock problem","title":"StochDynamicProgramming: the multistock problem","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/StochDynamicProgramming.jl_multistock/","page":"StochDynamicProgramming: the multistock problem","title":"StochDynamicProgramming: the multistock problem","text":"This example comes from StochDynamicProgramming.jl.","category":"page"},{"location":"examples/StochDynamicProgramming.jl_multistock/","page":"StochDynamicProgramming: the multistock problem","title":"StochDynamicProgramming: the multistock problem","text":"using SDDP, HiGHS, Test\n\nfunction test_multistock_example()\n model = SDDP.LinearPolicyGraph(\n stages = 5,\n lower_bound = -5.0,\n optimizer = HiGHS.Optimizer,\n ) do subproblem, stage\n @variable(\n subproblem,\n 0 <= stock[i = 1:3] <= 1,\n SDDP.State,\n initial_value = 0.5\n )\n @variables(subproblem, begin\n 0 <= control[i = 1:3] <= 0.5\n ξ[i = 1:3] # Dummy for RHS noise.\n end)\n @constraints(\n subproblem,\n begin\n sum(control) - 0.5 * 3 <= 0\n [i = 1:3], stock[i].out == stock[i].in + control[i] - ξ[i]\n end\n )\n Ξ = collect(\n Base.product((0.0, 0.15, 0.3), (0.0, 0.15, 0.3), (0.0, 0.15, 0.3)),\n )[:]\n SDDP.parameterize(subproblem, Ξ) do ω\n return JuMP.fix.(ξ, ω)\n end\n @stageobjective(subproblem, (sin(3 * stage) - 1) * sum(control))\n end\n SDDP.train(\n model;\n iteration_limit = 100,\n cut_type = SDDP.SINGLE_CUT,\n log_frequency = 10,\n )\n @test SDDP.calculate_bound(model) ≈ -4.349 atol = 0.01\n\n simulation_results = SDDP.simulate(model, 5000)\n @test length(simulation_results) == 5000\n μ = SDDP.Statistics.mean(\n sum(data[:stage_objective] for data in simulation) for\n simulation in simulation_results\n )\n @test μ ≈ -4.349 atol = 0.1\n return\nend\n\ntest_multistock_example()","category":"page"},{"location":"examples/StochDynamicProgramming.jl_multistock/","page":"StochDynamicProgramming: the multistock problem","title":"StochDynamicProgramming: the multistock problem","text":"","category":"page"},{"location":"examples/StochDynamicProgramming.jl_multistock/","page":"StochDynamicProgramming: the multistock problem","title":"StochDynamicProgramming: the multistock problem","text":"This 
page was generated using Literate.jl.","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"EditURL = \"plotting.jl\"","category":"page"},{"location":"tutorial/plotting/#Plotting-tools","page":"Plotting tools","title":"Plotting tools","text":"","category":"section"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"(Image: ) (Image: )","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"In our previous tutorials, we formulated, solved, and simulated multistage stochastic optimization problems. However, we haven't really investigated what the solution looks like. Luckily, SDDP.jl includes a number of plotting tools to help us do that. In this tutorial, we explain the tools and make some pretty pictures.","category":"page"},{"location":"tutorial/plotting/#Preliminaries","page":"Plotting tools","title":"Preliminaries","text":"","category":"section"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"The next two plot types help visualize the policy. Thus, we first need to create a policy and simulate some trajectories. So, let's take the model from Markovian policy graphs, train it for 20 iterations, and then simulate 100 Monte Carlo realizations of the policy.","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"using SDDP, HiGHS\n\nΩ = [\n (inflow = 0.0, fuel_multiplier = 1.5),\n (inflow = 50.0, fuel_multiplier = 1.0),\n (inflow = 100.0, fuel_multiplier = 0.75),\n]\n\nmodel = SDDP.MarkovianPolicyGraph(\n transition_matrices = Array{Float64,2}[\n [1.0]',\n [0.75 0.25],\n [0.75 0.25; 0.25 0.75],\n ],\n sense = :Min,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n) do subproblem, node\n t, markov_state = node\n @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n inflow\n end)\n @constraints(\n subproblem,\n begin\n volume.out == volume.in + inflow - hydro_generation - hydro_spill\n thermal_generation + hydro_generation == 150.0\n end\n )\n probability =\n markov_state == 1 ? [1 / 6, 1 / 3, 1 / 2] : [1 / 2, 1 / 3, 1 / 6]\n fuel_cost = [50.0, 100.0, 150.0]\n SDDP.parameterize(subproblem, Ω, probability) do ω\n JuMP.fix(inflow, ω.inflow)\n @stageobjective(\n subproblem,\n ω.fuel_multiplier * fuel_cost[t] * thermal_generation\n )\n end\nend\n\nSDDP.train(model, iteration_limit = 20, run_numerical_stability_report = false)\n\nsimulations = SDDP.simulate(\n model,\n 100,\n [:volume, :thermal_generation, :hydro_generation, :hydro_spill],\n)\n\nprintln(\"Completed $(length(simulations)) simulations.\")","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"Great! 
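Before plotting, it is worth a quick look at what SDDP.simulate returned: simulations is a vector with one element per replication (100 here), and each element is itself a vector with one dictionary per stage. Each dictionary holds the variables we asked SDDP.simulate to record, plus bookkeeping keys such as :stage_objective and :node_index. The snippet below is a minimal illustrative sketch of how to inspect that structure; the particular values you see will differ between runs.

```julia
# `simulations` is a Vector of replications, and each replication is a Vector
# of per-stage dictionaries. The indices and values here are illustrative.
length(simulations)            # 100 replications
length(simulations[1])         # 3 stages in the first replication
stage_1 = simulations[1][1]
stage_1[:node_index]           # the (stage, markov_state) node that was visited
stage_1[:stage_objective]      # the objective incurred in that stage
stage_1[:volume].out           # recorded state variables have `.in` and `.out`
```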
Now we have some data in simulations to visualize.","category":"page"},{"location":"tutorial/plotting/#Spaghetti-plots","page":"Plotting tools","title":"Spaghetti plots","text":"","category":"section"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"The first plotting utility we discuss is a spaghetti plot (you'll understand the name when you see the graph).","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"To create a spaghetti plot, begin by creating a new SDDP.SpaghettiPlot instance as follows:","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"plt = SDDP.SpaghettiPlot(simulations)","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"We can add plots to plt using the SDDP.add_spaghetti function.","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"SDDP.add_spaghetti(plt; title = \"Reservoir volume\") do data\n return data[:volume].out\nend","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"In addition to returning values from the simulation, you can compute things:","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"SDDP.add_spaghetti(plt; title = \"Fuel cost\", ymin = 0, ymax = 250) do data\n if data[:thermal_generation] > 0\n return data[:stage_objective] / data[:thermal_generation]\n else # No thermal generation, so return 0.0.\n return 0.0\n end\nend","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"Note that there are many keyword arguments in addition to title. For example, we fixed the minimum and maximum values of the y-axis using ymin and ymax. See the SDDP.add_spaghetti documentation for all the arguments.","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"Having built the plot, we now need to display it using SDDP.plot.","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"SDDP.plot(plt, \"spaghetti_plot.html\")","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"This should open a webpage that looks like this one.","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"Using the mouse, you can highlight individual trajectories by hovering over them. 
This makes it possible to visualize a single trajectory across multiple dimensions.","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"If you click on the plot, then trajectories that are close to the mouse pointer are shown darker and those further away are shown lighter.","category":"page"},{"location":"tutorial/plotting/#Publication-plots","page":"Plotting tools","title":"Publication plots","text":"","category":"section"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"Instead of the interactive Javascript plots, you can also create some publication ready plots using the SDDP.publication_plot function.","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"info: Info\nYou need to install the Plots.jl package for this to work. We used the GR backend (gr()), but any Plots.jl backend should work.","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"SDDP.publication_plot implements a plot recipe to create ribbon plots of each variable against the stages. The first argument is the vector of simulation dictionaries and the second argument is the dictionary key that you want to plot. Standard Plots.jl keyword arguments such as title and xlabel can be used to modify the look of each plot. By default, the plot displays ribbons of the 0-100, 10-90, and 25-75 percentiles. The dark, solid line in the middle is the median (i.e. 50'th percentile).","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"import Plots\nPlots.plot(\n SDDP.publication_plot(simulations, title = \"Outgoing volume\") do data\n return data[:volume].out\n end,\n SDDP.publication_plot(simulations, title = \"Thermal generation\") do data\n return data[:thermal_generation]\n end;\n xlabel = \"Stage\",\n ylims = (0, 200),\n layout = (1, 2),\n)","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"You can save this plot as a PDF using the Plots.jl function savefig:","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"Plots.savefig(\"my_picture.pdf\")","category":"page"},{"location":"tutorial/plotting/#Plotting-the-value-function","page":"Plotting tools","title":"Plotting the value function","text":"","category":"section"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"You can obtain an object representing the value function of a node using SDDP.ValueFunction.","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"V = SDDP.ValueFunction(model[(1, 1)])","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"The value function can be evaluated using SDDP.evaluate.","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"SDDP.evaluate(V; volume = 1)","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"evaluate returns the height of the value function, and a subgradient with respect to the convex state variables.","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"You can also plot the value function using 
SDDP.plot","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"SDDP.plot(V, volume = 0:200, filename = \"value_function.html\")","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"This should open a webpage that looks like this one.","category":"page"},{"location":"tutorial/plotting/#Convergence-dashboard","page":"Plotting tools","title":"Convergence dashboard","text":"","category":"section"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"If the text-based logging isn't to your liking, you can open a visualization of the training by passing dashboard = true to SDDP.train.","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"SDDP.train(model; dashboard = true)","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"By default, dashboard = false because there is an initial overhead associated with opening and preparing the plot.","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"warning: Warning\nThe dashboard is experimental. There are known bugs associated with it, e.g., SDDP.jl#226.","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"EditURL = \"the_farmers_problem.jl\"","category":"page"},{"location":"examples/the_farmers_problem/#The-farmer's-problem","page":"The farmer's problem","title":"The farmer's problem","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"This problem is taken from Section 1.1 of the book Birge, J. R., & Louveaux, F. (2011). Introduction to Stochastic Programming. New York, NY: Springer New York. Paragraphs in quotes are taken verbatim.","category":"page"},{"location":"examples/the_farmers_problem/#Problem-description","page":"The farmer's problem","title":"Problem description","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"Consider a European farmer who specializes in raising wheat, corn, and sugar beets on his 500 acres of land. During the winter, [they want] to decide how much land to devote to each crop.The farmer knows that at least 200 tons (T) of wheat and 240 T of corn are needed for cattle feed. These amounts can be raised on the farm or bought from a wholesaler. Any production in excess of the feeding requirement would be sold.Over the last decade, mean selling prices have been $170 and $150 per ton of wheat and corn, respectively. 
The purchase prices are 40% more than this due to the wholesaler’s margin and transportation costs.Another profitable crop is sugar beet, which [they expect] to sell at $36/T; however, the European Commission imposes a quota on sugar beet production. Any amount in excess of the quota can be sold only at $10/T. The farmer’s quota for next year is 6000 T.\"Based on past experience, the farmer knows that the mean yield on [their] land is roughly 2.5 T, 3 T, and 20 T per acre for wheat, corn, and sugar beets, respectively.[To introduce uncertainty,] assume some correlation among the yields of the different crops. A very simplified representation of this would be to assume that years are good, fair, or bad for all crops, resulting in above average, average, or below average yields for all crops. To fix these ideas, above and below average indicate a yield 20% above or below the mean yield.","category":"page"},{"location":"examples/the_farmers_problem/#Problem-data","page":"The farmer's problem","title":"Problem data","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"The area of the farm.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"MAX_AREA = 500.0","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"There are three crops:","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"CROPS = [:wheat, :corn, :sugar_beet]","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"Each of the crops has a different planting cost ($/acre).","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"PLANTING_COST = Dict(:wheat => 150.0, :corn => 230.0, :sugar_beet => 260.0)","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"The farmer requires a minimum quantity of wheat and corn, but not of sugar beet (tonnes).","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"MIN_QUANTITIES = Dict(:wheat => 200.0, :corn => 240.0, :sugar_beet => 0.0)","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"In Europe, there is a quota system for producing crops. 
The farmer owns the following quota for each crop (tonnes):","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"QUOTA_MAX = Dict(:wheat => Inf, :corn => Inf, :sugar_beet => 6_000.0)","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"The farmer can sell crops produced under the quota for the following amounts ($/tonne):","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"SELL_IN_QUOTA = Dict(:wheat => 170.0, :corn => 150.0, :sugar_beet => 36.0)","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"If they sell more than their allotted quota, the farmer earns the following on each tonne of crop above the quota ($/tonne):","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"SELL_NO_QUOTA = Dict(:wheat => 0.0, :corn => 0.0, :sugar_beet => 10.0)","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"The purchase prices for wheat and corn are 40% more than their sales price. However, the description does not address the purchase price of sugar beet. Therefore, we use a large value of $1,000/tonne.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"BUY_PRICE = Dict(:wheat => 238.0, :corn => 210.0, :sugar_beet => 1_000.0)","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"On average, each crop has the following yield in tonnes/acre:","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"MEAN_YIELD = Dict(:wheat => 2.5, :corn => 3.0, :sugar_beet => 20.0)","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"However, the yield is random. In good years, the yield is +20% above average, and in bad years, the yield is -20% below average.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"YIELD_MULTIPLIER = Dict(:good => 1.2, :fair => 1.0, :bad => 0.8)","category":"page"},{"location":"examples/the_farmers_problem/#Mathematical-formulation","page":"The farmer's problem","title":"Mathematical formulation","text":"","category":"section"},{"location":"examples/the_farmers_problem/#SDDP.jl-code","page":"The farmer's problem","title":"SDDP.jl code","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"note: Note\nIn what follows, we make heavy use of the fact that you can look up variables by their symbol name in a JuMP model as follows:@variable(model, x)\nmodel[:x]Read the JuMP documentation if this isn't familiar to you.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"First up, load SDDP.jl and a solver. 
For this example, we use HiGHS.jl.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"using SDDP, HiGHS","category":"page"},{"location":"examples/the_farmers_problem/#State-variables","page":"The farmer's problem","title":"State variables","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"State variables are the information that flows between stages. In our example, the state variables are the areas of land devoted to growing each crop.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"function add_state_variables(subproblem)\n @variable(subproblem, area[c = CROPS] >= 0, SDDP.State, initial_value = 0)\nend","category":"page"},{"location":"examples/the_farmers_problem/#First-stage-problem","page":"The farmer's problem","title":"First stage problem","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"We can only plant a maximum of 500 acres, and we want to minimize the planting cost","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"function create_first_stage_problem(subproblem)\n @constraint(\n subproblem,\n sum(subproblem[:area][c].out for c in CROPS) <= MAX_AREA\n )\n @stageobjective(\n subproblem,\n -sum(PLANTING_COST[c] * subproblem[:area][c].out for c in CROPS)\n )\nend","category":"page"},{"location":"examples/the_farmers_problem/#Second-stage-problem","page":"The farmer's problem","title":"Second stage problem","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"Now let's consider the second stage problem. This is more complicated than the first stage, so we've broken it down into four sections:","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"control variables\nconstraints\nthe objective\nthe uncertainty","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"First, let's add the second stage control variables.","category":"page"},{"location":"examples/the_farmers_problem/#Variables","page":"The farmer's problem","title":"Variables","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"We add four types of control variables. Technically, the yield isn't a control variable. 
However, we add it as a dummy \"helper\" variable because it will be used when we add uncertainty.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"function second_stage_variables(subproblem)\n @variables(subproblem, begin\n 0 <= yield[c = CROPS] # tonnes/acre\n 0 <= buy[c = CROPS] # tonnes\n 0 <= sell_in_quota[c = CROPS] <= QUOTA_MAX[c] # tonnes\n 0 <= sell_no_quota[c = CROPS] # tonnes\n end)\nend","category":"page"},{"location":"examples/the_farmers_problem/#Constraints","page":"The farmer's problem","title":"Constraints","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"We need to define the minimum quantity constraint. This ensures that MIN_QUANTITIES[c] of each crop is produced.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"function second_stage_constraint_min_quantity(subproblem)\n @constraint(\n subproblem,\n [c = CROPS],\n subproblem[:yield][c] + subproblem[:buy][c] -\n subproblem[:sell_in_quota][c] - subproblem[:sell_no_quota][c] >=\n MIN_QUANTITIES[c]\n )\nend","category":"page"},{"location":"examples/the_farmers_problem/#Objective","page":"The farmer's problem","title":"Objective","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"The objective of the second stage is to maximise revenue from selling crops, less the cost of buying corn and wheat if necessary to meet the minimum quantity constraint.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"function second_stage_objective(subproblem)\n @stageobjective(\n subproblem,\n sum(\n SELL_IN_QUOTA[c] * subproblem[:sell_in_quota][c] +\n SELL_NO_QUOTA[c] * subproblem[:sell_no_quota][c] -\n BUY_PRICE[c] * subproblem[:buy][c] for c in CROPS\n )\n )\nend","category":"page"},{"location":"examples/the_farmers_problem/#Random-variables","page":"The farmer's problem","title":"Random variables","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"We add a constraint that links the quantity of each crop produced to the area planted, using a placeholder coefficient of 1.0. Then, in the SDDP.parameterize function, we set this coefficient to the realized yield using JuMP.set_normalized_coefficient.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"function second_stage_uncertainty(subproblem)\n @constraint(\n subproblem,\n uncertainty[c = CROPS],\n 1.0 * subproblem[:area][c].in == subproblem[:yield][c]\n )\n SDDP.parameterize(subproblem, [:good, :fair, :bad]) do ω\n for c in CROPS\n JuMP.set_normalized_coefficient(\n uncertainty[c],\n subproblem[:area][c].in,\n MEAN_YIELD[c] * YIELD_MULTIPLIER[ω],\n )\n end\n end\nend","category":"page"},{"location":"examples/the_farmers_problem/#Putting-it-all-together","page":"The farmer's problem","title":"Putting it all together","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"Now we're ready to build the multistage stochastic programming model. 
In addition to the things already discussed, we need a few extra pieces of information.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"First, we are maximizing, so we set sense = :Max. Second, we need to provide a valid upper bound. (See Choosing an initial bound for more on this.) We know from Birge and Louveaux that the optimal solution is $108,390. So, let's choose $500,000 just to be safe.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"Here is the full model.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"model = SDDP.LinearPolicyGraph(\n stages = 2,\n sense = :Max,\n upper_bound = 500_000.0,\n optimizer = HiGHS.Optimizer,\n) do subproblem, stage\n add_state_variables(subproblem)\n if stage == 1\n create_first_stage_problem(subproblem)\n else\n second_stage_variables(subproblem)\n second_stage_constraint_min_quantity(subproblem)\n second_stage_uncertainty(subproblem)\n second_stage_objective(subproblem)\n end\nend","category":"page"},{"location":"examples/the_farmers_problem/#Training-a-policy","page":"The farmer's problem","title":"Training a policy","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"Now that we've built a model, we need to train it using SDDP.train. The keyword iteration_limit stops the training after 20 iterations. See Choose a stopping rule for other ways to stop the training.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"SDDP.train(model; iteration_limit = 20)","category":"page"},{"location":"examples/the_farmers_problem/#Checking-the-policy","page":"The farmer's problem","title":"Checking the policy","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"Birge and Louveaux report that the optimal objective value is $108,390. Check that we got the correct solution using SDDP.calculate_bound:","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"@assert isapprox(SDDP.calculate_bound(model), 108_390.0, atol = 0.1)","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"This page was generated using Literate.jl.","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"EditURL = \"warnings.jl\"","category":"page"},{"location":"tutorial/warnings/#Words-of-warning","page":"Words of warning","title":"Words of warning","text":"","category":"section"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"(Image: ) (Image: )","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"SDDP is a powerful solution technique for multistage stochastic programming. 
However, there are a number of subtle things to be aware of before creating your own models.","category":"page"},{"location":"tutorial/warnings/#Relatively-complete-recourse","page":"Words of warning","title":"Relatively complete recourse","text":"","category":"section"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"Models built in SDDP.jl need a property called relatively complete recourse.","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"One definition of relatively complete recourse is that all feasible decisions (not necessarily optimal) in a subproblem lead to feasible decisions in future subproblems.","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"For example, in the following problem, one feasible first stage decision is x.out = 0. But this causes an infeasibility in the second stage which requires x.in >= 1. This will throw an error about infeasibility if you try to solve.","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"using SDDP, HiGHS\n\nmodel = SDDP.LinearPolicyGraph(\n stages = 2,\n lower_bound = 0,\n optimizer = HiGHS.Optimizer,\n) do sp, t\n @variable(sp, x >= 0, SDDP.State, initial_value = 1)\n if t == 2\n @constraint(sp, x.in >= 1)\n end\n @stageobjective(sp, x.out)\nend\n\ntry #hide\n SDDP.train(model, iteration_limit = 1, print_level = 0)\ncatch err #hide\n showerror(stderr, err) #hide\nend #hide","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"warning: Warning\nThe actual constraints causing the infeasibilities can be deceptive! A good strategy to debug is to comment out all constraints. Then, one-by-one, un-comment the constraints and try resolving the model to check if it finds a feasible solution.","category":"page"},{"location":"tutorial/warnings/#Numerical-stability","page":"Words of warning","title":"Numerical stability","text":"","category":"section"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"If you aren't aware, SDDP builds an outer-approximation to a convex function using cutting planes. This results in a formulation that is particularly hard for solvers like HiGHS, Gurobi, and CPLEX to deal with. As a result, you may run into weird behavior. This behavior could include:","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"Iterations suddenly taking a long time (the solver stalled)\nSubproblems turning infeasible or unbounded after many iterations\nSolvers returning \"Numerical Error\" statuses","category":"page"},{"location":"tutorial/warnings/#Problem-scaling","page":"Words of warning","title":"Problem scaling","text":"","category":"section"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"In almost all cases, the cause of this is poor problem scaling. 
For our purpose, poor problem scaling means having variables with very large numbers and variables with very small numbers in the same model.","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"tip: Tip\nGurobi has an excellent set of articles on numerical issues and how to avoid them.","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"Consider, for example, the hydro-thermal scheduling problem we have been discussing in previous tutorials.","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"If we define the volume of the reservoir in terms of m³, then a lake might have a capacity of 10^10 m³: @variable(subproblem, 0 <= volume <= 10^10). Moreover, the cost per cubic meter might be around $0.05/m³. To calculate the value of water in our reservoir, we need to multiply a variable on the order of 10^10 by one on the order of 10⁻²! That is twelve orders of magnitude!","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"To improve the performance of the SDDP algorithm (and reduce the chance of weird behavior), try to re-scale the units of the problem in order to reduce the largest difference in magnitude. For example, if we talk in terms of million m³, then we have a capacity of 10⁴ million m³, and a price of $50,000 per million m³. Now things are only one order of magnitude apart.","category":"page"},{"location":"tutorial/warnings/#Numerical-stability-report","page":"Words of warning","title":"Numerical stability report","text":"","category":"section"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"To aid in the diagnosis of numerical issues, you can call SDDP.numerical_stability_report. By default, this aggregates all of the nodes into a single report. You can produce a stability report for each node by passing by_node=true.","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"using SDDP\n\nmodel = SDDP.LinearPolicyGraph(stages = 2, lower_bound = -1e10) do subproblem, t\n @variable(subproblem, x >= -1e7, SDDP.State, initial_value = 1e-5)\n @constraint(subproblem, 1e9 * x.out >= 1e-6 * x.in + 1e-8)\n @stageobjective(subproblem, 1e9 * x.out)\nend\n\nSDDP.numerical_stability_report(model)","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"The report analyses the magnitude (in absolute terms) of the coefficients in the constraint matrix, the objective function, any variable bounds, and in the RHS of the constraints. A warning will be thrown if SDDP.jl detects very large or small values. 
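To make the re-scaling advice from Problem scaling concrete, here is a minimal sketch of a reservoir subproblem written directly in the re-scaled units of million m³ for volume and dollars per million m³ for the value of water. The model itself is illustrative (it is not the hydro-thermal model from the earlier tutorials); the point is that its largest and smallest coefficients are only a few orders of magnitude apart.

```julia
using SDDP, HiGHS

# Illustrative only: the reservoir capacity is 10^4 million m³ (rather than
# 10^10 m³), and released water is valued at $50,000 per million m³ (rather
# than $0.05 per m³), so no coefficient is extremely large or small.
model = SDDP.LinearPolicyGraph(
    stages = 3,
    sense = :Min,
    lower_bound = 0.0,
    optimizer = HiGHS.Optimizer,
) do subproblem, t
    @variable(subproblem, 0 <= volume <= 10^4, SDDP.State, initial_value = 10^4)
    @variable(subproblem, release >= 0)  # million m³ released this stage
    @constraint(subproblem, volume.out == volume.in - release)
    @stageobjective(subproblem, 50_000 * release)
end

SDDP.numerical_stability_report(model)
```

If the report still warns about very large or small coefficients after a change of units like this, try different scaling factors and re-run it.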
As discussed in Problem scaling, this is an indication that you should reformulate your model.","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"By default, a numerical stability check is run when you call SDDP.train, although it can be turned off by passing run_numerical_stability_report = false.","category":"page"},{"location":"tutorial/warnings/#Solver-specific-options","page":"Words of warning","title":"Solver-specific options","text":"","category":"section"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"If you have a particularly troublesome model, you should investigate setting solver-specific options to improve the numerical stability of each solver. For example, Gurobi has a NumericFocus option.","category":"page"},{"location":"tutorial/warnings/#Choosing-an-initial-bound","page":"Words of warning","title":"Choosing an initial bound","text":"","category":"section"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"One of the important requirements when building a SDDP model is to choose an appropriate bound on the objective (lower if minimizing, upper if maximizing). However, it can be hard to choose a bound if you don't know the solution! (Which is very likely.)","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"The bound should not be as large as possible (since this will help with convergence and the numerical issues discussed above), but if chosen too small, it may cut off the feasible region and lead to a sub-optimal solution.","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"Consider the following simple model, where we first set lower_bound to 0.0.","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"using SDDP, HiGHS\n\nmodel = SDDP.LinearPolicyGraph(\n stages = 3,\n sense = :Min,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n) do subproblem, t\n @variable(subproblem, x >= 0, SDDP.State, initial_value = 2)\n @variable(subproblem, u >= 0)\n @variable(subproblem, v >= 0)\n @constraint(subproblem, x.out == x.in - u)\n @constraint(subproblem, u + v == 1.5)\n @stageobjective(subproblem, t * v)\nend\n\nSDDP.train(model, iteration_limit = 5, run_numerical_stability_report = false)","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"Now consider the case when we set the lower_bound to 10.0:","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"using SDDP, HiGHS\n\nmodel = SDDP.LinearPolicyGraph(\n stages = 3,\n sense = :Min,\n lower_bound = 10.0,\n optimizer = HiGHS.Optimizer,\n) do subproblem, t\n @variable(subproblem, x >= 0, SDDP.State, initial_value = 2)\n @variable(subproblem, u >= 0)\n @variable(subproblem, v >= 0)\n @constraint(subproblem, x.out == x.in - u)\n @constraint(subproblem, u + v == 1.5)\n @stageobjective(subproblem, t * v)\nend\n\nSDDP.train(model, iteration_limit = 5, run_numerical_stability_report = false)","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"How do we tell which is more appropriate? 
There are a few clues that you should look out for.","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"The bound converges to a value above (if minimizing) the simulated cost of the policy. In this case, the problem is deterministic, so it is easy to tell. But you can also check by performing a Monte Carlo simulation like we did in An introduction to SDDP.jl.\nThe bound converges to different values when we change the bound. This is another clear give-away. The bound provided by the user is only used in the initial iterations. It should not change the value of the converged policy. Thus, if you don't know an appropriate value for the bound, choose an initial value, and then increase (or decrease) the value of the bound to confirm that the value of the policy doesn't change.\nThe bound converges to a value close to the bound provided by the user. This varies between models, but notice that 11.0 is quite close to 10.0 compared with 3.5 and 0.0.","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"This page was generated using Literate.jl.","category":"page"},{"location":"guides/add_a_multidimensional_state_variable/#Add-a-multi-dimensional-state-variable","page":"Add a multi-dimensional state variable","title":"Add a multi-dimensional state variable","text":"","category":"section"},{"location":"guides/add_a_multidimensional_state_variable/","page":"Add a multi-dimensional state variable","title":"Add a multi-dimensional state variable","text":"DocTestSetup = quote\n using SDDP, HiGHS\nend","category":"page"},{"location":"guides/add_a_multidimensional_state_variable/","page":"Add a multi-dimensional state variable","title":"Add a multi-dimensional state variable","text":"Just like normal JuMP variables, it is possible to create containers of state variables.","category":"page"},{"location":"guides/add_a_multidimensional_state_variable/","page":"Add a multi-dimensional state variable","title":"Add a multi-dimensional state variable","text":"julia> model = SDDP.LinearPolicyGraph(\n stages=1, lower_bound = 0, optimizer = HiGHS.Optimizer\n ) do subproblem, t\n # A scalar state variable.\n @variable(subproblem, x >= 0, SDDP.State, initial_value = 0)\n println(\"Lower bound of outgoing x is: \", JuMP.lower_bound(x.out))\n # A vector of state variables.\n @variable(subproblem, y[i = 1:2] >= i, SDDP.State, initial_value = i)\n println(\"Lower bound of outgoing y[1] is: \", JuMP.lower_bound(y[1].out))\n # A JuMP.Containers.DenseAxisArray of state variables.\n @variable(subproblem,\n z[i = 3:4, j = [:A, :B]] >= i, SDDP.State, initial_value = i)\n println(\"Lower bound of outgoing z[3, :B] is: \", JuMP.lower_bound(z[3, :B].out))\n end;\nLower bound of outgoing x is: 0.0\nLower bound of outgoing y[1] is: 1.0\nLower bound of outgoing z[3, :B] is: 3.0","category":"page"},{"location":"tutorial/objective_uncertainty/","page":"Uncertainty in the objective function","title":"Uncertainty in the objective function","text":"EditURL = \"objective_uncertainty.jl\"","category":"page"},{"location":"tutorial/objective_uncertainty/#Uncertainty-in-the-objective-function","page":"Uncertainty in the objective function","title":"Uncertainty in the objective function","text":"","category":"section"},{"location":"tutorial/objective_uncertainty/","page":"Uncertainty in the objective 
function","title":"Uncertainty in the objective function","text":"(Image: ) (Image: )","category":"page"},{"location":"tutorial/objective_uncertainty/","page":"Uncertainty in the objective function","title":"Uncertainty in the objective function","text":"In the previous tutorial, An introduction to SDDP.jl, we created a stochastic hydro-thermal scheduling model. In this tutorial, we extend the problem by adding uncertainty to the fuel costs.","category":"page"},{"location":"tutorial/objective_uncertainty/","page":"Uncertainty in the objective function","title":"Uncertainty in the objective function","text":"Previously, we assumed that the fuel cost was deterministic: $50/MWh in the first stage, $100/MWh in the second stage, and $150/MWh in the third stage. For this tutorial, we assume that in addition to these base costs, the actual fuel cost is correlated with the inflows.","category":"page"},{"location":"tutorial/objective_uncertainty/","page":"Uncertainty in the objective function","title":"Uncertainty in the objective function","text":"Our new model for the uncertainty is given by the following table:","category":"page"},{"location":"tutorial/objective_uncertainty/","page":"Uncertainty in the objective function","title":"Uncertainty in the objective function","text":"ω 1 2 3\nP(ω) 1/3 1/3 1/3\ninflow 0 50 100\nfuel multiplier 1.5 1.0 0.75","category":"page"},{"location":"tutorial/objective_uncertainty/","page":"Uncertainty in the objective function","title":"Uncertainty in the objective function","text":"In stage t, the objective is now to minimize:","category":"page"},{"location":"tutorial/objective_uncertainty/","page":"Uncertainty in the objective function","title":"Uncertainty in the objective function","text":"fuel_multiplier * fuel_cost[t] * thermal_generation","category":"page"},{"location":"tutorial/objective_uncertainty/#Creating-a-model","page":"Uncertainty in the objective function","title":"Creating a model","text":"","category":"section"},{"location":"tutorial/objective_uncertainty/","page":"Uncertainty in the objective function","title":"Uncertainty in the objective function","text":"To add an uncertain objective, we can simply call @stageobjective from inside the SDDP.parameterize function.","category":"page"},{"location":"tutorial/objective_uncertainty/","page":"Uncertainty in the objective function","title":"Uncertainty in the objective function","text":"using SDDP, HiGHS\n\nmodel = SDDP.LinearPolicyGraph(\n stages = 3,\n sense = :Min,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n) do subproblem, t\n # Define the state variable.\n @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n # Define the control variables.\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n inflow\n end)\n # Define the constraints\n @constraints(\n subproblem,\n begin\n volume.out == volume.in + inflow - hydro_generation - hydro_spill\n thermal_generation + hydro_generation == 150.0\n end\n )\n fuel_cost = [50.0, 100.0, 150.0]\n # Parameterize the subproblem.\n Ω = [\n (inflow = 0.0, fuel_multiplier = 1.5),\n (inflow = 50.0, fuel_multiplier = 1.0),\n (inflow = 100.0, fuel_multiplier = 0.75),\n ]\n SDDP.parameterize(subproblem, Ω, [1 / 3, 1 / 3, 1 / 3]) do ω\n JuMP.fix(inflow, ω.inflow)\n @stageobjective(\n subproblem,\n ω.fuel_multiplier * fuel_cost[t] * thermal_generation\n )\n end\nend","category":"page"},{"location":"tutorial/objective_uncertainty/#Training-and-simulating-the-policy","page":"Uncertainty in the 
objective function","title":"Training and simulating the policy","text":"","category":"section"},{"location":"tutorial/objective_uncertainty/","page":"Uncertainty in the objective function","title":"Uncertainty in the objective function","text":"As in the previous two tutorials, we train and simulate the policy:","category":"page"},{"location":"tutorial/objective_uncertainty/","page":"Uncertainty in the objective function","title":"Uncertainty in the objective function","text":"SDDP.train(model)\n\nsimulations = SDDP.simulate(model, 500)\n\nobjective_values =\n [sum(stage[:stage_objective] for stage in sim) for sim in simulations]\n\nusing Statistics\n\nμ = round(mean(objective_values), digits = 2)\nci = round(1.96 * std(objective_values) / sqrt(500), digits = 2)\n\nprintln(\"Confidence interval: \", μ, \" ± \", ci)\nprintln(\"Lower bound: \", round(SDDP.calculate_bound(model), digits = 2))","category":"page"},{"location":"tutorial/objective_uncertainty/","page":"Uncertainty in the objective function","title":"Uncertainty in the objective function","text":"","category":"page"},{"location":"tutorial/objective_uncertainty/","page":"Uncertainty in the objective function","title":"Uncertainty in the objective function","text":"This page was generated using Literate.jl.","category":"page"},{"location":"guides/add_a_risk_measure/#Add-a-risk-measure","page":"Add a risk measure","title":"Add a risk measure","text":"","category":"section"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"DocTestSetup = quote\n using SDDP, HiGHS\nend","category":"page"},{"location":"guides/add_a_risk_measure/#Training-a-risk-averse-model","page":"Add a risk measure","title":"Training a risk-averse model","text":"","category":"section"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.jl supports a variety of risk measures. Two common ones are SDDP.Expectation and SDDP.WorstCase. Let's see how to train a policy using them. There are three possible ways.","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"If the same risk measure is used at every node in the policy graph, we can just pass an instance of one of the risk measures to the risk_measure keyword argument of the SDDP.train function.","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.train(\n model,\n risk_measure = SDDP.WorstCase(),\n iteration_limit = 10\n)","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"However, if you want different risk measures at different nodes, there are two options. First, you can pass risk_measure a dictionary of risk measures, with one entry for each node. 
The keys of the dictionary are the indices of the nodes.","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.train(\n model,\n risk_measure = Dict(\n 1 => SDDP.Expectation(),\n 2 => SDDP.WorstCase()\n ),\n iteration_limit = 10\n)","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"An alternative method is to pass risk_measure a function that takes one argument, the index of a node, and returns an instance of a risk measure:","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.train(\n model,\n risk_measure = (node_index) -> begin\n if node_index == 1\n return SDDP.Expectation()\n else\n return SDDP.WorstCase()\n end\n end,\n iteration_limit = 10\n)","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"note: Note\nIf you simulate the policy, the simulated value is the risk-neutral value of the policy.","category":"page"},{"location":"guides/add_a_risk_measure/#Risk-measures","page":"Add a risk measure","title":"Risk measures","text":"","category":"section"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"To illustrate the risk-measures included in SDDP.jl, we consider a discrete random variable with four outcomes.","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"The random variable is supported on the values 1, 2, 3, and 4:","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"noise_supports = [1, 2, 3, 4]","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"The associated probability of each outcome is as follows:","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"nominal_probability = [0.1, 0.2, 0.3, 0.4]","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"With each outcome ω, the agent observes a cost Z(ω):","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"cost_realizations = [5.0, 4.0, 6.0, 2.0]","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"We assume that we are minimizing:","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"is_minimization = true","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"Finally, we create a vector that will be used to store the risk-adjusted probabilities:","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"risk_adjusted_probability = zeros(4)","category":"page"},{"location":"guides/add_a_risk_measure/#Expectation","page":"Add a risk measure","title":"Expectation","text":"","category":"section"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk 
measure","text":"SDDP.Expectation","category":"page"},{"location":"guides/add_a_risk_measure/#SDDP.Expectation","page":"Add a risk measure","title":"SDDP.Expectation","text":"Expectation()\n\nThe Expectation risk measure. Identical to taking the expectation with respect to the nominal distribution.\n\n\n\n\n\n","category":"type"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"using SDDP\nSDDP.adjust_probability(\n SDDP.Expectation(),\n risk_adjusted_probability,\n nominal_probability,\n noise_supports,\n cost_realizations,\n is_minimization\n)\nrisk_adjusted_probability","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.Expectation is the default risk measure in SDDP.jl.","category":"page"},{"location":"guides/add_a_risk_measure/#Worst-case","page":"Add a risk measure","title":"Worst-case","text":"","category":"section"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.WorstCase","category":"page"},{"location":"guides/add_a_risk_measure/#SDDP.WorstCase","page":"Add a risk measure","title":"SDDP.WorstCase","text":"WorstCase()\n\nThe worst-case risk measure. Places all of the probability weight on the worst outcome.\n\n\n\n\n\n","category":"type"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.adjust_probability(\n SDDP.WorstCase(),\n risk_adjusted_probability,\n nominal_probability,\n noise_supports,\n cost_realizations,\n is_minimization\n)\nrisk_adjusted_probability","category":"page"},{"location":"guides/add_a_risk_measure/#Average-value-at-risk-(AV@R)","page":"Add a risk measure","title":"Average value at risk (AV@R)","text":"","category":"section"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.AVaR","category":"page"},{"location":"guides/add_a_risk_measure/#SDDP.AVaR","page":"Add a risk measure","title":"SDDP.AVaR","text":"AVaR(β)\n\nThe average value at risk (AV@R) risk measure.\n\nComputes the expectation of the β fraction of worst outcomes. β must be in [0, 1]. When β=1, this is equivalent to the Expectation risk measure. When β=0, this is equivalent to the WorstCase risk measure.\n\nAV@R is also known as the conditional value at risk (CV@R) or expected shortfall.\n\n\n\n\n\n","category":"type"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.adjust_probability(\n SDDP.AVaR(0.5),\n risk_adjusted_probability,\n nominal_probability,\n noise_supports,\n cost_realizations,\n is_minimization\n)\nrisk_adjusted_probability","category":"page"},{"location":"guides/add_a_risk_measure/#Convex-combination-of-risk-measures","page":"Add a risk measure","title":"Convex combination of risk measures","text":"","category":"section"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"Using the axioms of coherent risk measures, it is easy to show that any convex combination of coherent risk measures is also a coherent risk measure. 
Convex combinations of risk measures can be created directly:","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"cvx_comb_measure = 0.5 * SDDP.Expectation() + 0.5 * SDDP.WorstCase()\nSDDP.adjust_probability(\n cvx_comb_measure,\n risk_adjusted_probability,\n nominal_probability,\n noise_supports,\n cost_realizations,\n is_minimization\n)\nrisk_adjusted_probability","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"As a special case, the SDDP.EAVaR risk-measure is a convex combination of SDDP.Expectation and SDDP.AVaR:","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.EAVaR(beta=0.25, lambda=0.4)","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.EAVaR","category":"page"},{"location":"guides/add_a_risk_measure/#SDDP.EAVaR","page":"Add a risk measure","title":"SDDP.EAVaR","text":"EAVaR(;lambda=1.0, beta=1.0)\n\nA risk measure that is a convex combination of Expectation and Average Value @ Risk (also called Conditional Value @ Risk).\n\n λ * E[x] + (1 - λ) * AV@R(β)[x]\n\nKeyword Arguments\n\nlambda: Convex weight on the expectation ((1-lambda) weight is put on the AV@R component). Increasing values of lambda are less risk averse (more weight on expectation).\nbeta: The quantile at which to calculate the Average Value @ Risk. Increasing values of beta are less risk averse. If beta=0, then the AV@R component is the worst case risk measure.\n\n\n\n\n\n","category":"function"},{"location":"guides/add_a_risk_measure/#Distributionally-robust","page":"Add a risk measure","title":"Distributionally robust","text":"","category":"section"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.jl supports two types of distributionally robust risk measures: the modified Χ² method of Philpott et al. (2018), and a method based on the Wasserstein distance metric.","category":"page"},{"location":"guides/add_a_risk_measure/#Modified-Chi-squard","page":"Add a risk measure","title":"Modified Chi-squared","text":"","category":"section"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.ModifiedChiSquared","category":"page"},{"location":"guides/add_a_risk_measure/#SDDP.ModifiedChiSquared","page":"Add a risk measure","title":"SDDP.ModifiedChiSquared","text":"ModifiedChiSquared(radius::Float64; minimum_std=1e-5)\n\nThe distributionally robust SDDP risk measure of Philpott, A., de Matos, V., Kapelevich, L. Distributionally robust SDDP. Computational Management Science (2018) 165:431-454.\n\nExplanation\n\nIn a Distributionally Robust Optimization (DRO) approach, we modify the probabilities we associate with all future scenarios so that the resulting probability distribution is the \"worst case\" probability distribution, in some sense.\n\nIn each backward pass we will compute a worst case probability distribution vector p. We compute p so that:\n\np ∈ argmax p'z\n s.t. [r; p - a] in SecondOrderCone()\n sum(p) == 1\n p >= 0\n\nwhere\n\nz is a vector of future costs. We assume that our aim is to minimize future cost p'z. 
If we maximize reward, we would have p ∈ argmin{p'z}.\na is the uniform distribution\nr is a user specified radius - the larger the radius, the more conservative the policy.\n\nNotes\n\nThe largest radius that will work with S scenarios is sqrt((S-1)/S).\n\nIf the uncorrected standard deviation of the objecive realizations is less than minimum_std, then the risk-measure will default to Expectation().\n\nThis code was contributed by Lea Kapelevich.\n\n\n\n\n\n","category":"type"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.adjust_probability(\n SDDP.ModifiedChiSquared(0.5),\n risk_adjusted_probability,\n [0.25, 0.25, 0.25, 0.25],\n noise_supports,\n cost_realizations,\n is_minimization\n)\nrisk_adjusted_probability","category":"page"},{"location":"guides/add_a_risk_measure/#Wasserstein","page":"Add a risk measure","title":"Wasserstein","text":"","category":"section"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.Wasserstein","category":"page"},{"location":"guides/add_a_risk_measure/#SDDP.Wasserstein","page":"Add a risk measure","title":"SDDP.Wasserstein","text":"Wasserstein(norm::Function, solver_factory; alpha::Float64)\n\nA distributionally-robust risk measure based on the Wasserstein distance.\n\nAs alpha increases, the measure becomes more risk-averse. When alpha=0, the measure is equivalent to the expectation operator. As alpha increases, the measure approaches the Worst-case risk measure.\n\n\n\n\n\n","category":"type"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"import HiGHS\nSDDP.adjust_probability(\n SDDP.Wasserstein(HiGHS.Optimizer; alpha=0.5) do x, y\n return abs(x - y)\n end,\n risk_adjusted_probability,\n nominal_probability,\n noise_supports,\n cost_realizations,\n is_minimization\n)\nrisk_adjusted_probability","category":"page"},{"location":"guides/add_a_risk_measure/#Entropic","page":"Add a risk measure","title":"Entropic","text":"","category":"section"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.Entropic","category":"page"},{"location":"guides/add_a_risk_measure/#SDDP.Entropic","page":"Add a risk measure","title":"SDDP.Entropic","text":"Entropic(γ::Float64)\n\nThe entropic risk measure as described by:\n\nDowson, O., Morton, D.P. & Pagnoncelli, B.K. Incorporating convex risk\nmeasures into multistage stochastic programming algorithms. Annals of\nOperations Research (2022). 
[doi](https://doi.org/10.1007/s10479-022-04977-w).\n\nAs γ increases, the measure becomes more risk-averse.\n\n\n\n\n\n","category":"type"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.adjust_probability(\n SDDP.Entropic(0.1),\n risk_adjusted_probability,\n nominal_probability,\n noise_supports,\n cost_realizations,\n is_minimization\n)\nrisk_adjusted_probability","category":"page"},{"location":"examples/infinite_horizon_trivial/","page":"Infinite horizon trivial","title":"Infinite horizon trivial","text":"EditURL = \"infinite_horizon_trivial.jl\"","category":"page"},{"location":"examples/infinite_horizon_trivial/#Infinite-horizon-trivial","page":"Infinite horizon trivial","title":"Infinite horizon trivial","text":"","category":"section"},{"location":"examples/infinite_horizon_trivial/","page":"Infinite horizon trivial","title":"Infinite horizon trivial","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/infinite_horizon_trivial/","page":"Infinite horizon trivial","title":"Infinite horizon trivial","text":"using SDDP, HiGHS, Test\n\nfunction infinite_trivial()\n graph = SDDP.Graph(\n :root_node,\n [:week],\n [(:root_node => :week, 1.0), (:week => :week, 0.9)],\n )\n model = SDDP.PolicyGraph(\n graph,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n ) do subproblem, node\n @variable(subproblem, state, SDDP.State, initial_value = 0)\n @constraint(subproblem, state.in == state.out)\n @stageobjective(subproblem, 2.0)\n end\n SDDP.train(model; log_frequency = 10)\n @test SDDP.calculate_bound(model) ≈ 2.0 / (1 - 0.9) atol = 1e-3\n return\nend\n\ninfinite_trivial()","category":"page"},{"location":"examples/infinite_horizon_trivial/","page":"Infinite horizon trivial","title":"Infinite horizon trivial","text":"","category":"page"},{"location":"examples/infinite_horizon_trivial/","page":"Infinite horizon trivial","title":"Infinite horizon trivial","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/air_conditioning/","page":"Air conditioning","title":"Air conditioning","text":"EditURL = \"air_conditioning.jl\"","category":"page"},{"location":"examples/air_conditioning/#Air-conditioning","page":"Air conditioning","title":"Air conditioning","text":"","category":"section"},{"location":"examples/air_conditioning/","page":"Air conditioning","title":"Air conditioning","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/air_conditioning/","page":"Air conditioning","title":"Air conditioning","text":"Taken from Anthony Papavasiliou's notes on SDDP","category":"page"},{"location":"examples/air_conditioning/","page":"Air conditioning","title":"Air conditioning","text":"Consider the following problem","category":"page"},{"location":"examples/air_conditioning/","page":"Air conditioning","title":"Air conditioning","text":"Produce air conditioners for 3 months\n200 units/month at 100 $/unit\nOvertime costs 300 $/unit\nKnown demand of 100 units for period 1\nEqually likely demand, 100 or 300 units, for periods 2, 3\nStorage cost is 50 $/unit\nAll demand must be met","category":"page"},{"location":"examples/air_conditioning/","page":"Air conditioning","title":"Air conditioning","text":"The known optimal solution is $62,500","category":"page"},{"location":"examples/air_conditioning/","page":"Air conditioning","title":"Air conditioning","text":"using SDDP, HiGHS, Test\n\nfunction air_conditioning_model(duality_handler)\n model = SDDP.LinearPolicyGraph(\n stages = 3,\n 
lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n ) do sp, stage\n @variable(\n sp,\n 0 <= stored_production <= 100,\n Int,\n SDDP.State,\n initial_value = 0\n )\n @variable(sp, 0 <= production <= 200, Int)\n @variable(sp, overtime >= 0, Int)\n @variable(sp, demand)\n DEMAND = [[100.0], [100.0, 300.0], [100.0, 300.0]]\n SDDP.parameterize(ω -> JuMP.fix(demand, ω), sp, DEMAND[stage])\n @constraint(\n sp,\n stored_production.out ==\n stored_production.in + production + overtime - demand\n )\n @stageobjective(\n sp,\n 100 * production + 300 * overtime + 50 * stored_production.out\n )\n end\n SDDP.train(model; duality_handler = duality_handler)\n @test isapprox(SDDP.calculate_bound(model), 62_500.0, atol = 0.1)\n return\nend\n\nfor duality_handler in [SDDP.LagrangianDuality(), SDDP.ContinuousConicDuality()]\n air_conditioning_model(duality_handler)\nend","category":"page"},{"location":"examples/air_conditioning/","page":"Air conditioning","title":"Air conditioning","text":"","category":"page"},{"location":"examples/air_conditioning/","page":"Air conditioning","title":"Air conditioning","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/sldp_example_two/","page":"SLDP: example 2","title":"SLDP: example 2","text":"EditURL = \"sldp_example_two.jl\"","category":"page"},{"location":"examples/sldp_example_two/#SLDP:-example-2","page":"SLDP: example 2","title":"SLDP: example 2","text":"","category":"section"},{"location":"examples/sldp_example_two/","page":"SLDP: example 2","title":"SLDP: example 2","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/sldp_example_two/","page":"SLDP: example 2","title":"SLDP: example 2","text":"This example is derived from Section 4.3 of the paper: Ahmed, S., Cabral, F. G., & da Costa, B. F. P. (2019). Stochastic Lipschitz Dynamic Programming. Optimization Online. 
PDF","category":"page"},{"location":"examples/sldp_example_two/","page":"SLDP: example 2","title":"SLDP: example 2","text":"using SDDP\nimport HiGHS\nimport Test\n\nfunction sldp_example_two(; first_stage_integer::Bool = true, N = 2)\n model = SDDP.LinearPolicyGraph(\n stages = 2,\n lower_bound = -100.0,\n optimizer = HiGHS.Optimizer,\n ) do sp, t\n @variable(sp, 0 <= x[1:2] <= 5, SDDP.State, initial_value = 0.0)\n if t == 1\n if first_stage_integer\n @variable(sp, 0 <= u[1:2] <= 5, Int)\n @constraint(sp, [i = 1:2], u[i] == x[i].out)\n end\n @stageobjective(sp, -1.5 * x[1].out - 4 * x[2].out)\n else\n @variable(sp, 0 <= y[1:4] <= 1, Bin)\n @variable(sp, ω[1:2])\n @stageobjective(sp, -16 * y[1] - 19 * y[2] - 23 * y[3] - 28 * y[4])\n @constraint(\n sp,\n 2 * y[1] + 3 * y[2] + 4 * y[3] + 5 * y[4] <= ω[1] - x[1].in\n )\n @constraint(\n sp,\n 6 * y[1] + 1 * y[2] + 3 * y[3] + 2 * y[4] <= ω[2] - x[2].in\n )\n steps = range(5, stop = 15, length = N)\n SDDP.parameterize(sp, [[i, j] for i in steps for j in steps]) do φ\n return JuMP.fix.(ω, φ)\n end\n end\n end\n if get(ARGS, 1, \"\") == \"--write\"\n # Run `$ julia sldp_example_two.jl --write` to update the benchmark\n # model directory\n model_dir = joinpath(@__DIR__, \"..\", \"..\", \"..\", \"benchmarks\", \"models\")\n SDDP.write_to_file(\n model,\n joinpath(model_dir, \"sldp_example_two_$(N).sof.json.gz\");\n test_scenarios = 30,\n )\n return\n end\n SDDP.train(model; log_frequency = 10)\n bound = SDDP.calculate_bound(model)\n\n if N == 2\n Test.@test bound <= -57.0\n elseif N == 3\n Test.@test bound <= -59.33\n elseif N == 6\n Test.@test bound <= -61.22\n end\n return\nend\n\nsldp_example_two(N = 2)\nsldp_example_two(N = 3)\nsldp_example_two(N = 6)","category":"page"},{"location":"examples/sldp_example_two/","page":"SLDP: example 2","title":"SLDP: example 2","text":"","category":"page"},{"location":"examples/sldp_example_two/","page":"SLDP: example 2","title":"SLDP: example 2","text":"This page was generated using Literate.jl.","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"EditURL = \"objective_states.jl\"","category":"page"},{"location":"tutorial/objective_states/#Objective-states","page":"Objective states","title":"Objective states","text":"","category":"section"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"(Image: ) (Image: )","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"There are many applications in which we want to model a price process that follows some auto-regressive process. 
Common examples include stock prices on financial exchanges and spot-prices in energy markets.","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"However, it is well known that these cannot be incorporated in to SDDP because they result in cost-to-go functions that are convex with respect to some state variables (e.g., the reservoir levels) and concave with respect to other state variables (e.g., the spot price in the current stage).","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"To overcome this problem, the approach in the literature has been to discretize the price process in order to model it using a Markovian policy graph like those discussed in Markovian policy graphs.","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"However, recent work offers a way to include stagewise-dependent objective uncertainty into the objective function of SDDP subproblems. Readers are directed to the following works for an introduction:","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"Downward, A., Dowson, O., and Baucke, R. (2017). Stochastic dual dynamic programming with stagewise dependent objective uncertainty. Optimization Online. link\nDowson, O. PhD Thesis. University of Auckland, 2018. link","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"The method discussed in the above works introduces the concept of an objective state into SDDP. Unlike normal state variables in SDDP (e.g., the volume of water in the reservoir), the cost-to-go function is concave with respect to the objective states. Thus, the method builds an outer approximation of the cost-to-go function in the normal state-space, and an inner approximation of the cost-to-go function in the objective state-space.","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"warning: Warning\nSupport for objective states in SDDP.jl is experimental. Models are considerably more computational intensive, the interface is less user-friendly, and there are subtle gotchas to be aware of. Only use this if you have read and understood the theory behind the method.","category":"page"},{"location":"tutorial/objective_states/#One-dimensional-objective-states","page":"Objective states","title":"One-dimensional objective states","text":"","category":"section"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"Let's assume that the fuel cost is not fixed, but instead evolves according to a multiplicative auto-regressive process: fuel_cost[t] = ω * fuel_cost[t-1], where ω is drawn from the sample space [0.75, 0.9, 1.1, 1.25] with equal probability.","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"An objective state can be added to a subproblem using the SDDP.add_objective_state function. This can only be called once per subproblem. If you want to add a multi-dimensional objective state, read Multi-dimensional objective states. SDDP.add_objective_state takes a number of keyword arguments. 
The two required ones are","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"initial_value: the value of the objective state at the root node of the policy graph (i.e., identical to the initial_value when defining normal state variables.\nlipschitz: the Lipschitz constant of the cost-to-go function with respect to the objective state. In other words, this value is the maximum change in the cost-to-go function at any point in the state space, given a one-unit change in the objective state.","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"There are also two optional keyword arguments: lower_bound and upper_bound, which give SDDP.jl hints (importantly, not constraints) about the domain of the objective state. Setting these bounds appropriately can improve the speed of convergence.","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"Finally, SDDP.add_objective_state requires an update function. This function takes two arguments. The first is the incoming value of the objective state, and the second is the realization of the stagewise-independent noise term (set using SDDP.parameterize). The function should return the value of the objective state to be used in the current subproblem.","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"This connection with the stagewise-independent noise term means that SDDP.parameterize must be called in a subproblem that defines an objective state. Inside SDDP.parameterize, the value of the objective state to be used in the current subproblem (i.e., after the update function), can be queried using SDDP.objective_state.","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"Here is the full model with the objective state.","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"using SDDP, HiGHS\n\nmodel = SDDP.LinearPolicyGraph(\n stages = 3,\n sense = :Min,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n) do subproblem, t\n @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n inflow\n end)\n @constraints(\n subproblem,\n begin\n volume.out == volume.in + inflow - hydro_generation - hydro_spill\n demand_constraint, thermal_generation + hydro_generation == 150.0\n end\n )\n\n # Add an objective state. 
ω will be the same value that is called in\n # `SDDP.parameterize`.\n\n SDDP.add_objective_state(\n subproblem,\n initial_value = 50.0,\n lipschitz = 10_000.0,\n lower_bound = 50.0,\n upper_bound = 150.0,\n ) do fuel_cost, ω\n return ω.fuel * fuel_cost\n end\n\n # Create the cartesian product of a multi-dimensional random variable.\n\n Ω = [\n (fuel = f, inflow = w) for f in [0.75, 0.9, 1.1, 1.25] for\n w in [0.0, 50.0, 100.0]\n ]\n\n SDDP.parameterize(subproblem, Ω) do ω\n # Query the current fuel cost.\n fuel_cost = SDDP.objective_state(subproblem)\n @stageobjective(subproblem, fuel_cost * thermal_generation)\n return JuMP.fix(inflow, ω.inflow)\n end\nend","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"After creating our model, we can train and simulate as usual.","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"SDDP.train(model; run_numerical_stability_report = false)\n\nsimulations = SDDP.simulate(model, 1)\n\nprint(\"Finished training and simulating.\")","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"To demonstrate how the objective states are updated, consider the sequence of noise observations:","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"[stage[:noise_term] for stage in simulations[1]]","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"This, the fuel cost in the first stage should be 0.75 * 50 = 37.5. The fuel cost in the second stage should be 1.1 * 37.5 = 41.25. The fuel cost in the third stage should be 0.75 * 41.25 = 30.9375.","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"To confirm this, the values of the objective state in a simulation can be queried using the :objective_state key.","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"[stage[:objective_state] for stage in simulations[1]]","category":"page"},{"location":"tutorial/objective_states/#Multi-dimensional-objective-states","page":"Objective states","title":"Multi-dimensional objective states","text":"","category":"section"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"You can construct multi-dimensional price processes using NTuples. Just replace every scalar value associated with the objective state by a tuple. 
For example, initial_value = 1.0 becomes initial_value = (1.0, 2.0).","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"Here is an example:","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"model = SDDP.LinearPolicyGraph(\n stages = 3,\n sense = :Min,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n) do subproblem, t\n @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n inflow\n end)\n @constraints(\n subproblem,\n begin\n volume.out == volume.in + inflow - hydro_generation - hydro_spill\n demand_constraint, thermal_generation + hydro_generation == 150.0\n end\n )\n\n SDDP.add_objective_state(\n subproblem,\n initial_value = (50.0, 50.0),\n lipschitz = (10_000.0, 10_000.0),\n lower_bound = (50.0, 50.0),\n upper_bound = (150.0, 150.0),\n ) do fuel_cost, ω\n # fuel_cost is a tuple, containing the (fuel_cost[t-1], fuel_cost[t-2])\n # This function returns a new tuple containing\n # (fuel_cost[t], fuel_cost[t-1]). Thus, we need to compute the new\n # cost:\n new_cost = fuel_cost[1] + 0.5 * (fuel_cost[1] - fuel_cost[2]) + ω.fuel\n # And then return the appropriate tuple:\n return (new_cost, fuel_cost[1])\n end\n\n Ω = [\n (fuel = f, inflow = w) for f in [-10.0, -5.0, 5.0, 10.0] for\n w in [0.0, 50.0, 100.0]\n ]\n\n SDDP.parameterize(subproblem, Ω) do ω\n fuel_cost, _ = SDDP.objective_state(subproblem)\n @stageobjective(subproblem, fuel_cost * thermal_generation)\n return JuMP.fix(inflow, ω.inflow)\n end\nend\n\nSDDP.train(model; run_numerical_stability_report = false)\n\nsimulations = SDDP.simulate(model, 1)\n\nprint(\"Finished training and simulating.\")","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"This time, since our objective state is two-dimensional, the objective states are tuples with two elements:","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"[stage[:objective_state] for stage in simulations[1]]","category":"page"},{"location":"tutorial/objective_states/#objective_state_warnings","page":"Objective states","title":"Warnings","text":"","category":"section"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"There are a number of things to be aware of when using objective states.","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"The key assumption is that price is independent of the states and actions in the model.\nThat means that the price cannot appear in any @constraints. Nor can you use any @variables in the update function.\nChoosing an appropriate Lipschitz constant is difficult.\nThe points discussed in Choosing an initial bound are relevant. The Lipschitz constant should not be chosen as large as possible (a smaller constant will help with convergence and the numerical issues discussed above), but if chosen too small, it may cut off the feasible region and lead to a sub-optimal solution.\nYou need to ensure that the cost-to-go function is concave with respect to the objective state before the update.\nIf the update function is linear, this is always the case. 
In some situations, the update function can be nonlinear (e.g., multiplicative as we have above). In general, placing constraints on the price (e.g., clamp(price, 0, 1)) will destroy concavity. Caveat emptor. It's up to you if this is a problem. If it isn't you'll get a good heuristic with no guarantee of global optimality.","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/air_conditioning_forward/","page":"Training with a different forward model","title":"Training with a different forward model","text":"EditURL = \"air_conditioning_forward.jl\"","category":"page"},{"location":"examples/air_conditioning_forward/#Training-with-a-different-forward-model","page":"Training with a different forward model","title":"Training with a different forward model","text":"","category":"section"},{"location":"examples/air_conditioning_forward/","page":"Training with a different forward model","title":"Training with a different forward model","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/air_conditioning_forward/","page":"Training with a different forward model","title":"Training with a different forward model","text":"using SDDP\nimport HiGHS\nimport Test\n\nfunction create_air_conditioning_model(; convex::Bool)\n return SDDP.LinearPolicyGraph(\n stages = 3,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n ) do sp, t\n @variable(sp, 0 <= x <= 100, SDDP.State, initial_value = 0)\n @variable(sp, 0 <= u_production <= 200)\n @variable(sp, u_overtime >= 0)\n if !convex\n set_integer(x.out)\n set_integer(u_production)\n set_integer(u_overtime)\n end\n @constraint(sp, demand, x.in - x.out + u_production + u_overtime == 0)\n Ω = [[100.0], [100.0, 300.0], [100.0, 300.0]]\n SDDP.parameterize(ω -> JuMP.set_normalized_rhs(demand, ω), sp, Ω[t])\n @stageobjective(sp, 100 * u_production + 300 * u_overtime + 50 * x.out)\n end\nend\n\nconvex = create_air_conditioning_model(; convex = true)\nnon_convex = create_air_conditioning_model(; convex = false)\nSDDP.train(\n convex;\n forward_pass = SDDP.AlternativeForwardPass(non_convex),\n post_iteration_callback = SDDP.AlternativePostIterationCallback(non_convex),\n iteration_limit = 10,\n)\nTest.@test isapprox(SDDP.calculate_bound(non_convex), 62_500.0, atol = 0.1)\nTest.@test isapprox(SDDP.calculate_bound(convex), 62_500.0, atol = 0.1)","category":"page"},{"location":"examples/air_conditioning_forward/","page":"Training with a different forward model","title":"Training with a different forward model","text":"","category":"page"},{"location":"examples/air_conditioning_forward/","page":"Training with a different forward model","title":"Training with a different forward model","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/objective_state_newsvendor/","page":"Newsvendor","title":"Newsvendor","text":"EditURL = \"objective_state_newsvendor.jl\"","category":"page"},{"location":"examples/objective_state_newsvendor/#Newsvendor","page":"Newsvendor","title":"Newsvendor","text":"","category":"section"},{"location":"examples/objective_state_newsvendor/","page":"Newsvendor","title":"Newsvendor","text":"(Image: ) (Image: 
)","category":"page"},{"location":"examples/objective_state_newsvendor/","page":"Newsvendor","title":"Newsvendor","text":"This example is based on the classical newsvendor problem, but features an AR(1) spot-price.","category":"page"},{"location":"examples/objective_state_newsvendor/","page":"Newsvendor","title":"Newsvendor","text":" V(x[t-1], ω[t]) = max p[t] × u[t]\n subject to x[t] = x[t-1] - u[t] + ω[t]\n u[t] ∈ [0, 1]\n x[t] ≥ 0\n p[t] = p[t-1] + ϕ[t]","category":"page"},{"location":"examples/objective_state_newsvendor/","page":"Newsvendor","title":"Newsvendor","text":"The initial conditions are","category":"page"},{"location":"examples/objective_state_newsvendor/","page":"Newsvendor","title":"Newsvendor","text":"x[0] = 2.0\np[0] = 1.5\nω[t] ~ {0, 0.05, 0.10, ..., 0.45, 0.5} with uniform probability.\nϕ[t] ~ {-0.25, -0.125, 0.125, 0.25} with uniform probability.","category":"page"},{"location":"examples/objective_state_newsvendor/","page":"Newsvendor","title":"Newsvendor","text":"using SDDP, HiGHS, Statistics, Test\n\nfunction joint_distribution(; kwargs...)\n names = tuple([first(kw) for kw in kwargs]...)\n values = tuple([last(kw) for kw in kwargs]...)\n output_type = NamedTuple{names,Tuple{eltype.(values)...}}\n distribution = map(output_type, Base.product(values...))\n return distribution[:]\nend\n\nfunction newsvendor_example(; cut_type)\n model = SDDP.PolicyGraph(\n SDDP.LinearGraph(3),\n sense = :Max,\n upper_bound = 50.0,\n optimizer = HiGHS.Optimizer,\n ) do subproblem, stage\n @variables(subproblem, begin\n x >= 0, (SDDP.State, initial_value = 2)\n 0 <= u <= 1\n w\n end)\n @constraint(subproblem, x.out == x.in - u + w)\n SDDP.add_objective_state(\n subproblem,\n initial_value = 1.5,\n lower_bound = 0.75,\n upper_bound = 2.25,\n lipschitz = 100.0,\n ) do y, ω\n return y + ω.price_noise\n end\n noise_terms = joint_distribution(\n demand = 0:0.05:0.5,\n price_noise = [-0.25, -0.125, 0.125, 0.25],\n )\n SDDP.parameterize(subproblem, noise_terms) do ω\n JuMP.fix(w, ω.demand)\n price = SDDP.objective_state(subproblem)\n @stageobjective(subproblem, price * u)\n end\n end\n SDDP.train(\n model;\n log_frequency = 10,\n time_limit = 20.0,\n cut_type = cut_type,\n )\n @test SDDP.calculate_bound(model) ≈ 4.04 atol = 0.05\n results = SDDP.simulate(model, 500)\n objectives =\n [sum(s[:stage_objective] for s in simulation) for simulation in results]\n @test round(Statistics.mean(objectives); digits = 2) ≈ 4.04 atol = 0.1\n return\nend\n\nnewsvendor_example(cut_type = SDDP.SINGLE_CUT)\nnewsvendor_example(cut_type = SDDP.MULTI_CUT)","category":"page"},{"location":"examples/objective_state_newsvendor/","page":"Newsvendor","title":"Newsvendor","text":"","category":"page"},{"location":"examples/objective_state_newsvendor/","page":"Newsvendor","title":"Newsvendor","text":"This page was generated using Literate.jl.","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"EditURL = \"arma.jl\"","category":"page"},{"location":"tutorial/arma/#Auto-regressive-stochastic-processes","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"","category":"section"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"(Image: ) (Image: )","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"SDDP.jl 
assumes that the random variable in each node is independent of the random variables in all other nodes. However, a common request is to model the random variables by some auto-regressive process.","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"There are two ways to do this:","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"model the random variable as a Markov chain\nuse the \"state-space expansion\" trick","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"info: Info\nThis tutorial is in the context of a hydro-thermal scheduling example, but it should be apparent how the ideas transfer to other applications.","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"using SDDP\nimport HiGHS","category":"page"},{"location":"tutorial/arma/#state-space-expansion","page":"Auto-regressive stochastic processes","title":"The state-space expansion trick","text":"","category":"section"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"In An introduction to SDDP.jl, we assumed that the inflows were stagewise-independent. However, in many cases this is not correct, and inflow models are more accurately described by an auto-regressive process such as:","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"inflow_t = inflow_t-1 + varepsilon","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"Here varepsilon is a random variable, and the inflow in stage t is the inflow in stage t-1 plus varepsilon (which might be negative).","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"For simplicity, we omit any coefficients and other terms, but this could easily be extended to a model like","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"inflow_t = a times inflow_t-1 + b + varepsilon","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"In practice, you can estimate a distribution for varepsilon by fitting the chosen statistical model to historical data, and then using the empirical residuals.","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"To implement the auto-regressive model in SDDP.jl, we introduce inflow as a state variable.","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"tip: Tip\nOur rule of thumb for \"when is something a state variable?\" is: if you need the value of a variable from a previous stage to compute something in stage t, then that variable is a state variable.","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic 
processes","title":"Auto-regressive stochastic processes","text":"model = SDDP.LinearPolicyGraph(\n stages = 3,\n sense = :Min,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n) do sp, t\n @variable(sp, 0 <= x <= 200, SDDP.State, initial_value = 200)\n @variable(sp, g_t >= 0)\n @variable(sp, g_h >= 0)\n @variable(sp, s >= 0)\n @constraint(sp, g_h + g_t == 150)\n c = [50, 100, 150]\n @stageobjective(sp, c[t] * g_t)\n # =========================================================================\n # New stuff below Here\n # Add inflow as a state\n @variable(sp, inflow, SDDP.State, initial_value = 50.0)\n # Add the random variable as a control variable\n @variable(sp, ε)\n # The equation describing our statistical model\n @constraint(sp, inflow.out == inflow.in + ε)\n # The new water balance constraint using the state variable\n @constraint(sp, x.out == x.in - g_h - s + inflow.out)\n # Assume we have some empirical residuals:\n Ω = [-10.0, 0.1, 9.6]\n SDDP.parameterize(sp, Ω) do ω\n return JuMP.fix(ε, ω)\n end\nend","category":"page"},{"location":"tutorial/arma/#When-can-this-trick-be-used?","page":"Auto-regressive stochastic processes","title":"When can this trick be used?","text":"","category":"section"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"The state-space expansion trick should be used when:","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"The random variable appears additively in the objective or in the constraints. Something like inflow * decision_variable will not work.\nThe statistical model is linear, or can be written using the JuMP @constraint macro.\nThe dimension of the random variable is small (see Vector auto-regressive models for the multi-variate case).","category":"page"},{"location":"tutorial/arma/#The-Markov-chain-approach","page":"Auto-regressive stochastic processes","title":"The Markov chain approach","text":"","category":"section"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"In the Markov chain approach, we model the stochastic process for inflow by a discrete Markov chain. Markov chains are nodes with transition probabilities between the nodes. SDDP.jl has good support for solving problems in which the uncertainty is formulated as a Markov chain.","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"The first step of the Markov chain approach is to write a function which simulates the stochastic process. 
Here is a simulator for our inflow model:","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"function simulator()\n inflow = zeros(3)\n current = 50.0\n Ω = [-10.0, 0.1, 9.6]\n for t in 1:3\n current += rand(Ω)\n inflow[t] = current\n end\n return inflow\nend","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"When called with no arguments, it produces a vector of inflows:","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"simulator()","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"warning: Warning\nThe simulator must return a Vector{Float64}, so it is limited to a uni-variate random variable. It is possible to do something similar for multi-variate random variable, but you'll have to manually construct the Markov transition matrix, and solution times scale poorly, even in the two-dimensional case.","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"The next step is to call SDDP.MarkovianGraph with our simulator. This function will attempt to fit a Markov chain to the stochastic process produced by your simulator. There are two key arguments:","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"budget is the total number of nodes we want in the Markov chain\nscenarios is a limit on the number of times we can call simulator","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"graph = SDDP.MarkovianGraph(simulator; budget = 8, scenarios = 30)","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"Here we can see we have created a MarkovianGraph with nodes like (2, 59.7). 
The first element of each node is the stage, and the second element is the inflow.","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"Create a SDDP.PolicyGraph using graph as follows:","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"model = SDDP.PolicyGraph(\n graph, # <--- New stuff\n sense = :Min,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n) do sp, node\n t, inflow = node # <--- New stuff\n @variable(sp, 0 <= x <= 200, SDDP.State, initial_value = 200)\n @variable(sp, g_t >= 0)\n @variable(sp, g_h >= 0)\n @variable(sp, s >= 0)\n @constraint(sp, g_h + g_t == 150)\n c = [50, 100, 150]\n @stageobjective(sp, c[t] * g_t)\n # The new water balance constraint using the node:\n @constraint(sp, x.out == x.in - g_h - s + inflow)\nend","category":"page"},{"location":"tutorial/arma/#When-can-this-trick-be-used?-2","page":"Auto-regressive stochastic processes","title":"When can this trick be used?","text":"","category":"section"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"The Markov chain approach should be used when:","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"The random variable is uni-variate\nThe random variable appears in the objective function or as a variable coefficient in the constraint matrix\nIt's non-trivial to write the stochastic process as a series of constraints (for example, it uses nonlinear terms)\nThe number of nodes is modest (for example, a budget of hundreds, up to perhaps 1000)","category":"page"},{"location":"tutorial/arma/#Vector-auto-regressive-models","page":"Auto-regressive stochastic processes","title":"Vector auto-regressive models","text":"","category":"section"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"The state-space expansion section assumed that the random variable was uni-variate. However, the approach naturally extends to vector auto-regressive models. 
For example, if inflow is a 2-dimensional vector, then we can model a vector auto-regressive model to it as follows:","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"inflow_t = A times inflow_t-1 + b + varepsilon","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"Here A is a 2-by-2 matrix, and b and varepsilon are 2-by-1 vectors.","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"model = SDDP.LinearPolicyGraph(\n stages = 3,\n sense = :Min,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n) do sp, t\n @variable(sp, 0 <= x <= 200, SDDP.State, initial_value = 200)\n @variable(sp, g_t >= 0)\n @variable(sp, g_h >= 0)\n @variable(sp, s >= 0)\n @constraint(sp, g_h + g_t == 150)\n c = [50, 100, 150]\n @stageobjective(sp, c[t] * g_t)\n # =========================================================================\n # New stuff below Here\n # Add inflow as a state\n @variable(sp, inflow[1:2], SDDP.State, initial_value = 50.0)\n # Add the random variable as a control variable\n @variable(sp, ε[1:2])\n # The equation describing our statistical model\n A = [0.8 0.2; 0.2 0.8]\n @constraint(\n sp,\n [i = 1:2],\n inflow[i].out == sum(A[i, j] * inflow[j].in for j in 1:2) + ε[i],\n )\n # The new water balance constraint using the state variable\n @constraint(sp, x.out == x.in - g_h - s + inflow[1].out + inflow[2].out)\n # Assume we have some empirical residuals:\n Ω₁ = [-10.0, 0.1, 9.6]\n Ω₂ = [-10.0, 0.1, 9.6]\n Ω = [(ω₁, ω₂) for ω₁ in Ω₁ for ω₂ in Ω₂]\n SDDP.parameterize(sp, Ω) do ω\n JuMP.fix(ε[1], ω[1])\n JuMP.fix(ε[2], ω[2])\n return\n end\nend","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"This page was generated using Literate.jl.","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"EditURL = \"mdps.jl\"","category":"page"},{"location":"tutorial/mdps/#Example:-Markov-Decision-Processes","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"","category":"section"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"(Image: ) (Image: )","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"SDDP.jl can be used to solve a variety of Markov Decision processes. If the problem has continuous state and control spaces, and the objective and transition function are convex, then SDDP.jl can find a globally optimal policy. 
In other cases, SDDP.jl will find a locally optimal policy.","category":"page"},{"location":"tutorial/mdps/#A-simple-example","page":"Example: Markov Decision Processes","title":"A simple example","text":"","category":"section"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"A simple demonstration of this is the example taken from page 98 of the book \"Markov Decision Processes: Discrete Stochastic Dynamic Programming\", by Martin L. Puterman.","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"The example, as described in Section 4.6.3 of the book, is to minimize a sum of squares of N non-negative variables, subject to a budget constraint that the variable values add up to M. Put mathematically, that is:","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"beginaligned\nmin sumlimits_i=1^N x_i^2 \nst sumlimits_i=1^N x_i = M \n x_i ge 0 quad i in 1ldotsN\nendaligned","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"The optimal objective value is M^2/N, and the optimal solution is x_i = M/N, which can be shown by induction.","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"This can be reformulated as a Markov Decision Process by introducing a state variable, s, which tracks the unspent budget over N stages.","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"beginaligned\nV_t(s) = min x^2 + V_t+1(s^prime) \nst s^prime = s - x \n x le s \n x ge 0 \n s ge 0\nendaligned","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"and in the last stage V_N, there is an additional constraint that s^prime = 0.","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"The optimal objective value for a budget of M is obtained by solving for V_1(M).","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"info: Info\nSince everything here is continuous and convex, SDDP.jl will find the globally optimal policy.","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"If the reformulation from the single problem into the recursive form of the Markov Decision Process is not obvious, consult Puterman's book.","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"We can model and solve this problem using SDDP.jl as follows:","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"using SDDP\nimport Ipopt\n\nM, N = 5, 3\n\nmodel = SDDP.LinearPolicyGraph(\n stages = N,\n lower_bound = 0.0,\n optimizer = Ipopt.Optimizer,\n) do subproblem, node\n @variable(subproblem, s >= 0, SDDP.State, initial_value = M)\n @variable(subproblem, x >= 0)\n @stageobjective(subproblem, x^2)\n 
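# x is the amount of budget spent in this stage: we cannot spend more than\n # the remaining budget s.in, and the unspent amount is carried forward to\n # the next stage via s.out.\n 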
@constraint(subproblem, x <= s.in)\n @constraint(subproblem, s.out == s.in - x)\n if node == N\n fix(s.out, 0.0; force = true)\n end\n return\nend\n\nSDDP.train(model)","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"Check that we got the theoretical optimum:","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"SDDP.calculate_bound(model), M^2 / N","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"And check that we found the theoretical value for each x_i:","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"simulations = SDDP.simulate(model, 1, [:x])\nfor data in simulations[1]\n println(\"x_$(data[:node_index]) = $(data[:x])\")\nend","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"Close enough! We don't get exactly 5/3 because of numerical tolerances within our choice of optimization solver (in this case, Ipopt).","category":"page"},{"location":"tutorial/mdps/#A-more-complicated-policy","page":"Example: Markov Decision Processes","title":"A more complicated policy","text":"","category":"section"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"SDDP.jl is also capable of finding policies for other types of Markov Decision Processes. A classic example of a Markov Decision Process is the problem of finding a path through a maze.","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"Here's one example of a maze. Try changing the parameters to explore different mazes:","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"M, N = 3, 4\ninitial_square = (1, 1)\nreward, illegal_squares, penalties = (3, 4), [(2, 2)], [(3, 1), (2, 4)]\npath = fill(\"⋅\", M, N)\npath[initial_square...] = \"1\"\nfor (k, v) in (illegal_squares => \"▩\", penalties => \"†\", [reward] => \"*\")\n for (i, j) in k\n path[i, j] = v\n end\nend\nprint(join([join(path[i, :], ' ') for i in 1:size(path, 1)], '\\n'))","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"Our goal is to get from square 1 to square *. If we step on a †, we incur a penalty of 1. Squares with ▩ are blocked; we cannot move there.","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"There are a variety of ways that we can solve this problem. We're going to solve it using a stationary binary stochastic programming formulation.","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"Our state variable will be a matrix of binary variables x_ij, where each element is 1 if the agent is in the square and 0 otherwise. In each period, we incur a reward of 1 if we are in the reward square and a penalty of -1 if we are in a penalties square. 
We cannot move to the illegal_squares, so those x_ij = 0. Feasibility between moves is modelled by constraints of the form:","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"x^prime_ij le sumlimits_(ab)in P x_ab","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"where P is the set of squares from which it is valid to move from (a, b) to (i, j).","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"Because we are looking for a stationary policy, we need a unicyclic graph with a discount factor:","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"discount_factor = 0.9\ngraph = SDDP.UnicyclicGraph(discount_factor)","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"Then we can formulate our full model:","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"import HiGHS\n\nmodel = SDDP.PolicyGraph(\n graph;\n sense = :Max,\n upper_bound = 1 / (1 - discount_factor),\n optimizer = HiGHS.Optimizer,\n) do sp, _\n # Our state is a binary variable for each square\n @variable(\n sp,\n x[i = 1:M, j = 1:N],\n Bin,\n SDDP.State,\n initial_value = (i, j) == initial_square,\n )\n # Can only be in one square at a time\n @constraint(sp, sum(x[i, j].out for i in 1:M, j in 1:N) == 1)\n # Incur rewards and penalties\n @stageobjective(\n sp,\n x[reward...].out - sum(x[i, j].out for (i, j) in penalties)\n )\n # Some squares are illegal\n @constraint(sp, [(i, j) in illegal_squares], x[i, j].out <= 0)\n # Constraints on valid moves\n for i in 1:M, j in 1:N\n moves = [(i - 1, j), (i + 1, j), (i, j), (i, j + 1), (i, j - 1)]\n filter!(v -> 1 <= v[1] <= M && 1 <= v[2] <= N, moves)\n @constraint(sp, x[i, j].out <= sum(x[a, b].in for (a, b) in moves))\n end\n return\nend","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"The upper bound is obtained by assuming that we reach the reward square in one move and stay there.","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"warning: Warning\nSince there are discrete decisions here, SDDP.jl is not guaranteed to find the globally optimal policy.","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"SDDP.train(model)","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"Simulating a cyclic policy graph requires an explicit sampling_scheme that does not terminate early based on the cycle probability:","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"simulations = SDDP.simulate(\n model,\n 1,\n [:x];\n sampling_scheme = SDDP.InSampleMonteCarlo(\n max_depth = 5,\n terminate_on_dummy_leaf = false,\n 
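# max_depth = 5 caps each simulated trajectory at five nodes, and\n # terminate_on_dummy_leaf = false stops the simulation from terminating\n # early based on the cycle (discount) probability.\n 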
),\n);\n","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"Fill in the path with the time-step in which we visit the square:","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"for (t, data) in enumerate(simulations[1]), i in 1:M, j in 1:N\n if data[:x][i, j].in > 0.5\n path[i, j] = \"$t\"\n end\nend\n\nprint(join([join(path[i, :], ' ') for i in 1:size(path, 1)], '\\n'))","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"tip: Tip\nThis formulation will likely struggle as the number of cells in the maze increases. Can you think of an equivalent formulation that uses fewer state variables?","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"EditURL = \"Hydro_thermal.jl\"","category":"page"},{"location":"examples/Hydro_thermal/#Hydro-thermal-scheduling","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"","category":"section"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/Hydro_thermal/#Problem-Description","page":"Hydro-thermal scheduling","title":"Problem Description","text":"","category":"section"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"In a hydro-thermal problem, the agent controls a hydro-electric generator and reservoir. Each time period, they need to choose a generation quantity from thermal g_t, and hydro g_h, in order to meet demand w_d, which is a stagewise-independent random variable. The state variable, x, is the quantity of water in the reservoir at the start of each time period, and it has a minimum level of 5 units and a maximum level of 15 units. We assume that there are 10 units of water in the reservoir at the start of time, so that x_0 = 10. The state-variable is connected through time by the water balance constraint: x.out = x.in - g_h - s + w_i, where x.out is the quantity of water at the end of the time period, x.in is the quantity of water at the start of the time period, s is the quantity of water spilled from the reservoir, and w_i is a stagewise-independent random variable that represents the inflow into the reservoir during the time period.","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"We assume that there are three stages, t=1, 2, 3, representing summer-fall, winter, and spring, and that we are solving this problem in an infinite-horizon setting with a discount factor of 0.95.","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"In each stage, the agent incurs the cost of spillage, plus the cost of thermal generation. 
We assume that the cost of thermal generation is dependent on the stage t = 1, 2, 3, and that in each stage, w is drawn from the set (w_i, w_d) = {(0, 7.5), (3, 5), (10, 2.5)} with equal probability.","category":"page"},{"location":"examples/Hydro_thermal/#Importing-packages","page":"Hydro-thermal scheduling","title":"Importing packages","text":"","category":"section"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"For this example, in addition to SDDP, we need HiGHS as a solver and Statistics to compute the mean of our simulations.","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"using HiGHS\nusing SDDP\nusing Statistics","category":"page"},{"location":"examples/Hydro_thermal/#Constructing-the-policy-graph","page":"Hydro-thermal scheduling","title":"Constructing the policy graph","text":"","category":"section"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"There are three stages in our infinite-horizon problem, so we construct a unicyclic policy graph using SDDP.UnicyclicGraph:","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"graph = SDDP.UnicyclicGraph(0.95; num_nodes = 3)","category":"page"},{"location":"examples/Hydro_thermal/#Constructing-the-model","page":"Hydro-thermal scheduling","title":"Constructing the model","text":"","category":"section"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"Much of the macro code (i.e., lines starting with @) in the first part of the following should be familiar to users of JuMP.","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"Inside the do-end block, sp is a standard JuMP model, and t is an index for the node that will be called with t = 1, 2, 3.","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"The state variable x, constructed by passing the SDDP.State tag to @variable, is actually a Julia struct with two fields: x.in and x.out corresponding to the incoming and outgoing state variables respectively. Both x.in and x.out are standard JuMP variables. The initial_value keyword provides the value of the state variable in the root node (i.e., x_0).","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"Compared to a JuMP model, one key difference is that we use @stageobjective instead of @objective. The SDDP.parameterize function takes a list of supports for w and parameterizes the JuMP model sp by setting the right-hand sides of the appropriate constraints (note how the constraints initially have a right-hand side of 0). 
By default, it is assumed that the realizations have uniform probability, but a probability mass vector can also be provided.","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"model = SDDP.PolicyGraph(\n graph,\n sense = :Min,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n) do sp, t\n @variable(sp, 5 <= x <= 15, SDDP.State, initial_value = 10)\n @variable(sp, g_t >= 0)\n @variable(sp, g_h >= 0)\n @variable(sp, s >= 0)\n @constraint(sp, balance, x.out - x.in + g_h + s == 0)\n @constraint(sp, demand, g_h + g_t == 0)\n @stageobjective(sp, s + t * g_t)\n SDDP.parameterize(sp, [[0, 7.5], [3, 5], [10, 2.5]]) do w\n set_normalized_rhs(balance, w[1])\n return set_normalized_rhs(demand, w[2])\n end\nend","category":"page"},{"location":"examples/Hydro_thermal/#Training-the-policy","page":"Hydro-thermal scheduling","title":"Training the policy","text":"","category":"section"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"Once a model has been constructed, the next step is to train the policy. This can be achieved using SDDP.train. There are many options that can be passed, but iteration_limit terminates the training after the prescribed number of SDDP iterations.","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"SDDP.train(model, iteration_limit = 100)","category":"page"},{"location":"examples/Hydro_thermal/#Simulating-the-policy","page":"Hydro-thermal scheduling","title":"Simulating the policy","text":"","category":"section"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"After training, we can simulate the policy using SDDP.simulate.","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"sims = SDDP.simulate(model, 100, [:g_t])\nmu = round(mean([s[1][:g_t] for s in sims]), digits = 2)\nprintln(\"On average, $(mu) units of thermal are used in the first stage.\")","category":"page"},{"location":"examples/Hydro_thermal/#Extracting-the-water-values","page":"Hydro-thermal scheduling","title":"Extracting the water values","text":"","category":"section"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"Finally, we can use SDDP.ValueFunction and SDDP.evaluate to obtain and evaluate the value function at different points in the state-space. 
Note that since we are minimizing, the price has a negative sign: each additional unit of water leads to a decrease in the expected long-run cost.","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"V = SDDP.ValueFunction(model[1])\ncost, price = SDDP.evaluate(V, x = 10)","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/hydro_valley/","page":"Hydro valleys","title":"Hydro valleys","text":"EditURL = \"hydro_valley.jl\"","category":"page"},{"location":"examples/hydro_valley/#Hydro-valleys","page":"Hydro valleys","title":"Hydro valleys","text":"","category":"section"},{"location":"examples/hydro_valley/","page":"Hydro valleys","title":"Hydro valleys","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/hydro_valley/","page":"Hydro valleys","title":"Hydro valleys","text":"This problem is a version of the hydro-thermal scheduling problem. The goal is to operate two hydro-dams in a valley chain over time in the face of inflow and price uncertainty.","category":"page"},{"location":"examples/hydro_valley/","page":"Hydro valleys","title":"Hydro valleys","text":"Turbine response curves are modelled by piecewise linear functions which map the flow rate into a power. These can be controlled by specifying the breakpoints in the piecewise linear function as the knots in the Turbine struct.","category":"page"},{"location":"examples/hydro_valley/","page":"Hydro valleys","title":"Hydro valleys","text":"The model can be created using the hydro_valley_model function. It has a few keyword arguments to allow automated testing of the library. hasstagewiseinflows determines if the RHS noise constraint should be added. 
hasmarkovprice determines if the price uncertainty (modelled by a Markov chain) should be added.","category":"page"},{"location":"examples/hydro_valley/","page":"Hydro valleys","title":"Hydro valleys","text":"In the third stage, the Markov chain has some unreachable states to test some code-paths in the library.","category":"page"},{"location":"examples/hydro_valley/","page":"Hydro valleys","title":"Hydro valleys","text":"We can also set the sense to :Min or :Max (the objective and bound are flipped appropriately).","category":"page"},{"location":"examples/hydro_valley/","page":"Hydro valleys","title":"Hydro valleys","text":"using SDDP, HiGHS, Test, Random\n\nstruct Turbine\n flowknots::Vector{Float64}\n powerknots::Vector{Float64}\nend\n\nstruct Reservoir\n min::Float64\n max::Float64\n initial::Float64\n turbine::Turbine\n spill_cost::Float64\n inflows::Vector{Float64}\nend\n\nfunction hydro_valley_model(;\n hasstagewiseinflows::Bool = true,\n hasmarkovprice::Bool = true,\n sense::Symbol = :Max,\n)\n valley_chain = [\n Reservoir(\n 0,\n 200,\n 200,\n Turbine([50, 60, 70], [55, 65, 70]),\n 1000,\n [0, 20, 50],\n ),\n Reservoir(\n 0,\n 200,\n 200,\n Turbine([50, 60, 70], [55, 65, 70]),\n 1000,\n [0, 0, 20],\n ),\n ]\n\n turbine(i) = valley_chain[i].turbine\n\n # Prices[t, Markov state]\n prices = [\n 1 2 0\n 2 1 0\n 3 4 0\n ]\n\n # Transition matrix\n if hasmarkovprice\n transition =\n Array{Float64,2}[[1.0]', [0.6 0.4], [0.6 0.4 0.0; 0.3 0.7 0.0]]\n else\n transition = [ones(Float64, (1, 1)) for t in 1:3]\n end\n\n flipobj = (sense == :Max) ? 1.0 : -1.0\n lower = (sense == :Max) ? -Inf : -1e6\n upper = (sense == :Max) ? 1e6 : Inf\n\n N = length(valley_chain)\n\n # Initialise SDDP Model\n return m = SDDP.MarkovianPolicyGraph(\n sense = sense,\n lower_bound = lower,\n upper_bound = upper,\n transition_matrices = transition,\n optimizer = HiGHS.Optimizer,\n ) do subproblem, node\n t, markov_state = node\n\n # ------------------------------------------------------------------\n # SDDP State Variables\n # Level of upper reservoir\n @variable(\n subproblem,\n valley_chain[r].min <= reservoir[r = 1:N] <= valley_chain[r].max,\n SDDP.State,\n initial_value = valley_chain[r].initial\n )\n\n # ------------------------------------------------------------------\n # Additional variables\n @variables(\n subproblem,\n begin\n outflow[r = 1:N] >= 0\n spill[r = 1:N] >= 0\n inflow[r = 1:N] >= 0\n generation_quantity >= 0 # Total quantity of water\n # Proportion of levels to dispatch on\n 0 <=\n dispatch[r = 1:N, level = 1:length(turbine(r).flowknots)] <=\n 1\n rainfall[i = 1:N]\n end\n )\n\n # ------------------------------------------------------------------\n # Constraints\n @constraints(\n subproblem,\n begin\n # flow from upper reservoir\n reservoir[1].out ==\n reservoir[1].in + inflow[1] - outflow[1] - spill[1]\n\n # other flows\n flow[i = 2:N],\n reservoir[i].out ==\n reservoir[i].in + inflow[i] - outflow[i] - spill[i] +\n outflow[i-1] +\n spill[i-1]\n\n # Total quantity generated\n generation_quantity == sum(\n turbine(r).powerknots[level] * dispatch[r, level] for\n r in 1:N for level in 1:length(turbine(r).powerknots)\n )\n\n # ------------------------------------------------------------------\n # Flow out\n turbineflow[r = 1:N],\n outflow[r] == sum(\n turbine(r).flowknots[level] * dispatch[r, level] for\n level in 1:length(turbine(r).flowknots)\n )\n\n # Dispatch combination of levels\n dispatched[r = 1:N],\n sum(\n dispatch[r, level] for\n level in 1:length(turbine(r).flowknots)\n ) <= 1\n end\n 
)\n\n # rainfall noises\n if hasstagewiseinflows && t > 1 # in future stages random inflows\n @constraint(subproblem, inflow_noise[i = 1:N], inflow[i] <= rainfall[i])\n\n SDDP.parameterize(\n subproblem,\n [\n (valley_chain[1].inflows[i], valley_chain[2].inflows[i]) for i in 1:length(transition)\n ],\n ) do ω\n for i in 1:N\n JuMP.fix(rainfall[i], ω[i])\n end\n end\n else # in the first stage deterministic inflow\n @constraint(\n subproblem,\n initial_inflow_noise[i = 1:N],\n inflow[i] <= valley_chain[i].inflows[1]\n )\n end\n\n # ------------------------------------------------------------------\n # Objective Function\n if hasmarkovprice\n @stageobjective(\n subproblem,\n flipobj * (\n prices[t, markov_state] * generation_quantity -\n sum(valley_chain[i].spill_cost * spill[i] for i in 1:N)\n )\n )\n else\n @stageobjective(\n subproblem,\n flipobj * (\n prices[t, 1] * generation_quantity -\n sum(valley_chain[i].spill_cost * spill[i] for i in 1:N)\n )\n )\n end\n end\nend\n\nfunction test_hydro_valley_model()\n\n # For repeatability\n Random.seed!(11111)\n\n # deterministic\n deterministic_model =\n hydro_valley_model(hasmarkovprice = false, hasstagewiseinflows = false)\n SDDP.train(\n deterministic_model,\n iteration_limit = 10,\n cut_deletion_minimum = 1,\n print_level = 0,\n )\n @test SDDP.calculate_bound(deterministic_model) ≈ 835.0 atol = 1e-3\n\n # stagewise inflows\n stagewise_model = hydro_valley_model(hasmarkovprice = false)\n SDDP.train(stagewise_model, iteration_limit = 20, print_level = 0)\n @test SDDP.calculate_bound(stagewise_model) ≈ 838.33 atol = 1e-2\n\n # Markov prices\n markov_model = hydro_valley_model(hasstagewiseinflows = false)\n SDDP.train(markov_model, iteration_limit = 10, print_level = 0)\n @test SDDP.calculate_bound(markov_model) ≈ 851.8 atol = 1e-2\n\n # stagewise inflows and Markov prices\n markov_stagewise_model =\n hydro_valley_model(hasstagewiseinflows = true, hasmarkovprice = true)\n SDDP.train(markov_stagewise_model, iteration_limit = 10, print_level = 0)\n @test SDDP.calculate_bound(markov_stagewise_model) ≈ 855.0 atol = 1.0\n\n # risk averse stagewise inflows and Markov prices\n riskaverse_model = hydro_valley_model()\n SDDP.train(\n riskaverse_model,\n risk_measure = SDDP.EAVaR(lambda = 0.5, beta = 0.66),\n iteration_limit = 10,\n print_level = 0,\n )\n @test SDDP.calculate_bound(riskaverse_model) ≈ 828.157 atol = 1.0\n\n # stagewise inflows and Markov prices\n worst_case_model = hydro_valley_model(sense = :Min)\n SDDP.train(\n worst_case_model,\n risk_measure = SDDP.EAVaR(lambda = 0.5, beta = 0.0),\n iteration_limit = 10,\n print_level = 0,\n )\n @test SDDP.calculate_bound(worst_case_model) ≈ -780.867 atol = 1.0\n\n # stagewise inflows and Markov prices\n cutselection_model = hydro_valley_model()\n SDDP.train(\n cutselection_model,\n iteration_limit = 10,\n print_level = 0,\n cut_deletion_minimum = 2,\n )\n @test SDDP.calculate_bound(cutselection_model) ≈ 855.0 atol = 1.0\n\n # Distributionally robust Optimization\n dro_model = hydro_valley_model(hasmarkovprice = false)\n SDDP.train(\n dro_model,\n risk_measure = SDDP.ModifiedChiSquared(sqrt(2 / 3) - 1e-6),\n iteration_limit = 10,\n print_level = 0,\n )\n @test SDDP.calculate_bound(dro_model) ≈ 835.0 atol = 1.0\n\n dro_model = hydro_valley_model(hasmarkovprice = false)\n SDDP.train(\n dro_model,\n risk_measure = SDDP.ModifiedChiSquared(1 / 6),\n iteration_limit = 20,\n print_level = 0,\n )\n @test SDDP.calculate_bound(dro_model) ≈ 836.695 atol = 1.0\n # (Note) radius ≈ sqrt(2/3), will set all noise 
probabilities to zero except the worst case noise\n # (Why?):\n # The distance from the uniform distribution (the assumed \"true\" distribution)\n # to a corner of a unit simplex is sqrt(S-1)/sqrt(S) if we have S scenarios. The corner\n # of a unit simplex is just a unit vector, i.e.: [0 ... 0 1 0 ... 0]. With this probability\n # vector, only one noise has a non-zero probablity.\n # In the worst case rhsnoise (0 inflows) the profit is:\n # Reservoir1: 70 * $3 + 70 * $2 + 65 * $1 +\n # Reservoir2: 70 * $3 + 70 * $2 + 70 * $1\n ### = $835\nend\n\ntest_hydro_valley_model()","category":"page"},{"location":"examples/hydro_valley/","page":"Hydro valleys","title":"Hydro valleys","text":"","category":"page"},{"location":"examples/hydro_valley/","page":"Hydro valleys","title":"Hydro valleys","text":"This page was generated using Literate.jl.","category":"page"},{"location":"guides/add_noise_in_the_constraint_matrix/#Add-noise-in-the-constraint-matrix","page":"Add noise in the constraint matrix","title":"Add noise in the constraint matrix","text":"","category":"section"},{"location":"guides/add_noise_in_the_constraint_matrix/","page":"Add noise in the constraint matrix","title":"Add noise in the constraint matrix","text":"DocTestSetup = quote\n using SDDP, HiGHS\nend","category":"page"},{"location":"guides/add_noise_in_the_constraint_matrix/","page":"Add noise in the constraint matrix","title":"Add noise in the constraint matrix","text":"SDDP.jl supports coefficients in the constraint matrix through the JuMP.set_normalized_coefficient function.","category":"page"},{"location":"guides/add_noise_in_the_constraint_matrix/","page":"Add noise in the constraint matrix","title":"Add noise in the constraint matrix","text":"julia> model = SDDP.LinearPolicyGraph(\n stages=3, lower_bound = 0, optimizer = HiGHS.Optimizer\n ) do subproblem, t\n @variable(subproblem, x, SDDP.State, initial_value = 0.0)\n @constraint(subproblem, emissions, 1x.out <= 1)\n SDDP.parameterize(subproblem, [0.2, 0.5, 1.0]) do ω\n JuMP.set_normalized_coefficient(emissions, x.out, ω)\n println(emissions)\n end\n @stageobjective(subproblem, -x.out)\n end\nA policy graph with 3 nodes.\n Node indices: 1, 2, 3\n\njulia> SDDP.simulate(model, 1);\nemissions : x_out <= 1\nemissions : 0.2 x_out <= 1\nemissions : 0.5 x_out <= 1","category":"page"},{"location":"guides/add_noise_in_the_constraint_matrix/","page":"Add noise in the constraint matrix","title":"Add noise in the constraint matrix","text":"note: Note\nJuMP will normalize constraints by moving all variables to the left-hand side. Thus, @constraint(model, 0 <= 1 - x.out) becomes x.out <= 1. JuMP.set_normalized_coefficient sets the coefficient on the normalized constraint.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"EditURL = \"risk.jl\"","category":"page"},{"location":"explanation/risk/#Risk-aversion","page":"Risk aversion","title":"Risk aversion","text":"","category":"section"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"(Image: ) (Image: )","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"In Introductory theory, we implemented a basic version of the SDDP algorithm. 
This tutorial extends that implementation to add risk-aversion.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Packages","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"This tutorial uses the following packages. For clarity, we call import PackageName so that we must prefix PackageName. to all functions and structs provided by that package. Everything not prefixed is either part of base Julia, or we wrote it.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"import ForwardDiff\nimport HiGHS\nimport Ipopt\nimport JuMP\nimport Statistics","category":"page"},{"location":"explanation/risk/#Risk-aversion:-what-and-why?","page":"Risk aversion","title":"Risk aversion: what and why?","text":"","category":"section"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Often, the agents making decisions in complex systems are risk-averse, that is, they care more about avoiding very bad outcomes, than they do about having a good average outcome.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"As an example, consumers in a hydro-thermal problem may be willing to pay a slightly higher electricity price on average, if it means that there is a lower probability of blackouts.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Risk aversion in multistage stochastic programming has been well studied in the academic literature, and is widely used in production implementations around the world.","category":"page"},{"location":"explanation/risk/#Risk-measures","page":"Risk aversion","title":"Risk measures","text":"","category":"section"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"One way to add risk aversion to models is to use a risk measure. A risk measure is a function that maps a random variable to a real number.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"You are probably already familiar with lots of different risk measures. For example, the mean, median, mode, and maximum are all risk measures.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"We call the act of applying a risk measure to a random variable \"computing the risk\" of a random variable.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"To keep things simple, and because we need it for SDDP, we restrict our attention to random variables Z with a finite sample space Omega and positive probabilities p_omega for all omega in Omega. 
We denote the realizations of Z by Z(omega) = z_omega.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"A risk measure, mathbbFZ, is a convex risk measure if it satisfies the following axioms:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Axiom 1: monotonicity","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Given two random variables Z_1 and Z_2, with Z_1 le Z_2 almost surely, then mathbbFZ_1 le FZ_2.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Axiom 2: translation equivariance","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Given two random variables Z_1 and Z_2, then for all a in mathbbR, mathbbFZ + a = mathbbFZ + a.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Axiom 3: convexity","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Given two random variables Z_1 and Z_2, then for all a in 0 1,","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"mathbbFa Z_1 + (1 - a) Z_2 le a mathbbFZ_1 + (1-a)mathbbFZ_2","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Now we know what a risk measure is, let's see how we can use them to form risk-averse decision rules.","category":"page"},{"location":"explanation/risk/#Risk-averse-decision-rules:-Part-I","page":"Risk aversion","title":"Risk-averse decision rules: Part I","text":"","category":"section"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"We started this tutorial by explaining that we are interested in risk aversion because some agents are risk-averse. What that really means, is that they want a policy that is also risk-averse. The question then becomes, how do we create risk-averse decision rules and policies?","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Recall from Introductory theory that we can form an optimal decision rule using the recursive formulation:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"beginaligned\nV_i(x omega) = minlimits_barx x^prime u C_i(barx u omega) + mathbbE_j in i^+ varphi in Omega_jV_j(x^prime varphi)\n x^prime = T_i(barx u omega) \n u in U_i(barx omega) \n barx = x\nendaligned","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"where our decision rule, pi_i(x omega), solves this optimization problem and returns a u^* corresponding to an optimal solution.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"If we can replace the expectation operator mathbbE with another (more risk-averse) risk measure mathbbF, then our decision rule will attempt to choose a control decision now that minimizes the risk of the future costs, as opposed to the expectation of the future costs. 
This makes our decisions more risk-averse, because we care more about the worst outcomes than we do about the average.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Therefore, we can form a risk-averse decision rule using the formulation:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"beginaligned\nV_i(x omega) = minlimits_barx x^prime u C_i(barx u omega) + mathbbF_j in i^+ varphi in Omega_jV_j(x^prime varphi)\n x^prime = T_i(barx u omega) \n u in U_i(barx omega) \n barx = x\nendaligned","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"To convert this problem into a tractable equivalent, we apply Kelley's algorithm to the risk-averse cost-to-go term mathbbF_j in i^+ varphi in Omega_jV_j(x^prime varphi), to obtain the approximated problem:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"beginaligned\nV_i^K(x omega) = minlimits_barx x^prime u C_i(barx u omega) + theta\n x^prime = T_i(barx u omega) \n u in U_i(barx omega) \n barx = x \n theta ge mathbbF_j in i^+ varphi in Omega_jleftV_j^k(x^prime_k varphi)right + fracddx^primemathbbF_j in i^+ varphi in Omega_jleftV_j^k(x^prime_k varphi)right^top (x^prime - x^prime_k)quad k=1ldotsK\nendaligned","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"warning: Warning\nNote how we need to explicitly compute a risk-averse subgradient! (We need a subgradient because the function might not be differentiable.) When constructing cuts with the expectation operator in Introductory theory, we implicitly used the law of total expectation to combine the two expectations; we can't do that for a general risk measure.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"tip: Homework challenge\nIf it's not obvious why we can use Kelley's here, try to use the axioms of a convex risk measure to show that mathbbF_j in i^+ varphi in Omega_jV_j(x^prime varphi) is a convex function w.r.t. x^prime if V_j is also a convex function.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Our challenge is now to find a way to compute the risk-averse cost-to-go function mathbbF_j in i^+ varphi in Omega_jleftV_j^k(x^prime_k varphi)right, and a way to compute a subgradient of the risk-averse cost-to-go function with respect to x^prime.","category":"page"},{"location":"explanation/risk/#Primal-risk-measures","page":"Risk aversion","title":"Primal risk measures","text":"","category":"section"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Now we know what a risk measure is, and how we will use it, let's implement some code to see how we can compute the risk of some random variables.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"note: Note\nWe're going to start by implementing the primal version of each risk measure. 
We implement the dual version in the next section.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"First, we need some data:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Z = [1.0, 2.0, 3.0, 4.0]","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"with probabilities:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"p = [0.1, 0.2, 0.4, 0.3]","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"We're going to implement a number of different risk measures, so to leverage Julia's multiple dispatch, we create an abstract type:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"abstract type AbstractRiskMeasure end","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"and function to overload:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"\"\"\"\n primal_risk(F::AbstractRiskMeasure, Z::Vector{<:Real}, p::Vector{Float64})\n\nUse `F` to compute the risk of the random variable defined by a vector of costs\n`Z` and non-zero probabilities `p`.\n\"\"\"\nfunction primal_risk end","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"note: Note\nWe want Vector{<:Real} instead of Vector{Float64} because we're going to automatically differentiate this function in the next section.","category":"page"},{"location":"explanation/risk/#Expectation","page":"Risk aversion","title":"Expectation","text":"","category":"section"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"The expectation, mathbbE, also called the mean or the average, is the most widely used convex risk measure. The expectation of a random variable is just the sum of Z weighted by the probability:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"mathbbFZ = mathbbE_pZ = sumlimits_omegainOmega p_omega z_omega","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"struct Expectation <: AbstractRiskMeasure end\n\nfunction primal_risk(::Expectation, Z::Vector{<:Real}, p::Vector{Float64})\n return sum(p[i] * Z[i] for i in 1:length(p))\nend","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Let's try it out:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"primal_risk(Expectation(), Z, p)","category":"page"},{"location":"explanation/risk/#WorstCase","page":"Risk aversion","title":"WorstCase","text":"","category":"section"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"The worst-case risk measure, also called the maximum, is another widely used convex risk measure. 
This risk measure doesn't care about the probability vector p, only the cost vector Z:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"mathbbFZ = maxZ = maxlimits_omegainOmega z_omega","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"struct WorstCase <: AbstractRiskMeasure end\n\nfunction primal_risk(::WorstCase, Z::Vector{<:Real}, ::Vector{Float64})\n return maximum(Z)\nend","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Let's try it out:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"primal_risk(WorstCase(), Z, p)","category":"page"},{"location":"explanation/risk/#Entropic","page":"Risk aversion","title":"Entropic","text":"","category":"section"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"A more interesting, and less widely used risk measure is the entropic risk measure. The entropic risk measure is parameterized by a value gamma 0, and computes the risk of a random variable as:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"mathbbF_gammaZ = frac1gammalogleft(mathbbE_pe^gamma Zright) = frac1gammalogleft(sumlimits_omegainOmegap_omega e^gamma z_omegaright)","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"tip: Homework challenge\nProve that the entropic risk measure satisfies the three axioms of a convex risk measure.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"struct Entropic <: AbstractRiskMeasure\n γ::Float64\n function Entropic(γ)\n if !(γ > 0)\n throw(DomainError(γ, \"Entropic risk measure must have γ > 0.\"))\n end\n return new(γ)\n end\nend\n\nfunction primal_risk(F::Entropic, Z::Vector{<:Real}, p::Vector{Float64})\n return 1 / F.γ * log(sum(p[i] * exp(F.γ * Z[i]) for i in 1:length(p)))\nend","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"warning: Warning\nexp(x) overflows when x 709. Therefore, if we are passed a vector of Float64, use arbitrary precision arithmetic with big.(Z).","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"function primal_risk(F::Entropic, Z::Vector{Float64}, p::Vector{Float64})\n return Float64(primal_risk(F, big.(Z), p))\nend","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Let's try it out for different values of gamma:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"for γ in [0.001, 0.01, 0.1, 1.0, 10.0, 100.0, 1_000.0]\n println(\"γ = $(γ), F[Z] = \", primal_risk(Entropic(γ), Z, p))\nend","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"info: Info\nThe entropic has two extremes. As gamma rightarrow 0, the entropic acts like the expectation risk measure, and as gamma rightarrow infty, the entropic acts like the worst-case risk measure.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Computing risk measures this way works well for computing the primal value. 
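As a quick check (a sketch that uses only the data Z, p and the primal_risk methods defined above), we can put the three measures side by side; for any γ > 0 the entropic value lies between the expectation and the worst case:\n\nfor F in (Expectation(), WorstCase(), Entropic(1.0))\n println(nameof(typeof(F)), \": F[Z] = \", primal_risk(F, Z, p))\nend\n\n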
However, there isn't an obvious way to compute a subgradient of the risk-averse cost-to-go function, which we need for our cut calculation.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"There is a nice solution to this problem, and that is to use the dual representation of a risk measure, instead of the primal.","category":"page"},{"location":"explanation/risk/#Dual-risk-measures","page":"Risk aversion","title":"Dual risk measures","text":"","category":"section"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Convex risk measures have a dual representation as follows:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"mathbbFZ = suplimits_q inmathcalM(p) mathbbE_qZ - alpha(p q)","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"where alpha is a concave function that maps the probability vectors p and q to a real number, and mathcalM(p) subseteq mathcalP is a convex subset of the probability simplex:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"mathcalP = p ge 0sumlimits_omegainOmegap_omega = 1","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"The dual of a convex risk measure can be interpreted as taking the expectation of the random variable Z with respect to the worst probability vector q that lies within the set mathcalM, less some concave penalty term alpha(p q).","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"If we define a function dual_risk_inner that computes q and α:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"\"\"\"\n dual_risk_inner(\n F::AbstractRiskMeasure, Z::Vector{Float64}, p::Vector{Float64}\n )::Tuple{Vector{Float64},Float64}\n\nReturn a tuple formed by the worst-case probability vector `q` and the\ncorresponding evaluation `α(p, q)`.\n\"\"\"\nfunction dual_risk_inner end","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"then we can write a generic dual_risk function as:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"function dual_risk(\n F::AbstractRiskMeasure,\n Z::Vector{Float64},\n p::Vector{Float64},\n)\n q, α = dual_risk_inner(F, Z, p)\n return sum(q[i] * Z[i] for i in 1:length(q)) - α\nend","category":"page"},{"location":"explanation/risk/#Expectation-2","page":"Risk aversion","title":"Expectation","text":"","category":"section"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"For the expectation risk measure, mathcalM(p) = p, and alpha(cdot cdot) = 0. 
Therefore:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"function dual_risk_inner(::Expectation, ::Vector{Float64}, p::Vector{Float64})\n return p, 0.0\nend","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"We can check we get the same result as the primal version:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"dual_risk(Expectation(), Z, p) == primal_risk(Expectation(), Z, p)","category":"page"},{"location":"explanation/risk/#Worst-case","page":"Risk aversion","title":"Worst-case","text":"","category":"section"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"For the worst-case risk measure, mathcalM(p) = mathcalP, and alpha(cdot cdot) = 0. Therefore, the dual representation just puts all of the probability weight on the maximum outcome:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"function dual_risk_inner(::WorstCase, Z::Vector{Float64}, ::Vector{Float64})\n q = zeros(length(Z))\n _, index = findmax(Z)\n q[index] = 1.0\n return q, 0.0\nend","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"We can check we get the same result as the primal version:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"dual_risk(WorstCase(), Z, p) == primal_risk(WorstCase(), Z, p)","category":"page"},{"location":"explanation/risk/#Entropic-2","page":"Risk aversion","title":"Entropic","text":"","category":"section"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"For the entropic risk measure, mathcalM(p) = mathcalP, and:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"alpha(p q) = frac1gammasumlimits_omegainOmega q_omega logleft(fracq_omegap_omegaright)","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"One way to solve the dual problem is to explicitly solve a nonlinear optimization problem:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"function dual_risk_inner(F::Entropic, Z::Vector{Float64}, p::Vector{Float64})\n N = length(p)\n model = JuMP.Model(Ipopt.Optimizer)\n JuMP.set_silent(model)\n # For this problem, the solve is more accurate if we turn off problem\n # scaling.\n JuMP.set_optimizer_attribute(model, \"nlp_scaling_method\", \"none\")\n JuMP.@variable(model, 0 <= q[1:N] <= 1)\n JuMP.@constraint(model, sum(q) == 1)\n JuMP.@NLexpression(\n model,\n α,\n 1 / F.γ * sum(q[i] * log(q[i] / p[i]) for i in 1:N),\n )\n JuMP.@NLobjective(model, Max, sum(q[i] * Z[i] for i in 1:N) - α)\n JuMP.optimize!(model)\n return JuMP.value.(q), JuMP.value(α)\nend","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"We can check we get the same result as the primal version:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"for γ in [0.001, 0.01, 0.1, 1.0, 10.0, 100.0]\n primal = primal_risk(Entropic(γ), Z, p)\n dual = dual_risk(Entropic(γ), Z, p)\n success = primal ≈ dual ? 
\"✓\" : \"×\"\n println(\"$(success) γ = $(γ), primal = $(primal), dual = $(dual)\")\nend","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"info: Info\nThis method of solving the dual problem \"on-the-side\" is used by SDDP.jl for a number of risk measures, including a distributionally robust risk measure with the Wasserstein distance. Check out all the risk measures that SDDP.jl supports in Add a risk measure.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"The \"on-the-side\" method is very general, and it lets us incorporate any convex risk measure into SDDP. However, this comes at an increased computational cost and potential numerical issues (e.g., not converging to the exact solution).","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"However, for the entropic risk measure, Dowson, Morton, and Pagnoncelli (2020) derive the following closed form solution for q^*:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"q_omega^* = fracp_omega e^gamma z_omegasumlimits_varphi in Omega p_varphi e^gamma z_varphi","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"This is faster because we don't need to use Ipopt, and it avoids some of the numerical issues associated with solving a nonlinear program.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"function dual_risk_inner(F::Entropic, Z::Vector{Float64}, p::Vector{Float64})\n q, α = zeros(length(p)), big(0.0)\n peγz = p .* exp.(F.γ .* big.(Z))\n sum_peγz = sum(peγz)\n for i in 1:length(q)\n big_q = peγz[i] / sum_peγz\n α += big_q * log(big_q / p[i])\n q[i] = Float64(big_q)\n end\n return q, Float64(α / F.γ)\nend","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"warning: Warning\nAgain, note that we use big to avoid introducing overflow errors, before explicitly casting back to Float64 for the values we return.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"We can check we get the same result as the primal version:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"for γ in [0.001, 0.01, 0.1, 1.0, 10.0, 100.0]\n primal = primal_risk(Entropic(γ), Z, p)\n dual = dual_risk(Entropic(γ), Z, p)\n success = primal ≈ dual ? 
\"✓\" : \"×\"\n println(\"$(success) γ = $(γ), primal = $(primal), dual = $(dual)\")\nend","category":"page"},{"location":"explanation/risk/#Risk-averse-subgradients","page":"Risk aversion","title":"Risk-averse subgradients","text":"","category":"section"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"We ended the section on primal risk measures by explaining how we couldn't use the primal risk measure in the cut calculation because we needed some way of computing a risk-averse subgradient:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"theta ge mathbbF_j in i^+ varphi in Omega_jleftV_j^k(x^prime_k varphi)right + fracddx^primemathbbF_j in i^+ varphi in Omega_jleftV_j^k(x^prime_k varphi)right^top (x^prime - x^prime_k)","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"The reason we use the dual representation is because of the following theorem, which explains how to compute a risk-averse gradient.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"info: The risk-averse subgradient theorem\nLet omega in Omega index a random vector with finite support and with nominal probability mass function, p in mathcalP, which satisfies p 0.Consider a convex risk measure, mathbbF, with a convex risk set, mathcalM(p), so that mathbbF can be expressed as the dual form.Let V(xomega) be convex with respect to x for all fixed omegainOmega, and let lambda(tildex omega) be a subgradient of V(xomega) with respect to x at x = tildex for each omega in Omega.Then, sum_omegainOmegaq^*_omega lambda(tildexomega) is a subgradient of mathbbFV(xomega) at tildex, whereq^* in argmax_q in mathcalM(p)leftmathbbE_qV(tildexomega) - alpha(p q)right","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"This theorem can be a little hard to unpack, so let's see an example:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"function dual_risk_averse_subgradient(\n V::Function,\n # Use automatic differentiation to compute the gradient of V w.r.t. x,\n # given a fixed ω.\n λ::Function = (x, ω) -> ForwardDiff.gradient(x -> V(x, ω), x);\n F::AbstractRiskMeasure,\n Ω::Vector,\n p::Vector{Float64},\n x̃::Vector{Float64},\n)\n # Evaluate the function at x=x̃ for all ω ∈ Ω.\n V_ω = [V(x̃, ω) for ω in Ω]\n # Solve the dual problem to obtain an optimal q^*.\n q, α = dual_risk_inner(F, V_ω, p)\n # Compute the risk-averse subgradient by taking the expectation of the\n # subgradients w.r.t. 
q^*.\n dVdx = sum(q[i] * λ(x̃, ω) for (i, ω) in enumerate(Ω))\n return dVdx\nend","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"We can compare the subgradient obtained with the dual form against the automatic differentiation of the primal_risk function.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"function primal_risk_averse_subgradient(\n V::Function;\n F::AbstractRiskMeasure,\n Ω::Vector,\n p::Vector{Float64},\n x̃::Vector{Float64},\n)\n inner(x) = primal_risk(F, [V(x, ω) for ω in Ω], p)\n return ForwardDiff.gradient(inner, x̃)\nend","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"As our example function, we use:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"V(x, ω) = ω * x[1]^2","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"with:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Ω = [1.0, 2.0, 3.0]","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"and:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"p = [0.3, 0.4, 0.3]","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"at the point:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"x̃ = [3.0]","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"If mathbbF is the expectation risk-measure, then:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"mathbbFV(x omega) = 2 x^2","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"The function evaluation x=3 is 18 and the subgradient is 12. Let's check we get it right with the dual form:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"dual_risk_averse_subgradient(V; F = Expectation(), Ω = Ω, p = p, x̃ = x̃)","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"and the primal form:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"primal_risk_averse_subgradient(V; F = Expectation(), Ω = Ω, p = p, x̃ = x̃)","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"If mathbbF is the worst-case risk measure, then:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"mathbbFV(x omega) = 3 x^2","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"The function evaluation at x=3 is 27, and the subgradient is 18. 
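(That is, 3 × 3² = 27, and the derivative 6x evaluated at x = 3 gives 18.) 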
Let's check we get it right with the dual form:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"dual_risk_averse_subgradient(V; F = WorstCase(), Ω = Ω, p = p, x̃ = x̃)","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"and the primal form:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"primal_risk_averse_subgradient(V; F = WorstCase(), Ω = Ω, p = p, x̃ = x̃)","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"If mathbbF is the entropic risk measure, the math is a little more difficult to derive analytically. However, we can check against our primal version:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"for γ in [0.001, 0.01, 0.1, 1.0, 10.0, 100.0]\n dual =\n dual_risk_averse_subgradient(V; F = Entropic(γ), Ω = Ω, p = p, x̃ = x̃)\n primal = primal_risk_averse_subgradient(\n V;\n F = Entropic(γ),\n Ω = Ω,\n p = p,\n x̃ = x̃,\n )\n success = primal ≈ dual ? \"✓\" : \"×\"\n println(\"$(success) γ = $(γ), primal = $(primal), dual = $(dual)\")\nend","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Uh oh! What happened with the last line? It looks like our primal_risk_averse_subgradient encountered an error and returned a subgradient of NaN. This is because of the overflow issue with exp(x). However, we can be confident that our dual method of computing the risk-averse subgradient is both correct and more numerically robust than the primal version.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"info: Info\nAs another sanity check, notice how as gamma rightarrow 0, we tend toward the solution of the expectation risk-measure [12], and as gamma rightarrow infty, we tend toward the solution of the worst-case risk measure [18].","category":"page"},{"location":"explanation/risk/#Risk-averse-decision-rules:-Part-II","page":"Risk aversion","title":"Risk-averse decision rules: Part II","text":"","category":"section"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Why is the risk-averse subgradient theorem helpful? 
Using the dual representation of a convex risk measure, we can re-write the cut:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"theta ge mathbbF_j in i^+ varphi in Omega_jleftV_j^k(x^prime_k varphi)right + fracddx^primemathbbF_j in i^+ varphi in Omega_jleftV_j^k(x^prime_k varphi)right^top (x^prime - x^prime_k)quad k=1ldotsK","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"as:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"theta ge mathbbE_q_kleftV_j^k(x^prime_k varphi) + fracddx^primeV_j^k(x^prime_k varphi)^top (x^prime - x^prime_k)right - alpha(p q_k)quad k=1ldotsK","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"where q_k = mathrmargsuplimits_q inmathcalM(p) mathbbE_qV_j^k(x_k^prime varphi) - alpha(p q).","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Therefore, we can formulate a risk-averse decision rule as:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"beginaligned\nV_i^K(x omega) = minlimits_barx x^prime u C_i(barx u omega) + theta\n x^prime = T_i(barx u omega) \n u in U_i(barx omega) \n barx = x \n theta ge mathbbE_q_kleftV_j^k(x^prime_k varphi) + fracddx^primeV_j^k(x^prime_k varphi)^top (x^prime - x^prime_k)right - alpha(p q_k)quad k=1ldotsK \n theta ge M\nendaligned","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"where q_k = mathrmargsuplimits_q inmathcalM(p) mathbbE_qV_j^k(x_k^prime varphi) - alpha(p q).","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Thus, to implement risk-averse SDDP, all we need to do is modify the backward pass to include this calculation of q_k, form the cut using q_k instead of p, and subtract the penalty term alpha(p q_k).","category":"page"},{"location":"explanation/risk/#Implementation","page":"Risk aversion","title":"Implementation","text":"","category":"section"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Now we're ready to implement our risk-averse version of SDDP.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"As a prerequisite, we need most of the code from Introductory theory.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"

      \nClick to view code from the tutorial \"Introductory theory\".","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"struct State\n in::JuMP.VariableRef\n out::JuMP.VariableRef\nend\n\nstruct Uncertainty\n parameterize::Function\n Ω::Vector{Any}\n P::Vector{Float64}\nend\n\nstruct Node\n subproblem::JuMP.Model\n states::Dict{Symbol,State}\n uncertainty::Uncertainty\n cost_to_go::JuMP.VariableRef\nend\n\nstruct PolicyGraph\n nodes::Vector{Node}\n arcs::Vector{Dict{Int,Float64}}\nend\n\nfunction Base.show(io::IO, model::PolicyGraph)\n println(io, \"A policy graph with $(length(model.nodes)) nodes\")\n println(io, \"Arcs:\")\n for (from, arcs) in enumerate(model.arcs)\n for (to, probability) in arcs\n println(io, \" $(from) => $(to) w.p. $(probability)\")\n end\n end\n return\nend\n\nfunction PolicyGraph(\n subproblem_builder::Function;\n graph::Vector{Dict{Int,Float64}},\n lower_bound::Float64,\n optimizer,\n)\n nodes = Node[]\n for t in 1:length(graph)\n model = JuMP.Model(optimizer)\n states, uncertainty = subproblem_builder(model, t)\n JuMP.@variable(model, cost_to_go >= lower_bound)\n obj = JuMP.objective_function(model)\n JuMP.@objective(model, Min, obj + cost_to_go)\n if length(graph[t]) == 0\n JuMP.fix(cost_to_go, 0.0; force = true)\n end\n push!(nodes, Node(model, states, uncertainty, cost_to_go))\n end\n return PolicyGraph(nodes, graph)\nend\n\nfunction sample_uncertainty(uncertainty::Uncertainty)\n r = rand()\n for (p, ω) in zip(uncertainty.P, uncertainty.Ω)\n r -= p\n if r < 0.0\n return ω\n end\n end\n return error(\"We should never get here because P should sum to 1.0.\")\nend\n\nfunction sample_next_node(model::PolicyGraph, current::Int)\n if length(model.arcs[current]) == 0\n return nothing\n else\n r = rand()\n for (to, probability) in model.arcs[current]\n r -= probability\n if r < 0.0\n return to\n end\n end\n return nothing\n end\nend\n\nfunction forward_pass(model::PolicyGraph, io::IO = stdout)\n incoming_state =\n Dict(k => JuMP.fix_value(v.in) for (k, v) in model.nodes[1].states)\n simulation_cost = 0.0\n trajectory = Tuple{Int,Dict{Symbol,Float64}}[]\n t = 1\n while t !== nothing\n node = model.nodes[t]\n ω = sample_uncertainty(node.uncertainty)\n node.uncertainty.parameterize(ω)\n for (k, v) in incoming_state\n JuMP.fix(node.states[k].in, v; force = true)\n end\n JuMP.optimize!(node.subproblem)\n if JuMP.termination_status(node.subproblem) != JuMP.MOI.OPTIMAL\n error(\"Something went terribly wrong!\")\n end\n outgoing_state = Dict(k => JuMP.value(v.out) for (k, v) in node.states)\n stage_cost =\n JuMP.objective_value(node.subproblem) - JuMP.value(node.cost_to_go)\n simulation_cost += stage_cost\n incoming_state = outgoing_state\n push!(trajectory, (t, outgoing_state))\n t = sample_next_node(model, t)\n end\n return trajectory, simulation_cost\nend\n\nfunction upper_bound(model::PolicyGraph; replications::Int)\n simulations = [forward_pass(model, devnull) for i in 1:replications]\n z = [s[2] for s in simulations]\n μ = Statistics.mean(z)\n tσ = 1.96 * Statistics.std(z) / sqrt(replications)\n return μ, tσ\nend\n\nfunction lower_bound(model::PolicyGraph)\n node = model.nodes[1]\n bound = 0.0\n for (p, ω) in zip(node.uncertainty.P, node.uncertainty.Ω)\n node.uncertainty.parameterize(ω)\n JuMP.optimize!(node.subproblem)\n bound += p * JuMP.objective_value(node.subproblem)\n end\n return bound\nend\n\nfunction evaluate_policy(\n model::PolicyGraph;\n node::Int,\n incoming_state::Dict{Symbol,Float64},\n 
random_variable,\n)\n the_node = model.nodes[node]\n the_node.uncertainty.parameterize(random_variable)\n for (k, v) in incoming_state\n JuMP.fix(the_node.states[k].in, v; force = true)\n end\n JuMP.optimize!(the_node.subproblem)\n return Dict(\n k => JuMP.value.(v) for\n (k, v) in JuMP.object_dictionary(the_node.subproblem)\n )\nend","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"

      ","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"First, we need to modify the backward pass to compute the cuts using the risk-averse subgradient theorem:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"function backward_pass(\n model::PolicyGraph,\n trajectory::Vector{Tuple{Int,Dict{Symbol,Float64}}},\n io::IO = stdout;\n risk_measure::AbstractRiskMeasure,\n)\n println(io, \"| Backward pass\")\n for i in reverse(1:length(trajectory))\n index, outgoing_states = trajectory[i]\n node = model.nodes[index]\n println(io, \"| | Visiting node $(index)\")\n if length(model.arcs[index]) == 0\n continue\n end\n # =====================================================================\n # New! Create vectors to store the cut expressions, V(x,ω) and p:\n cut_expressions, V_ω, p = JuMP.AffExpr[], Float64[], Float64[]\n # =====================================================================\n for (j, P_ij) in model.arcs[index]\n next_node = model.nodes[j]\n for (k, v) in outgoing_states\n JuMP.fix(next_node.states[k].in, v; force = true)\n end\n for (pφ, φ) in zip(next_node.uncertainty.P, next_node.uncertainty.Ω)\n next_node.uncertainty.parameterize(φ)\n JuMP.optimize!(next_node.subproblem)\n V = JuMP.objective_value(next_node.subproblem)\n dVdx = Dict(\n k => JuMP.reduced_cost(v.in) for (k, v) in next_node.states\n )\n # =============================================================\n # New! Construct and append the expression\n # `V_j^K(x_k, φ) + dVdx_j^K(x'_k, φ)ᵀ(x - x_k)` to the list of\n # cut expressions.\n push!(\n cut_expressions,\n JuMP.@expression(\n node.subproblem,\n V + sum(\n dVdx[k] * (x.out - outgoing_states[k]) for\n (k, x) in node.states\n ),\n )\n )\n # Add the objective value to V_ω:\n push!(V_ω, V)\n # Add the probability to p:\n push!(p, P_ij * pφ)\n # =============================================================\n end\n end\n # =====================================================================\n # New! Using the solutions in V_ω, compute q and α:\n q, α = dual_risk_inner(risk_measure, V_ω, p)\n println(io, \"| | | V_ω = \", V_ω)\n println(io, \"| | | p = \", p)\n println(io, \"| | | q = \", q)\n println(io, \"| | | α = \", α)\n # Then add the cut:\n c = JuMP.@constraint(\n node.subproblem,\n node.cost_to_go >=\n sum(q[i] * cut_expressions[i] for i in 1:length(q)) - α\n )\n # =====================================================================\n println(io, \"| | | Adding cut : \", c)\n end\n return nothing\nend","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"We also need to update the train loop of SDDP to pass a risk measure to the backward pass:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"function train(\n model::PolicyGraph;\n iteration_limit::Int,\n replications::Int,\n # =========================================================================\n # New! Add a risk_measure argument\n risk_measure::AbstractRiskMeasure,\n # =========================================================================\n io::IO = stdout,\n)\n for i in 1:iteration_limit\n println(io, \"Starting iteration $(i)\")\n outgoing_states, _ = forward_pass(model, io)\n # =====================================================================\n # New! 
Pass the risk measure to the backward pass.\n backward_pass(model, outgoing_states, io; risk_measure = risk_measure)\n # =====================================================================\n println(io, \"| Finished iteration\")\n println(io, \"| | lower_bound = \", lower_bound(model))\n end\n μ, tσ = upper_bound(model; replications = replications)\n println(io, \"Upper bound = $(μ) ± $(tσ)\")\n return\nend","category":"page"},{"location":"explanation/risk/#Risk-averse-bounds","page":"Risk aversion","title":"Risk-averse bounds","text":"","category":"section"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"warning: Warning\nThis section is important.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"When we had a risk-neutral policy (i.e., we only used the expectation risk measure), we discussed how we could form valid lower and upper bounds.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"The upper bound is still valid as a Monte Carlo simulation of the expected cost of the policy. (Although this upper bound doesn't capture the change in the policy we wanted to achieve, namely that the impact of the worst outcomes were reduced.)","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"However, if we use a different risk measure, the lower bound is no longer valid!","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"We can still calculate a \"lower bound\" as the objective of the first-stage approximated subproblem, and this will converge to a finite value. However, we can't meaningfully interpret it as a bound with respect to the optimal policy. Therefore, it's best to just ignore the lower bound when training a risk-averse policy.","category":"page"},{"location":"explanation/risk/#Example:-risk-averse-hydro-thermal-scheduling","page":"Risk aversion","title":"Example: risk-averse hydro-thermal scheduling","text":"","category":"section"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Now it's time for an example. 
We create the same problem as Introductory theory:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"model = PolicyGraph(\n graph = [Dict(2 => 1.0), Dict(3 => 1.0), Dict{Int,Float64}()],\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n) do subproblem, t\n JuMP.set_silent(subproblem)\n JuMP.@variable(subproblem, volume_in == 200)\n JuMP.@variable(subproblem, 0 <= volume_out <= 200)\n states = Dict(:volume => State(volume_in, volume_out))\n JuMP.@variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n inflow\n end)\n JuMP.@constraints(\n subproblem,\n begin\n volume_out == volume_in + inflow - hydro_generation - hydro_spill\n demand_constraint, thermal_generation + hydro_generation == 150.0\n end\n )\n fuel_cost = [50.0, 100.0, 150.0]\n JuMP.@objective(subproblem, Min, fuel_cost[t] * thermal_generation)\n uncertainty =\n Uncertainty([0.0, 50.0, 100.0], [1 / 3, 1 / 3, 1 / 3]) do ω\n return JuMP.fix(inflow, ω)\n end\n return states, uncertainty\nend","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Then we train a risk-averse policy, passing a risk measure to train:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"train(\n model;\n iteration_limit = 3,\n replications = 100,\n risk_measure = Entropic(1.0),\n)","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Finally, evaluate the decision rule:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"evaluate_policy(\n model;\n node = 1,\n incoming_state = Dict(:volume => 150.0),\n random_variable = 75,\n)","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"info: Info\nFor this trivial example, the risk-averse policy isn't very different from the policy obtained using the expectation risk-measure. If you try it on some bigger/more interesting problems, you should see the expected cost increase, and the upper tail of the policy decrease.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"This page was generated using Literate.jl.","category":"page"}] +[{"location":"guides/create_a_general_policy_graph/#Create-a-general-policy-graph","page":"Create a general policy graph","title":"Create a general policy graph","text":"","category":"section"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"DocTestSetup = quote\n using SDDP, HiGHS\nend","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"SDDP.jl uses the concept of a policy graph to formulate multistage stochastic programming problems. For more details, read An introduction to SDDP.jl or the paper Dowson, O., (2020). The policy graph decomposition of multistage stochastic optimization problems. Networks, 76(1), 3-23. 
doi.","category":"page"},{"location":"guides/create_a_general_policy_graph/#Creating-a-[SDDP.Graph](@ref)","page":"Create a general policy graph","title":"Creating a SDDP.Graph","text":"","category":"section"},{"location":"guides/create_a_general_policy_graph/#Linear-graphs","page":"Create a general policy graph","title":"Linear graphs","text":"","category":"section"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"Linear policy graphs can be created using the SDDP.LinearGraph function.","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"julia> graph = SDDP.LinearGraph(3)\nRoot\n 0\nNodes\n 1\n 2\n 3\nArcs\n 0 => 1 w.p. 1.0\n 1 => 2 w.p. 1.0\n 2 => 3 w.p. 1.0","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"We can add nodes to a graph using SDDP.add_node and edges using SDDP.add_edge.","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"julia> SDDP.add_node(graph, 4)\n\njulia> SDDP.add_edge(graph, 3 => 4, 1.0)\n\njulia> SDDP.add_edge(graph, 4 => 1, 0.9)\n\njulia> graph\nRoot\n 0\nNodes\n 1\n 2\n 3\n 4\nArcs\n 0 => 1 w.p. 1.0\n 1 => 2 w.p. 1.0\n 2 => 3 w.p. 1.0\n 3 => 4 w.p. 1.0\n 4 => 1 w.p. 0.9","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"Look! We just made a cyclic graph! SDDP.jl can solve infinite horizon problems. The probability on the arc that completes a cycle should be interpreted as a discount factor.","category":"page"},{"location":"guides/create_a_general_policy_graph/#guide_unicyclic_policy_graph","page":"Create a general policy graph","title":"Unicyclic policy graphs","text":"","category":"section"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"Linear policy graphs with a single infinite-horizon cycle can be created using the SDDP.UnicyclicGraph function.","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"julia> SDDP.UnicyclicGraph(0.95; num_nodes = 2)\nRoot\n 0\nNodes\n 1\n 2\nArcs\n 0 => 1 w.p. 1.0\n 1 => 2 w.p. 1.0\n 2 => 1 w.p. 0.95","category":"page"},{"location":"guides/create_a_general_policy_graph/#guide_markovian_policy_graph","page":"Create a general policy graph","title":"Markovian policy graphs","text":"","category":"section"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"Markovian policy graphs can be created using the SDDP.MarkovianGraph function.","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"julia> SDDP.MarkovianGraph(Matrix{Float64}[[1.0]', [0.4 0.6]])\nRoot\n (0, 1)\nNodes\n (1, 1)\n (2, 1)\n (2, 2)\nArcs\n (0, 1) => (1, 1) w.p. 1.0\n (1, 1) => (2, 1) w.p. 0.4\n (1, 1) => (2, 2) w.p. 
0.6","category":"page"},{"location":"guides/create_a_general_policy_graph/#General-graphs","page":"Create a general policy graph","title":"General graphs","text":"","category":"section"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"Arbitrarily complicated graphs can be constructed using SDDP.Graph, SDDP.add_node and SDDP.add_edge. For example","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"julia> graph = SDDP.Graph(:root_node)\nRoot\n root_node\nNodes\n {}\nArcs\n {}\n\njulia> SDDP.add_node(graph, :decision_node)\n\njulia> SDDP.add_edge(graph, :root_node => :decision_node, 1.0)\n\njulia> SDDP.add_edge(graph, :decision_node => :decision_node, 0.9)\n\njulia> graph\nRoot\n root_node\nNodes\n decision_node\nArcs\n root_node => decision_node w.p. 1.0\n decision_node => decision_node w.p. 0.9","category":"page"},{"location":"guides/create_a_general_policy_graph/#Creating-a-policy-graph","page":"Create a general policy graph","title":"Creating a policy graph","text":"","category":"section"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"Once you have constructed an instance of SDDP.Graph, you can create a policy graph by passing the graph as the first argument.","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"julia> graph = SDDP.Graph(\n :root_node,\n [:decision_node],\n [\n (:root_node => :decision_node, 1.0),\n (:decision_node => :decision_node, 0.9)\n ]);\n\njulia> model = SDDP.PolicyGraph(\n graph,\n lower_bound = 0,\n optimizer = HiGHS.Optimizer) do subproblem, node\n println(\"Called from node: \", node)\n end;\nCalled from node: decision_node","category":"page"},{"location":"guides/create_a_general_policy_graph/#Special-cases","page":"Create a general policy graph","title":"Special cases","text":"","category":"section"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"There are two special cases which cover the majority of models in the literature.","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"SDDP.LinearPolicyGraph is a special case where a SDDP.LinearGraph is passed as the first argument.\nSDDP.MarkovianPolicyGraph is a special case where a SDDP.MarkovianGraph is passed as the first argument.","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"Note that the type of the names of all nodes (including the root node) must be the same. In this case, they are Symbols.","category":"page"},{"location":"guides/create_a_general_policy_graph/#Simulating-non-standard-policy-graphs","page":"Create a general policy graph","title":"Simulating non-standard policy graphs","text":"","category":"section"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"If you simulate a policy graph with a node that has outgoing arcs that sum to less than one, you will end up with simulations of different lengths. 
(The most common case is an infinite horizon stochastic program, aka a linear policy graph with a single cycle.)","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"To simulate a fixed number of stages, use:","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"simulations = SDDP.simulate(\n model,\n 1,\n sampling_scheme = SDDP.InSampleMonteCarlo(\n max_depth = 10,\n terminate_on_dummy_leaf = false\n )\n)","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"Here, max_depth controls the number of stages, and terminate_on_dummy_leaf = false stops us from terminating early.","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"See also Simulate using a different sampling scheme.","category":"page"},{"location":"guides/create_a_general_policy_graph/#Creating-a-Markovian-graph-automatically","page":"Create a general policy graph","title":"Creating a Markovian graph automatically","text":"","category":"section"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"SDDP.jl can create a Markovian graph by automatically discretizing a one-dimensional stochastic process and fitting a Markov chain.","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"To access this functionality, pass a function that takes no arguments and returns a Vector{Float64} to SDDP.MarkovianGraph. Two keyword arguments also need to be provided: budget is the total number of nodes in the Markovian graph, and scenarios is the number of realizations of the simulator function used to approximate the graph.","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"In some cases, scenarios may be too small to provide a reasonable fit of the stochastic process. If so, SDDP.jl will automatically try to re-fit the Markov chain using more scenarios.","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"function simulator()\n scenario = zeros(5)\n for i = 2:5\n scenario[i] = scenario[i - 1] + rand() - 0.5\n end\n return scenario\nend\n\nmodel = SDDP.PolicyGraph(\n SDDP.MarkovianGraph(simulator; budget = 10, scenarios = 100),\n sense = :Max,\n upper_bound = 1e3\n) do subproblem, node\n (stage, price) = node\n @variable(subproblem, x >= 0, SDDP.State, initial_value = 1)\n @constraint(subproblem, x.out <= x.in)\n @stageobjective(subproblem, price * x.out)\nend","category":"page"},{"location":"guides/debug_a_model/#Debug-a-model","page":"Debug a model","title":"Debug a model","text":"","category":"section"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"Building multistage stochastic programming models is hard. There are a lot of different pieces that need to be put together, and we typically have no idea of the optimal policy, so it can be hard (impossible?) 
to validate the solution.","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"That said, here are a few tips to verify and validate models built using SDDP.jl.","category":"page"},{"location":"guides/debug_a_model/#Writing-subproblems-to-file","page":"Debug a model","title":"Writing subproblems to file","text":"","category":"section"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"The first step to debug a model is to write out the subproblems to a file in order to check that you are actually building what you think you are building.","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"This can be achieved with the help of two functions: SDDP.parameterize and SDDP.write_subproblem_to_file. The first lets you parameterize a node given a noise, and the second writes out the subproblem to a file.","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"Here is an example model:","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"using SDDP, HiGHS\n\nmodel = SDDP.LinearPolicyGraph(\n stages = 2,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n ) do subproblem, t\n @variable(subproblem, x, SDDP.State, initial_value = 1)\n @variable(subproblem, y)\n @constraint(subproblem, balance, x.in == x.out + y)\n SDDP.parameterize(subproblem, [1.1, 2.2]) do ω\n @stageobjective(subproblem, ω * x.out)\n JuMP.fix(y, ω)\n end\nend\n\n# output\n\nA policy graph with 2 nodes.\n Node indices: 1, 2","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"Initially, model hasn't been parameterized with a concrete realizations of ω. Let's do so now by parameterizing the first subproblem with ω=1.1.","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"julia> SDDP.parameterize(model[1], 1.1)","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"Easy! To parameterize the second stage problem, we would have used model[2].","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"Now to write out the problem to a file. We'll get a few warnings because some variables and constraints don't have names. 
They don't matter, so ignore them.","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"julia> SDDP.write_subproblem_to_file(model[1], \"subproblem.lp\")\n\njulia> read(\"subproblem.lp\") |> String |> print\nminimize\nobj: 1.1 x_out + 1 x2\nsubject to\nbalance: -1 y + 1 x_in - 1 x_out = 0\nBounds\ny = 1.1\nx2 >= 0\nx_in free\nx_out free\nEnd","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"It is easy to see that ω has been set in the objective, and as the fixed value for y.","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"It is also possible to parameterize the subproblems using values for ω that are not in the original problem formulation.","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"julia> SDDP.parameterize(model[1], 3.3)\n\njulia> SDDP.write_subproblem_to_file(model[1], \"subproblem.lp\")\n\njulia> read(\"subproblem.lp\") |> String |> print\nminimize\nobj: 3.3 x_out + 1 x2\nsubject to\nbalance: -1 y + 1 x_in - 1 x_out = 0\nBounds\ny = 3.3\nx2 >= 0\nx_in free\nx_out free\nEnd\n\njulia> rm(\"subproblem.lp\") # Clean up.","category":"page"},{"location":"guides/debug_a_model/#Solve-the-deterministic-equivalent","page":"Debug a model","title":"Solve the deterministic equivalent","text":"","category":"section"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"Sometimes, it can be helpful to solve the deterministic equivalent of a problem in order to obtain an exact solution to the problem. To obtain a JuMP model that represents the deterministic equivalent, use SDDP.deterministic_equivalent. The returned model is just a normal JuMP model. Use JuMP to optimize it and query the solution.","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"julia> det_equiv = SDDP.deterministic_equivalent(model, HiGHS.Optimizer)\nA JuMP Model\nFeasibility problem with:\nVariables: 24\n`AffExpr`-in-`MathOptInterface.EqualTo{Float64}`: 10 constraints\n`VariableRef`-in-`MathOptInterface.EqualTo{Float64}`: 8 constraints\n`VariableRef`-in-`MathOptInterface.GreaterThan{Float64}`: 6 constraints\n`VariableRef`-in-`MathOptInterface.LessThan{Float64}`: 4 constraints\nModel mode: AUTOMATIC\nCachingOptimizer state: EMPTY_OPTIMIZER\nSolver name: HiGHS\n\njulia> set_silent(det_equiv)\n\njulia> optimize!(det_equiv)\n\njulia> objective_value(det_equiv)\n-5.472500000000001","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"warning: Warning\nThe deterministic equivalent scales poorly with problem size. 
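This is because it contains a copy of every subproblem for every possible sequence of noise realizations, so the number of variables and constraints grows exponentially with the number of stages. 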
Only use this on small problems!","category":"page"},{"location":"guides/add_multidimensional_noise/#Add-multi-dimensional-noise-terms","page":"Add multi-dimensional noise terms","title":"Add multi-dimensional noise terms","text":"","category":"section"},{"location":"guides/add_multidimensional_noise/","page":"Add multi-dimensional noise terms","title":"Add multi-dimensional noise terms","text":"DocTestSetup = quote\n using SDDP, HiGHS\nend","category":"page"},{"location":"guides/add_multidimensional_noise/","page":"Add multi-dimensional noise terms","title":"Add multi-dimensional noise terms","text":"Multi-dimensional stagewise-independent random variables can be created by forming the Cartesian product of the random variables.","category":"page"},{"location":"guides/add_multidimensional_noise/#A-simple-example","page":"Add multi-dimensional noise terms","title":"A simple example","text":"","category":"section"},{"location":"guides/add_multidimensional_noise/","page":"Add multi-dimensional noise terms","title":"Add multi-dimensional noise terms","text":"If the sample space and probabilities are given as vectors for each marginal distribution, do:","category":"page"},{"location":"guides/add_multidimensional_noise/","page":"Add multi-dimensional noise terms","title":"Add multi-dimensional noise terms","text":"julia> model = SDDP.LinearPolicyGraph(\n stages = 3,\n lower_bound = 0,\n optimizer = HiGHS.Optimizer,\n ) do subproblem, t\n @variable(subproblem, x, SDDP.State, initial_value = 0.0)\n Ω = [(value = v, coefficient = c) for v in [1, 2] for c in [3, 4, 5]]\n P = [v * c for v in [0.5, 0.5] for c in [0.3, 0.5, 0.2]]\n SDDP.parameterize(subproblem, Ω, P) do ω\n JuMP.fix(x.out, ω.value)\n @stageobjective(subproblem, ω.coefficient * x.out)\n println(\"ω is: \", ω)\n end\n end;\n\njulia> SDDP.simulate(model, 1);\nω is: (value = 1, coefficient = 4)\nω is: (value = 1, coefficient = 3)\nω is: (value = 2, coefficient = 4)","category":"page"},{"location":"guides/add_multidimensional_noise/#Using-Distributions.jl","page":"Add multi-dimensional noise terms","title":"Using Distributions.jl","text":"","category":"section"},{"location":"guides/add_multidimensional_noise/","page":"Add multi-dimensional noise terms","title":"Add multi-dimensional noise terms","text":"For sampling multidimensional random variates, it can be useful to use the Product type from Distributions.jl.","category":"page"},{"location":"guides/add_multidimensional_noise/#Finite-discrete-distributions","page":"Add multi-dimensional noise terms","title":"Finite discrete distributions","text":"","category":"section"},{"location":"guides/add_multidimensional_noise/","page":"Add multi-dimensional noise terms","title":"Add multi-dimensional noise terms","text":"There are several ways to go about this. If the sample space is finite, and small enough that it makes sense to enumerate each element, we can use Base.product and Distributions.support to generate the entire sample space Ω from each of the marginal distributions. 
","category":"page"},{"location":"guides/add_multidimensional_noise/","page":"Add multi-dimensional noise terms","title":"Add multi-dimensional noise terms","text":"We can then evaluate the density function of the product distribution on each element of this space to get the vector of corresponding probabilities, P.","category":"page"},{"location":"guides/add_multidimensional_noise/","page":"Add multi-dimensional noise terms","title":"Add multi-dimensional noise terms","text":"julia> import Distributions\n\njulia> distributions = [\n Distributions.Binomial(10, 0.5),\n Distributions.Bernoulli(0.5),\n Distributions.truncated(Distributions.Poisson(5), 2, 8)\n ];\n\njulia> supports = Distributions.support.(distributions);\n\njulia> Ω = vec([collect(ω) for ω in Base.product(supports...)]);\n\njulia> P = [Distributions.pdf(Distributions.Product(distributions), ω) for ω in Ω];\n\njulia> model = SDDP.LinearPolicyGraph(\n stages = 3,\n lower_bound = 0,\n optimizer = HiGHS.Optimizer,\n ) do subproblem, t\n @variable(subproblem, x, SDDP.State, initial_value = 0.0)\n SDDP.parameterize(subproblem, Ω, P) do ω\n JuMP.fix(x.out, ω[1])\n @stageobjective(subproblem, ω[2] * x.out + ω[3])\n println(\"ω is: \", ω)\n end\n end;\n\njulia> SDDP.simulate(model, 1);\nω is: [10, 0, 3]\nω is: [0, 1, 6]\nω is: [6, 0, 5]","category":"page"},{"location":"guides/add_multidimensional_noise/#Sampling","page":"Add multi-dimensional noise terms","title":"Sampling","text":"","category":"section"},{"location":"guides/add_multidimensional_noise/","page":"Add multi-dimensional noise terms","title":"Add multi-dimensional noise terms","text":"For sample spaces that are too large to explicitly represent, we can instead approximate the distribution by a sample of N points. Now Ω is a sample from the full sample space, and P is the uniform distribution over those points. 
Points with higher density in the full sample space will appear more frequently in Ω.","category":"page"},{"location":"guides/add_multidimensional_noise/","page":"Add multi-dimensional noise terms","title":"Add multi-dimensional noise terms","text":"julia> import Distributions\n\njulia> distributions = Distributions.Product([\n Distributions.Binomial(100, 0.5),\n Distributions.Geometric(1 / 20),\n Distributions.Poisson(20),\n ]);\n\njulia> N = 100;\n\njulia> Ω = [rand(distributions) for _ in 1:N];\n\njulia> P = fill(1 / N, N);\n\njulia> model = SDDP.LinearPolicyGraph(\n stages = 3,\n lower_bound = 0,\n optimizer = HiGHS.Optimizer,\n ) do subproblem, t\n @variable(subproblem, x, SDDP.State, initial_value = 0.0)\n SDDP.parameterize(subproblem, Ω, P) do ω\n JuMP.fix(x.out, ω[1])\n @stageobjective(subproblem, ω[2] * x.out + ω[3])\n println(\"ω is: \", ω)\n end\n end;\n\njulia> SDDP.simulate(model, 1);\nω is: [54, 38, 19]\nω is: [43, 3, 13]\nω is: [43, 4, 17]","category":"page"},{"location":"examples/booking_management/","page":"Booking management","title":"Booking management","text":"EditURL = \"booking_management.jl\"","category":"page"},{"location":"examples/booking_management/#Booking-management","page":"Booking management","title":"Booking management","text":"","category":"section"},{"location":"examples/booking_management/","page":"Booking management","title":"Booking management","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/booking_management/","page":"Booking management","title":"Booking management","text":"This example concerns the acceptance of booking requests for rooms in a hotel in the lead up to a large event.","category":"page"},{"location":"examples/booking_management/","page":"Booking management","title":"Booking management","text":"Each stage, we receive a booking request and can choose to accept or decline it. 
Once accepted, bookings cannot be terminated.","category":"page"},{"location":"examples/booking_management/","page":"Booking management","title":"Booking management","text":"using SDDP, HiGHS, Test\n\nfunction booking_management_model(num_days, num_rooms, num_requests)\n # maximum revenue that could be accrued.\n max_revenue = (num_rooms + num_requests) * num_days * num_rooms\n # booking_requests is a vector of {0,1} arrays of size\n # (num_days x num_rooms) if the room is requested.\n booking_requests = Array{Int,2}[]\n for room in 1:num_rooms\n for day in 1:num_days\n # note: length_of_stay is 0 indexed to avoid unnecessary +/- 1\n # on the indexing\n for length_of_stay in 0:(num_days-day)\n req = zeros(Int, (num_rooms, num_days))\n req[room:room, day.+(0:length_of_stay)] .= 1\n push!(booking_requests, req)\n end\n end\n end\n\n return model = SDDP.LinearPolicyGraph(\n stages = num_requests,\n upper_bound = max_revenue,\n sense = :Max,\n optimizer = HiGHS.Optimizer,\n ) do sp, stage\n @variable(\n sp,\n 0 <= vacancy[room = 1:num_rooms, day = 1:num_days] <= 1,\n SDDP.State,\n Bin,\n initial_value = 1\n )\n @variables(\n sp,\n begin\n # Accept request for booking of room for length of time.\n 0 <= accept_request <= 1, Bin\n # Accept a booking for an individual room on an individual day.\n 0 <= room_request_accepted[1:num_rooms, 1:num_days] <= 1, Bin\n # Helper for JuMP.fix\n req[1:num_rooms, 1:num_days]\n end\n )\n for room in 1:num_rooms, day in 1:num_days\n @constraints(\n sp,\n begin\n # Update vacancy if we accept a room request\n vacancy[room, day].out ==\n vacancy[room, day].in - room_request_accepted[room, day]\n # Can't accept a request of a filled room\n room_request_accepted[room, day] <= vacancy[room, day].in\n # Can't accept invididual room request if entire request is declined\n room_request_accepted[room, day] <= accept_request\n # Can't accept request if room not requested\n room_request_accepted[room, day] <= req[room, day]\n # Accept all individual rooms is entire request is accepted\n room_request_accepted[room, day] + (1 - accept_request) >= req[room, day]\n end\n )\n end\n SDDP.parameterize(sp, booking_requests) do request\n return JuMP.fix.(req, request)\n end\n @stageobjective(\n sp,\n sum(\n (room + stage - 1) * room_request_accepted[room, day] for\n room in 1:num_rooms for day in 1:num_days\n )\n )\n end\nend\n\nfunction booking_management(duality_handler)\n m_1_2_5 = booking_management_model(1, 2, 5)\n SDDP.train(m_1_2_5; log_frequency = 5, duality_handler = duality_handler)\n if duality_handler == SDDP.ContinuousConicDuality()\n @test SDDP.calculate_bound(m_1_2_5) >= 7.25 - 1e-4\n else\n @test isapprox(SDDP.calculate_bound(m_1_2_5), 7.25, atol = 0.02)\n end\n\n m_2_2_3 = booking_management_model(2, 2, 3)\n SDDP.train(m_2_2_3; log_frequency = 10, duality_handler = duality_handler)\n if duality_handler == SDDP.ContinuousConicDuality()\n @test SDDP.calculate_bound(m_1_2_5) > 6.13\n else\n @test isapprox(SDDP.calculate_bound(m_2_2_3), 6.13, atol = 0.02)\n end\nend\n\nbooking_management(SDDP.ContinuousConicDuality())","category":"page"},{"location":"examples/booking_management/","page":"Booking management","title":"Booking management","text":"New version of HiGHS stalls booking_management(SDDP.LagrangianDuality())","category":"page"},{"location":"examples/booking_management/","page":"Booking management","title":"Booking management","text":"","category":"page"},{"location":"examples/booking_management/","page":"Booking management","title":"Booking 
management","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/no_strong_duality/","page":"No strong duality","title":"No strong duality","text":"EditURL = \"no_strong_duality.jl\"","category":"page"},{"location":"examples/no_strong_duality/#No-strong-duality","page":"No strong duality","title":"No strong duality","text":"","category":"section"},{"location":"examples/no_strong_duality/","page":"No strong duality","title":"No strong duality","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/no_strong_duality/","page":"No strong duality","title":"No strong duality","text":"This example is interesting, because strong duality doesn't hold for the extensive form (see if you can show why!), but we still converge.","category":"page"},{"location":"examples/no_strong_duality/","page":"No strong duality","title":"No strong duality","text":"using SDDP, HiGHS, Test\n\nfunction no_strong_duality()\n model = SDDP.PolicyGraph(\n SDDP.Graph(\n :root,\n [:node],\n [(:root => :node, 1.0), (:node => :node, 0.5)],\n ),\n optimizer = HiGHS.Optimizer,\n lower_bound = 0.0,\n ) do sp, t\n @variable(sp, x, SDDP.State, initial_value = 1.0)\n @stageobjective(sp, x.out)\n @constraint(sp, x.in == x.out)\n end\n SDDP.train(model)\n @test SDDP.calculate_bound(model) ≈ 2.0 atol = 1e-5\n return\nend\n\nno_strong_duality()","category":"page"},{"location":"examples/no_strong_duality/","page":"No strong duality","title":"No strong duality","text":"","category":"page"},{"location":"examples/no_strong_duality/","page":"No strong duality","title":"No strong duality","text":"This page was generated using Literate.jl.","category":"page"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"CurrentModule = SDDP","category":"page"},{"location":"guides/add_integrality/#Integrality","page":"Integrality","title":"Integrality","text":"","category":"section"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"There's nothing special about binary and integer variables in SDDP.jl. Your models may contain a mix of binary, integer, or continuous state and control variables. 
Use the standard JuMP syntax to add binary or integer variables.","category":"page"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"For example:","category":"page"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"using SDDP, HiGHS\nmodel = SDDP.LinearPolicyGraph(\n stages = 3,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n) do sp, t\n @variable(sp, 0 <= x <= 100, Int, SDDP.State, initial_value = 0)\n @variable(sp, 0 <= u <= 200, integer = true)\n @variable(sp, v >= 0)\n @constraint(sp, x.out == x.in + u + v - 150)\n @stageobjective(sp, 2u + 6v + x.out)\nend","category":"page"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"If you want finer control over how SDDP.jl computes subgradients in the backward pass, you can pass an SDDP.AbstractDualityHandler to the duality_handler argument of SDDP.train.","category":"page"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"See Duality handlers for the list of handlers you can pass.","category":"page"},{"location":"guides/add_integrality/#Convergence","page":"Integrality","title":"Convergence","text":"","category":"section"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"SDDP.jl cannot guarantee that it will find a globally optimal policy when some of the variables are discrete. However, in most cases we find that it can still find an integer feasible policy that performs well in simulation.","category":"page"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"Moreover, when the number of nodes in the graph is large, or there is uncertainty, we are not aware of another algorithm that can claim to find a globally optimal policy.","category":"page"},{"location":"guides/add_integrality/#Does-SDDP.jl-implement-the-SDDiP-algorithm?","page":"Integrality","title":"Does SDDP.jl implement the SDDiP algorithm?","text":"","category":"section"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"Most discussions of SDDiP in the literature confuse two unrelated things.","category":"page"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"First, how to compute dual variables\nSecond, when the algorithm will converge to a globally optimal policy.","category":"page"},{"location":"guides/add_integrality/#Computing-dual-variables","page":"Integrality","title":"Computing dual variables","text":"","category":"section"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"The stochastic dual dynamic programming algorithm requires a subgradient of the objective with respect to the incoming state variable. 
","category":"page"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"One way to obtain a valid subgradient is to compute an optimal value of the dual variable lambda in the following subproblem:","category":"page"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"beginaligned\nV_i(x omega) = minlimits_barx x^prime u C_i(barx u omega) + mathbbE_j in i^+ varphi in Omega_jV_j(x^prime varphi)\n x^prime = T_i(barx u omega) \n u in U_i(barx omega) \n barx = x quad lambda\nendaligned","category":"page"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"The easiest option is to relax integrality of the discrete variables to form a linear program and then use linear programming duality to obtain the dual. But we could also use Lagrangian duality without needing to relax the integrality constraints.","category":"page"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"To compute the Lagrangian dual lambda, we penalize lambda^top(barx - x) in the objective instead of enforcing the constraint:","category":"page"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"beginaligned\nmaxlimits_lambdaminlimits_barx x^prime u C_i(barx u omega) + mathbbE_j in i^+ varphi in Omega_jV_j(x^prime varphi) - lambda^top(barx - x)\n x^prime = T_i(barx u omega) \n u in U_i(barx omega)\nendaligned","category":"page"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"You can use Lagrangian duality in SDDP.jl by passing SDDP.LagrangianDuality to the duality_handler argument of SDDP.train.","category":"page"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"Compared with linear programming duality, the Lagrangian problem is difficult to solve because it requires the solution of many mixed-integer programs instead of a single linear program. This is one reason why \"SDDiP\" has poor performance.","category":"page"},{"location":"guides/add_integrality/#Convergence-2","page":"Integrality","title":"Convergence","text":"","category":"section"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"The second part to SDDiP is a very tightly scoped claim: if all of the state variables are binary and the algorithm uses Lagrangian duality to compute a subgradient, then it will converge to an optimal policy.","category":"page"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"In many cases, papers claim to \"do SDDiP,\" but they have state variables which are not binary. In these cases, the algorithm is not guaranteed to converge to a globally optimal policy.","category":"page"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"One work-around that has been suggested is to discretize the state variables into a set of binary state variables. 
However, this leads to a large number of binary state variables, which is another reason why \"SDDiP\" has poor performance.","category":"page"},{"location":"guides/add_integrality/","page":"Integrality","title":"Integrality","text":"In general, we recommend that you introduce integer variables into your model without fear of the consequences, and that you treat the resulting policy as a good heuristic, rather than an attempt to find a globally optimal policy.","category":"page"},{"location":"examples/StructDualDynProg.jl_prob5.2_2stages/","page":"StructDualDynProg: Problem 5.2, 2 stages","title":"StructDualDynProg: Problem 5.2, 2 stages","text":"EditURL = \"StructDualDynProg.jl_prob5.2_2stages.jl\"","category":"page"},{"location":"examples/StructDualDynProg.jl_prob5.2_2stages/#StructDualDynProg:-Problem-5.2,-2-stages","page":"StructDualDynProg: Problem 5.2, 2 stages","title":"StructDualDynProg: Problem 5.2, 2 stages","text":"","category":"section"},{"location":"examples/StructDualDynProg.jl_prob5.2_2stages/","page":"StructDualDynProg: Problem 5.2, 2 stages","title":"StructDualDynProg: Problem 5.2, 2 stages","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/StructDualDynProg.jl_prob5.2_2stages/","page":"StructDualDynProg: Problem 5.2, 2 stages","title":"StructDualDynProg: Problem 5.2, 2 stages","text":"This example comes from StochasticDualDynamicProgramming.jl","category":"page"},{"location":"examples/StructDualDynProg.jl_prob5.2_2stages/","page":"StructDualDynProg: Problem 5.2, 2 stages","title":"StructDualDynProg: Problem 5.2, 2 stages","text":"using SDDP, HiGHS, Test\n\nfunction test_prob52_2stages()\n model = SDDP.LinearPolicyGraph(\n stages = 2,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n ) do subproblem, stage\n # ========== Problem data ==========\n n = 4\n m = 3\n i_c = [16, 5, 32, 2]\n C = [25, 80, 6.5, 160]\n T = [8760, 7000, 1500] / 8760\n D2 = [diff([0, 3919, 7329, 10315]) diff([0, 7086, 9004, 11169])]\n p2 = [0.9, 0.1]\n # ========== State Variables ==========\n @variable(subproblem, x[i = 1:n] >= 0, SDDP.State, initial_value = 0.0)\n # ========== Variables ==========\n @variables(subproblem, begin\n y[1:n, 1:m] >= 0\n v[1:n] >= 0\n penalty >= 0\n rhs_noise[1:m] # Dummy variable for RHS noise term.\n end)\n # ========== Constraints ==========\n @constraints(\n subproblem,\n begin\n [i = 1:n], x[i].out == x[i].in + v[i]\n [i = 1:n], sum(y[i, :]) <= x[i].in\n [j = 1:m], sum(y[:, j]) + penalty >= rhs_noise[j]\n end\n )\n if stage == 2\n # No investment in last stage.\n @constraint(subproblem, sum(v) == 0)\n end\n # ========== Uncertainty ==========\n if stage != 1 # no uncertainty in first stage\n SDDP.parameterize(subproblem, 1:size(D2, 2), p2) do ω\n for j in 1:m\n JuMP.fix(rhs_noise[j], D2[j, ω])\n end\n end\n end\n # ========== Stage objective ==========\n @stageobjective(subproblem, i_c' * v + C' * y * T + 1e6 * penalty)\n return\n end\n SDDP.train(model; log_frequency = 10)\n @test SDDP.calculate_bound(model) ≈ 340315.52 atol = 0.1\n return\nend\n\ntest_prob52_2stages()","category":"page"},{"location":"examples/StructDualDynProg.jl_prob5.2_2stages/","page":"StructDualDynProg: Problem 5.2, 2 stages","title":"StructDualDynProg: Problem 5.2, 2 stages","text":"","category":"page"},{"location":"examples/StructDualDynProg.jl_prob5.2_2stages/","page":"StructDualDynProg: Problem 5.2, 2 stages","title":"StructDualDynProg: Problem 5.2, 2 stages","text":"This page was generated using 
Literate.jl.","category":"page"},{"location":"examples/stochastic_all_blacks/","page":"Stochastic All Blacks","title":"Stochastic All Blacks","text":"EditURL = \"stochastic_all_blacks.jl\"","category":"page"},{"location":"examples/stochastic_all_blacks/#Stochastic-All-Blacks","page":"Stochastic All Blacks","title":"Stochastic All Blacks","text":"","category":"section"},{"location":"examples/stochastic_all_blacks/","page":"Stochastic All Blacks","title":"Stochastic All Blacks","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/stochastic_all_blacks/","page":"Stochastic All Blacks","title":"Stochastic All Blacks","text":"using SDDP, HiGHS, Test\n\nfunction stochastic_all_blacks()\n # Number of time periods\n T = 3\n # Number of seats\n N = 2\n # R_ij = price of seat i at time j\n R = [3 3 6; 3 3 6]\n # Number of noises\n s = 3\n offers = [\n [[1, 1], [0, 0], [1, 1]],\n [[1, 0], [0, 0], [0, 0]],\n [[0, 1], [1, 0], [1, 1]],\n ]\n\n model = SDDP.LinearPolicyGraph(\n stages = T,\n sense = :Max,\n upper_bound = 100.0,\n optimizer = HiGHS.Optimizer,\n ) do sp, stage\n # Seat remaining?\n @variable(sp, 0 <= x[1:N] <= 1, SDDP.State, Bin, initial_value = 1)\n # Action: accept offer, or don't accept offer\n # We are allowed to accept some of the seats offered but not others\n @variable(sp, accept_offer[1:N], Bin)\n @variable(sp, offers_made[1:N])\n # Balance on seats\n @constraint(\n sp,\n balance[i in 1:N],\n x[i].in - x[i].out == accept_offer[i]\n )\n @stageobjective(sp, sum(R[i, stage] * accept_offer[i] for i in 1:N))\n SDDP.parameterize(sp, offers[stage]) do o\n return JuMP.fix.(offers_made, o)\n end\n @constraint(sp, accept_offer .<= offers_made)\n end\n\n SDDP.train(model; duality_handler = SDDP.LagrangianDuality())\n @test SDDP.calculate_bound(model) ≈ 8.0\n return\nend\n\nstochastic_all_blacks()","category":"page"},{"location":"examples/stochastic_all_blacks/","page":"Stochastic All Blacks","title":"Stochastic All Blacks","text":"","category":"page"},{"location":"examples/stochastic_all_blacks/","page":"Stochastic All Blacks","title":"Stochastic All Blacks","text":"This page was generated using Literate.jl.","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"EditURL = \"example_milk_producer.jl\"","category":"page"},{"location":"tutorial/example_milk_producer/#Example:-the-milk-producer","page":"Example: the milk producer","title":"Example: the milk producer","text":"","category":"section"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"(Image: ) (Image: )","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"The purpose of this tutorial is to demonstrate how to fit a Markovian policy graph to a univariate stochastic process.","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"This tutorial uses the following packages:","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"using SDDP\nimport HiGHS\nimport Plots","category":"page"},{"location":"tutorial/example_milk_producer/#Background","page":"Example: the milk 
producer","title":"Background","text":"","category":"section"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"A company produces milk for sale on a spot market each month. The quantity of milk they produce is uncertain, and so too is the price on the spot market.","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"The company can store the milk, but, over time, some milk spoils and must be discarded. Eventually the milk expires and is worthless.","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"The spot price is determined by an auction system, and so varies from month to month, but demonstrates serial correlation. In each auction, there is sufficient demand that the milk producer finds a buyer for all their widgets, regardless of the quantity they supply. Furthermore, the spot price is independent of the milk producer (they are a small player in the market).","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"The spot price is highly volatile, and is the result of a process that is out of the control of the company. To counteract their price risk, the company engages in a forward contracting programme.","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"The forward contracting programme is a deal for physical milk at a future date in time, up to four months in the future.","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"The futures price is the current spot price, plus some forward contango (the buyers gain certainty that they will receive the milk in the future).","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"In general, the milk company should forward contract (since they reduce their price risk), however they also have production risk. Therefore, it may be the case that they forward contract a fixed amount, but find that they do not produce enough milk to meet the fixed demand. 
They are then forced to buy additional milk on the spot market.","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"The goal of the milk company is to choose the extent to which they forward contract in order to maximise (risk-adjusted) revenues, whilst managing their production risk.","category":"page"},{"location":"tutorial/example_milk_producer/#A-stochastic-process-for-price","page":"Example: the milk producer","title":"A stochastic process for price","text":"","category":"section"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"It is outside the scope of this tutorial, but assume that we have gone away and analysed historical data to fit a stochastic process to the sequence of monthly auction spot prices.","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"One plausible model is a multiplicative auto-regressive model of order one, where the white noise term is modeled by a finite distribution of empirical residuals. We can simulate this stochastic process as follows:","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"function simulator()\n residuals = [0.0987, 0.199, 0.303, 0.412, 0.530, 0.661, 0.814, 1.010, 1.290]\n residuals = 0.1 * vcat(-residuals, 0.0, residuals)\n scenario = zeros(12)\n y, μ, α = 4.5, 6.0, 0.05\n for t in 1:12\n y = exp((1 - α) * log(y) + α * log(μ) + rand(residuals))\n scenario[t] = clamp(y, 3.0, 9.0)\n end\n return scenario\nend\n\nsimulator()","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"It may be helpful to visualize a number of simulations of the price process:","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"plot = Plots.plot(\n [simulator() for _ in 1:500];\n color = \"gray\",\n opacity = 0.2,\n legend = false,\n xlabel = \"Month\",\n ylabel = \"Price [\\$/kg]\",\n xlims = (1, 12),\n ylims = (3, 9),\n)","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"The prices gradually revert to the mean of $6/kg, and there is high volatility.","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"We can't incorporate this price process directly into SDDP.jl, but we can fit a SDDP.MarkovianGraph directly from the simulator:","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"graph = SDDP.MarkovianGraph(simulator; budget = 60, scenarios = 10_000);\nnothing # hide","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"Here budget is the number of nodes in the policy graph, and scenarios is the number of simulations to use when estimating the transition probabilities.","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"The graph contains too many nodes to 
be show, but we can plot it:","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"for ((t, price), edges) in graph.nodes\n for ((t′, price′), probability) in edges\n Plots.plot!(\n plot,\n [t, t′],\n [price, price′];\n color = \"red\",\n width = 3 * probability,\n )\n end\nend\n\nplot","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"That looks okay. Try changing budget and scenarios to see how different Markovian policy graphs can be created.","category":"page"},{"location":"tutorial/example_milk_producer/#Model","page":"Example: the milk producer","title":"Model","text":"","category":"section"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"Now that we have a Markovian graph, we can build the model. See if you can work out how we arrived at this formulation by reading the background description. Do all the variables and constraints make sense?","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"model = SDDP.PolicyGraph(\n graph;\n sense = :Max,\n upper_bound = 1e4,\n optimizer = HiGHS.Optimizer,\n) do sp, node\n t, price = node\n c_contango = [1.0, 1.025, 1.05, 1.075]\n c_transaction, c_perish_factor, c_buy_premium = 0.01, 0.95, 1.5\n F, P = length(c_contango), 5\n @variable(sp, 0 <= x_forward[1:F], SDDP.State, initial_value = 0)\n @variable(sp, 0 <= x_stock[p = 1:P], SDDP.State, initial_value = 0)\n @variable(sp, 0 <= u_spot_sell[1:P])\n @variable(sp, 0 <= u_spot_buy)\n @variable(sp, 0 <= u_forward_deliver[1:P])\n @variable(sp, 0 <= u_forward_sell[1:F] <= 10)\n @variable(sp, 0 <= u_production)\n for f in 1:F\n if t + f >= 12\n fix(u_forward_sell[f], 0.0; force = true)\n end\n end\n @constraint(\n sp,\n [i = 1:F-1],\n x_forward[i].out == x_forward[i+1].in + u_forward_sell[i],\n )\n @constraint(sp, x_forward[F].out == u_forward_sell[F])\n @constraint(sp, x_forward[1].in == sum(u_forward_deliver))\n @constraint(\n sp,\n x_stock[1].out ==\n u_production - u_forward_deliver[1] - u_spot_sell[1] + u_spot_buy\n )\n @constraint(\n sp,\n [i = 2:P],\n x_stock[i].out ==\n c_perish_factor * x_stock[i-1].in - u_forward_deliver[i] -\n u_spot_sell[i]\n )\n Ω = [(price, (production = p,)) for p in range(0.1, 0.2; length = 5)]\n SDDP.parameterize(sp, Ω) do (price, ω)\n set_upper_bound(u_production, ω.production)\n @stageobjective(\n sp,\n price * sum(u_spot_sell) +\n price * sum(c_contango[f] * u_forward_sell[f] for f in 1:F) -\n price * c_buy_premium * u_spot_buy -\n c_transaction * sum(u_forward_sell)\n )\n end\nend","category":"page"},{"location":"tutorial/example_milk_producer/#Training-a-policy","page":"Example: the milk producer","title":"Training a policy","text":"","category":"section"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"Now we have a model, we train a policy. The SDDP.SimulatorSamplingScheme is used in the forward pass. It generates an out-of-sample sequence of prices using simulator and traverses the closest sequence of nodes in the policy graph. 
When calling SDDP.parameterize for each subproblem, it uses the new out-of-sample price instead of the price associated with the Markov node.","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"SDDP.train(\n model;\n time_limit = 20,\n risk_measure = SDDP.EAVaR(; lambda = 0.5, beta = 0.25),\n sampling_scheme = SDDP.SimulatorSamplingScheme(simulator),\n)","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"warning: Warning\nWe're intentionally terminating the training early so that the documentation doesn't take too long to build. If you run this example locally, increase the time limit.","category":"page"},{"location":"tutorial/example_milk_producer/#Simulating-the-policy","page":"Example: the milk producer","title":"Simulating the policy","text":"","category":"section"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"When simulating the policy, we can also use the SDDP.SimulatorSamplingScheme.","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"simulations = SDDP.simulate(\n model,\n 200,\n Symbol[:x_forward, :x_stock, :u_spot_sell, :u_spot_buy];\n sampling_scheme = SDDP.SimulatorSamplingScheme(simulator),\n);\nnothing # hide","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"To show how the sampling scheme uses the new out-of-sample price instead of the price associated with the Markov node, compare the index of the Markov state visited in stage 12 of the first simulation:","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"simulations[1][12][:node_index]","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"to the realization of the noise (price, ω) passed to SDDP.parameterize:","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"simulations[1][12][:noise_term]","category":"page"},{"location":"tutorial/example_milk_producer/#Visualizing-the-policy","page":"Example: the milk producer","title":"Visualizing the policy","text":"","category":"section"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"Finally, we can plot the policy to gain insight (although note that we terminated the training early, so we should run the re-train the policy for more iterations before making too many judgements).","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"Plots.plot(\n SDDP.publication_plot(simulations; title = \"sum(x_stock.out)\") do data\n return sum(y.out for y in data[:x_stock])\n end,\n SDDP.publication_plot(simulations; title = \"sum(x_forward.out)\") do data\n return sum(y.out for y in data[:x_forward])\n end,\n SDDP.publication_plot(simulations; title = \"u_spot_buy\") do data\n return data[:u_spot_buy]\n end,\n SDDP.publication_plot(simulations; title = \"sum(u_spot_sell)\") do data\n return 
sum(data[:u_spot_sell])\n end;\n layout = (2, 2),\n)","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"","category":"page"},{"location":"tutorial/example_milk_producer/","page":"Example: the milk producer","title":"Example: the milk producer","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/FAST_production_management/","page":"FAST: the production management problem","title":"FAST: the production management problem","text":"EditURL = \"FAST_production_management.jl\"","category":"page"},{"location":"examples/FAST_production_management/#FAST:-the-production-management-problem","page":"FAST: the production management problem","title":"FAST: the production management problem","text":"","category":"section"},{"location":"examples/FAST_production_management/","page":"FAST: the production management problem","title":"FAST: the production management problem","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/FAST_production_management/","page":"FAST: the production management problem","title":"FAST: the production management problem","text":"An implementation of the Production Management example from FAST","category":"page"},{"location":"examples/FAST_production_management/","page":"FAST: the production management problem","title":"FAST: the production management problem","text":"using SDDP, HiGHS, Test\n\nfunction fast_production_management(; cut_type)\n DEMAND = [2, 10]\n H = 3\n N = 2\n C = [0.2, 0.7]\n S = 2 .+ [0.33, 0.54]\n model = SDDP.LinearPolicyGraph(;\n stages = H,\n lower_bound = -50.0,\n optimizer = HiGHS.Optimizer,\n ) do sp, t\n @variable(sp, x[1:N] >= 0, SDDP.State, initial_value = 0.0)\n @variables(sp, begin\n s[i = 1:N] >= 0\n d\n end)\n @constraints(sp, begin\n [i = 1:N], s[i] <= x[i].in\n sum(s) <= d\n end)\n SDDP.parameterize(sp, t == 1 ? 
[0] : DEMAND) do ω\n return JuMP.fix(d, ω)\n end\n @stageobjective(sp, sum(C[i] * x[i].out for i in 1:N) - S's)\n end\n SDDP.train(model; cut_type = cut_type, print_level = 2, log_frequency = 5)\n @test SDDP.calculate_bound(model) ≈ -23.96 atol = 1e-2\nend\n\nfast_production_management(cut_type = SDDP.SINGLE_CUT)\nfast_production_management(cut_type = SDDP.MULTI_CUT)","category":"page"},{"location":"examples/FAST_production_management/","page":"FAST: the production management problem","title":"FAST: the production management problem","text":"","category":"page"},{"location":"examples/FAST_production_management/","page":"FAST: the production management problem","title":"FAST: the production management problem","text":"This page was generated using Literate.jl.","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"EditURL = \"example_reservoir.jl\"","category":"page"},{"location":"tutorial/example_reservoir/#Example:-deterministic-to-stochastic","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"","category":"section"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"(Image: ) (Image: )","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"This tutorial was written by Oscar Dowson and Andy Philpott for the 2023 Winter School \"Planning under Uncertainty in Energy Markets,\" held March 26 to 31 in Geilo, Norway.","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Download this tutorial as a notebook.","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"The purpose of this tutorial is to explain how we can go from a deterministic time-staged optimal control model in JuMP to a multistage stochastic optimization model in SDDP.jl. 
As a motivating problem, we consider the hydro-thermal problem with a single reservoir.","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"For variations on this problem, see the examples and tutorials:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"An introduction to SDDP.jl\nHydro-thermal scheduling\nHydro valleys\nInfinite horizon hydro-thermal","category":"page"},{"location":"tutorial/example_reservoir/#Packages","page":"Example: deterministic to stochastic","title":"Packages","text":"","category":"section"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"This tutorial requires the following packages:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"using JuMP\nusing SDDP\nimport CSV\nimport DataFrames\nimport HiGHS\nimport Plots","category":"page"},{"location":"tutorial/example_reservoir/#Data","page":"Example: deterministic to stochastic","title":"Data","text":"","category":"section"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"The data for this tutorial is contained in the example_reservoir.csv file in the SDDP.jl repository. To run locally, download the CSV file, then change filename to point to the location where you downloaded it to.","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"filename = joinpath(@__DIR__, \"example_reservoir.csv\")\ndata = CSV.read(filename, DataFrames.DataFrame)","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"It's easier to visualize the data if we plot it:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Plots.plot(\n Plots.plot(data[!, :inflow], ylabel = \"Inflow\"),\n Plots.plot(data[!, :demand], ylabel = \"Demand\"),\n Plots.plot(data[!, :cost], ylabel = \"Cost\", xlabel = \"Week\");\n layout = (3, 1),\n legend = false,\n)","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"The number of weeks will be useful later:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"T = size(data, 1)","category":"page"},{"location":"tutorial/example_reservoir/#Deterministic-JuMP-model","page":"Example: deterministic to stochastic","title":"Deterministic JuMP model","text":"","category":"section"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"To start, we construct a deterministic model in pure JuMP.","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Create a JuMP model, using HiGHS as 
the optimizer:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"model = Model(HiGHS.Optimizer)\nset_silent(model)","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"x[t]: the amount of water in the reservoir at the start of stage t:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"reservoir_max = 320.0\n@variable(model, 0 <= x[1:T+1] <= reservoir_max)","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"We need an initial condition for x[1]. Fix it to 300 units:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"reservoir_initial = 300\nfix(x[1], reservoir_initial; force = true)","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"u[t]: the amount of water to flow through the turbine in stage t:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"flow_max = 12\n@variable(model, 0 <= u[i = 1:T] <= flow_max)","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"s[t]: the amount of water to spill from the reservoir in stage t, bypassing the turbine:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"@variable(model, 0 <= s[1:T])","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"r[t]: the amount of thermal generation in stage t:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"@variable(model, 0 <= r[1:T])","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"i[t]: the amount of inflow to the reservoir in stage t:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"@variable(model, i[1:T])","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"For this model, our inflow is fixed, so we fix it to the data we have:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"for t in 1:T\n fix(i[t], data[t, :inflow])\nend","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"The water balance constraint says that the water in the reservoir at the start of 
stage t+1 is the water in the reservoir at the start of stage t, less the amount flowed through the turbine, u[t], less the amount spilled, s[t], plus the amount of inflow, i[t], into the reservoir:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"@constraint(model, [t = 1:T], x[t+1] == x[t] - u[t] - s[t] + i[t])","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"We also need a supply = demand constraint. In practice, the units of this would be in MWh, and there would be a conversion factor between the amount of water flowing through the turbine and the power output. To simplify, we assume that power and water have the same units, so that one \"unit\" of demand is equal to one \"unit\" of the reservoir x[t]:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"@constraint(model, [t = 1:T], u[t] + r[t] == data[t, :demand])","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Our objective is to minimize the cost of thermal generation:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"@objective(model, Min, sum(data[t, :cost] * r[t] for t in 1:T))","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Let's optimize and check the solution","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"optimize!(model)\nsolution_summary(model)","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"The total cost is:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"objective_value(model)","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Here's a plot of demand and generation:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Plots.plot(data[!, :demand]; label = \"Demand\", xlabel = \"Week\")\nPlots.plot!(value.(r), label = \"Thermal\")\nPlots.plot!(value.(u), label = \"Hydro\")","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"And here's the storage over time:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Plots.plot(value.(x); label = \"Storage\", xlabel = \"Week\")","category":"page"},{"location":"tutorial/example_reservoir/#Deterministic-SDDP-model","page":"Example: deterministic to stochastic","title":"Deterministic SDDP 
model","text":"","category":"section"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"For the next step, we show how to decompose our JuMP model into SDDP.jl. It should obtain the same solution!","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"model = SDDP.LinearPolicyGraph(\n stages = T,\n sense = :Min,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n) do subproblem, t\n @variable(\n subproblem,\n 0 <= x <= reservoir_max,\n SDDP.State,\n initial_value = reservoir_initial,\n )\n @variable(subproblem, 0 <= u <= flow_max)\n @variable(subproblem, 0 <= r)\n @variable(subproblem, 0 <= s)\n @variable(subproblem, i)\n fix(i, data[t, :inflow])\n @constraint(subproblem, x.out == x.in - u - s + i)\n @constraint(subproblem, u + r == data[t, :demand])\n @stageobjective(subproblem, data[t, :cost] * r)\nend","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Can you see how the JuMP model maps to this syntax? We have created a SDDP.LinearPolicyGraph with T stages, we're minimizing, and we're using HiGHS.Optimizer as the optimizer.","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"A few bits might be non-obvious:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"We need to provide a lower bound for the objective function. Since our costs are always positive, a valid lower bound for the total cost is 0.0.\nWe define x as a state variable using SDDP.State. A state variable is any variable that flows through time, and for which we need to know the value of it in stage t-1 to compute the best action in stage t. The state variable x is actually two decision variables, x.in and x.out, which represent x[t] and x[t+1] respectively.\nWe need to use @stageobjective instead of @objective.","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Instead of calling JuMP.optimize!, SDDP.jl uses a train method. With our machine learning hat on, you can think of SDDP.jl as training a function for each stage that accepts the current reservoir state as input and returns the optimal actions as output. It is also an iterative algorithm, so we need to specify when it should terminate:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"SDDP.train(model; iteration_limit = 10)","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"As a quick sanity check, did we get the same cost as our JuMP model?","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"SDDP.calculate_bound(model)","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"That's good. 
Next, to check the value of the decision variables. This isn't as straight forward as our JuMP model. Instead, we need to simulate the policy, and then extract the values of the decision variables from the results of the simulation.","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Since our model is deterministic, we need only 1 replication of the simulation, and we want to record the values of the x, u, and r variables:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"n_replications = 1\nsimulations = SDDP.simulate(model, n_replications, [:x, :u, :r]);\n","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"The simulations vector is too big to show. But it contains one element for each replication, and each replication contains one dictionary for each stage.","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"For example, the data corresponding to the tenth stage in the first replication is:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"simulations[1][10]","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Let's grab the trace of the r and u variables in the first replication, and then plot them:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"r_sim = [sim[:r] for sim in simulations[1]]\nu_sim = [sim[:u] for sim in simulations[1]]\n\nPlots.plot(data[!, :demand]; label = \"Demand\", xlabel = \"Week\")\nPlots.plot!(r_sim, label = \"Thermal\")\nPlots.plot!(u_sim, label = \"Hydro\")","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Perfect. That's the same as we got before.","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Now let's look at x. This is a little more complicated, because we need to grab the outgoing value of the state variable in each stage:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"x_sim = [sim[:x].out for sim in simulations[1]]\n\nPlots.plot(x_sim; label = \"Storage\", xlabel = \"Week\")","category":"page"},{"location":"tutorial/example_reservoir/#Stochastic-SDDP-model","page":"Example: deterministic to stochastic","title":"Stochastic SDDP model","text":"","category":"section"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Now we add some randomness to our model. 
In each stage, we assume that the inflow could be: 2 units lower, with 30% probability; the same as before, with 40% probability; or 5 units higher, with 30% probability.","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"model = SDDP.LinearPolicyGraph(\n stages = T,\n sense = :Min,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n) do sp, t\n @variable(\n sp,\n 0 <= x <= reservoir_max,\n SDDP.State,\n initial_value = reservoir_initial,\n )\n @variable(sp, 0 <= u <= flow_max)\n @variable(sp, 0 <= r)\n @variable(sp, 0 <= s)\n @variable(sp, i)\n # <--- This bit is new\n Ω = [data[t, :inflow] - 2, data[t, :inflow], data[t, :inflow] + 5]\n P = [0.3, 0.4, 0.3]\n SDDP.parameterize(sp, Ω, P) do ω\n fix(i, ω)\n return\n end\n # --->\n @constraint(sp, x.out == x.in - u - s + i)\n @constraint(sp, u + r == data[t, :demand])\n @stageobjective(sp, data[t, :cost] * r)\n return\nend","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Can you see the differences?","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Let's train our new model. We need more iterations because of the stochasticity:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"SDDP.train(model; iteration_limit = 100)","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Now simulate the policy. This time we do 100 replications because the policy is now stochastic instead of deterministic:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"n_replications = 100\nsimulations = SDDP.simulate(model, n_replications, [:x, :u, :r, :i]);\n","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"And let's plot the use of thermal generation in each replication:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"plot = Plots.plot(data[!, :demand]; label = \"Demand\", xlabel = \"Week\")\nfor simulation in simulations\n Plots.plot!(plot, [sim[:r] for sim in simulation], label = \"\")\nend\nplot","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Viewing an interpreting static plots like this is difficult, particularly as the number of simulations grows. 
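Besides plotting, it can help to summarise the replications numerically. The following is a minimal sketch, not part of the original tutorial; it uses the :stage_objective values that SDDP.simulate records by default, and SDDP.confidence_interval to form an approximate 95% confidence interval:

```julia
# Total cost of each replication, and a confidence interval for the
# expected cost of the policy under the simulated inflows.
objectives = map(simulations) do replication
    return sum(stage[:stage_objective] for stage in replication)
end
μ, ci = SDDP.confidence_interval(objectives)
println("Simulated cost: ", μ, " ± ", ci)
```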
SDDP.jl includes an interactive SpaghettiPlot that makes things easier:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"plot = SDDP.SpaghettiPlot(simulations)\nSDDP.add_spaghetti(plot; title = \"Storage\") do sim\n return sim[:x].out\nend\nSDDP.add_spaghetti(plot; title = \"Hydro\") do sim\n return sim[:u]\nend\nSDDP.add_spaghetti(plot; title = \"Inflow\") do sim\n return sim[:i]\nend\nSDDP.plot(\n plot,\n \"spaghetti_plot.html\";\n # We need this to build the documentation. Set to true if running locally.\n open = false,\n)","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"info: Info\nIf you have trouble viewing the plot, you can open it in a new window.","category":"page"},{"location":"tutorial/example_reservoir/#Next-steps","page":"Example: deterministic to stochastic","title":"Next steps","text":"","category":"section"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Take this model and modify it following the suggestions below. The SDDP.jl documentation has a range of similar examples and hints for how to achieve them.","category":"page"},{"location":"tutorial/example_reservoir/#Terminal-value-functions","page":"Example: deterministic to stochastic","title":"Terminal value functions","text":"","category":"section"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"The model ends with an empty reservoir. That isn't ideal for the following year. Can you modify the objective in the final stage to encourage ending the year with a full reservoir?","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"You might write some variation of:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"if t == 52\n @variable(sp, terminal_cost_function >= 0)\n @constraint(sp, terminal_cost_function >= reservoir_initial - x.out)\n @stageobjective(sp, data[t, :cost] * r + terminal_cost_function)\nelse\n @stageobjective(sp, data[t, :cost] * r)\nend","category":"page"},{"location":"tutorial/example_reservoir/#Higher-fidelity-modeling","page":"Example: deterministic to stochastic","title":"Higher fidelity modeling","text":"","category":"section"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Our model is very basic. There are many aspects that we could improve:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Instead of hard-coding a terminal value function, can you solve an infinite horizon model? 
What are the differences?\nCan you add a second reservoir to make a river chain?\nCan you modify the problem and data to use proper units, including a conversion between the volume of water flowing through the turbine and the electrical power output?\nCan you add random demand or cost data as well as inflows?","category":"page"},{"location":"tutorial/example_reservoir/#Algorithmic-considerations","page":"Example: deterministic to stochastic","title":"Algorithmic considerations","text":"","category":"section"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"The algorithm implemented by SDDP.jl has a number of tuneable parameters:","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"Try using a different lower bound. What happens if it is too low, or too high?\nWas our stopping rule correct? What happens if we use fewer or more iterations? What other stopping rules could you try?\nCan you add a risk measure to make the policy risk-averse?","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"","category":"page"},{"location":"tutorial/example_reservoir/","page":"Example: deterministic to stochastic","title":"Example: deterministic to stochastic","text":"This page was generated using Literate.jl.","category":"page"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"CurrentModule = SDDP","category":"page"},{"location":"changelog/#Release-notes","page":"Release notes","title":"Release notes","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.","category":"page"},{"location":"changelog/#v1.6.2-(August-24,-2023)","page":"Release notes","title":"v1.6.2 (August 24, 2023)","text":"","category":"section"},{"location":"changelog/#Fixed","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"MSPFormat now detect and exploit stagewise independent lattices (#653)\nFixed set_optimizer for models read from file (#654)","category":"page"},{"location":"changelog/#Other","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed typo in pglib_opf.jl (#647)\nFixed documentation build and added color (#652)","category":"page"},{"location":"changelog/#v1.6.1-(July-20,-2023)","page":"Release notes","title":"v1.6.1 (July 20, 2023)","text":"","category":"section"},{"location":"changelog/#Fixed-2","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed bugs in MSPFormat reader (#638) (#639)","category":"page"},{"location":"changelog/#Other-2","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Clarified OutOfSampleMonteCarlo docstring (#643)","category":"page"},{"location":"changelog/#v1.6.0-(July-3,-2023)","page":"Release notes","title":"v1.6.0 (July 3, 2023)","text":"","category":"section"},{"location":"changelog/#Added","page":"Release 
notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added RegularizedForwardPass (#624)\nAdded FirstStageStoppingRule (#634)","category":"page"},{"location":"changelog/#Other-3","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Removed an unbound type parameter (#632)\nFixed typo in docstring (#633)\nAdded Here-and-now and hazard-decision tutorial (#635)","category":"page"},{"location":"changelog/#v1.5.1-(June-30,-2023)","page":"Release notes","title":"v1.5.1 (June 30, 2023)","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"This release contains a number of minor code changes, but it has a large impact on the content that is printed to screen. In particular, we now log periodically, instead of each iteration, and a \"good\" stopping rule is used as the default if none are specified. Try using SDDP.train(model) to see the difference.","category":"page"},{"location":"changelog/#Other-4","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed various typos in the documentation (#617)\nFixed printing test after changes in JuMP (#618)\nSet SimulationStoppingRule as the default stopping rule (#619)\nChanged the default logging frequency. Pass log_every_seconds = 0.0 to train to revert to the old behavior. (#620)\nAdded example usage with Distributions.jl (@slwu89) (#622)\nRemoved the numerical issue @warn (#627)\nImproved the quality of docstrings (#630)","category":"page"},{"location":"changelog/#v1.5.0-(May-14,-2023)","page":"Release notes","title":"v1.5.0 (May 14, 2023)","text":"","category":"section"},{"location":"changelog/#Added-2","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added the ability to use a different model for the forward pass. This is a novel feature that lets you train better policies when the model is non-convex or does not have a well-defined dual. See the Alternative forward models tutorial in which we train convex and non-convex formulations of the optimal power flow problem. (#611)","category":"page"},{"location":"changelog/#Other-5","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Updated missing changelog entries (#608)\nRemoved global variables (#610)\nConverted the Options struct to keyword arguments. This struct was a private implementation detail, but the change is breaking if you developed an extension to SDDP that touched these internals. 
(#612)\nFixed some typos (#613)","category":"page"},{"location":"changelog/#v1.4.0-(May-8,-2023)","page":"Release notes","title":"v1.4.0 (May 8, 2023)","text":"","category":"section"},{"location":"changelog/#Added-3","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added SDDP.SimulationStoppingRule (#598)\nAdded sampling_scheme argument to SDDP.write_to_file (#607)","category":"page"},{"location":"changelog/#Fixed-3","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed parsing of some MSPFormat files (#602) (#604)\nFixed printing in header (#605)","category":"page"},{"location":"changelog/#v1.3.0-(May-3,-2023)","page":"Release notes","title":"v1.3.0 (May 3, 2023)","text":"","category":"section"},{"location":"changelog/#Added-4","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added experimental support for SDDP.MSPFormat.read_from_file (#593)","category":"page"},{"location":"changelog/#Other-6","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Updated to StochOptFormat v0.3 (#600)","category":"page"},{"location":"changelog/#v1.2.1-(May-1,-2023)","page":"Release notes","title":"v1.2.1 (May 1, 2023)","text":"","category":"section"},{"location":"changelog/#Fixed-4","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed log_every_seconds (#597)","category":"page"},{"location":"changelog/#v1.2.0-(May-1,-2023)","page":"Release notes","title":"v1.2.0 (May 1, 2023)","text":"","category":"section"},{"location":"changelog/#Added-5","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added SDDP.SimulatorSamplingScheme (#594)\nAdded log_every_seconds argument to SDDP.train (#595)","category":"page"},{"location":"changelog/#Other-7","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Tweaked how the log is printed (#588)\nUpdated to StochOptFormat v0.2 (#592)","category":"page"},{"location":"changelog/#v1.1.4-(April-10,-2023)","page":"Release notes","title":"v1.1.4 (April 10, 2023)","text":"","category":"section"},{"location":"changelog/#Fixed-5","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Logs are now flushed every iteration (#584)","category":"page"},{"location":"changelog/#Other-8","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added docstrings to various functions (#581)\nMinor documentation updates (#580)\nClarified integrality documentation (#582)\nUpdated the README (#585)\nNumber of numerical issues is now printed to the log (#586)","category":"page"},{"location":"changelog/#v1.1.3-(April-2,-2023)","page":"Release notes","title":"v1.1.3 (April 2, 2023)","text":"","category":"section"},{"location":"changelog/#Other-9","page":"Release 
notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed typo in Example: deterministic to stochastic tutorial (#578)\nFixed typo in documentation of SDDP.simulate (#577)","category":"page"},{"location":"changelog/#v1.1.2-(March-18,-2023)","page":"Release notes","title":"v1.1.2 (March 18, 2023)","text":"","category":"section"},{"location":"changelog/#Other-10","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added Example: deterministic to stochastic tutorial (#572)","category":"page"},{"location":"changelog/#v1.1.1-(March-16,-2023)","page":"Release notes","title":"v1.1.1 (March 16, 2023)","text":"","category":"section"},{"location":"changelog/#Other-11","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed email in Project.toml\nAdded notebook to documentation tutorials (#571)","category":"page"},{"location":"changelog/#v1.1.0-(January-12,-2023)","page":"Release notes","title":"v1.1.0 (January 12, 2023)","text":"","category":"section"},{"location":"changelog/#Added-6","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added the node_name_parser argument to SDDP.write_cuts_to_file and added the option to skip nodes in SDDP.read_cuts_from_file (#565)","category":"page"},{"location":"changelog/#v1.0.0-(January-3,-2023)","page":"Release notes","title":"v1.0.0 (January 3, 2023)","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Although we're bumping MAJOR version, this is a non-breaking release. Going forward:","category":"page"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"New features will bump the MINOR version\nBug fixes, maintenance, and documentation updates will bump the PATCH version\nWe will support only the Long Term Support (currently v1.6.7) and the latest patch (currently v1.8.4) releases of Julia. Updates to the LTS version will bump the MINOR version\nUpdates to the compat bounds of package dependencies will bump the PATCH version.","category":"page"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"We do not intend any breaking changes to the public API, which would require a new MAJOR release. The public API is everything defined in the documentation. 
Anything not in the documentation is considered private and may change in any PATCH release.","category":"page"},{"location":"changelog/#Added-7","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added num_nodes argument to SDDP.UnicyclicGraph (#562)\nAdded support for passing an optimizer to SDDP.Asynchronous (#545)","category":"page"},{"location":"changelog/#Other-12","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Updated Plotting tools to use live plots (#563)\nAdded vale as a linter (#565)\nImproved documentation for initializing a parallel scheme (#566)","category":"page"},{"location":"changelog/#v0.4.9-(January-3,-2023)","page":"Release notes","title":"v0.4.9 (January 3, 2023)","text":"","category":"section"},{"location":"changelog/#Added-8","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added SDDP.UnicyclicGraph (#556)","category":"page"},{"location":"changelog/#Other-13","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added tutorial on Markov Decision Processes (#556)\nAdded two-stage newsvendor tutorial (#557)\nRefactored the layout of the documentation (#554) (#555)\nUpdated copyright to 2023 (#558)\nFixed errors in the documentation (#561)","category":"page"},{"location":"changelog/#v0.4.8-(December-19,-2022)","page":"Release notes","title":"v0.4.8 (December 19, 2022)","text":"","category":"section"},{"location":"changelog/#Added-9","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added terminate_on_cycle option to SDDP.Historical (#549)\nAdded include_last_node option to SDDP.DefaultForwardPass (#547)","category":"page"},{"location":"changelog/#Fixed-6","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Reverted then fixed (#531) because it failed to account for problems with integer variables (#546) (#551)","category":"page"},{"location":"changelog/#v0.4.7-(December-17,-2022)","page":"Release notes","title":"v0.4.7 (December 17, 2022)","text":"","category":"section"},{"location":"changelog/#Added-10","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added initial_node support to InSampleMonteCarlo and OutOfSampleMonteCarlo (#535)","category":"page"},{"location":"changelog/#Fixed-7","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Rethrow InterruptException when solver is interrupted (#534)\nFixed numerical recovery when we need dual solutions (#531) (Thanks @bfpc)\nFixed re-using the dashboard = true option between solves (#538)\nFixed bug when no @stageobjective is set (now defaults to 0.0) (#539)\nFixed errors thrown when invalid inputs are provided to add_objective_state (#540)","category":"page"},{"location":"changelog/#Other-14","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release 
notes","text":"Drop support for Julia versions prior to 1.6 (#533)\nUpdated versions of dependencies (#522) (#533)\nSwitched to HiGHS in the documentation and tests (#533)\nAdded license headers (#519)\nFixed link in air conditioning example (#521) (Thanks @conema)\nClarified variable naming in deterministic equivalent (#525) (Thanks @lucasprocessi)\nAdded this change log (#536)\nCuts are now written to model.cuts.json when numerical instability is discovered. This can aid debugging because it allows to you reload the cuts as of the iteration that caused the numerical issue (#537)","category":"page"},{"location":"changelog/#v0.4.6-(March-25,-2022)","page":"Release notes","title":"v0.4.6 (March 25, 2022)","text":"","category":"section"},{"location":"changelog/#Other-15","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Updated to JuMP v1.0 (#517)","category":"page"},{"location":"changelog/#v0.4.5-(March-9,-2022)","page":"Release notes","title":"v0.4.5 (March 9, 2022)","text":"","category":"section"},{"location":"changelog/#Fixed-8","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed issue with set_silent in a subproblem (#510)","category":"page"},{"location":"changelog/#Other-16","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed many typos (#500) (#501) (#506) (#511) (Thanks @bfpc)\nUpdate to JuMP v0.23 (#514)\nAdded auto-regressive tutorial (#507)","category":"page"},{"location":"changelog/#v0.4.4-(December-11,-2021)","page":"Release notes","title":"v0.4.4 (December 11, 2021)","text":"","category":"section"},{"location":"changelog/#Added-11","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added BanditDuality (#471)\nAdded benchmark scripts (#475) (#476) (#490)\nwrite_cuts_to_file now saves visited states (#468)","category":"page"},{"location":"changelog/#Fixed-9","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed BoundStalling in a deterministic policy (#470) (#474)\nFixed magnitude warning with zero coefficients (#483)","category":"page"},{"location":"changelog/#Other-17","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Improvements to LagrangianDuality (#481) (#482) (#487)\nImprovements to StrengthenedConicDuality (#486)\nSwitch to functional form for the tests (#478)\nFixed typos (#472) (Thanks @vfdev-5)\nUpdate to JuMP v0.22 (#498)","category":"page"},{"location":"changelog/#v0.4.3-(August-31,-2021)","page":"Release notes","title":"v0.4.3 (August 31, 2021)","text":"","category":"section"},{"location":"changelog/#Added-12","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added biobjective solver (#462)\nAdded forward_pass_callback (#466)","category":"page"},{"location":"changelog/#Other-18","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Update tutorials and documentation 
(#459) (#465)\nOrganize how paper materials are stored (#464)","category":"page"},{"location":"changelog/#v0.4.2-(August-24,-2021)","page":"Release notes","title":"v0.4.2 (August 24, 2021)","text":"","category":"section"},{"location":"changelog/#Fixed-10","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed a bug in Lagrangian duality (#457)","category":"page"},{"location":"changelog/#v0.4.1-(August-23,-2021)","page":"Release notes","title":"v0.4.1 (August 23, 2021)","text":"","category":"section"},{"location":"changelog/#Other-19","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Minor changes to our implementation of LagrangianDuality (#454) (#455)","category":"page"},{"location":"changelog/#v0.4.0-(August-17,-2021)","page":"Release notes","title":"v0.4.0 (August 17, 2021)","text":"","category":"section"},{"location":"changelog/#Breaking","page":"Release notes","title":"Breaking","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"A large refactoring for how we handle stochastic integer programs. This added support for things like SDDP.ContinuousConicDuality and SDDP.LagrangianDuality. It was breaking because we removed the integrality_handler argument to PolicyGraph. (#499) (#453)","category":"page"},{"location":"changelog/#Other-20","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Documentation improvements (#447) (#448) (#450)","category":"page"},{"location":"changelog/#v0.3.17-(July-6,-2021)","page":"Release notes","title":"v0.3.17 (July 6, 2021)","text":"","category":"section"},{"location":"changelog/#Added-13","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added SDDP.PSRSamplingScheme (#426)","category":"page"},{"location":"changelog/#Other-21","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Display more model attributes (#438)\nDocumentation improvements (#433) (#437) (#439)","category":"page"},{"location":"changelog/#v0.3.16-(June-17,-2021)","page":"Release notes","title":"v0.3.16 (June 17, 2021)","text":"","category":"section"},{"location":"changelog/#Added-14","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added SDDP.RiskAdjustedForwardPass (#413)\nAllow SDDP.Historical to sample sequentially (#420)","category":"page"},{"location":"changelog/#Other-22","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Update risk measure docstrings (#418)","category":"page"},{"location":"changelog/#v0.3.15-(June-1,-2021)","page":"Release notes","title":"v0.3.15 (June 1, 2021)","text":"","category":"section"},{"location":"changelog/#Added-15","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added SDDP.StoppingChain","category":"page"},{"location":"changelog/#Fixed-11","page":"Release 
notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed scoping bug in SDDP.@stageobjective (#407)\nFixed a bug when the initial point is infeasible (#411)\nSet subproblems to silent by default (#409)","category":"page"},{"location":"changelog/#Other-23","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Add JuliaFormatter (#412)\nDocumentation improvements (#406) (#408)","category":"page"},{"location":"changelog/#v0.3.14-(March-30,-2021)","page":"Release notes","title":"v0.3.14 (March 30, 2021)","text":"","category":"section"},{"location":"changelog/#Fixed-12","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed O(N^2) behavior in get_same_children (#393)","category":"page"},{"location":"changelog/#v0.3.13-(March-27,-2021)","page":"Release notes","title":"v0.3.13 (March 27, 2021)","text":"","category":"section"},{"location":"changelog/#Fixed-13","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed bug in print.jl\nFixed compat of Reexport (#388)","category":"page"},{"location":"changelog/#v0.3.12-(March-22,-2021)","page":"Release notes","title":"v0.3.12 (March 22, 2021)","text":"","category":"section"},{"location":"changelog/#Added-16","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added problem statistics to header (#385) (#386)","category":"page"},{"location":"changelog/#Fixed-14","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed subtypes in visualization (#384)","category":"page"},{"location":"changelog/#v0.3.11-(March-22,-2021)","page":"Release notes","title":"v0.3.11 (March 22, 2021)","text":"","category":"section"},{"location":"changelog/#Fixed-15","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed constructor in direct mode (#383)","category":"page"},{"location":"changelog/#Other-24","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fix documentation (#379)","category":"page"},{"location":"changelog/#v0.3.10-(February-23,-2021)","page":"Release notes","title":"v0.3.10 (February 23, 2021)","text":"","category":"section"},{"location":"changelog/#Fixed-16","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed seriescolor in publication plot (#376)","category":"page"},{"location":"changelog/#v0.3.9-(February-20,-2021)","page":"Release notes","title":"v0.3.9 (February 20, 2021)","text":"","category":"section"},{"location":"changelog/#Added-17","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Add option to simulate with different incoming state (#372)\nAdded warning for cuts with high dynamic range (#373)","category":"page"},{"location":"changelog/#Fixed-17","page":"Release 
notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed seriesalpha in publication plot (#375)","category":"page"},{"location":"changelog/#v0.3.8-(January-19,-2021)","page":"Release notes","title":"v0.3.8 (January 19, 2021)","text":"","category":"section"},{"location":"changelog/#Other-25","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Documentation improvements (#367) (#369) (#370)","category":"page"},{"location":"changelog/#v0.3.7-(January-8,-2021)","page":"Release notes","title":"v0.3.7 (January 8, 2021)","text":"","category":"section"},{"location":"changelog/#Other-26","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Documentation improvements (#362) (#363) (#365) (#366)\nBump copyright (#364)","category":"page"},{"location":"changelog/#v0.3.6-(December-17,-2020)","page":"Release notes","title":"v0.3.6 (December 17, 2020)","text":"","category":"section"},{"location":"changelog/#Other-27","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fix typos (#358)\nCollapse navigation bar in docs (#359)\nUpdate TagBot.yml (#361)","category":"page"},{"location":"changelog/#v0.3.5-(November-18,-2020)","page":"Release notes","title":"v0.3.5 (November 18, 2020)","text":"","category":"section"},{"location":"changelog/#Other-28","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Update citations (#348)\nSwitch to GitHub actions (#355)","category":"page"},{"location":"changelog/#v0.3.4-(August-25,-2020)","page":"Release notes","title":"v0.3.4 (August 25, 2020)","text":"","category":"section"},{"location":"changelog/#Added-18","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added non-uniform distributionally robust risk measure (#328)\nAdded numerical recovery functions (#330)\nAdded experimental StochOptFormat (#332) (#336) (#337) (#341) (#343) (#344)\nAdded entropic risk measure (#347)","category":"page"},{"location":"changelog/#Other-29","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Documentation improvements (#327) (#333) (#339) (#340)","category":"page"},{"location":"changelog/#v0.3.3-(June-19,-2020)","page":"Release notes","title":"v0.3.3 (June 19, 2020)","text":"","category":"section"},{"location":"changelog/#Added-19","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added asynchronous support for price and belief states (#325)\nAdded ForwardPass plug-in system (#320)","category":"page"},{"location":"changelog/#Fixed-18","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fix check for probabilities in Markovian graph (#322)","category":"page"},{"location":"changelog/#v0.3.2-(April-6,-2020)","page":"Release notes","title":"v0.3.2 (April 6, 
2020)","text":"","category":"section"},{"location":"changelog/#Added-20","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added log_frequency argument to SDDP.train (#307)","category":"page"},{"location":"changelog/#Other-30","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Improve error message in deterministic equivalent (#312)\nUpdate to RecipesBase 1.0 (#313)","category":"page"},{"location":"changelog/#v0.3.1-(February-26,-2020)","page":"Release notes","title":"v0.3.1 (February 26, 2020)","text":"","category":"section"},{"location":"changelog/#Fixed-19","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed filename in integrality_handlers.jl (#304)","category":"page"},{"location":"changelog/#v0.3.0-(February-20,-2020)","page":"Release notes","title":"v0.3.0 (February 20, 2020)","text":"","category":"section"},{"location":"changelog/#Breaking-2","page":"Release notes","title":"Breaking","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Breaking changes to update to JuMP v0.21 (#300).","category":"page"},{"location":"changelog/#v0.2.4-(February-7,-2020)","page":"Release notes","title":"v0.2.4 (February 7, 2020)","text":"","category":"section"},{"location":"changelog/#Added-21","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added a counter for the number of total subproblem solves (#301)","category":"page"},{"location":"changelog/#Other-31","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Update formatter (#298)\nAdded tests (#299)","category":"page"},{"location":"changelog/#v0.2.3-(January-24,-2020)","page":"Release notes","title":"v0.2.3 (January 24, 2020)","text":"","category":"section"},{"location":"changelog/#Added-22","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added support for convex risk measures (#294)","category":"page"},{"location":"changelog/#Fixed-20","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed bug when subproblem is infeasible (#296)\nFixed bug in deterministic equivalent (#297)","category":"page"},{"location":"changelog/#Other-32","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added example from IJOC paper (#293)","category":"page"},{"location":"changelog/#v0.2.2-(January-10,-2020)","page":"Release notes","title":"v0.2.2 (January 10, 2020)","text":"","category":"section"},{"location":"changelog/#Fixed-21","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Fixed flakey time limit in tests (#291)","category":"page"},{"location":"changelog/#Other-33","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release 
notes","text":"Removed MathOptFormat.jl (#289)\nUpdate copyright (#290)","category":"page"},{"location":"changelog/#v0.2.1-(December-19,-2019)","page":"Release notes","title":"v0.2.1 (December 19, 2019)","text":"","category":"section"},{"location":"changelog/#Added-23","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added support for approximating a Markov lattice (#282) (#285)\nAdd tools for visualizing the value function (#272) (#286)\nWrite .mof.json files on error (#284)","category":"page"},{"location":"changelog/#Other-34","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Improve documentation (#281) (#283)\nUpdate tests for Julia 1.3 (#287)","category":"page"},{"location":"changelog/#v0.2.0-(December-16,-2019)","page":"Release notes","title":"v0.2.0 (December 16, 2019)","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"This version added the asynchronous parallel implementation with a few minor breaking changes in how we iterated internally. It didn't break basic user-facing models, only implementations that implemented some of the extension features. It probably could have been a v1.1 release.","category":"page"},{"location":"changelog/#Added-24","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Added asynchronous parallel implementation (#277)\nAdded roll-out algorithm for cyclic graphs (#279)","category":"page"},{"location":"changelog/#Other-35","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Improved error messages in PolicyGraph (#271)\nAdded JuliaFormatter (#273) (#276)\nFixed compat bounds (#274) (#278)\nAdded documentation for simulating non-standard graphs (#280)","category":"page"},{"location":"changelog/#v0.1.0-(October-17,-2019)","page":"Release notes","title":"v0.1.0 (October 17, 2019)","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"A complete rewrite of SDDP.jl based on the policy graph framework. This was essentially a new package. It has minimal code in common with the previous implementation.","category":"page"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Development started on September 28, 2018 in Kokako.jl, and the code was merged into SDDP.jl on March 14, 2019.","category":"page"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"The pull request SDDP.jl#180 lists the 29 issues that the rewrite closed.","category":"page"},{"location":"changelog/#v0.0.1-(April-18,-2018)","page":"Release notes","title":"v0.0.1 (April 18, 2018)","text":"","category":"section"},{"location":"changelog/","page":"Release notes","title":"Release notes","text":"Initial release. Development had been underway since January 22, 2016 in the StochDualDynamicProgram.jl repository. The last development commit there was April 5, 2017. 
Work then continued in this repository for a year before the first tagged release.","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"EditURL = \"example_newsvendor.jl\"","category":"page"},{"location":"tutorial/example_newsvendor/#Example:-Two-stage-Newsvendor","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"","category":"section"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"(Image: ) (Image: )","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"This example is based on the classical newsvendor problem.","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"using SDDP\nimport HiGHS\nimport Plots","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"First, we need some discretized distribution of demand. For simplicity, we're going to sample 10 points from the uniform [0, 1) distribution three times and add them together. This is a rough approximation of a normal distribution.","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"Ω = rand(10) .+ rand(10) .+ rand(10)","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"We also need some price data. We assume the agent can buy a newspaper for $1 and sell it for $1.20.","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"buy_price, sales_price = 1.0, 1.2","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"Now we can formulate and train a policy for the two-stage newsvendor problem:","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"model = SDDP.LinearPolicyGraph(\n stages = 2,\n sense = :Max,\n upper_bound = maximum(Ω) * sales_price,\n optimizer = HiGHS.Optimizer,\n) do subproblem, stage\n @variable(subproblem, inventory >= 0, SDDP.State, initial_value = 0)\n if stage == 1\n @variable(subproblem, buy >= 0)\n @constraint(subproblem, inventory.out == inventory.in + buy)\n @stageobjective(subproblem, -buy_price * buy)\n else\n @variable(subproblem, sell >= 0)\n @constraint(subproblem, sell <= inventory.in)\n SDDP.parameterize(subproblem, Ω) do ω\n return JuMP.set_upper_bound(sell, ω)\n end\n @stageobjective(subproblem, sales_price * sell)\n end\nend\n\nSDDP.train(model)","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"To check the first-stage buy decision, we need to obtain a decision rule for the first-stage node 1:","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"first_stage_rule = SDDP.DecisionRule(model, node = 
1)","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"Then we can evaluate it, passing in a starting point for the incoming state:","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"solution = SDDP.evaluate(\n first_stage_rule;\n incoming_state = Dict(:inventory => 0.0),\n controls_to_record = [:buy],\n)","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"The optimal value of the buy variable is stored here:","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"solution.controls[:buy]","category":"page"},{"location":"tutorial/example_newsvendor/#Introducing-risk","page":"Example: Two-stage Newsvendor","title":"Introducing risk","text":"","category":"section"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"The solution of a single newsvendor problem offers little insight about how a decision-maker should act. In particular, they may be averse to bad outcomes, such as when they purchase a larger number of newspapers only for there to be little demand.","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"We can explore how the optimal decision changes with risk by creating a function:","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"function solve_risk_averse_newsvendor(Ω, risk_measure)\n model = SDDP.LinearPolicyGraph(\n stages = 2,\n sense = :Max,\n upper_bound = maximum(Ω) * sales_price,\n optimizer = HiGHS.Optimizer,\n ) do subproblem, stage\n @variable(subproblem, inventory >= 0, SDDP.State, initial_value = 0)\n if stage == 1\n @variable(subproblem, buy >= 0)\n @constraint(subproblem, inventory.out == inventory.in + buy)\n @stageobjective(subproblem, -buy_price * buy)\n else\n @variable(subproblem, sell >= 0)\n @constraint(subproblem, sell <= inventory.in)\n SDDP.parameterize(subproblem, Ω) do ω\n return JuMP.set_upper_bound(sell, ω)\n end\n @stageobjective(subproblem, sales_price * sell)\n end\n end\n SDDP.train(model; risk_measure = risk_measure, print_level = 0)\n first_stage_rule = SDDP.DecisionRule(model, node = 1)\n solution = SDDP.evaluate(\n first_stage_rule;\n incoming_state = Dict(:inventory => 0.0),\n controls_to_record = [:buy],\n )\n return solution.controls[:buy]\nend","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"Now we can see how many units a risk-neutral decision maker would order:","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"solve_risk_averse_newsvendor(Ω, SDDP.Expectation())","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"as well as a decision-maker who cares only about the worst-case outcome:","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage 
Newsvendor","title":"Example: Two-stage Newsvendor","text":"solve_risk_averse_newsvendor(Ω, SDDP.WorstCase())","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"In general, the decision-maker will be somewhere between the two extremes. The SDDP.Entropic risk measure is a risk measure that has a single parameter that lets us explore the space of policies between the two extremes. When the parameter is small, the measure acts like SDDP.Expectation, and when it is large, it acts like SDDP.WorstCase.","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"Here is what we get if we solve our problem multiple times for different values of the risk aversion parameter gamma:","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"Γ = [10^i for i in -3:0.2:3]\nbuy = [solve_risk_averse_newsvendor(Ω, SDDP.Entropic(γ)) for γ in Γ]\nPlots.plot(\n Γ,\n buy;\n xaxis = :log,\n xlabel = \"Risk aversion parameter γ\",\n ylabel = \"First-stage buy decision\",\n legend = false,\n)","category":"page"},{"location":"tutorial/example_newsvendor/#Things-to-try","page":"Example: Two-stage Newsvendor","title":"Things to try","text":"","category":"section"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"There are a number of things you can try next:","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"Experiment with different buy and sales prices\nExperiment with different distributions of demand\nExplore how the optimal policy changes if you use a different risk measure\nWhat happens if you can only buy and sell integer numbers of newspapers? Try this by adding Int to the variable definitions: @variable(subproblem, buy >= 0, Int)","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"","category":"page"},{"location":"tutorial/example_newsvendor/","page":"Example: Two-stage Newsvendor","title":"Example: Two-stage Newsvendor","text":"This page was generated using Literate.jl.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"EditURL = \"theory_intro.jl\"","category":"page"},{"location":"explanation/theory_intro/#Introductory-theory","page":"Introductory theory","title":"Introductory theory","text":"","category":"section"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"(Image: ) (Image: )","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"note: Note\nThis tutorial is aimed at advanced undergraduates or early-stage graduate students. You don't need prior exposure to stochastic programming! (Indeed, it may be better if you don't, because our approach is non-standard in the literature.)This tutorial is also a living document. 
If parts are unclear, please open an issue so it can be improved!","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"This tutorial will teach you how the stochastic dual dynamic programming algorithm works by implementing a simplified version of the algorithm.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Our implementation is very much a \"vanilla\" version of SDDP; it doesn't have (m)any fancy computational tricks (e.g., the ones included in SDDP.jl) that you need to code a performant or stable version that will work on realistic instances. However, our simplified implementation will work on arbitrary policy graphs, including those with cycles such as infinite horizon problems!","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Packages","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"This tutorial uses the following packages. For clarity, we call import PackageName so that we must prefix PackageName. to all functions and structs provided by that package. Everything not prefixed is either part of base Julia, or we wrote it.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"import ForwardDiff\nimport HiGHS\nimport JuMP\nimport Statistics","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"tip: Tip\nYou can follow along by installing the above packages, and copy-pasting the code we will write into a Julia REPL. Alternatively, you can download the Julia .jl file which created this tutorial from GitHub.","category":"page"},{"location":"explanation/theory_intro/#Preliminaries:-background-theory","page":"Introductory theory","title":"Preliminaries: background theory","text":"","category":"section"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Start this tutorial by reading An introduction to SDDP.jl, which introduces the necessary notation and vocabulary that we need for this tutorial.","category":"page"},{"location":"explanation/theory_intro/#Preliminaries:-Kelley's-cutting-plane-algorithm","page":"Introductory theory","title":"Preliminaries: Kelley's cutting plane algorithm","text":"","category":"section"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Kelley's cutting plane algorithm is an iterative method for minimizing convex functions. 
Given a convex function f(x), Kelley's algorithm constructs an under-approximation of the function at the minimum by a set of first-order Taylor series approximations (called cuts) constructed at a set of points k = 1ldotsK:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"beginaligned\nf^K = minlimits_theta in mathbbR x in mathbbR^N theta\n theta ge f(x_k) + fracddxf(x_k)^top (x - x_k)quad k=1ldotsK\n theta ge M\nendaligned","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"where M is a sufficiently large negative number that is a lower bound for f over the domain of x.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Kelley's cutting plane algorithm is a structured way of choosing points x_k to visit, so that as more cuts are added:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"lim_K rightarrow infty f^K = minlimits_x in mathbbR^N f(x)","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"However, before we introduce the algorithm, we need to introduce some bounds.","category":"page"},{"location":"explanation/theory_intro/#Bounds","page":"Introductory theory","title":"Bounds","text":"","category":"section"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"By convexity, f^K le f(x) for all x. Thus, if x^* is a minimizer of f, then at any point in time we can construct a lower bound for f(x^*) by solving f^K.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Moreover, we can use the primal solutions x_k^* returned by solving f^k to evaluate f(x_k^*) to generate an upper bound.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Therefore, f^K le f(x^*) le minlimits_k=1ldotsK f(x_k^*).","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"When the lower bound is sufficiently close to the upper bound, we can terminate the algorithm and declare that we have found a solution that is close to optimal.","category":"page"},{"location":"explanation/theory_intro/#Implementation","page":"Introductory theory","title":"Implementation","text":"","category":"section"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Here is pseudo-code for the Kelley algorithm:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Take as input a convex function f(x) and an iteration limit K_max. Set K = 0, and initialize f^K. 
Set lb = -infty and ub = infty.\nSolve f^K to obtain a candidate solution x_K+1.\nUpdate lb = f^K and ub = minub f(x_K+1).\nAdd a cut theta ge f(x_K+1) + fracddxfleft(x_K+1right)^top (x - x_K+1) to form f^K+1.\nIncrement K.\nIf K = K_max or ub - lb epsilon, STOP, otherwise, go to step 2.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"And here's a complete implementation:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"function kelleys_cutting_plane(\n # The function to be minimized.\n f::Function,\n # The gradient of `f`. By default, we use automatic differentiation to\n # compute the gradient of f so the user doesn't have to!\n dfdx::Function = x -> ForwardDiff.gradient(f, x);\n # The number of arguments to `f`.\n input_dimension::Int,\n # A lower bound for the function `f` over its domain.\n lower_bound::Float64,\n # The number of iterations to run Kelley's algorithm for before stopping.\n iteration_limit::Int,\n # The absolute tolerance ϵ to use for convergence.\n tolerance::Float64 = 1e-6,\n)\n # Step (1):\n K = 0\n model = JuMP.Model(HiGHS.Optimizer)\n JuMP.set_silent(model)\n JuMP.@variable(model, θ >= lower_bound)\n JuMP.@variable(model, x[1:input_dimension])\n JuMP.@objective(model, Min, θ)\n x_k = fill(NaN, input_dimension)\n lower_bound, upper_bound = -Inf, Inf\n while true\n # Step (2):\n JuMP.optimize!(model)\n x_k .= JuMP.value.(x)\n # Step (3):\n lower_bound = JuMP.objective_value(model)\n upper_bound = min(upper_bound, f(x_k))\n println(\"K = $K : $(lower_bound) <= f(x*) <= $(upper_bound)\")\n # Step (4):\n JuMP.@constraint(model, θ >= f(x_k) + dfdx(x_k)' * (x .- x_k))\n # Step (5):\n K = K + 1\n # Step (6):\n if K == iteration_limit\n println(\"-- Termination status: iteration limit --\")\n break\n elseif abs(upper_bound - lower_bound) < tolerance\n println(\"-- Termination status: converged --\")\n break\n end\n end\n println(\"Found solution: x_K = \", x_k)\n return\nend","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Let's run our algorithm to see what happens:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"kelleys_cutting_plane(\n input_dimension = 2,\n lower_bound = 0.0,\n iteration_limit = 20,\n) do x\n return (x[1] - 1)^2 + (x[2] + 2)^2 + 1.0\nend","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"warning: Warning\nIt's hard to choose a valid lower bound! If you choose one too loose, the algorithm can take a long time to converge. However, if you choose one so tight that M f(x^*), then you can obtain a suboptimal solution. 
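As a minimal illustration of this point, we could re-run the same example with a deliberately invalid bound (the value 5.0 is an arbitrary choice that exceeds the true minimum of 1.0; everything else reuses the kelleys_cutting_plane function defined above):\n\n# lower_bound = 5.0 is too tight, because f(x*) = 1.0 < 5.0. The constraint\n# θ >= 5.0 means the reported lower bound can never drop below 5.0, so the\n# gap cannot close at the true optimum and the final x_K need not be a\n# minimizer of f.\nkelleys_cutting_plane(\n    input_dimension = 2,\n    lower_bound = 5.0,\n    iteration_limit = 20,\n) do x\n    return (x[1] - 1)^2 + (x[2] + 2)^2 + 1.0\nend\n\n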
For a deeper discussion of the implications for SDDP.jl, see Choosing an initial bound.","category":"page"},{"location":"explanation/theory_intro/#Preliminaries:-approximating-the-cost-to-go-term","page":"Introductory theory","title":"Preliminaries: approximating the cost-to-go term","text":"","category":"section"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"In the background theory section, we discussed how you could formulate an optimal policy to a multistage stochastic program using the dynamic programming recursion:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"beginaligned\nV_i(x omega) = minlimits_barx x^prime u C_i(barx u omega) + mathbbE_j in i^+ varphi in Omega_jV_j(x^prime varphi)\n x^prime = T_i(barx u omega) \n u in U_i(barx omega) \n barx = x\nendaligned","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"where our decision rule, pi_i(x omega), solves this optimization problem and returns a u^* corresponding to an optimal solution. Moreover, we alluded to the fact that the cost-to-go term (the nasty recursive expectation) makes this problem intractable to solve.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"However, if, excluding the cost-to-go term (i.e., the SP formulation), V_i(x omega) can be formulated as a linear program (this also works for convex programs, but the math is more involved), then we can make some progress by noticing that x only appears as a right-hand side term of the fishing constraint barx = x. Therefore, V_i(x cdot) is convex with respect to x for fixed omega. (If you have not seen this result before, try to prove it.)","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"The fishing constraint barx = x has an associated dual variable. The economic interpretation of this dual variable is that it represents the change in the objective function if the right-hand side x is increased on the scale of one unit. In other words, and with a slight abuse of notation, it is the value fracddx V_i(x omega). 
(Because V_i is not differentiable, it is a subgradient instead of a derivative.)","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"If we implement the constraint barx = x by setting the lower- and upper bounds of barx to x, then the reduced cost of the decision variable barx is the subgradient, and we do not need to explicitly add the fishing constraint as a row to the constraint matrix.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"tip: Tip\nThe subproblem can have binary and integer variables, but you'll need to use Lagrangian duality to compute a subgradient!","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Stochastic dual dynamic programming converts this problem into a tractable form by applying Kelley's cutting plane algorithm to the V_j functions in the cost-to-go term:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"beginaligned\nV_i^K(x omega) = minlimits_barx x^prime u C_i(barx u omega) + theta\n x^prime = T_i(barx u omega) \n u in U_i(barx omega) \n barx = x \n theta ge mathbbE_j in i^+ varphi in Omega_jleftV_j^k(x^prime_k varphi) + fracddx^primeV_j^k(x^prime_k varphi)^top (x^prime - x^prime_k)rightquad k=1ldotsK \n theta ge M\nendaligned","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"All we need now is a way of generating these cutting planes in an iterative manner. Before we get to that though, let's start writing some code.","category":"page"},{"location":"explanation/theory_intro/#Implementation:-modeling","page":"Introductory theory","title":"Implementation: modeling","text":"","category":"section"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Let's make a start by defining the problem structure. Like SDDP.jl, we need a few things:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"A description of the structure of the policy graph: how many nodes there are, and the arcs linking the nodes together with their corresponding probabilities.\nA JuMP model for each node in the policy graph.\nA way to identify the incoming and outgoing state variables of each node.\nA description of the random variable, as well as a function that we can call that will modify the JuMP model to reflect the realization of the random variable.\nA decision variable to act as the approximated cost-to-go term.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"warning: Warning\nIn the interests of brevity, there is minimal error checking. 
Think about all the different ways you could break the code!","category":"page"},{"location":"explanation/theory_intro/#Structs","page":"Introductory theory","title":"Structs","text":"","category":"section"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"The first struct we are going to use is a State struct that will wrap an incoming and outgoing state variable:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"struct State\n in::JuMP.VariableRef\n out::JuMP.VariableRef\nend","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Next, we need a struct to wrap all of the uncertainty within a node:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"struct Uncertainty\n parameterize::Function\n Ω::Vector{Any}\n P::Vector{Float64}\nend","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"parameterize is a function which takes a realization of the random variable omegainOmega and updates the subproblem accordingly. The finite discrete random variable is defined by the vectors Ω and P, so that the random variable takes the value Ω[i] with probability P[i]. As such, P should sum to 1. (We don't check this here, but we should; we do in SDDP.jl.)","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Now we have two building blocks, we can declare the structure of each node:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"struct Node\n subproblem::JuMP.Model\n states::Dict{Symbol,State}\n uncertainty::Uncertainty\n cost_to_go::JuMP.VariableRef\nend","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"subproblem is going to be the JuMP model that we build at each node.\nstates is a dictionary that maps a symbolic name of a state variable to a State object wrapping the incoming and outgoing state variables in subproblem.\nuncertainty is an Uncertainty object described above.\ncost_to_go is a JuMP variable that approximates the cost-to-go term.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Finally, we define a simplified policy graph as follows:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"struct PolicyGraph\n nodes::Vector{Node}\n arcs::Vector{Dict{Int,Float64}}\nend","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"There is a vector of nodes, as well as a data structure for the arcs. arcs is a vector of dictionaries, where arcs[i][j] gives the probability of transitioning from node i to node j, if an arc exists.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"To simplify things, we will assume that the root node transitions to node 1 with probability 1, and there are no other incoming arcs to node 1. 
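For example, here is the arc data for two small graphs (a sketch: the first is the three-stage linear graph that we build below, and the second is a hypothetical two-node graph with a cycle, shown purely for illustration):\n\n# Linear graph: node 1 -> node 2 -> node 3, then the problem ends.\ngraph_linear = [Dict(2 => 1.0), Dict(3 => 1.0), Dict{Int,Float64}()]\n# Cyclic graph: node 1 -> node 2 w.p. 1.0, and node 2 -> node 1 w.p. 0.5,\n# so the process terminates after each cycle with probability 0.5.\ngraph_cyclic = [Dict(2 => 1.0), Dict(1 => 0.5)]\n\n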
Notably, we can still define cyclic graphs though!","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"We also define a nice show method so that we don't accidentally print a large amount of information to the screen when creating a model:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"function Base.show(io::IO, model::PolicyGraph)\n println(io, \"A policy graph with $(length(model.nodes)) nodes\")\n println(io, \"Arcs:\")\n for (from, arcs) in enumerate(model.arcs)\n for (to, probability) in arcs\n println(io, \" $(from) => $(to) w.p. $(probability)\")\n end\n end\n return\nend","category":"page"},{"location":"explanation/theory_intro/#Functions","page":"Introductory theory","title":"Functions","text":"","category":"section"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Now we have some basic types, let's implement some functions so that the user can create a model.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"First, we need an example of a function that the user will provide. Like SDDP.jl, this takes an empty subproblem, and a node index, in this case t::Int. You could change this function to change the model, or define a new one later in the code.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"We're going to copy the example from An introduction to SDDP.jl, with some minor adjustments for the fact we don't have many of the bells and whistles of SDDP.jl. You can probably see how some of the SDDP.jl functionality like @stageobjective and SDDP.parameterize help smooth some of the usability issues like needing to construct both the incoming and outgoing state variables, or needing to explicitly declare return states, uncertainty.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"function subproblem_builder(subproblem::JuMP.Model, t::Int)\n # Define the state variables. Note how we fix the incoming state to the\n # initial state variable regardless of `t`! This isn't strictly necessary;\n # it only matters that we do it for the first node.\n JuMP.@variable(subproblem, volume_in == 200)\n JuMP.@variable(subproblem, 0 <= volume_out <= 200)\n states = Dict(:volume => State(volume_in, volume_out))\n # Define the control variables.\n JuMP.@variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n inflow\n end)\n # Define the constraints\n JuMP.@constraints(\n subproblem,\n begin\n volume_out == volume_in + inflow - hydro_generation - hydro_spill\n demand_constraint, thermal_generation + hydro_generation == 150.0\n end\n )\n # Define the objective for each stage `t`. Note that we can use `t` as an\n # index for t = 1, 2, 3.\n fuel_cost = [50.0, 100.0, 150.0]\n JuMP.@objective(subproblem, Min, fuel_cost[t] * thermal_generation)\n # Finally, we define the uncertainty object. Because this is a simplified\n # implementation of SDDP, we shall politely ask the user to only modify the\n # constraints, and not the objective function! 
(Not that it changes the\n # algorithm, we just have to add more information to keep track of things.)\n uncertainty = Uncertainty([0.0, 50.0, 100.0], [1 / 3, 1 / 3, 1 / 3]) do ω\n return JuMP.fix(inflow, ω)\n end\n return states, uncertainty\nend","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"The next function we need to define is the analog of SDDP.PolicyGraph. It should be pretty readable.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"function PolicyGraph(\n subproblem_builder::Function;\n graph::Vector{Dict{Int,Float64}},\n lower_bound::Float64,\n optimizer,\n)\n nodes = Node[]\n for t in 1:length(graph)\n # Create a model.\n model = JuMP.Model(optimizer)\n JuMP.set_silent(model)\n # Use the provided function to build out each subproblem. The user's\n # function returns a dictionary mapping `Symbol`s to `State` objects,\n # and an `Uncertainty` object.\n states, uncertainty = subproblem_builder(model, t)\n # Now add the cost-to-go terms:\n JuMP.@variable(model, cost_to_go >= lower_bound)\n obj = JuMP.objective_function(model)\n JuMP.@objective(model, Min, obj + cost_to_go)\n # If there are no outgoing arcs, the cost-to-go is 0.0.\n if length(graph[t]) == 0\n JuMP.fix(cost_to_go, 0.0; force = true)\n end\n push!(nodes, Node(model, states, uncertainty, cost_to_go))\n end\n return PolicyGraph(nodes, graph)\nend","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Then, we can create a model using the subproblem_builder function we defined earlier:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"model = PolicyGraph(\n subproblem_builder;\n graph = [Dict(2 => 1.0), Dict(3 => 1.0), Dict{Int,Float64}()],\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n)","category":"page"},{"location":"explanation/theory_intro/#Implementation:-helpful-samplers","page":"Introductory theory","title":"Implementation: helpful samplers","text":"","category":"section"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Before we get properly coding the solution algorithm, it's also going to be useful to have a function that samples a realization of the random variable defined by Ω and P.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"function sample_uncertainty(uncertainty::Uncertainty)\n r = rand()\n for (p, ω) in zip(uncertainty.P, uncertainty.Ω)\n r -= p\n if r < 0.0\n return ω\n end\n end\n return error(\"We should never get here because P should sum to 1.0.\")\nend","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"note: Note\nrand() samples a uniform random variable in [0, 1).","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"For example:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"for i in 1:3\n println(\"ω = \", sample_uncertainty(model.nodes[1].uncertainty))\nend","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"It's also going to be useful to define a 
function that generates a random walk through the nodes of the graph:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"function sample_next_node(model::PolicyGraph, current::Int)\n if length(model.arcs[current]) == 0\n # No outgoing arcs!\n return nothing\n else\n r = rand()\n for (to, probability) in model.arcs[current]\n r -= probability\n if r < 0.0\n return to\n end\n end\n # We looped through the outgoing arcs and still have probability left\n # over! This means we've hit an implicit \"zero\" node.\n return nothing\n end\nend","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"For example:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"for i in 1:3\n # We use `repr` to print the next node, because `sample_next_node` can\n # return `nothing`.\n println(\"Next node from $(i) = \", repr(sample_next_node(model, i)))\nend","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"This is a little boring, because our graph is simple. However, more complicated graphs will generate more interesting trajectories!","category":"page"},{"location":"explanation/theory_intro/#Implementation:-the-forward-pass","page":"Introductory theory","title":"Implementation: the forward pass","text":"","category":"section"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Recall that, after approximating the cost-to-go term, we need a way of generating the cuts. As the first step, we need a way of generating candidate solutions x_k^prime. However, unlike Kelley's example, our functions V_j^k(x^prime varphi) need two inputs: an outgoing state variable and a realization of the random variable.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"One way of getting these inputs is just to pick a random (feasible) value. However, in doing so, we might pick outgoing state variables that we will never see in practice, or we might infrequently pick outgoing state variables that we will often see in practice. Therefore, a better way of generating the inputs is to use a simulation of the policy, which we call the forward pass.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"The forward pass walks the policy graph from start to end, transitioning randomly along the arcs. At each node, it observes a realization of the random variable and solves the approximated subproblem to generate a candidate outgoing state variable x_k^prime. 
The outgoing state variable is passed as the incoming state variable to the next node in the trajectory.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"function forward_pass(model::PolicyGraph, io::IO = stdout)\n println(io, \"| Forward Pass\")\n # First, get the value of the state at the root node (e.g., x_R).\n incoming_state =\n Dict(k => JuMP.fix_value(v.in) for (k, v) in model.nodes[1].states)\n # `simulation_cost` is an accumlator that is going to sum the stage-costs\n # incurred over the forward pass.\n simulation_cost = 0.0\n # We also need to record the nodes visited and resultant outgoing state\n # variables so we can pass them to the backward pass.\n trajectory = Tuple{Int,Dict{Symbol,Float64}}[]\n # Now's the meat of the forward pass: beginning at the first node:\n t = 1\n while t !== nothing\n node = model.nodes[t]\n println(io, \"| | Visiting node $(t)\")\n # Sample the uncertainty:\n ω = sample_uncertainty(node.uncertainty)\n println(io, \"| | | ω = \", ω)\n # Parameterizing the subproblem using the user-provided function:\n node.uncertainty.parameterize(ω)\n println(io, \"| | | x = \", incoming_state)\n # Update the incoming state variable:\n for (k, v) in incoming_state\n JuMP.fix(node.states[k].in, v; force = true)\n end\n # Now solve the subproblem and check we found an optimal solution:\n JuMP.optimize!(node.subproblem)\n if JuMP.termination_status(node.subproblem) != JuMP.MOI.OPTIMAL\n error(\"Something went terribly wrong!\")\n end\n # Compute the outgoing state variables:\n outgoing_state = Dict(k => JuMP.value(v.out) for (k, v) in node.states)\n println(io, \"| | | x′ = \", outgoing_state)\n # We also need to compute the stage cost to add to our\n # `simulation_cost` accumulator:\n stage_cost =\n JuMP.objective_value(node.subproblem) - JuMP.value(node.cost_to_go)\n simulation_cost += stage_cost\n println(io, \"| | | C(x, u, ω) = \", stage_cost)\n # As a penultimate step, set the outgoing state of stage t and the\n # incoming state of stage t + 1, and add the node to the trajectory.\n incoming_state = outgoing_state\n push!(trajectory, (t, outgoing_state))\n # Finally, sample a new node to step to. If `t === nothing`, the\n # `while` loop will break.\n t = sample_next_node(model, t)\n end\n return trajectory, simulation_cost\nend","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Let's take a look at one forward pass:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"trajectory, simulation_cost = forward_pass(model);\n","category":"page"},{"location":"explanation/theory_intro/#Implementation:-the-backward-pass","page":"Introductory theory","title":"Implementation: the backward pass","text":"","category":"section"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"From the forward pass, we obtained a vector of nodes visited and their corresponding outgoing state variables. Now we need to refine the approximation for each node at the candidate solution for the outgoing state variable. 
That is, we need to add a new cut:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"theta ge mathbbE_j in i^+ varphi in Omega_jleftV_j^k(x^prime_k varphi) + fracddx^primeV_j^k(x^prime_k varphi)^top (x^prime - x^prime_k)right","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"or alternatively:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"theta ge sumlimits_j in i^+ sumlimits_varphi in Omega_j p_ij p_varphileftV_j^k(x^prime_k varphi) + fracddx^primeV_j^k(x^prime_k varphi)^top (x^prime - x^prime_k)right","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"It doesn't matter what order we visit the nodes to generate these cuts for. For example, we could compute them all in parallel, using the current approximations of V^K_i.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"However, we can be smarter than that.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"If we traverse the list of nodes visited in the forward pass in reverse, then we come to refine the i^th node in the trajectory, we will already have improved the approximation of the (i+1)^th node in the trajectory as well! Therefore, our refinement of the i^th node will be better than if we improved node i first, and then refined node (i+1).","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Because we walk the nodes in reverse, we call this the backward pass.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"info: Info\nIf you're into deep learning, you could view this as the equivalent of back-propagation: the forward pass pushes primal information through the graph (outgoing state variables), and the backward pass pulls dual information (cuts) back through the graph to improve our decisions on the next forward pass.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"function backward_pass(\n model::PolicyGraph,\n trajectory::Vector{Tuple{Int,Dict{Symbol,Float64}}},\n io::IO = stdout,\n)\n println(io, \"| Backward pass\")\n # For the backward pass, we walk back up the nodes.\n for i in reverse(1:length(trajectory))\n index, outgoing_states = trajectory[i]\n node = model.nodes[index]\n println(io, \"| | Visiting node $(index)\")\n if length(model.arcs[index]) == 0\n # If there are no children, the cost-to-go is 0.\n println(io, \"| | | Skipping node because the cost-to-go is 0\")\n continue\n end\n # Create an empty affine expression that we will use to build up the\n # right-hand side of the cut expression.\n cut_expression = JuMP.AffExpr(0.0)\n # For each node j ∈ i⁺\n for (j, P_ij) in model.arcs[index]\n next_node = model.nodes[j]\n # Set the incoming state variables of node j to the outgoing state\n # variables of node i\n for (k, v) in outgoing_states\n JuMP.fix(next_node.states[k].in, v; force = true)\n end\n # Then for each realization of φ ∈ Ωⱼ\n for (pφ, φ) in zip(next_node.uncertainty.P, next_node.uncertainty.Ω)\n # Setup and solve for the 
realization of φ\n println(io, \"| | | Solving φ = \", φ)\n next_node.uncertainty.parameterize(φ)\n JuMP.optimize!(next_node.subproblem)\n # Then prepare the cut `P_ij * pφ * [V + dVdxᵀ(x - x_k)]``\n V = JuMP.objective_value(next_node.subproblem)\n println(io, \"| | | | V = \", V)\n dVdx = Dict(\n k => JuMP.reduced_cost(v.in) for (k, v) in next_node.states\n )\n println(io, \"| | | | dVdx′ = \", dVdx)\n cut_expression += JuMP.@expression(\n node.subproblem,\n P_ij *\n pφ *\n (\n V + sum(\n dVdx[k] * (x.out - outgoing_states[k]) for\n (k, x) in node.states\n )\n ),\n )\n end\n end\n # And then refine the cost-to-go variable by adding the cut:\n c = JuMP.@constraint(node.subproblem, node.cost_to_go >= cut_expression)\n println(io, \"| | | Adding cut : \", c)\n end\n return nothing\nend","category":"page"},{"location":"explanation/theory_intro/#Implementation:-bounds","page":"Introductory theory","title":"Implementation: bounds","text":"","category":"section"},{"location":"explanation/theory_intro/#Lower-bounds","page":"Introductory theory","title":"Lower bounds","text":"","category":"section"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Recall from Kelley's that we can obtain a lower bound for f(x^*) be evaluating f^K. The analogous lower bound for a multistage stochastic program is:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"mathbbE_i in R^+ omega in Omega_iV_i^K(x_R omega) le min_pi mathbbE_i in R^+ omega in Omega_iV_i^pi(x_R omega)","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Here's how we compute the lower bound:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"function lower_bound(model::PolicyGraph)\n node = model.nodes[1]\n bound = 0.0\n for (p, ω) in zip(node.uncertainty.P, node.uncertainty.Ω)\n node.uncertainty.parameterize(ω)\n JuMP.optimize!(node.subproblem)\n bound += p * JuMP.objective_value(node.subproblem)\n end\n return bound\nend","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"note: Note\nThe implementation is simplified because we assumed that there is only one arc from the root node, and that it pointed to the first node in the vector.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Because we haven't trained a policy yet, the lower bound is going to be very bad:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"lower_bound(model)","category":"page"},{"location":"explanation/theory_intro/#Upper-bounds","page":"Introductory theory","title":"Upper bounds","text":"","category":"section"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"With Kelley's algorithm, we could easily construct an upper bound by evaluating f(x_K). However, it is almost always intractable to evaluate an upper bound for multistage stochastic programs due to the large number of nodes and the nested expectations. 
Instead, we can perform a Monte Carlo simulation of the policy to build a statistical estimate for the value of mathbbE_i in R^+ omega in Omega_iV_i^pi(x_R omega), where pi is the policy defined by the current approximations V^K_i.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"function upper_bound(model::PolicyGraph; replications::Int)\n # Pipe the output to `devnull` so we don't print too much!\n simulations = [forward_pass(model, devnull) for i in 1:replications]\n z = [s[2] for s in simulations]\n μ = Statistics.mean(z)\n tσ = 1.96 * Statistics.std(z) / sqrt(replications)\n return μ, tσ\nend","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"note: Note\nThe width of the confidence interval is incorrect if there are cycles in the graph, because the distribution of simulation costs z is not symmetric. The mean is correct, however.","category":"page"},{"location":"explanation/theory_intro/#Termination-criteria","page":"Introductory theory","title":"Termination criteria","text":"","category":"section"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"In Kelley's algorithm, the upper bound was deterministic. Therefore, we could terminate the algorithm when the lower bound was sufficiently close to the upper bound. However, our upper bound for SDDP is not deterministic; it is a confidence interval!","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Some people suggest terminating SDDP when the lower bound is contained within the confidence interval. However, this is a poor choice because it is too easy to generate a false positive. For example, if we use a small number of replications then the width of the confidence interval will be large, and we are more likely to terminate!","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"In a future tutorial (not yet written...) we will discuss termination criteria in more depth. 
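{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"To make the discussion concrete, here is a minimal sketch of that confidence-interval test, written in terms of the lower_bound and upper_bound functions we defined earlier. (The function name in_confidence_interval is hypothetical; the test is shown only to illustrate why it is fragile, and it is not the rule we use.)","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"function in_confidence_interval(model::PolicyGraph; replications::Int)\n    lb = lower_bound(model)\n    μ, tσ = upper_bound(model; replications = replications)\n    # With few replications, tσ is large, so this test passes too easily and\n    # may stop training long before the policy is any good.\n    return μ - tσ <= lb <= μ + tσ\nend","category":"page"},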
For now, pick a large number of iterations and train for as long as possible.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"tip: Tip\nFor a rule of thumb, pick a large number of iterations to train the policy for (e.g., 10 times mathcalN times maxlimits_iinmathcalN Omega_i)","category":"page"},{"location":"explanation/theory_intro/#Implementation:-the-training-loop","page":"Introductory theory","title":"Implementation: the training loop","text":"","category":"section"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"The train loop of SDDP just applies the forward and backward passes iteratively, followed by a final simulation to compute the upper bound confidence interval:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"function train(\n model::PolicyGraph;\n iteration_limit::Int,\n replications::Int,\n io::IO = stdout,\n)\n for i in 1:iteration_limit\n println(io, \"Starting iteration $(i)\")\n outgoing_states, _ = forward_pass(model, io)\n backward_pass(model, outgoing_states, io)\n println(io, \"| Finished iteration\")\n println(io, \"| | lower_bound = \", lower_bound(model))\n end\n println(io, \"Termination status: iteration limit\")\n μ, tσ = upper_bound(model; replications = replications)\n println(io, \"Upper bound = $(μ) ± $(tσ)\")\n return\nend","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Using our model we defined earlier, we can go:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"train(model; iteration_limit = 3, replications = 100)","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Success! We trained a policy for a finite horizon multistage stochastic program using stochastic dual dynamic programming.","category":"page"},{"location":"explanation/theory_intro/#Implementation:-evaluating-the-policy","page":"Introductory theory","title":"Implementation: evaluating the policy","text":"","category":"section"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"A final step is the ability to evaluate the policy at a given point.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"function evaluate_policy(\n model::PolicyGraph;\n node::Int,\n incoming_state::Dict{Symbol,Float64},\n random_variable,\n)\n the_node = model.nodes[node]\n the_node.uncertainty.parameterize(random_variable)\n for (k, v) in incoming_state\n JuMP.fix(the_node.states[k].in, v; force = true)\n end\n JuMP.optimize!(the_node.subproblem)\n return Dict(\n k => JuMP.value.(v) for\n (k, v) in JuMP.object_dictionary(the_node.subproblem)\n )\nend\n\nevaluate_policy(\n model;\n node = 1,\n incoming_state = Dict(:volume => 150.0),\n random_variable = 75,\n)","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"note: Note\nThe random variable can be out-of-sample, i.e., it doesn't have to be in the vector Omega we created when defining the model! 
This is a notable difference to other multistage stochastic solution methods like progressive hedging or using the deterministic equivalent.","category":"page"},{"location":"explanation/theory_intro/#Example:-infinite-horizon","page":"Introductory theory","title":"Example: infinite horizon","text":"","category":"section"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"As promised earlier, our implementation is actually pretty general. It can solve any multistage stochastic (linear) program defined by a policy graph, including infinite horizon problems!","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Here's an example, where we have extended our earlier problem with an arc from node 3 to node 2 with probability 0.5. You can interpret the 0.5 as a discount factor.","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"model = PolicyGraph(\n subproblem_builder;\n graph = [Dict(2 => 1.0), Dict(3 => 1.0), Dict(2 => 0.5)],\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n)","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Then, train a policy:","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"train(model; iteration_limit = 3, replications = 100)","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"Success! We trained a policy for an infinite horizon multistage stochastic program using stochastic dual dynamic programming. 
Note how some of the forward passes are different lengths!","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"evaluate_policy(\n model;\n node = 3,\n incoming_state = Dict(:volume => 100.0),\n random_variable = 10.0,\n)","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"","category":"page"},{"location":"explanation/theory_intro/","page":"Introductory theory","title":"Introductory theory","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/generation_expansion/","page":"Generation expansion","title":"Generation expansion","text":"EditURL = \"generation_expansion.jl\"","category":"page"},{"location":"examples/generation_expansion/#Generation-expansion","page":"Generation expansion","title":"Generation expansion","text":"","category":"section"},{"location":"examples/generation_expansion/","page":"Generation expansion","title":"Generation expansion","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/generation_expansion/","page":"Generation expansion","title":"Generation expansion","text":"using SDDP\nimport HiGHS\nimport Test\n\nfunction generation_expansion(duality_handler)\n build_cost = 1e4\n use_cost = 4\n num_units = 5\n capacities = ones(num_units)\n demand_vals =\n 0.5 * [\n 5 5 5 5 5 5 5 5\n 4 3 1 3 0 9 8 17\n 0 9 4 2 19 19 13 7\n 25 11 4 14 4 6 15 12\n 6 7 5 3 8 4 17 13\n ]\n # Cost of unmet demand\n penalty = 5e5\n # Discounting rate\n rho = 0.99\n model = SDDP.LinearPolicyGraph(\n stages = 5,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n ) do sp, stage\n @variable(\n sp,\n 0 <= invested[1:num_units] <= 1,\n SDDP.State,\n Int,\n initial_value = 0\n )\n @variables(sp, begin\n generation >= 0\n unmet >= 0\n demand\n end)\n\n @constraints(\n sp,\n begin\n # Can't un-invest\n investment[i in 1:num_units], invested[i].out >= invested[i].in\n # Generation capacity\n sum(capacities[i] * invested[i].out for i in 1:num_units) >=\n generation\n # Meet demand or pay a penalty\n unmet >= demand - sum(generation)\n # For fewer iterations order the units to break symmetry, units are identical (tougher numerically)\n [j in 1:(num_units-1)], invested[j].out <= invested[j+1].out\n end\n )\n # Demand is uncertain\n SDDP.parameterize(ω -> JuMP.fix(demand, ω), sp, demand_vals[stage, :])\n\n @expression(\n sp,\n investment_cost,\n build_cost *\n sum(invested[i].out - invested[i].in for i in 1:num_units)\n )\n @stageobjective(\n sp,\n (investment_cost + generation * use_cost) * rho^(stage - 1) +\n penalty * unmet\n )\n end\n if get(ARGS, 1, \"\") == \"--write\"\n # Run `$ julia generation_expansion.jl --write` to update the benchmark\n # model directory\n model_dir = joinpath(@__DIR__, \"..\", \"..\", \"..\", \"benchmarks\", \"models\")\n SDDP.write_to_file(\n model,\n joinpath(model_dir, \"generation_expansion.sof.json.gz\");\n test_scenarios = 100,\n )\n exit(0)\n end\n SDDP.train(model; log_frequency = 10, duality_handler = duality_handler)\n Test.@test SDDP.calculate_bound(model) ≈ 2.078860e6 atol = 1e3\n return\nend\n\ngeneration_expansion(SDDP.ContinuousConicDuality())\ngeneration_expansion(SDDP.LagrangianDuality())","category":"page"},{"location":"examples/generation_expansion/","page":"Generation expansion","title":"Generation expansion","text":"","category":"page"},{"location":"examples/generation_expansion/","page":"Generation expansion","title":"Generation 
expansion","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/biobjective_hydro/","page":"Biobjective hydro-thermal","title":"Biobjective hydro-thermal","text":"EditURL = \"biobjective_hydro.jl\"","category":"page"},{"location":"examples/biobjective_hydro/#Biobjective-hydro-thermal","page":"Biobjective hydro-thermal","title":"Biobjective hydro-thermal","text":"","category":"section"},{"location":"examples/biobjective_hydro/","page":"Biobjective hydro-thermal","title":"Biobjective hydro-thermal","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/biobjective_hydro/","page":"Biobjective hydro-thermal","title":"Biobjective hydro-thermal","text":"using SDDP, HiGHS, Statistics, Test\n\nfunction biobjective_example()\n model = SDDP.LinearPolicyGraph(\n stages = 3,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n ) do subproblem, _\n @variable(subproblem, 0 <= v <= 200, SDDP.State, initial_value = 50)\n @variables(subproblem, begin\n 0 <= g[i = 1:2] <= 100\n 0 <= u <= 150\n s >= 0\n shortage_cost >= 0\n end)\n @expressions(subproblem, begin\n objective_1, g[1] + 10 * g[2]\n objective_2, shortage_cost\n end)\n @constraints(subproblem, begin\n inflow_constraint, v.out == v.in - u - s\n g[1] + g[2] + u == 150\n shortage_cost >= 40 - v.out\n shortage_cost >= 60 - 2 * v.out\n shortage_cost >= 80 - 4 * v.out\n end)\n # You must call this for a biobjective problem!\n SDDP.initialize_biobjective_subproblem(subproblem)\n SDDP.parameterize(subproblem, 0.0:5:50.0) do ω\n JuMP.set_normalized_rhs(inflow_constraint, ω)\n # You must call `set_biobjective_functions` from within\n # `SDDP.parameterize`.\n return SDDP.set_biobjective_functions(\n subproblem,\n objective_1,\n objective_2,\n )\n end\n end\n pareto_weights =\n SDDP.train_biobjective(model, solution_limit = 10, iteration_limit = 10)\n solutions = [(k, v) for (k, v) in pareto_weights]\n sort!(solutions; by = x -> x[1])\n @test length(solutions) == 10\n # Test for convexity! The gradient must be decreasing as we move from left\n # to right.\n gradient(a, b) = (b[2] - a[2]) / (b[1] - a[1])\n grad = Inf\n for i in 1:9\n new_grad = gradient(solutions[i], solutions[i+1])\n @test new_grad < grad\n grad = new_grad\n end\n return\nend\n\nbiobjective_example()","category":"page"},{"location":"examples/biobjective_hydro/","page":"Biobjective hydro-thermal","title":"Biobjective hydro-thermal","text":"","category":"page"},{"location":"examples/biobjective_hydro/","page":"Biobjective hydro-thermal","title":"Biobjective hydro-thermal","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/asset_management_simple/","page":"Asset management","title":"Asset management","text":"EditURL = \"asset_management_simple.jl\"","category":"page"},{"location":"examples/asset_management_simple/#Asset-management","page":"Asset management","title":"Asset management","text":"","category":"section"},{"location":"examples/asset_management_simple/","page":"Asset management","title":"Asset management","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/asset_management_simple/","page":"Asset management","title":"Asset management","text":"Taken from the book J.R. Birge, F. 
Louveaux, Introduction to Stochastic Programming, Springer Series in Operations Research and Financial Engineering, Springer New York, New York, NY, 2011","category":"page"},{"location":"examples/asset_management_simple/","page":"Asset management","title":"Asset management","text":"using SDDP, HiGHS, Test\n\nfunction asset_management_simple()\n model = SDDP.PolicyGraph(\n SDDP.MarkovianGraph(\n Array{Float64,2}[\n [1.0]',\n [0.5 0.5],\n [0.5 0.5; 0.5 0.5],\n [0.5 0.5; 0.5 0.5],\n ],\n ),\n lower_bound = -1_000.0,\n optimizer = HiGHS.Optimizer,\n ) do subproblem, index\n (stage, markov_state) = index\n r_stock = [1.25, 1.06]\n r_bonds = [1.14, 1.12]\n @variable(subproblem, stocks >= 0, SDDP.State, initial_value = 0.0)\n @variable(subproblem, bonds >= 0, SDDP.State, initial_value = 0.0)\n if stage == 1\n @constraint(subproblem, stocks.out + bonds.out == 55)\n @stageobjective(subproblem, 0)\n elseif 1 < stage < 4\n @constraint(\n subproblem,\n r_stock[markov_state] * stocks.in +\n r_bonds[markov_state] * bonds.in == stocks.out + bonds.out\n )\n @stageobjective(subproblem, 0)\n else\n @variable(subproblem, over >= 0)\n @variable(subproblem, short >= 0)\n @constraint(\n subproblem,\n r_stock[markov_state] * stocks.in +\n r_bonds[markov_state] * bonds.in - over + short == 80\n )\n @stageobjective(subproblem, -over + 4 * short)\n end\n end\n SDDP.train(model; log_frequency = 5)\n @test SDDP.calculate_bound(model) ≈ 1.514 atol = 1e-4\n return\nend\n\nasset_management_simple()","category":"page"},{"location":"examples/asset_management_simple/","page":"Asset management","title":"Asset management","text":"","category":"page"},{"location":"examples/asset_management_simple/","page":"Asset management","title":"Asset management","text":"This page was generated using Literate.jl.","category":"page"},{"location":"guides/access_previous_variables/#Access-variables-from-a-previous-stage","page":"Access variables from a previous stage","title":"Access variables from a previous stage","text":"","category":"section"},{"location":"guides/access_previous_variables/","page":"Access variables from a previous stage","title":"Access variables from a previous stage","text":"A common question is \"how do I use a variable from a previous stage in a constraint?\"","category":"page"},{"location":"guides/access_previous_variables/","page":"Access variables from a previous stage","title":"Access variables from a previous stage","text":"info: Info\nIf you want to use a variable from a previous stage, it must be a state variable.","category":"page"},{"location":"guides/access_previous_variables/","page":"Access variables from a previous stage","title":"Access variables from a previous stage","text":"Here are some examples:","category":"page"},{"location":"guides/access_previous_variables/#Access-a-first-stage-decision-in-a-future-stage","page":"Access variables from a previous stage","title":"Access a first-stage decision in a future stage","text":"","category":"section"},{"location":"guides/access_previous_variables/","page":"Access variables from a previous stage","title":"Access variables from a previous stage","text":"This is often useful if your first-stage decisions are capacity-expansion type decisions (e.g., you choose first how much capacity to add, but because it takes time to build, it only shows up in some future stage).","category":"page"},{"location":"guides/access_previous_variables/","page":"Access variables from a previous stage","title":"Access variables from a previous stage","text":"using SDDP, 
HiGHS\nSDDP.LinearPolicyGraph(\n stages = 10,\n sense = :Max,\n upper_bound = 100.0,\n optimizer = HiGHS.Optimizer,\n) do sp, t\n # Capacity of the generator. Decided in the first stage.\n @variable(sp, capacity >= 0, SDDP.State, initial_value = 0)\n # Quantity of water stored.\n @variable(sp, reservoir >= 0, SDDP.State, initial_value = 0)\n # Quantity of water to use for electricity generation in current stage.\n @variable(sp, generation >= 0)\n if t == 1\n # There are no constraints in the first stage, but we need to push the\n # initial value of the reservoir to the next stage.\n @constraint(sp, reservoir.out == reservoir.in)\n # Since we're maximizing profit, subtract cost of capacity.\n @stageobjective(sp, -capacity.out)\n else\n # Water balance constraint.\n @constraint(sp, balance, reservoir.out - reservoir.in + generation == 0)\n # Generation limit.\n @constraint(sp, generation <= capacity.in)\n # Push capacity to the next stage.\n @constraint(sp, capacity.out == capacity.in)\n # Maximize generation.\n @stageobjective(sp, generation)\n # Random inflow in balance constraint.\n SDDP.parameterize(sp, rand(4)) do w\n set_normalized_rhs(balance, w)\n end\n end\nend","category":"page"},{"location":"guides/access_previous_variables/#Access-a-decision-from-N-stages-ago","page":"Access variables from a previous stage","title":"Access a decision from N stages ago","text":"","category":"section"},{"location":"guides/access_previous_variables/","page":"Access variables from a previous stage","title":"Access variables from a previous stage","text":"This is often useful if have some inventory problem with a lead-time on orders.","category":"page"},{"location":"guides/access_previous_variables/","page":"Access variables from a previous stage","title":"Access variables from a previous stage","text":"using SDDP, HiGHS\nSDDP.LinearPolicyGraph(\n stages = 10,\n sense = :Max,\n upper_bound = 100,\n optimizer = HiGHS.Optimizer,\n) do sp, t\n # Current inventory on hand.\n @variable(sp, inventory >= 0, SDDP.State, initial_value = 0)\n # Inventory pipeline.\n # pipeline[1].out are orders placed today.\n # pipeline[5].in are orders that arrive today and can be added to the\n # current inventory.\n # Stock moves up one slot in the pipeline each stage.\n @variable(sp, pipeline[1:5], SDDP.State, initial_value = 0)\n # The number of units to order today.\n @variable(sp, 0 <= buy <= 10)\n # The number of units to sell today.\n @variable(sp, sell >= 0)\n # Buy orders get placed in the pipeline.\n @constraint(sp, pipeline[1].out == buy)\n # Stock moves up one slot in the pipeline each stage.\n @constraint(sp, [i=2:5], pipeline[i].out == pipeline[i-1].in)\n # Stock balance constraint.\n @constraint(sp, inventory.out == inventory.in - sell + pipeline[5].in)\n # Maximize quantity of sold items.\n @stageobjective(sp, sell)\nend","category":"page"},{"location":"guides/access_previous_variables/","page":"Access variables from a previous stage","title":"Access variables from a previous stage","text":"warning: Warning\nYou must initialize the same number of state variables in every stage, even if they are not used in that stage.","category":"page"},{"location":"guides/create_a_belief_state/","page":"Create a belief state","title":"Create a belief state","text":"DocTestSetup = quote\n using SDDP\nend","category":"page"},{"location":"guides/create_a_belief_state/#Create-a-belief-state","page":"Create a belief state","title":"Create a belief 
state","text":"","category":"section"},{"location":"guides/create_a_belief_state/","page":"Create a belief state","title":"Create a belief state","text":"SDDP.jl includes an implementation of the algorithm described in Dowson, O., Morton, D.P., & Pagnoncelli, B.K. (2020). Partially observable multistage stochastic optimization. Operations Research Letters, 48(4), 505–512.","category":"page"},{"location":"guides/create_a_belief_state/","page":"Create a belief state","title":"Create a belief state","text":"Given a SDDP.Graph object (see Create a general policy graph for details), we can define the ambiguity partition using SDDP.add_ambiguity_set.","category":"page"},{"location":"guides/create_a_belief_state/","page":"Create a belief state","title":"Create a belief state","text":"For example, first we create a Markovian graph:","category":"page"},{"location":"guides/create_a_belief_state/","page":"Create a belief state","title":"Create a belief state","text":"using SDDP\nG = SDDP.MarkovianGraph([[0.5 0.5], [0.2 0.8; 0.8 0.2]])","category":"page"},{"location":"guides/create_a_belief_state/","page":"Create a belief state","title":"Create a belief state","text":"Then we add an ambiguity set over the nodes in the each stage:","category":"page"},{"location":"guides/create_a_belief_state/","page":"Create a belief state","title":"Create a belief state","text":"for t in 1:2\n SDDP.add_ambiguity_set(G, [(t, 1), (t, 2)])\nend","category":"page"},{"location":"guides/create_a_belief_state/","page":"Create a belief state","title":"Create a belief state","text":"This results in the graph:","category":"page"},{"location":"guides/create_a_belief_state/","page":"Create a belief state","title":"Create a belief state","text":"G","category":"page"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"CurrentModule = SDDP","category":"page"},{"location":"release_notes/#Release-notes","page":"Release notes","title":"Release notes","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.","category":"page"},{"location":"release_notes/#[v1.6.2](https://github.com/odow/SDDP.jl/releases/tag/v1.6.2)-(August-24,-2023)","page":"Release notes","title":"v1.6.2 (August 24, 2023)","text":"","category":"section"},{"location":"release_notes/#Fixed","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"MSPFormat now detect and exploit stagewise independent lattices (#653)\nFixed set_optimizer for models read from file (#654)","category":"page"},{"location":"release_notes/#Other","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed typo in pglib_opf.jl (#647)\nFixed documentation build and added color (#652)","category":"page"},{"location":"release_notes/#[v1.6.1](https://github.com/odow/SDDP.jl/releases/tag/v1.6.1)-(July-20,-2023)","page":"Release notes","title":"v1.6.1 (July 20, 2023)","text":"","category":"section"},{"location":"release_notes/#Fixed-2","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed bugs in MSPFormat reader (#638) (#639)","category":"page"},{"location":"release_notes/#Other-2","page":"Release 
notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Clarified OutOfSampleMonteCarlo docstring (#643)","category":"page"},{"location":"release_notes/#[v1.6.0](https://github.com/odow/SDDP.jl/releases/tag/v1.6.0)-(July-3,-2023)","page":"Release notes","title":"v1.6.0 (July 3, 2023)","text":"","category":"section"},{"location":"release_notes/#Added","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added RegularizedForwardPass (#624)\nAdded FirstStageStoppingRule (#634)","category":"page"},{"location":"release_notes/#Other-3","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Removed an unbound type parameter (#632)\nFixed typo in docstring (#633)\nAdded Here-and-now and hazard-decision tutorial (#635)","category":"page"},{"location":"release_notes/#[v1.5.1](https://github.com/odow/SDDP.jl/releases/tag/v1.5.1)-(June-30,-2023)","page":"Release notes","title":"v1.5.1 (June 30, 2023)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"This release contains a number of minor code changes, but it has a large impact on the content that is printed to screen. In particular, we now log periodically, instead of each iteration, and a \"good\" stopping rule is used as the default if none are specified. Try using SDDP.train(model) to see the difference.","category":"page"},{"location":"release_notes/#Other-4","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed various typos in the documentation (#617)\nFixed printing test after changes in JuMP (#618)\nSet SimulationStoppingRule as the default stopping rule (#619)\nChanged the default logging frequency. Pass log_every_seconds = 0.0 to train to revert to the old behavior. (#620)\nAdded example usage with Distributions.jl (@slwu89) (#622)\nRemoved the numerical issue @warn (#627)\nImproved the quality of docstrings (#630)","category":"page"},{"location":"release_notes/#[v1.5.0](https://github.com/odow/SDDP.jl/releases/tag/v1.5.0)-(May-14,-2023)","page":"Release notes","title":"v1.5.0 (May 14, 2023)","text":"","category":"section"},{"location":"release_notes/#Added-2","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added the ability to use a different model for the forward pass. This is a novel feature that lets you train better policies when the model is non-convex or does not have a well-defined dual. See the Alternative forward models tutorial in which we train convex and non-convex formulations of the optimal power flow problem. (#611)","category":"page"},{"location":"release_notes/#Other-5","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Updated missing changelog entries (#608)\nRemoved global variables (#610)\nConverted the Options struct to keyword arguments. This struct was a private implementation detail, but the change is breaking if you developed an extension to SDDP that touched these internals. 
(#612)\nFixed some typos (#613)","category":"page"},{"location":"release_notes/#[v1.4.0](https://github.com/odow/SDDP.jl/releases/tag/v1.4.0)-(May-8,-2023)","page":"Release notes","title":"v1.4.0 (May 8, 2023)","text":"","category":"section"},{"location":"release_notes/#Added-3","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added SDDP.SimulationStoppingRule (#598)\nAdded sampling_scheme argument to SDDP.write_to_file (#607)","category":"page"},{"location":"release_notes/#Fixed-3","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed parsing of some MSPFormat files (#602) (#604)\nFixed printing in header (#605)","category":"page"},{"location":"release_notes/#[v1.3.0](https://github.com/odow/SDDP.jl/releases/tag/v1.3.0)-(May-3,-2023)","page":"Release notes","title":"v1.3.0 (May 3, 2023)","text":"","category":"section"},{"location":"release_notes/#Added-4","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added experimental support for SDDP.MSPFormat.read_from_file (#593)","category":"page"},{"location":"release_notes/#Other-6","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Updated to StochOptFormat v0.3 (#600)","category":"page"},{"location":"release_notes/#[v1.2.1](https://github.com/odow/SDDP.jl/releases/tag/v1.2.1)-(May-1,-2023)","page":"Release notes","title":"v1.2.1 (May 1, 2023)","text":"","category":"section"},{"location":"release_notes/#Fixed-4","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed log_every_seconds (#597)","category":"page"},{"location":"release_notes/#[v1.2.0](https://github.com/odow/SDDP.jl/releases/tag/v1.2.0)-(May-1,-2023)","page":"Release notes","title":"v1.2.0 (May 1, 2023)","text":"","category":"section"},{"location":"release_notes/#Added-5","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added SDDP.SimulatorSamplingScheme (#594)\nAdded log_every_seconds argument to SDDP.train (#595)","category":"page"},{"location":"release_notes/#Other-7","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Tweaked how the log is printed (#588)\nUpdated to StochOptFormat v0.2 (#592)","category":"page"},{"location":"release_notes/#[v1.1.4](https://github.com/odow/SDDP.jl/releases/tag/v1.1.4)-(April-10,-2023)","page":"Release notes","title":"v1.1.4 (April 10, 2023)","text":"","category":"section"},{"location":"release_notes/#Fixed-5","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Logs are now flushed every iteration (#584)","category":"page"},{"location":"release_notes/#Other-8","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added docstrings to various functions (#581)\nMinor documentation updates (#580)\nClarified integrality 
documentation (#582)\nUpdated the README (#585)\nNumber of numerical issues is now printed to the log (#586)","category":"page"},{"location":"release_notes/#[v1.1.3](https://github.com/odow/SDDP.jl/releases/tag/v1.1.3)-(April-2,-2023)","page":"Release notes","title":"v1.1.3 (April 2, 2023)","text":"","category":"section"},{"location":"release_notes/#Other-9","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed typo in Example: deterministic to stochastic tutorial (#578)\nFixed typo in documentation of SDDP.simulate (#577)","category":"page"},{"location":"release_notes/#[v1.1.2](https://github.com/odow/SDDP.jl/releases/tag/v1.1.2)-(March-18,-2023)","page":"Release notes","title":"v1.1.2 (March 18, 2023)","text":"","category":"section"},{"location":"release_notes/#Other-10","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added Example: deterministic to stochastic tutorial (#572)","category":"page"},{"location":"release_notes/#[v1.1.1](https://github.com/odow/SDDP.jl/releases/tag/v1.1.1)-(March-16,-2023)","page":"Release notes","title":"v1.1.1 (March 16, 2023)","text":"","category":"section"},{"location":"release_notes/#Other-11","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed email in Project.toml\nAdded notebook to documentation tutorials (#571)","category":"page"},{"location":"release_notes/#[v1.1.0](https://github.com/odow/SDDP.jl/releases/tag/v1.1.0)-(January-12,-2023)","page":"Release notes","title":"v1.1.0 (January 12, 2023)","text":"","category":"section"},{"location":"release_notes/#Added-6","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added the node_name_parser argument to SDDP.write_cuts_to_file and added the option to skip nodes in SDDP.read_cuts_from_file (#565)","category":"page"},{"location":"release_notes/#[v1.0.0](https://github.com/odow/SDDP.jl/releases/tag/v1.0.0)-(January-3,-2023)","page":"Release notes","title":"v1.0.0 (January 3, 2023)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Although we're bumping MAJOR version, this is a non-breaking release. Going forward:","category":"page"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"New features will bump the MINOR version\nBug fixes, maintenance, and documentation updates will bump the PATCH version\nWe will support only the Long Term Support (currently v1.6.7) and the latest patch (currently v1.8.4) releases of Julia. Updates to the LTS version will bump the MINOR version\nUpdates to the compat bounds of package dependencies will bump the PATCH version.","category":"page"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"We do not intend any breaking changes to the public API, which would require a new MAJOR release. The public API is everything defined in the documentation. 
Anything not in the documentation is considered private and may change in any PATCH release.","category":"page"},{"location":"release_notes/#Added-7","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added num_nodes argument to SDDP.UnicyclicGraph (#562)\nAdded support for passing an optimizer to SDDP.Asynchronous (#545)","category":"page"},{"location":"release_notes/#Other-12","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Updated Plotting tools to use live plots (#563)\nAdded vale as a linter (#565)\nImproved documentation for initializing a parallel scheme (#566)","category":"page"},{"location":"release_notes/#[v0.4.9](https://github.com/odow/SDDP.jl/releases/tag/v0.4.9)-(January-3,-2023)","page":"Release notes","title":"v0.4.9 (January 3, 2023)","text":"","category":"section"},{"location":"release_notes/#Added-8","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added SDDP.UnicyclicGraph (#556)","category":"page"},{"location":"release_notes/#Other-13","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added tutorial on Markov Decision Processes (#556)\nAdded two-stage newsvendor tutorial (#557)\nRefactored the layout of the documentation (#554) (#555)\nUpdated copyright to 2023 (#558)\nFixed errors in the documentation (#561)","category":"page"},{"location":"release_notes/#[v0.4.8](https://github.com/odow/SDDP.jl/releases/tag/v0.4.8)-(December-19,-2022)","page":"Release notes","title":"v0.4.8 (December 19, 2022)","text":"","category":"section"},{"location":"release_notes/#Added-9","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added terminate_on_cycle option to SDDP.Historical (#549)\nAdded include_last_node option to SDDP.DefaultForwardPass (#547)","category":"page"},{"location":"release_notes/#Fixed-6","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Reverted then fixed (#531) because it failed to account for problems with integer variables (#546) (#551)","category":"page"},{"location":"release_notes/#[v0.4.7](https://github.com/odow/SDDP.jl/releases/tag/v0.4.7)-(December-17,-2022)","page":"Release notes","title":"v0.4.7 (December 17, 2022)","text":"","category":"section"},{"location":"release_notes/#Added-10","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added initial_node support to InSampleMonteCarlo and OutOfSampleMonteCarlo (#535)","category":"page"},{"location":"release_notes/#Fixed-7","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Rethrow InterruptException when solver is interrupted (#534)\nFixed numerical recovery when we need dual solutions (#531) (Thanks @bfpc)\nFixed re-using the dashboard = true option between solves (#538)\nFixed bug when no @stageobjective is set (now defaults to 0.0) (#539)\nFixed errors thrown when invalid inputs 
are provided to add_objective_state (#540)","category":"page"},{"location":"release_notes/#Other-14","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Drop support for Julia versions prior to 1.6 (#533)\nUpdated versions of dependencies (#522) (#533)\nSwitched to HiGHS in the documentation and tests (#533)\nAdded license headers (#519)\nFixed link in air conditioning example (#521) (Thanks @conema)\nClarified variable naming in deterministic equivalent (#525) (Thanks @lucasprocessi)\nAdded this change log (#536)\nCuts are now written to model.cuts.json when numerical instability is discovered. This can aid debugging because it allows you to reload the cuts as of the iteration that caused the numerical issue (#537)","category":"page"},{"location":"release_notes/#[v0.4.6](https://github.com/odow/SDDP.jl/releases/tag/v0.4.6)-(March-25,-2022)","page":"Release notes","title":"v0.4.6 (March 25, 2022)","text":"","category":"section"},{"location":"release_notes/#Other-15","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Updated to JuMP v1.0 (#517)","category":"page"},{"location":"release_notes/#[v0.4.5](https://github.com/odow/SDDP.jl/releases/tag/v0.4.5)-(March-9,-2022)","page":"Release notes","title":"v0.4.5 (March 9, 2022)","text":"","category":"section"},{"location":"release_notes/#Fixed-8","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed issue with set_silent in a subproblem (#510)","category":"page"},{"location":"release_notes/#Other-16","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed many typos (#500) (#501) (#506) (#511) (Thanks @bfpc)\nUpdate to JuMP v0.23 (#514)\nAdded auto-regressive tutorial (#507)","category":"page"},{"location":"release_notes/#[v0.4.4](https://github.com/odow/SDDP.jl/releases/tag/v0.4.4)-(December-11,-2021)","page":"Release notes","title":"v0.4.4 (December 11, 2021)","text":"","category":"section"},{"location":"release_notes/#Added-11","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added BanditDuality (#471)\nAdded benchmark scripts (#475) (#476) (#490)\nwrite_cuts_to_file now saves visited states (#468)","category":"page"},{"location":"release_notes/#Fixed-9","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed BoundStalling in a deterministic policy (#470) (#474)\nFixed magnitude warning with zero coefficients (#483)","category":"page"},{"location":"release_notes/#Other-17","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Improvements to LagrangianDuality (#481) (#482) (#487)\nImprovements to StrengthenedConicDuality (#486)\nSwitch to functional form for the tests (#478)\nFixed typos (#472) (Thanks @vfdev-5)\nUpdate to JuMP v0.22 (#498)","category":"page"},{"location":"release_notes/#[v0.4.3](https://github.com/odow/SDDP.jl/releases/tag/v0.4.3)-(August-31,-2021)","page":"Release notes","title":"v0.4.3 (August 31, 
2021)","text":"","category":"section"},{"location":"release_notes/#Added-12","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added biobjective solver (#462)\nAdded forward_pass_callback (#466)","category":"page"},{"location":"release_notes/#Other-18","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Update tutorials and documentation (#459) (#465)\nOrganize how paper materials are stored (#464)","category":"page"},{"location":"release_notes/#[v0.4.2](https://github.com/odow/SDDP.jl/releases/tag/v0.4.2)-(August-24,-2021)","page":"Release notes","title":"v0.4.2 (August 24, 2021)","text":"","category":"section"},{"location":"release_notes/#Fixed-10","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed a bug in Lagrangian duality (#457)","category":"page"},{"location":"release_notes/#[v0.4.1](https://github.com/odow/SDDP.jl/releases/tag/v0.4.1)-(August-23,-2021)","page":"Release notes","title":"v0.4.1 (August 23, 2021)","text":"","category":"section"},{"location":"release_notes/#Other-19","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Minor changes to our implementation of LagrangianDuality (#454) (#455)","category":"page"},{"location":"release_notes/#[v0.4.0](https://github.com/odow/SDDP.jl/releases/tag/v0.4.0)-(August-17,-2021)","page":"Release notes","title":"v0.4.0 (August 17, 2021)","text":"","category":"section"},{"location":"release_notes/#Breaking","page":"Release notes","title":"Breaking","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"A large refactoring for how we handle stochastic integer programs. This added support for things like SDDP.ContinuousConicDuality and SDDP.LagrangianDuality. It was breaking because we removed the integrality_handler argument to PolicyGraph. 
(#499) (#453)","category":"page"},{"location":"release_notes/#Other-20","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Documentation improvements (#447) (#448) (#450)","category":"page"},{"location":"release_notes/#[v0.3.17](https://github.com/odow/SDDP.jl/releases/tag/v0.3.17)-(July-6,-2021)","page":"Release notes","title":"v0.3.17 (July 6, 2021)","text":"","category":"section"},{"location":"release_notes/#Added-13","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added SDDP.PSRSamplingScheme (#426)","category":"page"},{"location":"release_notes/#Other-21","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Display more model attributes (#438)\nDocumentation improvements (#433) (#437) (#439)","category":"page"},{"location":"release_notes/#[v0.3.16](https://github.com/odow/SDDP.jl/releases/tag/v0.3.16)-(June-17,-2021)","page":"Release notes","title":"v0.3.16 (June 17, 2021)","text":"","category":"section"},{"location":"release_notes/#Added-14","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added SDDP.RiskAdjustedForwardPass (#413)\nAllow SDDP.Historical to sample sequentially (#420)","category":"page"},{"location":"release_notes/#Other-22","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Update risk measure docstrings (#418)","category":"page"},{"location":"release_notes/#[v0.3.15](https://github.com/odow/SDDP.jl/releases/tag/v0.3.15)-(June-1,-2021)","page":"Release notes","title":"v0.3.15 (June 1, 2021)","text":"","category":"section"},{"location":"release_notes/#Added-15","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added SDDP.StoppingChain","category":"page"},{"location":"release_notes/#Fixed-11","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed scoping bug in SDDP.@stageobjective (#407)\nFixed a bug when the initial point is infeasible (#411)\nSet subproblems to silent by default (#409)","category":"page"},{"location":"release_notes/#Other-23","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Add JuliaFormatter (#412)\nDocumentation improvements (#406) (#408)","category":"page"},{"location":"release_notes/#[v0.3.14](https://github.com/odow/SDDP.jl/releases/tag/v0.3.14)-(March-30,-2021)","page":"Release notes","title":"v0.3.14 (March 30, 2021)","text":"","category":"section"},{"location":"release_notes/#Fixed-12","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed O(N^2) behavior in get_same_children (#393)","category":"page"},{"location":"release_notes/#[v0.3.13](https://github.com/odow/SDDP.jl/releases/tag/v0.3.13)-(March-27,-2021)","page":"Release notes","title":"v0.3.13 (March 27, 
2021)","text":"","category":"section"},{"location":"release_notes/#Fixed-13","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed bug in print.jl\nFixed compat of Reexport (#388)","category":"page"},{"location":"release_notes/#[v0.3.12](https://github.com/odow/SDDP.jl/releases/tag/v0.3.12)-(March-22,-2021)","page":"Release notes","title":"v0.3.12 (March 22, 2021)","text":"","category":"section"},{"location":"release_notes/#Added-16","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added problem statistics to header (#385) (#386)","category":"page"},{"location":"release_notes/#Fixed-14","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed subtypes in visualization (#384)","category":"page"},{"location":"release_notes/#[v0.3.11](https://github.com/odow/SDDP.jl/releases/tag/v0.3.11)-(March-22,-2021)","page":"Release notes","title":"v0.3.11 (March 22, 2021)","text":"","category":"section"},{"location":"release_notes/#Fixed-15","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed constructor in direct mode (#383)","category":"page"},{"location":"release_notes/#Other-24","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fix documentation (#379)","category":"page"},{"location":"release_notes/#[v0.3.10](https://github.com/odow/SDDP.jl/releases/tag/v0.3.10)-(February-23,-2021)","page":"Release notes","title":"v0.3.10 (February 23, 2021)","text":"","category":"section"},{"location":"release_notes/#Fixed-16","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed seriescolor in publication plot (#376)","category":"page"},{"location":"release_notes/#[v0.3.9](https://github.com/odow/SDDP.jl/releases/tag/v0.3.9)-(February-20,-2021)","page":"Release notes","title":"v0.3.9 (February 20, 2021)","text":"","category":"section"},{"location":"release_notes/#Added-17","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Add option to simulate with different incoming state (#372)\nAdded warning for cuts with high dynamic range (#373)","category":"page"},{"location":"release_notes/#Fixed-17","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed seriesalpha in publication plot (#375)","category":"page"},{"location":"release_notes/#[v0.3.8](https://github.com/odow/SDDP.jl/releases/tag/v0.3.8)-(January-19,-2021)","page":"Release notes","title":"v0.3.8 (January 19, 2021)","text":"","category":"section"},{"location":"release_notes/#Other-25","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Documentation improvements (#367) (#369) (#370)","category":"page"},{"location":"release_notes/#[v0.3.7](https://github.com/odow/SDDP.jl/releases/tag/v0.3.7)-(January-8,-2021)","page":"Release 
notes","title":"v0.3.7 (January 8, 2021)","text":"","category":"section"},{"location":"release_notes/#Other-26","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Documentation improvements (#362) (#363) (#365) (#366)\nBump copyright (#364)","category":"page"},{"location":"release_notes/#[v0.3.6](https://github.com/odow/SDDP.jl/releases/tag/v0.3.6)-(December-17,-2020)","page":"Release notes","title":"v0.3.6 (December 17, 2020)","text":"","category":"section"},{"location":"release_notes/#Other-27","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fix typos (#358)\nCollapse navigation bar in docs (#359)\nUpdate TagBot.yml (#361)","category":"page"},{"location":"release_notes/#[v0.3.5](https://github.com/odow/SDDP.jl/releases/tag/v0.3.5)-(November-18,-2020)","page":"Release notes","title":"v0.3.5 (November 18, 2020)","text":"","category":"section"},{"location":"release_notes/#Other-28","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Update citations (#348)\nSwitch to GitHub actions (#355)","category":"page"},{"location":"release_notes/#[v0.3.4](https://github.com/odow/SDDP.jl/releases/tag/v0.3.4)-(August-25,-2020)","page":"Release notes","title":"v0.3.4 (August 25, 2020)","text":"","category":"section"},{"location":"release_notes/#Added-18","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added non-uniform distributionally robust risk measure (#328)\nAdded numerical recovery functions (#330)\nAdded experimental StochOptFormat (#332) (#336) (#337) (#341) (#343) (#344)\nAdded entropic risk measure (#347)","category":"page"},{"location":"release_notes/#Other-29","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Documentation improvements (#327) (#333) (#339) (#340)","category":"page"},{"location":"release_notes/#[v0.3.3](https://github.com/odow/SDDP.jl/releases/tag/v0.3.3)-(June-19,-2020)","page":"Release notes","title":"v0.3.3 (June 19, 2020)","text":"","category":"section"},{"location":"release_notes/#Added-19","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added asynchronous support for price and belief states (#325)\nAdded ForwardPass plug-in system (#320)","category":"page"},{"location":"release_notes/#Fixed-18","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fix check for probabilities in Markovian graph (#322)","category":"page"},{"location":"release_notes/#[v0.3.2](https://github.com/odow/SDDP.jl/releases/tag/v0.3.2)-(April-6,-2020)","page":"Release notes","title":"v0.3.2 (April 6, 2020)","text":"","category":"section"},{"location":"release_notes/#Added-20","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added log_frequency argument to SDDP.train (#307)","category":"page"},{"location":"release_notes/#Other-30","page":"Release 
notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Improve error message in deterministic equivalent (#312)\nUpdate to RecipesBase 1.0 (#313)","category":"page"},{"location":"release_notes/#[v0.3.1](https://github.com/odow/SDDP.jl/releases/tag/v0.3.1)-(February-26,-2020)","page":"Release notes","title":"v0.3.1 (February 26, 2020)","text":"","category":"section"},{"location":"release_notes/#Fixed-19","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed filename in integrality_handlers.jl (#304)","category":"page"},{"location":"release_notes/#[v0.3.0](https://github.com/odow/SDDP.jl/releases/tag/v0.3.0)-(February-20,-2020)","page":"Release notes","title":"v0.3.0 (February 20, 2020)","text":"","category":"section"},{"location":"release_notes/#Breaking-2","page":"Release notes","title":"Breaking","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Breaking changes to update to JuMP v0.21 (#300).","category":"page"},{"location":"release_notes/#[v0.2.4](https://github.com/odow/SDDP.jl/releases/tag/v0.2.4)-(February-7,-2020)","page":"Release notes","title":"v0.2.4 (February 7, 2020)","text":"","category":"section"},{"location":"release_notes/#Added-21","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added a counter for the number of total subproblem solves (#301)","category":"page"},{"location":"release_notes/#Other-31","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Update formatter (#298)\nAdded tests (#299)","category":"page"},{"location":"release_notes/#[v0.2.3](https://github.com/odow/SDDP.jl/releases/tag/v0.2.3)-(January-24,-2020)","page":"Release notes","title":"v0.2.3 (January 24, 2020)","text":"","category":"section"},{"location":"release_notes/#Added-22","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added support for convex risk measures (#294)","category":"page"},{"location":"release_notes/#Fixed-20","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed bug when subproblem is infeasible (#296)\nFixed bug in deterministic equivalent (#297)","category":"page"},{"location":"release_notes/#Other-32","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added example from IJOC paper (#293)","category":"page"},{"location":"release_notes/#[v0.2.2](https://github.com/odow/SDDP.jl/releases/tag/v0.2.2)-(January-10,-2020)","page":"Release notes","title":"v0.2.2 (January 10, 2020)","text":"","category":"section"},{"location":"release_notes/#Fixed-21","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Fixed flakey time limit in tests (#291)","category":"page"},{"location":"release_notes/#Other-33","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release 
notes","title":"Release notes","text":"Removed MathOptFormat.jl (#289)\nUpdate copyright (#290)","category":"page"},{"location":"release_notes/#[v0.2.1](https://github.com/odow/SDDP.jl/releases/tag/v0.2.1)-(December-19,-2019)","page":"Release notes","title":"v0.2.1 (December 19, 2019)","text":"","category":"section"},{"location":"release_notes/#Added-23","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added support for approximating a Markov lattice (#282) (#285)\nAdd tools for visualizing the value function (#272) (#286)\nWrite .mof.json files on error (#284)","category":"page"},{"location":"release_notes/#Other-34","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Improve documentation (#281) (#283)\nUpdate tests for Julia 1.3 (#287)","category":"page"},{"location":"release_notes/#[v0.2.0](https://github.com/odow/SDDP.jl/releases/tag/v0.2.0)-(December-16,-2019)","page":"Release notes","title":"v0.2.0 (December 16, 2019)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"This version added the asynchronous parallel implementation with a few minor breaking changes in how we iterated internally. It didn't break basic user-facing models, only implementations that implemented some of the extension features. It probably could have been a v1.1 release.","category":"page"},{"location":"release_notes/#Added-24","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Added asynchronous parallel implementation (#277)\nAdded roll-out algorithm for cyclic graphs (#279)","category":"page"},{"location":"release_notes/#Other-35","page":"Release notes","title":"Other","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Improved error messages in PolicyGraph (#271)\nAdded JuliaFormatter (#273) (#276)\nFixed compat bounds (#274) (#278)\nAdded documentation for simulating non-standard graphs (#280)","category":"page"},{"location":"release_notes/#[v0.1.0](https://github.com/odow/SDDP.jl/releases/tag/v0.1.0)-(October-17,-2019)","page":"Release notes","title":"v0.1.0 (October 17, 2019)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"A complete rewrite of SDDP.jl based on the policy graph framework. This was essentially a new package. It has minimal code in common with the previous implementation.","category":"page"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Development started on September 28, 2018 in Kokako.jl, and the code was merged into SDDP.jl on March 14, 2019.","category":"page"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"The pull request SDDP.jl#180 lists the 29 issues that the rewrite closed.","category":"page"},{"location":"release_notes/#[v0.0.1](https://github.com/odow/SDDP.jl/releases/tag/v0.0.1)-(April-18,-2018)","page":"Release notes","title":"v0.0.1 (April 18, 2018)","text":"","category":"section"},{"location":"release_notes/","page":"Release notes","title":"Release notes","text":"Initial release. Development had been underway since January 22, 2016 in the StochDualDynamicProgram.jl repository. 
The last development commit there was April 5, 2017. Work then continued in this repository for a year before the first tagged release.","category":"page"},{"location":"examples/asset_management_stagewise/","page":"Asset management with modifications","title":"Asset management with modifications","text":"EditURL = \"asset_management_stagewise.jl\"","category":"page"},{"location":"examples/asset_management_stagewise/#Asset-management-with-modifications","page":"Asset management with modifications","title":"Asset management with modifications","text":"","category":"section"},{"location":"examples/asset_management_stagewise/","page":"Asset management with modifications","title":"Asset management with modifications","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/asset_management_stagewise/","page":"Asset management with modifications","title":"Asset management with modifications","text":"A modified version of the Asset Management Problem Taken from the book J.R. Birge, F. Louveaux, Introduction to Stochastic Programming, Springer Series in Operations Research and Financial Engineering, Springer New York, New York, NY, 2011","category":"page"},{"location":"examples/asset_management_stagewise/","page":"Asset management with modifications","title":"Asset management with modifications","text":"using SDDP, HiGHS, Test\n\nfunction asset_management_stagewise(; cut_type)\n w_s = [1.25, 1.06]\n w_b = [1.14, 1.12]\n Phi = [-1, 5]\n Psi = [0.02, 0.0]\n\n model = SDDP.MarkovianPolicyGraph(\n sense = :Max,\n transition_matrices = Array{Float64,2}[\n [1.0]',\n [0.5 0.5],\n [0.5 0.5; 0.5 0.5],\n [0.5 0.5; 0.5 0.5],\n ],\n upper_bound = 1000.0,\n optimizer = HiGHS.Optimizer,\n ) do subproblem, node\n t, i = node\n @variable(subproblem, xs >= 0, SDDP.State, initial_value = 0)\n @variable(subproblem, xb >= 0, SDDP.State, initial_value = 0)\n if t == 1\n @constraint(subproblem, xs.out + xb.out == 55 + xs.in + xb.in)\n @stageobjective(subproblem, 0)\n elseif t == 2 || t == 3\n @variable(subproblem, phi)\n @constraint(\n subproblem,\n w_s[i] * xs.in + w_b[i] * xb.in + phi == xs.out + xb.out\n )\n SDDP.parameterize(subproblem, [1, 2], [0.6, 0.4]) do ω\n JuMP.fix(phi, Phi[ω])\n @stageobjective(subproblem, Psi[ω] * xs.out)\n end\n else\n @variable(subproblem, u >= 0)\n @variable(subproblem, v >= 0)\n @constraint(\n subproblem,\n w_s[i] * xs.in + w_b[i] * xb.in + u - v == 80,\n )\n @stageobjective(subproblem, -4u + v)\n end\n end\n SDDP.train(\n model;\n cut_type = cut_type,\n log_frequency = 10,\n risk_measure = (node) -> begin\n if node[1] != 3\n SDDP.Expectation()\n else\n SDDP.EAVaR(lambda = 0.5, beta = 0.5)\n end\n end,\n )\n @test SDDP.calculate_bound(model) ≈ 1.278 atol = 1e-3\n return\nend\n\nasset_management_stagewise(cut_type = SDDP.SINGLE_CUT)\n\nasset_management_stagewise(cut_type = SDDP.MULTI_CUT)","category":"page"},{"location":"examples/asset_management_stagewise/","page":"Asset management with modifications","title":"Asset management with modifications","text":"","category":"page"},{"location":"examples/asset_management_stagewise/","page":"Asset management with modifications","title":"Asset management with modifications","text":"This page was generated using Literate.jl.","category":"page"},{"location":"guides/choose_a_stopping_rule/#Choose-a-stopping-rule","page":"Choose a stopping rule","title":"Choose a stopping rule","text":"","category":"section"},{"location":"guides/choose_a_stopping_rule/","page":"Choose a stopping rule","title":"Choose a stopping rule","text":"The theory 
of SDDP tells us that the algorithm converges to an optimal policy almost surely in a finite number of iterations. In practice, this number is very large. Therefore, we need some way of pre-emptively terminating SDDP when the solution is “good enough.” We call heuristics for pre-emptively terminating SDDP stopping rules.","category":"page"},{"location":"guides/choose_a_stopping_rule/#Basic-limits","page":"Choose a stopping rule","title":"Basic limits","text":"","category":"section"},{"location":"guides/choose_a_stopping_rule/","page":"Choose a stopping rule","title":"Choose a stopping rule","text":"The training of an SDDP policy can be terminated after a fixed number of iterations using the iteration_limit keyword.","category":"page"},{"location":"guides/choose_a_stopping_rule/","page":"Choose a stopping rule","title":"Choose a stopping rule","text":"SDDP.train(model, iteration_limit = 10)","category":"page"},{"location":"guides/choose_a_stopping_rule/","page":"Choose a stopping rule","title":"Choose a stopping rule","text":"The training of an SDDP policy can be terminated after a fixed number of seconds using the time_limit keyword.","category":"page"},{"location":"guides/choose_a_stopping_rule/","page":"Choose a stopping rule","title":"Choose a stopping rule","text":"SDDP.train(model, time_limit = 2.0)","category":"page"},{"location":"guides/choose_a_stopping_rule/#Stopping-rules","page":"Choose a stopping rule","title":"Stopping rules","text":"","category":"section"},{"location":"guides/choose_a_stopping_rule/","page":"Choose a stopping rule","title":"Choose a stopping rule","text":"In addition to the limits provided as keyword arguments, a variety of other stopping rules are available. These can be passed to SDDP.train as a vector to the stopping_rules keyword. 
For example:","category":"page"},{"location":"guides/choose_a_stopping_rule/","page":"Choose a stopping rule","title":"Choose a stopping rule","text":"SDDP.train(model, stopping_rules = [SDDP.BoundStalling(10, 1e-4)])","category":"page"},{"location":"guides/choose_a_stopping_rule/","page":"Choose a stopping rule","title":"Choose a stopping rule","text":"See Stopping rules for a list of stopping rules supported by SDDP.jl.","category":"page"},{"location":"examples/belief/","page":"Partially observable inventory management","title":"Partially observable inventory management","text":"EditURL = \"belief.jl\"","category":"page"},{"location":"examples/belief/#Partially-observable-inventory-management","page":"Partially observable inventory management","title":"Partially observable inventory management","text":"","category":"section"},{"location":"examples/belief/","page":"Partially observable inventory management","title":"Partially observable inventory management","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/belief/","page":"Partially observable inventory management","title":"Partially observable inventory management","text":"using SDDP, HiGHS, Random, Statistics, Test\n\nfunction inventory_management_problem()\n demand_values = [1.0, 2.0]\n demand_prob = Dict(:Ah => [0.2, 0.8], :Bh => [0.8, 0.2])\n graph = SDDP.Graph(\n :root_node,\n [:Ad, :Ah, :Bd, :Bh],\n [\n (:root_node => :Ad, 0.5),\n (:root_node => :Bd, 0.5),\n (:Ad => :Ah, 1.0),\n (:Ah => :Ad, 0.8),\n (:Ah => :Bd, 0.1),\n (:Bd => :Bh, 1.0),\n (:Bh => :Bd, 0.8),\n (:Bh => :Ad, 0.1),\n ],\n )\n SDDP.add_ambiguity_set(graph, [:Ad, :Bd], 1e2)\n SDDP.add_ambiguity_set(graph, [:Ah, :Bh], 1e2)\n\n model = SDDP.PolicyGraph(\n graph,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n ) do subproblem, node\n @variables(\n subproblem,\n begin\n 0 <= inventory <= 2, (SDDP.State, initial_value = 0.0)\n buy >= 0\n demand\n end\n )\n @constraint(subproblem, demand == inventory.in - inventory.out + buy)\n if node == :Ad || node == :Bd || node == :D\n JuMP.fix(demand, 0)\n @stageobjective(subproblem, buy)\n else\n SDDP.parameterize(subproblem, demand_values, demand_prob[node]) do ω\n return JuMP.fix(demand, ω)\n end\n @stageobjective(subproblem, 2 * buy + inventory.out)\n end\n end\n # Train the policy.\n Random.seed!(123)\n SDDP.train(\n model;\n iteration_limit = 100,\n cut_type = SDDP.SINGLE_CUT,\n log_frequency = 10,\n )\n results = SDDP.simulate(model, 500)\n objectives =\n [sum(s[:stage_objective] for s in simulation) for simulation in results]\n sample_mean = round(Statistics.mean(objectives); digits = 2)\n sample_ci = round(1.96 * Statistics.std(objectives) / sqrt(500); digits = 2)\n @test SDDP.calculate_bound(model) ≈ sample_mean atol = sample_ci\n return\nend\n\ninventory_management_problem()","category":"page"},{"location":"examples/belief/","page":"Partially observable inventory management","title":"Partially observable inventory management","text":"","category":"page"},{"location":"examples/belief/","page":"Partially observable inventory management","title":"Partially observable inventory management","text":"This page was generated using Literate.jl.","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"EditURL = \"decision_hazard.jl\"","category":"page"},{"location":"tutorial/decision_hazard/#Here-and-now-and-hazard-decision","page":"Here-and-now and hazard-decision","title":"Here-and-now and 
hazard-decision","text":"","category":"section"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"(Image: ) (Image: )","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"SDDP.jl assumes that the agent gets to make a decision after observing the realization of the random variable. This is called a wait-and-see or hazard-decision model. In contrast, you might want your agent to make decisions before observing the random variable. This is called a here-and-now or decision-hazard model.","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"info: Info\nThe terms decision-hazard and hazard-decision from the French hasard, meaning chance. It could also have been translated as uncertainty-decision and decision-uncertainty, but the community seems to have settled on the transliteration hazard instead. We like the hazard-decision and decision-hazard terms because they clearly communicate the order of the decision and the uncertainty.","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"The purpose of this tutorial is to demonstrate how to model here-and-how decisions in SDDP.jl.","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"This tutorial uses the following packages:","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"using SDDP\nimport HiGHS","category":"page"},{"location":"tutorial/decision_hazard/#Hazard-decision-formulation","page":"Here-and-now and hazard-decision","title":"Hazard-decision formulation","text":"","category":"section"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"As an example, we're going to build a standard hydro-thermal scheduling model, with a single hydro-reservoir and a single thermal generation plant. 
In each of the four stages, we need to choose some mix of u_thermal and u_hydro to meet a demand of 9 units, where unmet demand is penalized at a rate of $500/unit.","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"hazard_decision = SDDP.LinearPolicyGraph(\n    stages = 4,\n    sense = :Min,\n    lower_bound = 0.0,\n    optimizer = HiGHS.Optimizer,\n) do sp, node\n    @variables(sp, begin\n        0 <= x_storage <= 8, (SDDP.State, initial_value = 6)\n        u_thermal >= 0\n        u_hydro >= 0\n        u_unmet_demand >= 0\n    end)\n    @constraint(sp, u_thermal + u_hydro == 9 - u_unmet_demand)\n    @constraint(sp, c_balance, x_storage.out == x_storage.in - u_hydro + 0)\n    SDDP.parameterize(sp, [2, 3]) do ω_inflow\n        return set_normalized_rhs(c_balance, ω_inflow)\n    end\n    @stageobjective(sp, 500 * u_unmet_demand + 20 * u_thermal)\nend","category":"page"},{"location":"tutorial/decision_hazard/#Decision-hazard-formulation","page":"Here-and-now and hazard-decision","title":"Decision-hazard formulation","text":"","category":"section"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"In the wait-and-see formulation, we get to decide the generation variables after observing the realization of ω_inflow. However, a common modeling situation is that we need to decide the level of thermal generation u_thermal before observing the inflow.","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"SDDP.jl can model here-and-now decisions with a modeling trick: a wait-and-see decision in stage t-1 is equivalent to a here-and-now decision in stage t.","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"In other words, we need to convert the u_thermal decision from a control variable that is decided in stage t, to a state variable that is decided in stage t-1. Here's our new model, with the three lines that have changed:","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"decision_hazard = SDDP.LinearPolicyGraph(\n    stages = 4,\n    sense = :Min,\n    lower_bound = 0.0,\n    optimizer = HiGHS.Optimizer,\n) do sp, node\n    @variables(sp, begin\n        0 <= x_storage <= 8, (SDDP.State, initial_value = 6)\n        u_thermal >= 0, (SDDP.State, initial_value = 0) # <-- changed\n        u_hydro >= 0\n        u_unmet_demand >= 0\n    end)\n    @constraint(sp, u_thermal.in + u_hydro == 9 - u_unmet_demand) # <-- changed\n    @constraint(sp, c_balance, x_storage.out == x_storage.in - u_hydro + 0)\n    SDDP.parameterize(sp, [2, 3]) do ω\n        return set_normalized_rhs(c_balance, ω)\n    end\n    @stageobjective(sp, 500 * u_unmet_demand + 20 * u_thermal.in) # <-- changed\nend","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"Can you understand the reformulation? In each stage, we now use the value of u_thermal.in instead of u_thermal, and the value of the outgoing u_thermal.out is the here-and-now decision for stage t+1.","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"(If you can spot a \"mistake\" with this model, don't worry, we'll fix it below. 
Presenting it like this simplifies the exposition.)","category":"page"},{"location":"tutorial/decision_hazard/#Comparison","page":"Here-and-now and hazard-decision","title":"Comparison","text":"","category":"section"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"Let's compare the cost of operating the two models:","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"function train_and_compute_cost(model)\n SDDP.train(model; print_level = 0)\n return println(\"Cost = \\$\", SDDP.calculate_bound(model))\nend\n\ntrain_and_compute_cost(hazard_decision)","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"train_and_compute_cost(decision_hazard)","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"This suggests that choosing the thermal generation before observing the inflow adds a cost of $250. But does this make sense?","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"If we look carefully at our decision_hazard model, the incoming value of u_thermal.in in the first stage is fixed to the initial_value of 0. Therefore, we must always meet the full demand with u_hydro, which we cannot do without incurring unmet demand.","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"To allow the model to choose an optimal level of u_thermal in the first-stage, we need to add an extra stage that is deterministic with no stage objective.","category":"page"},{"location":"tutorial/decision_hazard/#Fixing-the-decision-hazard","page":"Here-and-now and hazard-decision","title":"Fixing the decision-hazard","text":"","category":"section"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"In the following model, we now have five stages, so that stage t+1 in decision_hazard_2 corresponds to stage t in decision_hazard. We've also added an if-statement, which adds different constraints depending on the node. 
Note that we need to add an x_storage.out == x_storage.in constraint because the storage can't change in this new first-stage.","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"decision_hazard_2 = SDDP.LinearPolicyGraph(\n stages = 5, # <-- changed\n sense = :Min,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n) do sp, node\n @variables(sp, begin\n 0 <= x_storage <= 8, (SDDP.State, initial_value = 6)\n u_thermal >= 0, (SDDP.State, initial_value = 0)\n u_hydro >= 0\n u_unmet_demand >= 0\n end)\n if node == 1 # <-- new\n @constraint(sp, x_storage.out == x_storage.in) # <-- new\n @stageobjective(sp, 0) # <-- new\n else\n @constraint(sp, u_thermal.in + u_hydro == 9 - u_unmet_demand)\n @constraint(sp, c_balance, x_storage.out == x_storage.in - u_hydro + 0)\n SDDP.parameterize(sp, [2, 3]) do ω\n return set_normalized_rhs(c_balance, ω)\n end\n @stageobjective(sp, 500 * u_unmet_demand + 20 * u_thermal.in)\n end\nend\n\ntrain_and_compute_cost(decision_hazard_2)","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"Now we find that the cost of choosing the thermal generation before observing the inflow adds a much more reasonable cost of $10.","category":"page"},{"location":"tutorial/decision_hazard/#Summary","page":"Here-and-now and hazard-decision","title":"Summary","text":"","category":"section"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"To summarize, the difference between here-and-now and wait-and-see variables is a modeling choice.","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"To create a here-and-now decision, add it as a state variable to the previous stage","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"In some cases, you'll need to add an additional \"first-stage\" problem to enable the model to choose an optimal value for the here-and-now decision variable. You do not need to do this if the first stage is deterministic. 
You must make sure that the subproblem is feasible for all possible incoming values of the here-and-now decision variable.","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"","category":"page"},{"location":"tutorial/decision_hazard/","page":"Here-and-now and hazard-decision","title":"Here-and-now and hazard-decision","text":"This page was generated using Literate.jl.","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"EditURL = \"pglib_opf.jl\"","category":"page"},{"location":"tutorial/pglib_opf/#Alternative-forward-models","page":"Alternative forward models","title":"Alternative forward models","text":"","category":"section"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"(Image: ) (Image: )","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"This example demonstrates how to train convex and non-convex models.","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"This example uses the following packages:","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"using SDDP\nimport Ipopt\nimport PowerModels\nimport Test","category":"page"},{"location":"tutorial/pglib_opf/#Formulation","page":"Alternative forward models","title":"Formulation","text":"","category":"section"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"For our model, we build a simple optimal power flow model with a single hydro-electric generator.","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"The formulation of our optimal power flow problem depends on model_type, which must be one of the PowerModels formulations.","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"function build_model(model_type)\n filename = joinpath(@__DIR__, \"pglib_opf_case5_pjm.m\")\n data = PowerModels.parse_file(filename)\n return SDDP.PolicyGraph(\n SDDP.UnicyclicGraph(0.95);\n sense = :Min,\n lower_bound = 0.0,\n optimizer = Ipopt.Optimizer,\n ) do sp, t\n power_model = PowerModels.instantiate_model(\n data,\n model_type,\n PowerModels.build_opf;\n jump_model = sp,\n )\n # Now add hydro power models. 
Assume that generator 5 is hydro, and the\n # rest are thermal.\n pg = power_model.var[:it][:pm][:nw][0][:pg][5]\n sp[:pg] = pg\n @variable(sp, x >= 0, SDDP.State, initial_value = 10.0)\n @variable(sp, deficit >= 0)\n @constraint(sp, balance, x.out == x.in - pg + deficit)\n @stageobjective(sp, objective_function(sp) + 1e6 * deficit)\n SDDP.parameterize(sp, [0, 2, 5]) do ω\n return SDDP.set_normalized_rhs(balance, ω)\n end\n return\n end\nend","category":"page"},{"location":"tutorial/pglib_opf/#Training-a-convex-model","page":"Alternative forward models","title":"Training a convex model","text":"","category":"section"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"We can build and train a convex approximation of the optimal power flow problem.","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"The problem with the convex model is that it does not accurately simulate the true dynamics of the problem. Therefore, it under-estimates the true cost of operation.","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"convex = build_model(PowerModels.DCPPowerModel)\nSDDP.train(convex; iteration_limit = 10)","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"To more accurately simulate the dynamics of the problem, a common approach is to write the cuts representing the policy to a file, and then read them into a non-convex model:","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"SDDP.write_cuts_to_file(convex, \"convex.cuts.json\")\nnon_convex = build_model(PowerModels.ACPPowerModel)\nSDDP.read_cuts_from_file(non_convex, \"convex.cuts.json\")","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"Now we can simulate non_convex to evaluate the policy.","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"result = SDDP.simulate(non_convex, 1)","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"A problem with reading and writing the cuts to file is that the cuts have been generated from trial points of the convex model. Therefore, the policy may be arbitrarily bad at points visited by the non-convex model.","category":"page"},{"location":"tutorial/pglib_opf/#Training-a-non-convex-model","page":"Alternative forward models","title":"Training a non-convex model","text":"","category":"section"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"We can also build and train a non-convex formulation of the optimal power flow problem.","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"The problem with the non-convex model is that because it is non-convex, SDDP.jl may find a sub-optimal policy. 
Therefore, it may over-estimate the true cost of operation.","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"non_convex = build_model(PowerModels.ACPPowerModel)\nSDDP.train(non_convex; iteration_limit = 10)\nresult = SDDP.simulate(non_convex, 1)","category":"page"},{"location":"tutorial/pglib_opf/#Combining-convex-and-non-convex-models","page":"Alternative forward models","title":"Combining convex and non-convex models","text":"","category":"section"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"To summarize, training with the convex model constructs cuts at points that may never be visited by the non-convex model, and training with the non-convex model may construct arbitrarily poor cuts because a key assumption of SDDP is convexity.","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"As a compromise, we can train a policy using a combination of the convex and non-convex models; we'll use the non-convex model to generate trial points on the forward pass, and we'll use the convex model to build cuts on the backward pass.","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"convex = build_model(PowerModels.DCPPowerModel)","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"non_convex = build_model(PowerModels.ACPPowerModel)","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"To do so, we train convex using the SDDP.AlternativeForwardPass forward pass, which simulates the model using non_convex, and we use SDDP.AlternativePostIterationCallback as a post-iteration callback, which copies cuts from the convex model back into the non_convex model.","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"SDDP.train(\n convex;\n forward_pass = SDDP.AlternativeForwardPass(non_convex),\n post_iteration_callback = SDDP.AlternativePostIterationCallback(non_convex),\n iteration_limit = 10,\n)","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"In practice, if we were to simulate non_convex now, we should obtain a better policy than either of the two previous approaches.","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"","category":"page"},{"location":"tutorial/pglib_opf/","page":"Alternative forward models","title":"Alternative forward models","text":"This page was generated using Literate.jl.","category":"page"},{"location":"","page":"Home","title":"Home","text":"CurrentModule = SDDP","category":"page"},{"location":"","page":"Home","title":"Home","text":"\"logo\"","category":"page"},{"location":"#Introduction","page":"Home","title":"Introduction","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"Welcome to the documentation for SDDP.jl, a package for solving large multistage convex stochastic programming problems using stochastic dual dynamic programming.","category":"page"},{"location":"","page":"Home","title":"Home","text":"SDDP.jl is built on 
JuMP, so it supports a number of open-source and commercial solvers, making it a powerful and flexible tool for stochastic optimization.","category":"page"},{"location":"","page":"Home","title":"Home","text":"The implementation of the stochastic dual dynamic programming algorithm in SDDP.jl is state of the art, and it includes support for a number of advanced features not commonly found in other implementations. This includes support for:","category":"page"},{"location":"","page":"Home","title":"Home","text":"infinite horizon problems\nconvex risk measures\nmixed-integer state and control variables\npartially observable stochastic processes.","category":"page"},{"location":"#Installation","page":"Home","title":"Installation","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"Install SDDP.jl as follows:","category":"page"},{"location":"","page":"Home","title":"Home","text":"julia> import Pkg\n\njulia> Pkg.add(\"SDDP\")","category":"page"},{"location":"#Resources-for-getting-started","page":"Home","title":"Resources for getting started","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"There are a few ways to get started with SDDP.jl:","category":"page"},{"location":"","page":"Home","title":"Home","text":"Become familiar with JuMP by reading the JuMP documentation\nRead the introductory tutorial An introduction to SDDP.jl\nBrowse some of the examples, such as Example: deterministic to stochastic","category":"page"},{"location":"","page":"Home","title":"Home","text":"If you need help, please open a GitHub issue.","category":"page"},{"location":"#How-the-documentation-is-structured","page":"Home","title":"How the documentation is structured","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"Having a high-level overview of how this documentation is structured will help you know where to look for certain things.","category":"page"},{"location":"","page":"Home","title":"Home","text":"Tutorials contains step-by-step explanations of how to use SDDP.jl. Once you've got SDDP.jl installed, start by reading An introduction to SDDP.jl.\nGuides contains \"how-to\" snippets that demonstrate specific topics within SDDP.jl. A good one to get started on is Debug a model.\nExplanation contains step-by-step explanations of the theory and algorithms that underpin SDDP.jl. If you want a basic understanding of the algorithm behind SDDP.jl, start with Introductory theory.\nExamples contain worked examples of various problems solved using SDDP.jl. A good one to get started on is the Hydro-thermal scheduling problem. In particular, it shows how to solve an infinite horizon problem.\nThe API Reference contains a complete list of the functions you can use in SDDP.jl. Look here if you want to know how to use a particular function.","category":"page"},{"location":"#Citing-SDDP.jl","page":"Home","title":"Citing SDDP.jl","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"If you use SDDP.jl, we ask that you please cite the following:","category":"page"},{"location":"","page":"Home","title":"Home","text":"@article{dowson_sddp.jl,\n\ttitle = {{SDDP}.jl: a {Julia} package for stochastic dual dynamic programming},\n\tjournal = {INFORMS Journal on Computing},\n\tauthor = {Dowson, O. 
and Kapelevich, L.},\n\tdoi = {https://doi.org/10.1287/ijoc.2020.0987},\n\tyear = {2021},\n\tvolume = {33},\n\tissue = {1},\n\tpages = {27-33},\n}","category":"page"},{"location":"","page":"Home","title":"Home","text":"Here is an earlier preprint.","category":"page"},{"location":"","page":"Home","title":"Home","text":"If you use the infinite horizon functionality, we ask that you please cite the following:","category":"page"},{"location":"","page":"Home","title":"Home","text":"@article{dowson_policy_graph,\n\ttitle = {The policy graph decomposition of multistage stochastic optimization problems},\n\tdoi = {https://doi.org/10.1002/net.21932},\n\tjournal = {Networks},\n\tauthor = {Dowson, O.},\n\tvolume = {76},\n\tissue = {1},\n\tpages = {3-23},\n\tyear = {2020}\n}","category":"page"},{"location":"","page":"Home","title":"Home","text":"Here is an earlier preprint.","category":"page"},{"location":"","page":"Home","title":"Home","text":"If you use the partially observable functionality, we ask that you please cite the following:","category":"page"},{"location":"","page":"Home","title":"Home","text":"@article{dowson_pomsp,\n\ttitle = {Partially observable multistage stochastic programming},\n\tdoi = {https://doi.org/10.1016/j.orl.2020.06.005},\n\tjournal = {Operations Research Letters},\n\tauthor = {Dowson, O. and Morton, D.P. and Pagnoncelli, B.K.},\n\tvolume = {48},\n\tissue = {4},\n\tpages = {505-512},\n\tyear = {2020}\n}","category":"page"},{"location":"","page":"Home","title":"Home","text":"Here is an earlier preprint.","category":"page"},{"location":"","page":"Home","title":"Home","text":"If you use the objective state functionality, we ask that you please cite the following:","category":"page"},{"location":"","page":"Home","title":"Home","text":"@article{downward_objective,\n\ttitle = {Stochastic dual dynamic programming with stagewise-dependent objective uncertainty},\n\tdoi = {https://doi.org/10.1016/j.orl.2019.11.002},\n\tjournal = {Operations Research Letters},\n\tauthor = {Downward, A. and Dowson, O. and Baucke, R.},\n\tvolume = {48},\n\tissue = {1},\n\tpages = {33-39},\n\tyear = {2020}\n}","category":"page"},{"location":"","page":"Home","title":"Home","text":"Here is an earlier preprint.","category":"page"},{"location":"","page":"Home","title":"Home","text":"If you use the entropic risk measure, we ask that you please cite the following:","category":"page"},{"location":"","page":"Home","title":"Home","text":"@article{dowson_entropic,\n\ttitle = {Incorporating convex risk measures into multistage stochastic programming algorithms},\n\tdoi = {https://doi.org/10.1007/s10479-022-04977-w},\n\tjournal = {Annals of Operations Research},\n\tauthor = {Dowson, O. and Morton, D.P. 
and Pagnoncelli, B.K.},\n\tyear = {2022},\n}","category":"page"},{"location":"","page":"Home","title":"Home","text":"Here is an earlier preprint.","category":"page"},{"location":"examples/all_blacks/","page":"Deterministic All Blacks","title":"Deterministic All Blacks","text":"EditURL = \"all_blacks.jl\"","category":"page"},{"location":"examples/all_blacks/#Deterministic-All-Blacks","page":"Deterministic All Blacks","title":"Deterministic All Blacks","text":"","category":"section"},{"location":"examples/all_blacks/","page":"Deterministic All Blacks","title":"Deterministic All Blacks","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/all_blacks/","page":"Deterministic All Blacks","title":"Deterministic All Blacks","text":"using SDDP, HiGHS, Test\n\nfunction all_blacks()\n # Number of time periods, number of seats, R_ij = revenue from selling seat\n # i at time j, offer_ij = whether an offer for seat i will come at time j\n (T, N, R, offer) = (3, 2, [3 3 6; 3 3 6], [1 1 0; 1 0 1])\n model = SDDP.LinearPolicyGraph(\n stages = T,\n sense = :Max,\n upper_bound = 100.0,\n optimizer = HiGHS.Optimizer,\n ) do sp, stage\n # Seat remaining?\n @variable(sp, 0 <= x[1:N] <= 1, SDDP.State, Bin, initial_value = 1)\n # Action: accept offer, or don't accept offer\n @variable(sp, accept_offer, Bin)\n # Balance on seats\n @constraint(\n sp,\n [i in 1:N],\n x[i].out == x[i].in - offer[i, stage] * accept_offer\n )\n @stageobjective(\n sp,\n sum(R[i, stage] * offer[i, stage] * accept_offer for i in 1:N)\n )\n end\n SDDP.train(model; duality_handler = SDDP.LagrangianDuality())\n @test SDDP.calculate_bound(model) ≈ 9.0\n return\nend\n\nall_blacks()","category":"page"},{"location":"examples/all_blacks/","page":"Deterministic All Blacks","title":"Deterministic All Blacks","text":"","category":"page"},{"location":"examples/all_blacks/","page":"Deterministic All Blacks","title":"Deterministic All Blacks","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/sldp_example_one/","page":"SLDP: example 1","title":"SLDP: example 1","text":"EditURL = \"sldp_example_one.jl\"","category":"page"},{"location":"examples/sldp_example_one/#SLDP:-example-1","page":"SLDP: example 1","title":"SLDP: example 1","text":"","category":"section"},{"location":"examples/sldp_example_one/","page":"SLDP: example 1","title":"SLDP: example 1","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/sldp_example_one/","page":"SLDP: example 1","title":"SLDP: example 1","text":"This example is derived from Section 4.2 of the paper: Ahmed, S., Cabral, F. G., & da Costa, B. F. P. (2019). Stochastic Lipschitz Dynamic Programming. Optimization Online. 
PDF","category":"page"},{"location":"examples/sldp_example_one/","page":"SLDP: example 1","title":"SLDP: example 1","text":"using SDDP, HiGHS, Test\n\nfunction sldp_example_one()\n model = SDDP.LinearPolicyGraph(\n stages = 8,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n ) do sp, t\n @variable(sp, x, SDDP.State, initial_value = 2.0)\n @variables(sp, begin\n x⁺ >= 0\n x⁻ >= 0\n 0 <= u <= 1, Bin\n ω\n end)\n @stageobjective(sp, 0.9^(t - 1) * (x⁺ + x⁻))\n @constraints(sp, begin\n x.out == x.in + 2 * u - 1 + ω\n x⁺ >= x.out\n x⁻ >= -x.out\n end)\n points = [\n -0.3089653673606697,\n -0.2718277412744214,\n -0.09611178608243474,\n 0.24645863921577763,\n 0.5204224537256875,\n ]\n return SDDP.parameterize(φ -> JuMP.fix(ω, φ), sp, [points; -points])\n end\n SDDP.train(model; log_frequency = 10)\n @test SDDP.calculate_bound(model) <= 1.1675\n return\nend\n\nsldp_example_one()","category":"page"},{"location":"examples/sldp_example_one/","page":"SLDP: example 1","title":"SLDP: example 1","text":"","category":"page"},{"location":"examples/sldp_example_one/","page":"SLDP: example 1","title":"SLDP: example 1","text":"This page was generated using Literate.jl.","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/#Simulate-using-a-different-sampling-scheme","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"","category":"section"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"DocTestSetup = quote\n using SDDP, HiGHS\nend","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"By default, SDDP.simulate will simulate the policy using the distributions of noise terms that were defined when the model was created. We call these in-sample simulations. However, in general the in-sample distributions are an approximation of some underlying probability model which we term the true process. 
Therefore, SDDP.jl makes it easy to simulate the policy using different probability distributions.","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"To demonstrate the different ways of simulating the policy, we're going to use the model from the tutorial Markovian policy graphs.","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"using SDDP, HiGHS\n\nΩ = [\n (inflow = 0.0, fuel_multiplier = 1.5),\n (inflow = 50.0, fuel_multiplier = 1.0),\n (inflow = 100.0, fuel_multiplier = 0.75)\n]\n\nmodel = SDDP.MarkovianPolicyGraph(\n transition_matrices = Array{Float64, 2}[\n [ 1.0 ]',\n [ 0.75 0.25 ],\n [ 0.75 0.25 ; 0.25 0.75 ]\n ],\n sense = :Min,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer\n) do subproblem, node\n # Unpack the stage and Markov index.\n t, markov_state = node\n # Define the state variable.\n @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n # Define the control variables.\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n inflow\n end)\n # Define the constraints\n @constraints(subproblem, begin\n volume.out == volume.in + inflow - hydro_generation - hydro_spill\n thermal_generation + hydro_generation == 150.0\n end)\n # Note how we can use `markov_state` to dispatch an `if` statement.\n probability = if markov_state == 1 # wet climate state\n [1/6, 1/3, 1/2]\n else # dry climate state\n [1/2, 1/3, 1/6]\n end\n\n fuel_cost = [50.0, 100.0, 150.0]\n SDDP.parameterize(subproblem, Ω, probability) do ω\n JuMP.fix(inflow, ω.inflow)\n @stageobjective(subproblem,\n ω.fuel_multiplier * fuel_cost[t] * thermal_generation)\n end\nend\n\nSDDP.train(model; iteration_limit = 10, print_level = 0);\n\nmodel\n\n# output\n\nA policy graph with 5 nodes.\n Node indices: (1, 1), (2, 1), (2, 2), (3, 1), (3, 2)","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/#In-sample-Monte-Carlo-simulation","page":"Simulate using a different sampling scheme","title":"In-sample Monte Carlo simulation","text":"","category":"section"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"To simulate the policy using the data defined when model was created, use SDDP.InSampleMonteCarlo.","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"simulations = SDDP.simulate(\n model, 20, sampling_scheme = SDDP.InSampleMonteCarlo()\n)\n\nsort(unique(\n [node[:noise_term] for simulation in simulations for node in simulation]\n))\n\n# output\n\n3-element Array{NamedTuple{(:inflow, :fuel_multiplier),Tuple{Float64,Float64}},1}:\n (inflow = 0.0, fuel_multiplier = 1.5)\n (inflow = 50.0, fuel_multiplier = 1.0)\n (inflow = 100.0, fuel_multiplier = 0.75)","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/#Out-of-sample-Monte-Carlo-simulation","page":"Simulate using a different sampling scheme","title":"Out-of-sample Monte Carlo 
simulation","text":"","category":"section"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"Instead of using the in-sample data, we can perform an out-of-sample simulation of the policy using the SDDP.OutOfSampleMonteCarlo sampling scheme.","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"For each node, the SDDP.OutOfSampleMonteCarlo needs to define a new distribution for the transition probabilities between nodes in the policy graph, and a new distribution for the stagewise independent noise terms.","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"note: Note\nThe support of the distribution for the stagewise independent noise terms does not have to be the same as the in-sample distributions.","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"sampling_scheme = SDDP.OutOfSampleMonteCarlo(model) do node\n stage, markov_state = node\n if stage == 0\n # Called from the root node. Transition to (1, 1) with probability 1.0.\n # Only return the list of children, _not_ a list of noise terms.\n return [SDDP.Noise((1, 1), 1.0)]\n elseif stage == 3\n # Called from the final node. Return an empty list for the children,\n # and a single, deterministic realization for the noise terms.\n children = SDDP.Noise[]\n noise_terms = [SDDP.Noise((inflow = 75.0, fuel_multiplier = 1.2), 1.0)]\n return children, noise_terms\n else\n # Called from a normal node. Return the in-sample distribution for the\n # noise terms, but modify the transition probabilities so that the\n # Markov switching probability is now 50%.\n probability = markov_state == 1 ? [1/6, 1/3, 1/2] : [1/2, 1/3, 1/6]\n noise_terms = [SDDP.Noise(ω, p) for (ω, p) in zip(Ω, probability)]\n children = [\n SDDP.Noise((stage + 1, 1), 0.5), SDDP.Noise((stage + 1, 2), 0.5)\n ]\n return children, noise_terms\n end\nend\n\nsimulations = SDDP.simulate(model, 1, sampling_scheme = sampling_scheme)\n\nsimulations[1][3][:noise_term]\n\n# output\n\n(inflow = 75.0, fuel_multiplier = 1.2)","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"Alternatively, if you only want to modify the stagewise independent noise terms, pass use_insample_transition = true.","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"sampling_scheme = SDDP.OutOfSampleMonteCarlo(\n model, use_insample_transition = true\n) do node\nstage, markov_state = node\n if stage == 3\n # Called from the final node. Return a single, deterministic\n # realization for the noise terms. Don't return the children because we\n # use the in-sample data.\n return [SDDP.Noise((inflow = 65.0, fuel_multiplier = 1.1), 1.0)]\n else\n # Called from a normal node. Return the in-sample distribution for the\n # noise terms. 
Don't return the children because we use the in-sample\n # data.\n probability = markov_state == 1 ? [1/6, 1/3, 1/2] : [1/2, 1/3, 1/6]\n return [SDDP.Noise(ω, p) for (ω, p) in zip(Ω, probability)]\n end\nend\n\nsimulations = SDDP.simulate(model, 1, sampling_scheme = sampling_scheme)\n\nsimulations[1][3][:noise_term]\n\n# output\n\n(inflow = 65.0, fuel_multiplier = 1.1)","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/#Historical-simulation","page":"Simulate using a different sampling scheme","title":"Historical simulation","text":"","category":"section"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"Instead of performing a Monte Carlo simulation like the previous tutorials, we may want to simulate one particular sequence of noise realizations. This historical simulation can also be conducted by passing a SDDP.Historical sampling scheme to the sampling_scheme keyword of the SDDP.simulate function.","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"We can confirm that the historical sequence of nodes was visited by querying the :node_index key of the simulation results.","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"simulations = SDDP.simulate(\n model,\n sampling_scheme = SDDP.Historical(\n [((1, 1), Ω[1]), ((2, 2), Ω[3]), ((3, 1), Ω[2])],\n ),\n)\n\n[stage[:node_index] for stage in simulations[1]]\n\n# output\n\n3-element Vector{Tuple{Int64, Int64}}:\n (1, 1)\n (2, 2)\n (3, 1)","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"You can also pass a vector of scenarios, which are sampled sequentially:","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"julia> SDDP.Historical(\n [\n [\n (1, (inflow = 65.0, fuel_multiplier = 1.1)),\n (2, (inflow = 10.0, fuel_multiplier = 1.4)), # Can be out-of-sample\n (3, (inflow = 65.0, fuel_multiplier = 1.1)),\n ],\n [\n (1, (inflow = 65.0, fuel_multiplier = 1.1)),\n (2, (inflow = 100.0, fuel_multiplier = 0.75)),\n (3, (inflow = 0.0, fuel_multiplier = 1.5)),\n ],\n ],\n )\nA Historical sampler with 2 scenarios sampled sequentially.","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"Or a vector of scenarios and a corresponding vector of probabilities so that the historical scenarios are sampled probabilistically:","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"julia> SDDP.Historical(\n [\n [\n (1, (inflow = 65.0, fuel_multiplier = 1.1)),\n (2, (inflow = 10.0, fuel_multiplier = 1.4)), # Can be out-of-sample\n (3, (inflow = 65.0, fuel_multiplier = 1.1)),\n ],\n [\n (1, (inflow = 65.0, fuel_multiplier = 1.1)),\n (2, (inflow = 
100.0, fuel_multiplier = 0.75)),\n (3, (inflow = 0.0, fuel_multiplier = 1.5)),\n ],\n ],\n [0.3, 0.7],\n )\nA Historical sampler with 2 scenarios sampled probabilistically.","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"tip: Tip\nYour sample space doesn't have to be a NamedTuple. It can be any Julia type! Use a Vector if that is easier, or define your own struct.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"EditURL = \"first_steps.jl\"","category":"page"},{"location":"tutorial/first_steps/#An-introduction-to-SDDP.jl","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"(Image: ) (Image: )","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"SDDP.jl is a solver for multistage stochastic optimization problems. By multistage, we mean problems in which an agent makes a sequence of decisions over time. By stochastic, we mean that the agent is making decisions in the presence of uncertainty that is gradually revealed over the multiple stages.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"tip: Tip\nMultistage stochastic programming has a lot in common with fields like stochastic optimal control, approximate dynamic programming, Markov decision processes, and reinforcement learning. If it helps, you can think of SDDP as Q-learning in which we approximate the value function using linear programming duality.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"This tutorial is in two parts. First, it is an introduction to the background notation and theory we need, and second, it solves a simple multistage stochastic programming problem.","category":"page"},{"location":"tutorial/first_steps/#What-is-a-node?","page":"An introduction to SDDP.jl","title":"What is a node?","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"A common feature of multistage stochastic optimization problems is that they model an agent controlling a system over time. To simplify things initially, we're going to start by describing what happens at an instant in time at which the agent makes a decision. Only after this will we extend our problem to multiple stages and the notion of time.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"A node is a place at which the agent makes a decision.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"tip: Tip\nFor readers with a stochastic programming background, \"node\" is synonymous with \"stage\" in this section. 
However, for reasons that will become clear shortly, there can be more than one \"node\" per instant in time, which is why we prefer the term \"node\" over \"stage.\"","category":"page"},{"location":"tutorial/first_steps/#States,-controls,-and-random-variables","page":"An introduction to SDDP.jl","title":"States, controls, and random variables","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The system that we are modeling can be described by three types of variables.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"State variables track a property of the system over time.\nEach node has an associated incoming state variable (the value of the state at the start of the node), and an outgoing state variable (the value of the state at the end of the node).\nExamples of state variables include the volume of water in a reservoir, the number of units of inventory in a warehouse, or the spatial position of a moving vehicle.\nBecause state variables track the system over time, each node must have the same set of state variables.\nWe denote state variables by the letter x for the incoming state variable and x^prime for the outgoing state variable.\nControl variables are actions taken (implicitly or explicitly) by the agent within a node which modify the state variables.\nExamples of control variables include releases of water from the reservoir, sales or purchasing decisions, and acceleration or braking of the vehicle.\nControl variables are local to a node i, and they can differ between nodes. For example, some control variables may be available within certain nodes.\nWe denote control variables by the letter u.\nRandom variables are finite, discrete, exogenous random variables that the agent observes at the start of a node, before the control variables are decided.\nExamples of random variables include rainfall inflow into a reservoir, probabilistic perishing of inventory, and steering errors in a vehicle.\nRandom variables are local to a node i, and they can differ between nodes. For example, some nodes may have random variables, and some nodes may not.\nWe denote random variables by the Greek letter omega and the sample space from which they are drawn by Omega_i. The probability of sampling omega is denoted p_omega for simplicity.\nImportantly, the random variable associated with node i is independent of the random variables in all other nodes.","category":"page"},{"location":"tutorial/first_steps/#Dynamics","page":"An introduction to SDDP.jl","title":"Dynamics","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"In a node i, the three variables are related by a transition function, which maps the incoming state, the controls, and the random variables to the outgoing state as follows: x^prime = T_i(x u omega).","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"As a result of entering a node i with the incoming state x, observing random variable omega, and choosing control u, the agent incurs a cost C_i(x u omega). (If the agent is a maximizer, this can be a profit, or a negative cost.) 
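To make the transition function and stage cost concrete, here is a minimal sketch of what T_i and C_i might look like for a single reservoir. This is purely illustrative: the field names (hydro_generation, hydro_spill, inflow, thermal_generation, fuel_cost) anticipate the hydro-thermal example later in this tutorial and are not part of the SDDP.jl API.

```julia
# Illustrative only: a transition function and stage cost for one reservoir.
# `u` and `ω` are assumed to be NamedTuples of controls and noise terms.
T(x, u, ω) = x - u.hydro_generation - u.hydro_spill + ω.inflow
C(x, u, ω) = u.fuel_cost * u.thermal_generation
```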
We call C_i the stage objective.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"To choose their control variables in node i, the agent uses a decision rule u = pi_i(x omega), which is a function that maps the incoming state variable and observation of the random variable to a control u. This control must satisfy some feasibility requirements u in U_i(x omega).","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Here is a schematic which we can use to visualize a single node:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"(Image: Hazard-decision node)","category":"page"},{"location":"tutorial/first_steps/#Policy-graphs","page":"An introduction to SDDP.jl","title":"Policy graphs","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Now that we have a node, we need to connect multiple nodes together to form a multistage stochastic program. We call the graph created by connecting nodes together a policy graph.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The simplest type of policy graph is a linear policy graph. Here's a linear policy graph with three nodes:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"(Image: Linear policy graph)","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Here we have dropped the notations inside each node and replaced them by a label (1, 2, and 3) to represent nodes i=1, i=2, and i=3.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"In addition to nodes 1, 2, and 3, there is also a root node (the circle), and three arcs. Each arc has an origin node and a destination node, like 1 => 2, and a corresponding probability of transitioning from the origin to the destination. Unless specified, we assume that the arc probabilities are uniform over the number of outgoing arcs. Thus, in this picture the arc probabilities are all 1.0.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"State variables flow along the arcs of the graph. Thus, the outgoing state variable x^prime from node 1 becomes the incoming state variable x to node 2, and so on.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"We denote the set of nodes by mathcalN, the root node by R, and the probability of transitioning from node i to node j by p_ij. (If no arc exists, then p_ij = 0.) We define the set of successors of node i as i^+ = j in mathcalN p_ij 0.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Each node in the graph corresponds to a place at which the agent makes a decision, and we call moments in time at which the agent makes a decision stages. By convention, we try to draw policy graphs from left-to-right, with the stages as columns. 
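As a concrete sketch of the linear policy graph just described, here is how it can be constructed in SDDP.jl, either with the SDDP.LinearGraph shortcut or by adding the root node, nodes, and arcs explicitly. The symbolic node names are illustrative; the hydro-thermal example later in this tutorial uses the shortcut form.

```julia
using SDDP

# The three-node linear policy graph via the shortcut constructor.
graph = SDDP.LinearGraph(3)

# The same graph built explicitly: a root node, three nodes, and three arcs,
# each with transition probability 1.0.
explicit_graph = SDDP.Graph(:root)
SDDP.add_node(explicit_graph, :node_1)
SDDP.add_node(explicit_graph, :node_2)
SDDP.add_node(explicit_graph, :node_3)
SDDP.add_edge(explicit_graph, :root => :node_1, 1.0)
SDDP.add_edge(explicit_graph, :node_1 => :node_2, 1.0)
SDDP.add_edge(explicit_graph, :node_2 => :node_3, 1.0)
```

Either graph can then be passed to SDDP.PolicyGraph when building a model.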
There can be more than one node in a stage! Here's an example of a structure we call Markovian policy graphs:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"(Image: Markovian policy graph)","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Here each column represents a moment in time, the squiggly lines represent stochastic rainfall, and the rows represent the world in two discrete states: El Niño and La Niña. In the El Niño states, the distribution of the rainfall random variable is different to the distribution of the rainfall random variable in the La Niña states, and there is some switching probability between the two states that can be modelled by a Markov chain.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Moreover, policy graphs can have cycles! This allows them to model infinite horizon problems. Here's another example, taken from the paper Dowson (2020):","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"(Image: POWDer policy graph)","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The columns represent time, and the rows represent different states of the world. In this case, the rows represent different prices that milk can be sold for at the end of each year. The squiggly lines denote a multivariate random variable that models the weekly amount of rainfall that occurs.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"note: Note\nThe sum of probabilities on the outgoing arcs of node i can be less than 1, i.e., sumlimits_jin i^+ p_ij le 1. What does this mean? One interpretation is that the probability is a discount factor. Another interpretation is that there is an implicit \"zero\" node that we have not modeled, with p_i0 = 1 - sumlimits_jin i^+ p_ij. 
This zero node has C_0(x u omega) = 0, and 0^+ = varnothing.","category":"page"},{"location":"tutorial/first_steps/#More-notation","page":"An introduction to SDDP.jl","title":"More notation","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Recall that each node i has a decision rule u = pi_i(x omega), which is a function that maps the incoming state variable and observation of the random variable to a control u.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The set of decision rules, with one element for each node in the policy graph, is called a policy.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The goal of the agent is to find a policy that minimizes the expected cost of starting at the root node with some initial condition x_R, and proceeding from node to node along the probabilistic arcs until they reach a node with no outgoing arcs (or it reaches an implicit \"zero\" node).","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"min_pi mathbbE_i in R^+ omega in Omega_iV_i^pi(x_R omega)","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"where","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"V_i^pi(x omega) = C_i(x u omega) + mathbbE_j in i^+ varphi in Omega_jV_j(x^prime varphi)","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"where u = pi_i(x omega) in U_i(x omega), and x^prime = T_i(x u omega).","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The expectations are a bit complicated, but they are equivalent to:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"mathbbE_j in i^+ varphi in Omega_jV_j(x^prime varphi) = sumlimits_j in i^+ p_ij sumlimits_varphi in Omega_j p_varphiV_j(x^prime varphi)","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"An optimal policy is the set of decision rules that the agent can use to make decisions and achieve the smallest expected cost.","category":"page"},{"location":"tutorial/first_steps/#Assumptions","page":"An introduction to SDDP.jl","title":"Assumptions","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"warning: Warning\nThis section is important!","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The space of problems you can model with this framework is very large. Too large, in fact, for us to form tractable solution algorithms for! 
Stochastic dual dynamic programming requires the following assumptions in order to work:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Assumption 1: finite nodes","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"There is a finite number of nodes in mathcalN.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Assumption 2: finite random variables","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The sample space Omega_i is finite and discrete for each node iinmathcalN.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Assumption 3: convex problems","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Given fixed omega, C_i(x u omega) is a convex function, T_i(x u omega) is linear, and U_i(x u omega) is a non-empty, bounded convex set with respect to x and u.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Assumption 4: no infinite loops","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"For all loops in the policy graph, the product of the arc transition probabilities around the loop is strictly less than 1.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Assumption 5: relatively complete recourse","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"This is a technical but important assumption. See Relatively complete recourse for more details.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"note: Note\nSDDP.jl relaxes assumption (3) to allow for integer state and control variables, but we won't go into the details here. Assumption (4) essentially means that we obtain a discounted-cost solution for infinite-horizon problems, instead of an average-cost solution; see Dowson (2020) for details.","category":"page"},{"location":"tutorial/first_steps/#Dynamic-programming-and-subproblems","page":"An introduction to SDDP.jl","title":"Dynamic programming and subproblems","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Now that we have formulated our problem, we need some ways of computing optimal decision rules. 
One way is to just use a heuristic like \"choose a control randomly from the set of feasible controls.\" However, such a policy is unlikely to be optimal.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"A better way of obtaining an optimal policy is to use Bellman's principle of optimality, a.k.a Dynamic Programming, and define a recursive subproblem as follows:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"beginaligned\nV_i(x omega) = minlimits_barx x^prime u C_i(barx u omega) + mathbbE_j in i^+ varphi in Omega_jV_j(x^prime varphi)\n x^prime = T_i(barx u omega) \n u in U_i(barx omega) \n barx = x\nendaligned","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Our decision rule, pi_i(x omega), solves this optimization problem and returns a u^* corresponding to an optimal solution.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"note: Note\nWe add barx as a decision variable, along with the fishing constraint barx = x for two reasons: it makes it obvious that formulating a problem with x times u results in a bilinear program instead of a linear program (see Assumption 3), and it simplifies the implementation of the SDDP algorithm.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"These subproblems are very difficult to solve exactly, because they involve recursive optimization problems with lots of nested expectations.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Therefore, instead of solving them exactly, SDDP.jl works by iteratively approximating the expectation term of each subproblem, which is also called the cost-to-go term. For now, you don't need to understand the details, other than that there is a nasty cost-to-go term that we deal with behind-the-scenes.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The subproblem view of a multistage stochastic program is also important, because it provides a convenient way of communicating the different parts of the broader problem, and it is how we will communicate the problem to SDDP.jl. All we need to do is drop the cost-to-go term and fishing constraint, and define a new subproblem SP as:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"beginaligned\ntextttSP_i(x omega) minlimits_barx x^prime u C_i(barx u omega) \n x^prime = T_i(barx u omega) \n u in U_i(barx omega)\nendaligned","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"note: Note\nWhen we talk about formulating a subproblem with SDDP.jl, this is the formulation we mean.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"We've retained the transition function and uncertainty set because they help to motivate the different components of the subproblem. However, in general, the subproblem can be more general. 
A better (less restrictive) representation might be:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"beginaligned\ntextttSP_i(x omega) minlimits_barx x^prime u C_i(barx x^prime u omega) \n (barx x^prime u) in mathcalX_i(omega)\nendaligned","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Note that the outgoing state variable can appear in the objective, and we can add constraints involving the incoming and outgoing state variables. It should be obvious how to map between the two representations.","category":"page"},{"location":"tutorial/first_steps/#Example:-hydro-thermal-scheduling","page":"An introduction to SDDP.jl","title":"Example: hydro-thermal scheduling","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Hydrothermal scheduling is the most common application of stochastic dual dynamic programming. To illustrate some of the basic functionality of SDDP.jl, we implement a very simple model of the hydrothermal scheduling problem.","category":"page"},{"location":"tutorial/first_steps/#Problem-statement","page":"An introduction to SDDP.jl","title":"Problem statement","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"We consider the problem of scheduling electrical generation over three weeks in order to meet a known demand of 150 MWh in each week.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"There are two generators: a thermal generator, and a hydro generator. In each week, the agent needs to decide how much energy to generate from thermal, and how much energy to generate from hydro.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The thermal generator has a short-run marginal cost of $50/MWh in the first stage, $100/MWh in the second stage, and $150/MWh in the third stage.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The hydro generator has a short-run marginal cost of $0/MWh.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The hydro generator draws water from a reservoir which has a maximum capacity of 200 MWh. (Although water is usually measured in m³, we measure it in the energy-equivalent MWh to simplify things. In practice, there is a conversion function between m³ flowing through the turbine and MWh.) At the start of the first time period, the reservoir is full.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"In addition to the ability to generate electricity by passing water through the hydroelectric turbine, the hydro generator can also spill water down a spillway (bypassing the turbine) in order to prevent the water from over-topping the dam. 
We assume that there is no cost of spillage.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"In addition to water leaving the reservoir, water that flows into the reservoir through rainfall or rivers are referred to as inflows. These inflows are uncertain, and are the cause of the main trade-off in hydro-thermal scheduling: the desire to use water now to generate cheap electricity, against the risk that future inflows will be low, leading to blackouts or expensive thermal generation.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"For our simple model, we assume that the inflows can be modelled by a discrete distribution with the three outcomes given in the following table:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"ω 0 50 100\nP(ω) 1/3 1/3 1/3","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The value of the noise (the random variable) is observed by the agent at the start of each stage. This makes the problem a wait-and-see or hazard-decision formulation.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The goal of the agent is to minimize the expected cost of generation over the three weeks.","category":"page"},{"location":"tutorial/first_steps/#Formulating-the-problem","page":"An introduction to SDDP.jl","title":"Formulating the problem","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Before going further, we need to load SDDP.jl:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"using SDDP","category":"page"},{"location":"tutorial/first_steps/#Graph-structure","page":"An introduction to SDDP.jl","title":"Graph structure","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"First, we need to identify the structure of the policy graph. From the problem statement, we want to model the problem over three weeks in weekly stages. Therefore, the policy graph is a linear graph with three stages:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"graph = SDDP.LinearGraph(3)","category":"page"},{"location":"tutorial/first_steps/#Building-the-subproblem","page":"An introduction to SDDP.jl","title":"Building the subproblem","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Next, we need to construct the associated subproblem for each node in graph. To do so, we need to provide SDDP.jl a function which takes two arguments. The first is subproblem::Model, which is an empty JuMP model. The second is node, which is the name of each node in the policy graph. If the graph is linear, SDDP defaults to naming the nodes using the integers in 1:T. 
Here's an example that we are going to flesh out over the next few paragraphs:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"function subproblem_builder(subproblem::Model, node::Int)\n # ... stuff to go here ...\n return subproblem\nend","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"warning: Warning\nIf you use a different type of graph, node may be a type different to Int. For example, in SDDP.MarkovianGraph, node is a Tuple{Int,Int}.","category":"page"},{"location":"tutorial/first_steps/#State-variables","page":"An introduction to SDDP.jl","title":"State variables","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The first part of the subproblem we need to identify are the state variables. Since we only have one reservoir, there is only one state variable, volume, the volume of water in the reservoir [MWh].","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The volume had bounds of [0, 200], and the reservoir was full at the start of time, so x_R = 200.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"We add state variables to our subproblem using JuMP's @variable macro. However, in addition to the usual syntax, we also pass SDDP.State, and we need to provide the initial value (x_R) using the initial_value keyword.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"function subproblem_builder(subproblem::Model, node::Int)\n # State variables\n @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n return subproblem\nend","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The syntax for adding a state variable is a little obtuse, because volume is not a single JuMP variable. Instead, volume is a struct with two fields, .in and .out, corresponding to the incoming and outgoing state variables respectively.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"note: Note\nWe don't need to add the fishing constraint barx = x; SDDP.jl does this automatically.","category":"page"},{"location":"tutorial/first_steps/#Control-variables","page":"An introduction to SDDP.jl","title":"Control variables","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The next part of the subproblem we need to identify are the control variables. 
The control variables for our problem are:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"thermal_generation: the quantity of energy generated from thermal [MWh/week]\nhydro_generation: the quantity of energy generated from hydro [MWh/week]\nhydro_spill: the volume of water spilled from the reservoir in each week [MWh/week]","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Each of these variables is non-negative.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"We add control variables to our subproblem as normal JuMP variables, using @variable or @variables:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"function subproblem_builder(subproblem::Model, node::Int)\n # State variables\n @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n # Control variables\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n end)\n return subproblem\nend","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"tip: Tip\nModeling is an art, and a tricky part of that art is figuring out which variables are state variables, and which are control variables. A good rule is: if you need a value of a control variable in some future node to make a decision, it is a state variable instead.","category":"page"},{"location":"tutorial/first_steps/#Random-variables","page":"An introduction to SDDP.jl","title":"Random variables","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The next step is to identify any random variables. In our example, we had","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"inflow: the quantity of water that flows into the reservoir each week [MWh/week]","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"To add an uncertain variable to the model, we create a new JuMP variable inflow, and then call the function SDDP.parameterize. 
The SDDP.parameterize function takes three arguments: the subproblem, a vector of realizations, and a corresponding vector of probabilities.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"function subproblem_builder(subproblem::Model, node::Int)\n # State variables\n @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n # Control variables\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n end)\n # Random variables\n @variable(subproblem, inflow)\n Ω = [0.0, 50.0, 100.0]\n P = [1 / 3, 1 / 3, 1 / 3]\n SDDP.parameterize(subproblem, Ω, P) do ω\n return JuMP.fix(inflow, ω)\n end\n return subproblem\nend","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Note how we use the JuMP function JuMP.fix to set the value of the inflow variable to ω.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"warning: Warning\nSDDP.parameterize can only be called once in each subproblem definition! If your random variable is multi-variate, read Add multi-dimensional noise terms.","category":"page"},{"location":"tutorial/first_steps/#Transition-function-and-constraints","page":"An introduction to SDDP.jl","title":"Transition function and constraints","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Now that we've identified our variables, we can define the transition function and the constraints.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"For our problem, the state variable is the volume of water in the reservoir. The volume of water decreases in response to water being used for hydro generation and spillage. So the transition function is: volume.out = volume.in - hydro_generation - hydro_spill + inflow. 
(Note how we use volume.in and volume.out to refer to the incoming and outgoing state variables.)","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"There is also a constraint that the total generation must sum to 150 MWh.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Both the transition function and any additional constraint are added using JuMP's @constraint and @constraints macro.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"function subproblem_builder(subproblem::Model, node::Int)\n # State variables\n @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n # Control variables\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n end)\n # Random variables\n @variable(subproblem, inflow)\n Ω = [0.0, 50.0, 100.0]\n P = [1 / 3, 1 / 3, 1 / 3]\n SDDP.parameterize(subproblem, Ω, P) do ω\n return JuMP.fix(inflow, ω)\n end\n # Transition function and constraints\n @constraints(\n subproblem,\n begin\n volume.out == volume.in - hydro_generation - hydro_spill + inflow\n demand_constraint, hydro_generation + thermal_generation == 150\n end\n )\n return subproblem\nend","category":"page"},{"location":"tutorial/first_steps/#Objective-function","page":"An introduction to SDDP.jl","title":"Objective function","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Finally, we need to add an objective function using @stageobjective. The objective of the agent is to minimize the cost of thermal generation. 
This is complicated by a fuel cost that depends on the node.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"One possibility is to use an if statement on node to define the correct objective:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"function subproblem_builder(subproblem::Model, node::Int)\n # State variables\n @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n # Control variables\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n end)\n # Random variables\n @variable(subproblem, inflow)\n Ω = [0.0, 50.0, 100.0]\n P = [1 / 3, 1 / 3, 1 / 3]\n SDDP.parameterize(subproblem, Ω, P) do ω\n return JuMP.fix(inflow, ω)\n end\n # Transition function and constraints\n @constraints(\n subproblem,\n begin\n volume.out == volume.in - hydro_generation - hydro_spill + inflow\n demand_constraint, hydro_generation + thermal_generation == 150\n end\n )\n # Stage-objective\n if node == 1\n @stageobjective(subproblem, 50 * thermal_generation)\n elseif node == 2\n @stageobjective(subproblem, 100 * thermal_generation)\n else\n @assert node == 3\n @stageobjective(subproblem, 150 * thermal_generation)\n end\n return subproblem\nend","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"A second possibility is to use an array of fuel costs, and use node to index the correct value:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"function subproblem_builder(subproblem::Model, node::Int)\n # State variables\n @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n # Control variables\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n end)\n # Random variables\n @variable(subproblem, inflow)\n Ω = [0.0, 50.0, 100.0]\n P = [1 / 3, 1 / 3, 1 / 3]\n SDDP.parameterize(subproblem, Ω, P) do ω\n return JuMP.fix(inflow, ω)\n end\n # Transition function and constraints\n @constraints(\n subproblem,\n begin\n volume.out == volume.in - hydro_generation - hydro_spill + inflow\n demand_constraint, hydro_generation + thermal_generation == 150\n end\n )\n # Stage-objective\n fuel_cost = [50, 100, 150]\n @stageobjective(subproblem, fuel_cost[node] * thermal_generation)\n return subproblem\nend","category":"page"},{"location":"tutorial/first_steps/#Constructing-the-model","page":"An introduction to SDDP.jl","title":"Constructing the model","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Now that we've written our subproblem, we need to construct the full model. For that, we're going to need a linear solver. Let's choose HiGHS:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"using HiGHS","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"warning: Warning\nIn larger problems, you should use a more robust commercial LP solver like Gurobi. 
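Swapping the solver only requires changing the optimizer keyword in the model constructors shown below. A hypothetical sketch, assuming a Gurobi licence and the Gurobi.jl package are installed:

```julia
# Hypothetical: pass Gurobi instead of HiGHS to the constructors shown below.
# Requires a Gurobi licence and the Gurobi.jl package.
using SDDP, Gurobi

model = SDDP.LinearPolicyGraph(
    subproblem_builder;
    stages = 3,
    sense = :Min,
    lower_bound = 0.0,
    optimizer = Gurobi.Optimizer,
)
```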
Read Words of warning for more details.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Then, we can create a full model using SDDP.PolicyGraph, passing our subproblem_builder function as the first argument, and our graph as the second:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"model = SDDP.PolicyGraph(\n subproblem_builder,\n graph;\n sense = :Min,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n)","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"sense: the optimization sense. Must be :Min or :Max.\nlower_bound: you must supply a valid bound on the objective. For our problem, we know that we cannot incur a negative cost so $0 is a valid lower bound.\noptimizer: This is borrowed directly from JuMP's Model constructor: Model(HiGHS.Optimizer)","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Because linear policy graphs are the most commonly used structure, we can use SDDP.LinearPolicyGraph instead of passing SDDP.LinearGraph(3) to SDDP.PolicyGraph.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"model = SDDP.LinearPolicyGraph(\n subproblem_builder;\n stages = 3,\n sense = :Min,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n)","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"There is also the option to use Julia's do syntax to avoid needing to define a subproblem_builder function separately:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"model = SDDP.LinearPolicyGraph(\n stages = 3,\n sense = :Min,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n) do subproblem, node\n # State variables\n @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n # Control variables\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n end)\n # Random variables\n @variable(subproblem, inflow)\n Ω = [0.0, 50.0, 100.0]\n P = [1 / 3, 1 / 3, 1 / 3]\n SDDP.parameterize(subproblem, Ω, P) do ω\n return JuMP.fix(inflow, ω)\n end\n # Transition function and constraints\n @constraints(\n subproblem,\n begin\n volume.out == volume.in - hydro_generation - hydro_spill + inflow\n demand_constraint, hydro_generation + thermal_generation == 150\n end\n )\n # Stage-objective\n if node == 1\n @stageobjective(subproblem, 50 * thermal_generation)\n elseif node == 2\n @stageobjective(subproblem, 100 * thermal_generation)\n else\n @assert node == 3\n @stageobjective(subproblem, 150 * thermal_generation)\n end\nend","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"info: Info\nJulia's do syntax is just a different way of passing an anonymous function inner to some function outer which takes inner as the first argument.
For example, given:outer(inner::Function, x, y) = inner(x, y)thenouter(1, 2) do x, y\n return x^2 + y^2\nendis equivalent to:outer((x, y) -> x^2 + y^2, 1, 2)For our purpose, inner is subproblem_builder, and outer is SDDP.PolicyGraph.","category":"page"},{"location":"tutorial/first_steps/#Training-a-policy","page":"An introduction to SDDP.jl","title":"Training a policy","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Now we have a model, which is a description of the policy graph, we need to train a policy. Models can be trained using the SDDP.train function. It accepts a number of keyword arguments. iteration_limit terminates the training after the provided number of iterations.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"SDDP.train(model; iteration_limit = 10)","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"There's a lot going on in this printout! Let's break it down.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The first section, \"problem,\" gives some problem statistics. In this example there are 3 nodes, 1 state variable, and 27 scenarios (3^3). We haven't solved this problem before so there are no existing cuts.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The \"options\" section lists some options we are using to solve the problem. For more information on the numerical stability report, read the Numerical stability report section.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The \"subproblem structure\" section also needs explaining. This looks at all of the nodes in the policy graph and reports the minimum and maximum number of variables and each constraint type in the corresponding subproblem. In this case each subproblem has 7 variables and various numbers of different constraint types. Note that the exact numbers may not correspond to the formulation as you wrote it, because SDDP.jl adds some extra variables for the cost-to-go function.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Then comes the iteration log, which is the main part of the printout. It has the following columns:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"iteration: the SDDP iteration\nsimulation: the cost of the single forward pass simulation for that iteration. This value is stochastic and is not guaranteed to improve over time. However, it's useful to check that the units are reasonable, and that it is not deterministic if you intended for the problem to be stochastic, etc.\nbound: this is a lower bound (upper if maximizing) for the value of the optimal policy. This should be monotonically improving (increasing if minimizing, decreasing if maximizing).\ntime (s): the total number of seconds spent solving so far\nsolves: the total number of subproblem solves to date. This can be very large!\npid: the ID of the processor used to solve that iteration. 
This should be 1 unless you are using parallel computation.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"In addition, if the first character of a line is †, then SDDP.jl experienced numerical issues during the solve, but successfully recovered.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"The printout finishes with some summary statistics:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"status: why did the solver stop?\ntotal time (s), best bound, and total solves are the values from the last iteration of the solve.\nsimulation ci: a confidence interval that estimates the quality of the policy from the Simulation column.\nnumeric issues: the number of iterations that experienced numerical issues.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"warning: Warning\nThe simulation ci result can be misleading if you run a small number of iterations, or if the initial simulations are very bad. On a more technical note, it is an in-sample simulation, which may not reflect the true performance of the policy. See Obtaining bounds for more details.","category":"page"},{"location":"tutorial/first_steps/#Obtaining-the-decision-rule","page":"An introduction to SDDP.jl","title":"Obtaining the decision rule","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"After training a policy, we can create a decision rule using SDDP.DecisionRule:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"rule = SDDP.DecisionRule(model; node = 1)","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Then, to evaluate the decision rule, we use SDDP.evaluate:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"solution = SDDP.evaluate(\n rule;\n incoming_state = Dict(:volume => 150.0),\n noise = 50.0,\n controls_to_record = [:hydro_generation, :thermal_generation],\n)","category":"page"},{"location":"tutorial/first_steps/#Simulating-the-policy","page":"An introduction to SDDP.jl","title":"Simulating the policy","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Once you have a trained policy, you can also simulate it using SDDP.simulate. The return value from simulate is a vector with one element for each replication. Each element is itself a vector, with one element for each stage. 
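As a rough sketch of how that nesting looks in practice (assuming the three-stage model trained above; the lengths shown in the comments are what that model would produce):

simulations = SDDP.simulate(model, 100)
length(simulations)     # 100: one element per replication
length(simulations[1])  # 3: one element per stage of the first replication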
Each element, corresponding to a particular stage in a particular replication, is a dictionary that records information from the simulation.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"simulations = SDDP.simulate(\n # The trained model to simulate.\n model,\n # The number of replications.\n 100,\n # A list of names to record the values of.\n [:volume, :thermal_generation, :hydro_generation, :hydro_spill],\n)\n\nreplication = 1\nstage = 2\nsimulations[replication][stage]","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Ignore many of the entries for now; they will be relevant later.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"One element of interest is :volume.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"outgoing_volume = map(simulations[1]) do node\n return node[:volume].out\nend","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Another is :thermal_generation.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"thermal_generation = map(simulations[1]) do node\n return node[:thermal_generation]\nend","category":"page"},{"location":"tutorial/first_steps/#Obtaining-bounds","page":"An introduction to SDDP.jl","title":"Obtaining bounds","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Because the optimal policy is stochastic, one common approach to quantify the quality of the policy is to construct a confidence interval for the expected cost by summing the stage objectives along each simulation.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"objectives = map(simulations) do simulation\n return sum(stage[:stage_objective] for stage in simulation)\nend\n\nμ, ci = SDDP.confidence_interval(objectives)\nprintln(\"Confidence interval: \", μ, \" ± \", ci)","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"This confidence interval is an estimate for an upper bound of the policy's quality. We can calculate the lower bound using SDDP.calculate_bound.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"println(\"Lower bound: \", SDDP.calculate_bound(model))","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"tip: Tip\nThe upper- and lower-bounds are reversed if maximizing, i.e., SDDP.calculate_bound. returns an upper bound.","category":"page"},{"location":"tutorial/first_steps/#Custom-recorders","page":"An introduction to SDDP.jl","title":"Custom recorders","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"In addition to simulating the primal values of variables, we can also pass custom recorder functions. 
Each of these functions takes one argument, the JuMP subproblem corresponding to each node. This function gets called after we have solved each node as we traverse the policy graph in the simulation.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"For example, the dual of the demand constraint (which we named demand_constraint) corresponds to the price we should charge for electricity, since it represents the cost of each additional unit of demand. To calculate this, we can go:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"simulations = SDDP.simulate(\n model,\n 1, ## Perform a single simulation\n custom_recorders = Dict{Symbol,Function}(\n :price => (sp::JuMP.Model) -> JuMP.dual(sp[:demand_constraint]),\n ),\n)\n\nprices = map(simulations[1]) do node\n return node[:price]\nend","category":"page"},{"location":"tutorial/first_steps/#Extracting-the-marginal-water-values","page":"An introduction to SDDP.jl","title":"Extracting the marginal water values","text":"","category":"section"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Finally, we can use SDDP.ValueFunction and SDDP.evaluate to obtain and evaluate the value function at different points in the state-space.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"note: Note\nBy \"value function\" we mean mathbbE_j in i^+ varphi in Omega_jV_j(x^prime varphi), not the function V_i(x omega).","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"First, we construct a value function from the first subproblem:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"V = SDDP.ValueFunction(model; node = 1)","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"Then we can evaluate V at a point:","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"cost, price = SDDP.evaluate(V, Dict(\"volume\" => 10))","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"This returns the cost-to-go (cost), and the gradient of the cost-to-go function with respect to each state variable.
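For instance, one way to see how the cost-to-go and the marginal water value change with storage is to evaluate V on a grid of volumes (a minimal sketch; the exact numbers depend on the cuts in the trained policy):

for volume in 0:50:200
    cost, price = SDDP.evaluate(V, Dict("volume" => volume))
    println("volume = ", volume, ": cost = ", cost, ", price = ", price)
end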
Note that since we are minimizing, the price has a negative sign: each additional unit of water leads to a decrease in the expected long-run cost.","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"","category":"page"},{"location":"tutorial/first_steps/","page":"An introduction to SDDP.jl","title":"An introduction to SDDP.jl","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/StochDynamicProgramming.jl_stock/","page":"StochDynamicProgramming: the stock problem","title":"StochDynamicProgramming: the stock problem","text":"EditURL = \"StochDynamicProgramming.jl_stock.jl\"","category":"page"},{"location":"examples/StochDynamicProgramming.jl_stock/#StochDynamicProgramming:-the-stock-problem","page":"StochDynamicProgramming: the stock problem","title":"StochDynamicProgramming: the stock problem","text":"","category":"section"},{"location":"examples/StochDynamicProgramming.jl_stock/","page":"StochDynamicProgramming: the stock problem","title":"StochDynamicProgramming: the stock problem","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/StochDynamicProgramming.jl_stock/","page":"StochDynamicProgramming: the stock problem","title":"StochDynamicProgramming: the stock problem","text":"This example comes from StochDynamicProgramming.jl.","category":"page"},{"location":"examples/StochDynamicProgramming.jl_stock/","page":"StochDynamicProgramming: the stock problem","title":"StochDynamicProgramming: the stock problem","text":"using SDDP, HiGHS, Test\n\nfunction stock_example()\n model = SDDP.PolicyGraph(\n SDDP.LinearGraph(5),\n lower_bound = -2,\n optimizer = HiGHS.Optimizer,\n ) do sp, stage\n @variable(sp, 0 <= state <= 1, SDDP.State, initial_value = 0.5)\n @variable(sp, 0 <= control <= 0.5)\n @variable(sp, ξ)\n @constraint(sp, state.out == state.in - control + ξ)\n SDDP.parameterize(sp, 0.0:1/30:0.3) do ω\n return JuMP.fix(ξ, ω)\n end\n @stageobjective(sp, (sin(3 * stage) - 1) * control)\n end\n SDDP.train(model; log_frequency = 10)\n @test SDDP.calculate_bound(model) ≈ -1.471 atol = 0.001\n simulation_results = SDDP.simulate(model, 1_000)\n @test length(simulation_results) == 1_000\n μ = SDDP.Statistics.mean(\n sum(data[:stage_objective] for data in simulation) for\n simulation in simulation_results\n )\n @test μ ≈ -1.471 atol = 0.05\n return\nend\n\nstock_example()","category":"page"},{"location":"examples/StochDynamicProgramming.jl_stock/","page":"StochDynamicProgramming: the stock problem","title":"StochDynamicProgramming: the stock problem","text":"","category":"page"},{"location":"examples/StochDynamicProgramming.jl_stock/","page":"StochDynamicProgramming: the stock problem","title":"StochDynamicProgramming: the stock problem","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/agriculture_mccardle_farm/","page":"The farm planning problem","title":"The farm planning problem","text":"EditURL = \"agriculture_mccardle_farm.jl\"","category":"page"},{"location":"examples/agriculture_mccardle_farm/#The-farm-planning-problem","page":"The farm planning problem","title":"The farm planning problem","text":"","category":"section"},{"location":"examples/agriculture_mccardle_farm/","page":"The farm planning problem","title":"The farm planning problem","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/agriculture_mccardle_farm/","page":"The farm planning problem","title":"The farm planning 
problem","text":"There are four stages. The first stage is a deterministic planning stage. The next three are wait-and-see operational stages. The uncertainty in the three operational stages is a Markov chain for weather. There are three Markov states: dry, normal, and wet.","category":"page"},{"location":"examples/agriculture_mccardle_farm/","page":"The farm planning problem","title":"The farm planning problem","text":"Inspired by R. McCardle, Farm management optimization. Masters thesis, University of Louisville, Louisville, Kentucky, United States of America (2009).","category":"page"},{"location":"examples/agriculture_mccardle_farm/","page":"The farm planning problem","title":"The farm planning problem","text":"All data, including short variable names, is taken from that thesis.","category":"page"},{"location":"examples/agriculture_mccardle_farm/","page":"The farm planning problem","title":"The farm planning problem","text":"using SDDP, HiGHS, Test\n\nfunction test_mccardle_farm_model()\n S = [ # cutting, stage\n 0 1 2\n 0 0 1\n 0 0 0\n ]\n t = [60, 60, 245] # days in period\n D = [210, 210, 858] # demand\n q = [ # selling price per bale\n [4.5 4.5 4.5; 4.5 4.5 4.5; 4.5 4.5 4.5],\n [5.5 5.5 5.5; 5.5 5.5 5.5; 5.5 5.5 5.5],\n [6.5 6.5 6.5; 6.5 6.5 6.5; 6.5 6.5 6.5],\n ]\n b = [ # predicted yield (bales/acres) from cutting i in weather j.\n 30 75 37.5\n 15 37.5 18.25\n 7.5 18.75 9.325\n ]\n w = 3000 # max storage\n C = [50 50 50; 50 50 50; 50 50 50] # cost to grow hay\n r = [ # Cost per bale of hay from cutting i during weather condition j.\n [5 5 5; 5 5 5; 5 5 5],\n [6 6 6; 6 6 6; 6 6 6],\n [7 7 7; 7 7 7; 7 7 7],\n ]\n M = 60.0 # max acreage for planting\n H = 0.0 # initial inventory\n V = [0.05, 0.05, 0.05] # inventory cost\n L = 3000.0 # max demand for hay\n\n graph = SDDP.MarkovianGraph([\n ones(Float64, 1, 1),\n [0.14 0.69 0.17],\n [0.14 0.69 0.17; 0.14 0.69 0.17; 0.14 0.69 0.17],\n [0.14 0.69 0.17; 0.14 0.69 0.17; 0.14 0.69 0.17],\n ])\n\n model = SDDP.PolicyGraph(\n graph,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n ) do subproblem, index\n stage, weather = index\n # ===================== State Variables =====================\n # Area planted.\n @variable(subproblem, 0 <= acres <= M, SDDP.State, initial_value = M)\n @variable(\n subproblem,\n bales[i = 1:3] >= 0,\n SDDP.State,\n initial_value = (i == 1 ? 
H : 0)\n )\n # ===================== Variables =====================\n @variables(subproblem, begin\n buy[1:3] >= 0 # Quantity of bales to buy from each cutting.\n sell[1:3] >= 0 # Quantity of bales to sell from each cutting.\n eat[1:3] >= 0 # Quantity of bales to eat from each cutting.\n pen_p[1:3] >= 0 # Penalties\n pen_n[1:3] >= 0 # Penalties\n end)\n # ===================== Constraints =====================\n if stage == 1\n @constraint(subproblem, acres.out <= acres.in)\n @constraint(subproblem, [i = 1:3], bales[i].in == bales[i].out)\n else\n @expression(\n subproblem,\n cut_ex[c = 1:3],\n bales[c].in + buy[c] - eat[c] - sell[c] + pen_p[c] - pen_n[c]\n )\n @constraints(\n subproblem,\n begin\n # Cannot plant more land than previously cropped.\n acres.out <= acres.in\n # In each stage we need to meet demand.\n sum(eat) >= D[stage-1]\n # We can buy and sell other cuttings.\n bales[stage-1].out ==\n cut_ex[stage-1] + acres.in * b[stage-1, weather]\n [c = 1:3; c != stage - 1], bales[c].out == cut_ex[c]\n # There is some maximum storage.\n sum(bales[i].out for i in 1:3) <= w\n # We can only sell what is in storage.\n [c = 1:3], sell[c] <= bales[c].in\n # Maximum sales quantity.\n sum(sell) <= L\n end\n )\n end\n # ===================== Stage objective =====================\n if stage == 1\n @stageobjective(subproblem, 0.0)\n else\n @stageobjective(\n subproblem,\n 1000 * (sum(pen_p) + sum(pen_n)) +\n # cost of growing\n C[stage-1, weather] * acres.in +\n sum(\n # inventory cost\n V[stage-1] * bales[cutting].in * t[stage-1] +\n # purchase cost\n r[cutting][stage-1, weather] * buy[cutting] +\n # feed cost\n S[cutting, stage-1] * eat[cutting] -\n # sell reward\n q[cutting][stage-1, weather] * sell[cutting] for\n cutting in 1:3\n )\n )\n end\n return\n end\n SDDP.train(model)\n @test SDDP.termination_status(model) == :simulation_stopping\n @test SDDP.calculate_bound(model) ≈ 4074.1391 atol = 1e-5\nend\n\ntest_mccardle_farm_model()","category":"page"},{"location":"examples/agriculture_mccardle_farm/","page":"The farm planning problem","title":"The farm planning problem","text":"","category":"page"},{"location":"examples/agriculture_mccardle_farm/","page":"The farm planning problem","title":"The farm planning problem","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/vehicle_location/","page":"Vehicle location","title":"Vehicle location","text":"EditURL = \"vehicle_location.jl\"","category":"page"},{"location":"examples/vehicle_location/#Vehicle-location","page":"Vehicle location","title":"Vehicle location","text":"","category":"section"},{"location":"examples/vehicle_location/","page":"Vehicle location","title":"Vehicle location","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/vehicle_location/","page":"Vehicle location","title":"Vehicle location","text":"This problem is a version of the Ambulance dispatch problem. A hospital is located at 0 on the number line that stretches from 0 to 100. Ambulance bases are located at points 20, 40, 60, 80, and 100. When not responding to a call, Ambulances must be located at a base, or the hospital. 
In this example there are three ambulances.","category":"page"},{"location":"examples/vehicle_location/","page":"Vehicle location","title":"Vehicle location","text":"Example location:","category":"page"},{"location":"examples/vehicle_location/","page":"Vehicle location","title":"Vehicle location","text":"H B B B B B\n0 ---- 20 ---- 40 ---- 60 ---- 80 ---- 100","category":"page"},{"location":"examples/vehicle_location/","page":"Vehicle location","title":"Vehicle location","text":"Each stage, a call comes in from somewhere on the number line. The agent must decide which ambulance to dispatch. They pay the cost of twice the driving distance. If an ambulance is not dispatched in a stage, the ambulance can be relocated to a different base in preparation for future calls. This incurs a cost of the driving distance.","category":"page"},{"location":"examples/vehicle_location/","page":"Vehicle location","title":"Vehicle location","text":"using SDDP\nimport HiGHS\nimport Test\n\nfunction vehicle_location_model(duality_handler)\n hospital_location = 0\n bases = vcat(hospital_location, [20, 40, 60, 80, 100])\n vehicles = [1, 2, 3]\n requests = 0:10:100\n shift_cost(src, dest) = abs(src - dest)\n function dispatch_cost(base, request)\n return 2 * (abs(request - hospital_location) + abs(request - base))\n end\n # Initial state of emergency vehicles at bases. All ambulances start at the\n # hospital.\n initial_state(b, v) = b == hospital_location ? 1.0 : 0.0\n model = SDDP.LinearPolicyGraph(\n stages = 10,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n ) do sp, t\n # Current location of each vehicle at each base.\n @variable(\n sp,\n 0 <= location[b = bases, v = vehicles] <= 1,\n SDDP.State,\n initial_value = initial_state(b, v)\n )\n @variables(sp, begin\n # Which vehicle is dispatched?\n 0 <= dispatch[bases, vehicles] <= 1, Bin\n # Shifting vehicles between bases: [src, dest, vehicle]\n 0 <= shift[bases, bases, vehicles] <= 1, Bin\n end)\n # Flow of vehicles in and out of bases:\n @expression(\n sp,\n base_balance[b in bases, v in vehicles],\n location[b, v].in - dispatch[b, v] - sum(shift[b, :, v]) +\n sum(shift[:, b, v])\n )\n @constraints(\n sp,\n begin\n # Only one vehicle dispatched to call.\n sum(dispatch) == 1\n # Can only dispatch vehicle from base if vehicle is at that base.\n [b in bases, v in vehicles],\n dispatch[b, v] <= location[b, v].in\n # Can only shift vehicle if vehicle is at that src base.\n [b in bases, v in vehicles],\n sum(shift[b, :, v]) <= location[b, v].in\n # Can only shift vehicle if vehicle is not being dispatched.\n [b in bases, v in vehicles],\n sum(shift[b, :, v]) + dispatch[b, v] <= 1\n # Can't shift to same base.\n [b in bases, v in vehicles], shift[b, b, v] == 0\n # Update states for non-home/non-hospital bases.\n [b in bases[2:end], v in vehicles],\n location[b, v].out == base_balance[b, v]\n # Update states for home/hospital bases.\n [v in vehicles],\n location[hospital_location, v].out ==\n base_balance[hospital_location, v] + sum(dispatch[:, v])\n end\n )\n SDDP.parameterize(sp, requests) do request\n @stageobjective(\n sp,\n sum(\n # Distance to travel from base to emergency and then to hospital.\n dispatch[b, v] * dispatch_cost(b, request) +\n # Distance travelled by vehicles relocating bases.\n sum(\n shift_cost(b, dest) * shift[b, dest, v] for\n dest in bases\n ) for b in bases, v in vehicles\n )\n )\n end\n end\n if get(ARGS, 1, \"\") == \"--write\"\n # Run `$ julia vehicle_location.jl --write` to update the benchmark\n # model directory\n model_dir = 
joinpath(@__DIR__, \"..\", \"..\", \"..\", \"benchmarks\", \"models\")\n SDDP.write_to_file(\n model,\n joinpath(model_dir, \"vehicle_location.sof.json.gz\");\n test_scenarios = 100,\n )\n exit(0)\n end\n SDDP.train(\n model;\n iteration_limit = 20,\n log_frequency = 10,\n cut_deletion_minimum = 100,\n duality_handler = duality_handler,\n )\n Test.@test SDDP.calculate_bound(model) >= 1000\n return\nend\n\n# TODO(odow): find out why this fails\n# vehicle_location_model(SDDP.ContinuousConicDuality())","category":"page"},{"location":"examples/vehicle_location/","page":"Vehicle location","title":"Vehicle location","text":"","category":"page"},{"location":"examples/vehicle_location/","page":"Vehicle location","title":"Vehicle location","text":"This page was generated using Literate.jl.","category":"page"},{"location":"guides/improve_computational_performance/#Improve-computational-performance","page":"Improve computational performance","title":"Improve computational performance","text":"","category":"section"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"SDDP is a computationally intensive algorithm. Here are some suggestions for how the computational performance can be improved.","category":"page"},{"location":"guides/improve_computational_performance/#Numerical-stability-(again)","page":"Improve computational performance","title":"Numerical stability (again)","text":"","category":"section"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"We've already discussed this in the Numerical stability section of Words of warning. But, it's so important that we're going to say it again: improving the problem scaling is one of the best ways to improve the numerical performance of your models.","category":"page"},{"location":"guides/improve_computational_performance/#Solver-selection","page":"Improve computational performance","title":"Solver selection","text":"","category":"section"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"The majority of the solution time is spent inside the low-level solvers. Choosing the right solver (and the associated settings) can lead to big speed-ups.","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"Choose a commercial solver.\nOptions include CPLEX, Gurobi, and Xpress. Using free solvers such as CLP and HiGHS isn't a viable approach for large problems.\nTry different solvers.","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"Even commercial solvers can have wildly different solution times. We've seen models on which CPLEX was 50% faster than Gurobi, and vice versa.","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"Experiment with different solver options.\nUsing the default settings is usually a good option. However, sometimes it can pay to change these.
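One way to experiment with such options is JuMP's optimizer_with_attributes, which attaches solver settings to the optimizer you pass to SDDP.jl. A minimal sketch, assuming Gurobi.jl is installed (Method = 1 selects Gurobi's dual simplex, the setting mentioned below):

using SDDP, Gurobi
model = SDDP.LinearPolicyGraph(
    stages = 2,
    lower_bound = 0.0,
    # Pass solver options alongside the optimizer constructor.
    optimizer = optimizer_with_attributes(Gurobi.Optimizer, "Method" => 1),
) do sp, t
    @variable(sp, x >= 0, SDDP.State, initial_value = 1)
    @stageobjective(sp, x.out)
end
SDDP.train(model; iteration_limit = 5)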
In particular, forcing solvers to use the dual simplex algorithm (e.g., Method=1 in Gurobi ) is usually a performance win.","category":"page"},{"location":"guides/improve_computational_performance/#Single-cut-vs.-multi-cut","page":"Improve computational performance","title":"Single-cut vs. multi-cut","text":"","category":"section"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"There are two competing ways that cuts can be created in SDDP: single-cut and multi-cut. By default, SDDP.jl uses the single-cut version of SDDP.","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"The performance of each method is problem-dependent. We recommend that you try both in order to see which one performs better. In general, the single-cut method works better when the number of realizations of the stagewise-independent random variable is large, whereas the multi-cut method works better on small problems. However, the multi-cut method can cause numerical stability problems, particularly if used in conjunction with objective or belief state variables.","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"You can switch between the methods by passing the relevant flag to cut_type in SDDP.train.","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"SDDP.train(model; cut_type = SDDP.SINGLE_CUT)\nSDDP.train(model; cut_type = SDDP.MULTI_CUT)","category":"page"},{"location":"guides/improve_computational_performance/#Parallelism","page":"Improve computational performance","title":"Parallelism","text":"","category":"section"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"SDDP.jl can take advantage of the parallel nature of modern computers to solve problems across multiple cores.","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"info: Info\nWe highly recommend that you read the Julia manual's section on parallel computing.","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"You can start Julia from a command line with N processors using the -p flag:","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"julia -p N","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"Alternatively, you can use the Distributed.jl package:","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"using Distributed\nDistributed.addprocs(N)","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational 
performance","text":"warning: Warning\nWorkers DON'T inherit their parent's Pkg environment. Therefore, if you started Julia with --project=/path/to/environment (or if you activated an environment from the REPL), you will need to put the following at the top of your script:using Distributed\n@everywhere begin\n import Pkg\n Pkg.activate(\"/path/to/environment\")\nend","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"Currently SDDP.jl supports to parallel schemes, SDDP.Serial and SDDP.Asynchronous. Instances of these parallel schemes should be passed to the parallel_scheme argument of SDDP.train and SDDP.simulate.","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"using SDDP, HiGHS\nmodel = SDDP.LinearPolicyGraph(\n stages = 2, lower_bound = 0, optimizer = HiGHS.Optimizer\n) do sp, t\n @variable(sp, x >= 0, SDDP.State, initial_value = 1)\n @stageobjective(sp, x.out)\nend\nSDDP.train(model; iteration_limit = 10, parallel_scheme = SDDP.Asynchronous())\nSDDP.simulate(model, 10; parallel_scheme = SDDP.Asynchronous())","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"There is a large overhead for using the asynchronous solver. Even if you choose asynchronous mode, SDDP.jl will start in serial mode while the initialization takes place. Therefore, in the log you will see that the initial iterations take place on the master thread (Proc. ID = 1), and it is only after while that the solve switches to full parallelism.","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"info: Info\nBecause of the large data communication requirements (all cuts have to be shared with all other cores), the solution time will not scale linearly with the number of cores.","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"info: Info\nGiven the same number of iterations, the policy obtained from asynchronous mode will be worse than the policy obtained from serial mode. However, the asynchronous solver can take significantly less time to compute the same number of iterations.","category":"page"},{"location":"guides/improve_computational_performance/#Data-movement","page":"Improve computational performance","title":"Data movement","text":"","category":"section"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"By default, data defined on the master process is not made available to the workers. 
Therefore, a model like the following:","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"data = 1\nmodel = SDDP.LinearPolicyGraph(stages = 2, lower_bound = 0) do sp, t\n @variable(sp, x >= 0, SDDP.State, initial_value = data)\n @stageobjective(sp, x.out)\nend","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"will result in an UndefVarError error like UndefVarError: data not defined.","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"There are three solutions for this problem.","category":"page"},{"location":"guides/improve_computational_performance/#Option-1:-declare-data-inside-the-build-function","page":"Improve computational performance","title":"Option 1: declare data inside the build function","text":"","category":"section"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"model = SDDP.LinearPolicyGraph(stages = 2) do sp, t\n data = 1\n @variable(sp, x >= 0, SDDP.State, initial_value = 1)\n @stageobjective(sp, x)\nend","category":"page"},{"location":"guides/improve_computational_performance/#Option-2:-use-@everywhere","page":"Improve computational performance","title":"Option 2: use @everywhere","text":"","category":"section"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"@everywhere begin\n data = 1\nend\nmodel = SDDP.LinearPolicyGraph(stages = 2) do sp, t\n @variable(sp, x >= 0, SDDP.State, initial_value = 1)\n @stageobjective(sp, x)\nend","category":"page"},{"location":"guides/improve_computational_performance/#Option-3:-build-the-model-in-a-function","page":"Improve computational performance","title":"Option 3: build the model in a function","text":"","category":"section"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"function build_model()\n data = 1\n return SDDP.LinearPolicyGraph(stages = 2) do sp, t\n @variable(sp, x >= 0, SDDP.State, initial_value = 1)\n @stageobjective(sp, x)\n end\nend\n\nmodel = build_model()","category":"page"},{"location":"guides/improve_computational_performance/#Initialization-hooks","page":"Improve computational performance","title":"Initialization hooks","text":"","category":"section"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"warning: Warning\nThis is important if you use Gurobi!","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"SDDP.Asynchronous accepts a pre-processing hook that is run on each worker process before the model is solved. The most useful situation is for solvers that need an initialization step. A good example is Gurobi, which can share an environment amongst all models on a worker.
Notably, this environment cannot be shared amongst workers, so defining one environment at the top of a script will fail!","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"To initialize a new environment on each worker, use the following:","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"SDDP.train(\n model;\n parallel_scheme = SDDP.Asynchronous() do m::SDDP.PolicyGraph\n env = Gurobi.Env()\n set_optimizer(m, () -> Gurobi.Optimizer(env))\n end,\n)","category":"page"},{"location":"examples/FAST_quickstart/","page":"FAST: the quickstart problem","title":"FAST: the quickstart problem","text":"EditURL = \"FAST_quickstart.jl\"","category":"page"},{"location":"examples/FAST_quickstart/#FAST:-the-quickstart-problem","page":"FAST: the quickstart problem","title":"FAST: the quickstart problem","text":"","category":"section"},{"location":"examples/FAST_quickstart/","page":"FAST: the quickstart problem","title":"FAST: the quickstart problem","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/FAST_quickstart/","page":"FAST: the quickstart problem","title":"FAST: the quickstart problem","text":"An implementation of the QuickStart example from FAST","category":"page"},{"location":"examples/FAST_quickstart/","page":"FAST: the quickstart problem","title":"FAST: the quickstart problem","text":"using SDDP, HiGHS, Test\n\nfunction fast_quickstart()\n model = SDDP.PolicyGraph(\n SDDP.LinearGraph(2),\n lower_bound = -5,\n optimizer = HiGHS.Optimizer,\n ) do sp, t\n @variable(sp, x >= 0, SDDP.State, initial_value = 0.0)\n if t == 1\n @stageobjective(sp, x.out)\n else\n @variable(sp, s >= 0)\n @constraint(sp, s <= x.in)\n SDDP.parameterize(sp, [2, 3]) do ω\n return JuMP.set_upper_bound(s, ω)\n end\n @stageobjective(sp, -2s)\n end\n end\n\n det = SDDP.deterministic_equivalent(model, HiGHS.Optimizer)\n set_silent(det)\n JuMP.optimize!(det)\n @test JuMP.objective_value(det) == -2\n\n SDDP.train(model)\n @test SDDP.calculate_bound(model) == -2\nend\n\nfast_quickstart()","category":"page"},{"location":"examples/FAST_quickstart/","page":"FAST: the quickstart problem","title":"FAST: the quickstart problem","text":"","category":"page"},{"location":"examples/FAST_quickstart/","page":"FAST: the quickstart problem","title":"FAST: the quickstart problem","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/StructDualDynProg.jl_prob5.2_3stages/","page":"StructDualDynProg: Problem 5.2, 3 stages","title":"StructDualDynProg: Problem 5.2, 3 stages","text":"EditURL = \"StructDualDynProg.jl_prob5.2_3stages.jl\"","category":"page"},{"location":"examples/StructDualDynProg.jl_prob5.2_3stages/#StructDualDynProg:-Problem-5.2,-3-stages","page":"StructDualDynProg: Problem 5.2, 3 stages","title":"StructDualDynProg: Problem 5.2, 3 stages","text":"","category":"section"},{"location":"examples/StructDualDynProg.jl_prob5.2_3stages/","page":"StructDualDynProg: Problem 5.2, 3 stages","title":"StructDualDynProg: Problem 5.2, 3 stages","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/StructDualDynProg.jl_prob5.2_3stages/","page":"StructDualDynProg: Problem 5.2, 3 stages","title":"StructDualDynProg: Problem 5.2, 3 stages","text":"This example comes from 
StochasticDualDynamicProgramming.jl.","category":"page"},{"location":"examples/StructDualDynProg.jl_prob5.2_3stages/","page":"StructDualDynProg: Problem 5.2, 3 stages","title":"StructDualDynProg: Problem 5.2, 3 stages","text":"using SDDP, HiGHS, Test\n\nfunction test_prob52_3stages()\n model = SDDP.LinearPolicyGraph(\n stages = 3,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n ) do sp, t\n n = 4\n m = 3\n i_c = [16, 5, 32, 2]\n C = [25, 80, 6.5, 160]\n T = [8760, 7000, 1500] / 8760\n D2 = [diff([0, 3919, 7329, 10315]) diff([0, 7086, 9004, 11169])]\n p2 = [0.9, 0.1]\n @variable(sp, x[i = 1:n] >= 0, SDDP.State, initial_value = 0.0)\n @variables(sp, begin\n y[1:n, 1:m] >= 0\n v[1:n] >= 0\n penalty >= 0\n ξ[j = 1:m]\n end)\n @constraints(sp, begin\n [i = 1:n], x[i].out == x[i].in + v[i]\n [i = 1:n], sum(y[i, :]) <= x[i].in\n [j = 1:m], sum(y[:, j]) + penalty >= ξ[j]\n end)\n @stageobjective(sp, i_c'v + C' * y * T + 1e5 * penalty)\n if t != 1 # no uncertainty in first stage\n SDDP.parameterize(sp, 1:size(D2, 2), p2) do ω\n for j in 1:m\n JuMP.fix(ξ[j], D2[j, ω])\n end\n end\n end\n if t == 3\n @constraint(sp, sum(v) == 0)\n end\n end\n\n det = SDDP.deterministic_equivalent(model, HiGHS.Optimizer)\n set_silent(det)\n JuMP.optimize!(det)\n @test JuMP.objective_value(det) ≈ 406712.49 atol = 0.1\n\n SDDP.train(model; log_frequency = 10)\n @test SDDP.calculate_bound(model) ≈ 406712.49 atol = 0.1\n return\nend\n\ntest_prob52_3stages()","category":"page"},{"location":"examples/StructDualDynProg.jl_prob5.2_3stages/","page":"StructDualDynProg: Problem 5.2, 3 stages","title":"StructDualDynProg: Problem 5.2, 3 stages","text":"","category":"page"},{"location":"examples/StructDualDynProg.jl_prob5.2_3stages/","page":"StructDualDynProg: Problem 5.2, 3 stages","title":"StructDualDynProg: Problem 5.2, 3 stages","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/infinite_horizon_hydro_thermal/","page":"Infinite horizon hydro-thermal","title":"Infinite horizon hydro-thermal","text":"EditURL = \"infinite_horizon_hydro_thermal.jl\"","category":"page"},{"location":"examples/infinite_horizon_hydro_thermal/#Infinite-horizon-hydro-thermal","page":"Infinite horizon hydro-thermal","title":"Infinite horizon hydro-thermal","text":"","category":"section"},{"location":"examples/infinite_horizon_hydro_thermal/","page":"Infinite horizon hydro-thermal","title":"Infinite horizon hydro-thermal","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/infinite_horizon_hydro_thermal/","page":"Infinite horizon hydro-thermal","title":"Infinite horizon hydro-thermal","text":"using SDDP, HiGHS, Test, Statistics\n\nfunction infinite_hydro_thermal(; cut_type)\n Ω = [\n (inflow = 0.0, demand = 7.5),\n (inflow = 5.0, demand = 5),\n (inflow = 10.0, demand = 2.5),\n ]\n graph = SDDP.Graph(\n :root_node,\n [:week],\n [(:root_node => :week, 1.0), (:week => :week, 0.9)],\n )\n model = SDDP.PolicyGraph(\n graph;\n lower_bound = 0,\n optimizer = HiGHS.Optimizer,\n ) do subproblem, node\n @variable(\n subproblem,\n 5.0 <= reservoir <= 15.0,\n SDDP.State,\n initial_value = 10.0\n )\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n spill >= 0\n inflow\n demand\n end)\n @constraints(\n subproblem,\n begin\n reservoir.out == reservoir.in - hydro_generation - spill + inflow\n hydro_generation + thermal_generation == demand\n end\n )\n @stageobjective(subproblem, 10 * spill + thermal_generation)\n SDDP.parameterize(subproblem, Ω) do ω\n JuMP.fix(inflow, 
ω.inflow)\n return JuMP.fix(demand, ω.demand)\n end\n end\n SDDP.train(\n model;\n cut_type = cut_type,\n log_frequency = 100,\n sampling_scheme = SDDP.InSampleMonteCarlo(terminate_on_cycle = true),\n cycle_discretization_delta = 0.1,\n )\n @test SDDP.calculate_bound(model) ≈ 119.167 atol = 0.1\n\n results = SDDP.simulate(model, 500)\n objectives =\n [sum(s[:stage_objective] for s in simulation) for simulation in results]\n sample_mean = round(Statistics.mean(objectives); digits = 2)\n sample_ci = round(1.96 * Statistics.std(objectives) / sqrt(500); digits = 2)\n println(\"Confidence_interval = $(sample_mean) ± $(sample_ci)\")\n @test sample_mean - sample_ci <= 119.167 <= sample_mean + sample_ci\n return\nend\n\ninfinite_hydro_thermal(cut_type = SDDP.SINGLE_CUT)\ninfinite_hydro_thermal(cut_type = SDDP.MULTI_CUT)","category":"page"},{"location":"examples/infinite_horizon_hydro_thermal/","page":"Infinite horizon hydro-thermal","title":"Infinite horizon hydro-thermal","text":"","category":"page"},{"location":"examples/infinite_horizon_hydro_thermal/","page":"Infinite horizon hydro-thermal","title":"Infinite horizon hydro-thermal","text":"This page was generated using Literate.jl.","category":"page"},{"location":"apireference/#api_reference_list","page":"API Reference","title":"API Reference","text":"","category":"section"},{"location":"apireference/#Policy-graphs","page":"API Reference","title":"Policy graphs","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"SDDP.Graph\nSDDP.add_node\nSDDP.add_edge\nSDDP.add_ambiguity_set\nSDDP.LinearGraph\nSDDP.MarkovianGraph\nSDDP.UnicyclicGraph\nSDDP.LinearPolicyGraph\nSDDP.MarkovianPolicyGraph\nSDDP.PolicyGraph","category":"page"},{"location":"apireference/#SDDP.Graph","page":"API Reference","title":"SDDP.Graph","text":"Graph(root_node::T) where T\n\nCreate an empty graph struture with the root node root_node.\n\nExample\n\njulia> graph = SDDP.Graph(0)\nRoot\n 0\nNodes\n {}\nArcs\n {}\n\njulia> graph = SDDP.Graph(:root)\nRoot\n root\nNodes\n {}\nArcs\n {}\n\njulia> graph = SDDP.Graph((0, 0))\nRoot\n (0, 0)\nNodes\n {}\nArcs\n {}\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.add_node","page":"API Reference","title":"SDDP.add_node","text":"add_node(graph::Graph{T}, node::T) where {T}\n\nAdd a node to the graph graph.\n\nExamples\n\njulia> graph = SDDP.Graph(:root);\n\njulia> SDDP.add_node(graph, :A)\n\njulia> graph\nRoot\n root\nNodes\n A\nArcs\n {}\n\njulia> graph = SDDP.Graph(0);\n\njulia> SDDP.add_node(graph, 2)\n\njulia> graph\nRoot\n 0\nNodes\n 2\nArcs\n {}\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.add_edge","page":"API Reference","title":"SDDP.add_edge","text":"add_edge(graph::Graph{T}, edge::Pair{T, T}, probability::Float64) where {T}\n\nAdd an edge to the graph graph.\n\nExamples\n\njulia> graph = SDDP.Graph(0);\n\njulia> SDDP.add_node(graph, 1)\n\njulia> SDDP.add_edge(graph, 0 => 1, 0.9)\n\njulia> graph\nRoot\n 0\nNodes\n 1\nArcs\n 0 => 1 w.p. 0.9\n\njulia> graph = SDDP.Graph(:root);\n\njulia> SDDP.add_node(graph, :A)\n\njulia> SDDP.add_edge(graph, :root => :A, 1.0)\n\njulia> graph\nRoot\n root\nNodes\n A\nArcs\n root => A w.p. 
1.0\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.add_ambiguity_set","page":"API Reference","title":"SDDP.add_ambiguity_set","text":"add_ambiguity_set(\n graph::Graph{T},\n set::Vector{T},\n lipschitz::Vector{Float64},\n) where {T}\n\nAdd set to the belief partition of graph.\n\nlipschitz is a vector of Lipschitz constants, with one element for each node in set. The Lipschitz constant is the maximum slope of the cost-to-go function with respect to the belief state associated with each node at any point in the state-space.\n\nExamples\n\njulia> graph = SDDP.LinearGraph(3)\nRoot\n 0\nNodes\n 1\n 2\n 3\nArcs\n 0 => 1 w.p. 1.0\n 1 => 2 w.p. 1.0\n 2 => 3 w.p. 1.0\n\njulia> SDDP.add_ambiguity_set(graph, [1, 2], [1e3, 1e2])\n\njulia> SDDP.add_ambiguity_set(graph, [3], [1e5])\n\njulia> graph\nRoot\n 0\nNodes\n 1\n 2\n 3\nArcs\n 0 => 1 w.p. 1.0\n 1 => 2 w.p. 1.0\n 2 => 3 w.p. 1.0\nPartitions\n {1, 2}\n {3}\n\n\n\n\n\nadd_ambiguity_set(graph::Graph{T}, set::Vector{T}, lipschitz::Float64)\n\nAdd set to the belief partition of graph.\n\nlipschitz is a Lipschitz constant for each node in set. The Lipschitz constant is the maximum slope of the cost-to-go function with respect to the belief state associated with each node at any point in the state-space.\n\nExamples\n\njulia> graph = SDDP.LinearGraph(3);\n\njulia> SDDP.add_ambiguity_set(graph, [1, 2], 1e3)\n\njulia> SDDP.add_ambiguity_set(graph, [3], 1e5)\n\njulia> graph\nRoot\n 0\nNodes\n 1\n 2\n 3\nArcs\n 0 => 1 w.p. 1.0\n 1 => 2 w.p. 1.0\n 2 => 3 w.p. 1.0\nPartitions\n {1, 2}\n {3}\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.LinearGraph","page":"API Reference","title":"SDDP.LinearGraph","text":"LinearGraph(stages::Int)\n\nCreate a linear graph with stages number of nodes.\n\nExamples\n\njulia> graph = SDDP.LinearGraph(3)\nRoot\n 0\nNodes\n 1\n 2\n 3\nArcs\n 0 => 1 w.p. 1.0\n 1 => 2 w.p. 1.0\n 2 => 3 w.p. 1.0\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.MarkovianGraph","page":"API Reference","title":"SDDP.MarkovianGraph","text":"MarkovianGraph(transition_matrices::Vector{Matrix{Float64}})\n\nConstruct a Markovian graph from the vector of transition matrices.\n\ntransition_matrices[t][i, j] gives the probability of transitioning from Markov state i in stage t - 1 to Markov state j in stage t.\n\nThe dimension of the first transition matrix should be (1, N), and transition_matrics[1][1, i] is the probability of transitioning from the root node to the Markov state i.\n\nExamples\n\njulia> graph = SDDP.MarkovianGraph([ones(1, 1), [0.5 0.5], [0.8 0.2; 0.2 0.8]])\nRoot\n (0, 1)\nNodes\n (1, 1)\n (2, 1)\n (2, 2)\n (3, 1)\n (3, 2)\nArcs\n (0, 1) => (1, 1) w.p. 1.0\n (1, 1) => (2, 1) w.p. 0.5\n (1, 1) => (2, 2) w.p. 0.5\n (2, 1) => (3, 1) w.p. 0.8\n (2, 1) => (3, 2) w.p. 0.2\n (2, 2) => (3, 1) w.p. 0.2\n (2, 2) => (3, 2) w.p. 
0.8\n\n\n\n\n\nMarkovianGraph(;\n stages::Int,\n transition_matrix::Matrix{Float64},\n root_node_transition::Vector{Float64},\n)\n\nConstruct a Markovian graph object with stages number of stages and time-independent Markov transition probabilities.\n\ntransition_matrix must be a square matrix, and the probability of transitioning from Markov state i in stage t to Markov state j in stage t + 1 is given by transition_matrix[i, j].\n\nroot_node_transition[i] is the probability of transitioning from the root node to Markov state i in the first stage.\n\nExamples\n\njulia> graph = SDDP.MarkovianGraph(;\n stages = 3,\n transition_matrix = [0.8 0.2; 0.2 0.8],\n root_node_transition = [0.5, 0.5],\n )\nRoot\n (0, 1)\nNodes\n (1, 1)\n (1, 2)\n (2, 1)\n (2, 2)\n (3, 1)\n (3, 2)\nArcs\n (0, 1) => (1, 1) w.p. 0.5\n (0, 1) => (1, 2) w.p. 0.5\n (1, 1) => (2, 1) w.p. 0.8\n (1, 1) => (2, 2) w.p. 0.2\n (1, 2) => (2, 1) w.p. 0.2\n (1, 2) => (2, 2) w.p. 0.8\n (2, 1) => (3, 1) w.p. 0.8\n (2, 1) => (3, 2) w.p. 0.2\n (2, 2) => (3, 1) w.p. 0.2\n (2, 2) => (3, 2) w.p. 0.8\n\n\n\n\n\nMarkovianGraph(\n simulator::Function;\n budget::Union{Int,Vector{Int}},\n scenarios::Int = 1000,\n)\n\nConstruct a Markovian graph by fitting Markov chain to scenarios generated by simulator().\n\nbudget is the total number of nodes in the resulting Markov chain. This can either be specified as a single Int, in which case we will attempt to intelligently distributed the nodes between stages. Alternatively, budget can be a Vector{Int}, which details the number of Markov state to have in each stage.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.UnicyclicGraph","page":"API Reference","title":"SDDP.UnicyclicGraph","text":"UnicyclicGraph(discount_factor::Float64; num_nodes::Int = 1)\n\nConstruct a graph composed of num_nodes nodes that form a single cycle, with a probability of discount_factor of continuing the cycle.\n\nExamples\n\njulia> graph = SDDP.UnicyclicGraph(0.9; num_nodes = 2)\nRoot\n 0\nNodes\n 1\n 2\nArcs\n 0 => 1 w.p. 1.0\n 1 => 2 w.p. 1.0\n 2 => 1 w.p. 0.9\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.LinearPolicyGraph","page":"API Reference","title":"SDDP.LinearPolicyGraph","text":"LinearPolicyGraph(builder::Function; stages::Int, kwargs...)\n\nCreate a linear policy graph with stages number of stages.\n\nKeyword arguments\n\nstages: the number of stages in the graph\nkwargs: other keyword arguments are passed to SDDP.PolicyGraph.\n\nExamples\n\njulia> SDDP.LinearPolicyGraph(; stages = 2, lower_bound = 0.0) do sp, t\n # ... build model ...\nend\nA policy graph with 2 nodes.\nNode indices: 1, 2\n\nis equivalent to\n\njulia> graph = SDDP.LinearGraph(2);\n\njulia> SDDP.PolicyGraph(graph; lower_bound = 0.0) do sp, t\n # ... build model ...\nend\nA policy graph with 2 nodes.\nNode indices: 1, 2\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.MarkovianPolicyGraph","page":"API Reference","title":"SDDP.MarkovianPolicyGraph","text":"MarkovianPolicyGraph(\n builder::Function;\n transition_matrices::Vector{Array{Float64,2}},\n kwargs...\n)\n\nCreate a Markovian policy graph based on the transition matrices given in transition_matrices.\n\nKeyword arguments\n\ntransition_matrices[t][i, j] gives the probability of transitioning from Markov state i in stage t - 1 to Markov state j in stage t. 
The dimension of the first transition matrix should be (1, N), and transition_matrices[1][1, i] is the probability of transitioning from the root node to the Markov state i.\nkwargs: other keyword arguments are passed to SDDP.PolicyGraph.\n\nSee also\n\nSee SDDP.MarkovianGraph for other ways of specifying a Markovian policy graph.\n\nSee SDDP.PolicyGraph for the other keyword arguments.\n\nExamples\n\njulia> SDDP.MarkovianPolicyGraph(;\n transition_matrices = [ones(1, 1), [0.5 0.5], [0.8 0.2; 0.2 0.8]],\n lower_bound = 0.0,\n ) do sp, node\n # ... build model ...\n end\nA policy graph with 5 nodes.\n Node indices: (1, 1), (2, 1), (2, 2), (3, 1), (3, 2)\n\nis equivalent to\n\njulia> graph = SDDP.MarkovianGraph([ones(1, 1), [0.5 0.5], [0.8 0.2; 0.2 0.8]]);\n\njulia> SDDP.PolicyGraph(graph; lower_bound = 0.0) do sp, t\n # ... build model ...\nend\nA policy graph with 5 nodes.\n Node indices: (1, 1), (2, 1), (2, 2), (3, 1), (3, 2)\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.PolicyGraph","page":"API Reference","title":"SDDP.PolicyGraph","text":"PolicyGraph(\n builder::Function,\n graph::Graph{T};\n sense::Symbol = :Min,\n lower_bound = -Inf,\n upper_bound = Inf,\n optimizer = nothing,\n) where {T}\n\nConstruct a policy graph based on the graph structure of graph. (See SDDP.Graph for details.)\n\nKeyword arguments\n\nsense: whether we are minimizing (:Min) or maximizing (:Max).\nlower_bound: if minimizing, a valid lower bound for the cost to go in all subproblems.\nupper_bound: if maximizing, a valid upper bound for the value to go in all subproblems.\noptimizer: the optimizer to use for each of the subproblems.\n\nExamples\n\nfunction builder(subproblem::JuMP.Model, index)\n # ... subproblem definition ...\nend\n\nmodel = PolicyGraph(\n builder,\n graph;\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n)\n\nOr, using the Julia do ... end syntax:\n\nmodel = PolicyGraph(\n graph;\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n) do subproblem, index\n # ... subproblem definitions ...\nend\n\n\n\n\n\n","category":"type"},{"location":"apireference/#Subproblem-definition","page":"API Reference","title":"Subproblem definition","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"@stageobjective\nSDDP.parameterize\nSDDP.add_objective_state\nSDDP.objective_state\nSDDP.Noise","category":"page"},{"location":"apireference/#SDDP.@stageobjective","page":"API Reference","title":"SDDP.@stageobjective","text":"@stageobjective(subproblem, expr)\n\nSet the stage-objective of subproblem to expr.\n\nExamples\n\n@stageobjective(subproblem, 2x + y)\n\n\n\n\n\n","category":"macro"},{"location":"apireference/#SDDP.parameterize","page":"API Reference","title":"SDDP.parameterize","text":"parameterize(\n modify::Function,\n subproblem::JuMP.Model,\n realizations::Vector{T},\n probability::Vector{Float64} = fill(1.0 / length(realizations))\n) where {T}\n\nAdd a parameterization function modify to subproblem. 
The modify function takes one argument and modifies subproblem based on the realization of the noise sampled from realizations with corresponding probabilities probability.\n\nIn order to conduct an out-of-sample simulation, modify should accept arguments that are not in realizations (but still of type T).\n\nExamples\n\nSDDP.parameterize(subproblem, [1, 2, 3], [0.4, 0.3, 0.3]) do ω\n JuMP.set_upper_bound(x, ω)\nend\n\n\n\n\n\nparameterize(node::Node, noise)\n\nParameterize node node with the noise noise.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.add_objective_state","page":"API Reference","title":"SDDP.add_objective_state","text":"add_objective_state(update::Function, subproblem::JuMP.Model; kwargs...)\n\nAdd an objective state variable to subproblem.\n\nRequired kwargs are:\n\ninitial_value: The initial value of the objective state variable at the root node.\nlipschitz: The Lipschitz constant of the objective state variable.\n\nSetting a tight value for the Lipschitz constant can significantly improve the speed of convergence.\n\nOptional kwargs are:\n\nlower_bound: A valid lower bound for the objective state variable. Can be -Inf.\nupper_bound: A valid upper bound for the objective state variable. Can be +Inf.\n\nSetting tight values for these optional variables can significantly improve the speed of convergence.\n\nIf the objective state is N-dimensional, each keyword argument must be an NTuple{N,Float64}. For example, initial_value = (0.0, 1.0).\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.objective_state","page":"API Reference","title":"SDDP.objective_state","text":"objective_state(subproblem::JuMP.Model)\n\nReturn the current objective state of the problem.\n\nCan only be called from SDDP.parameterize.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.Noise","page":"API Reference","title":"SDDP.Noise","text":"Noise(support, probability)\n\nAn atom of a discrete random variable at the point of support support and associated probability probability.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#Training-the-policy","page":"API Reference","title":"Training the policy","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"SDDP.numerical_stability_report\nSDDP.train\nSDDP.termination_status\nSDDP.write_cuts_to_file\nSDDP.read_cuts_from_file\nSDDP.write_log_to_csv","category":"page"},{"location":"apireference/#SDDP.numerical_stability_report","page":"API Reference","title":"SDDP.numerical_stability_report","text":"numerical_stability_report(\n [io::IO = stdout,]\n model::PolicyGraph;\n by_node::Bool = false,\n print::Bool = true,\n warn::Bool = true,\n)\n\nPrint a report identifying possible numeric stability issues.\n\nKeyword arguments\n\nIf by_node, print a report for each node in the graph.\nIf print, print to io.\nIf warn, warn if the coefficients may cause numerical issues.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.train","page":"API Reference","title":"SDDP.train","text":"SDDP.train(model::PolicyGraph; kwargs...)\n\nTrain the policy for model.\n\nKeyword arguments\n\niteration_limit::Int: number of iterations to conduct before termination.\ntime_limit::Float64: number of seconds to train before termination.\nstopping_rules: a vector of SDDP.AbstractStoppingRules. Defaults to SimulationStoppingRule.\nprint_level::Int: control the level of printing to the screen. Defaults to 1. 
Set to 0 to disable all printing.\nlog_file::String: filepath at which to write a log of the training progress. Defaults to SDDP.log.\nlog_frequency::Int: control the frequency with which the logging is outputted (iterations/log). Defaults to 1.\nlog_every_seconds::Float64: control the frequency with which the logging is outputted (seconds/log). Defaults to 0.0.\nrun_numerical_stability_report::Bool: generate (and print) a numerical stability report prior to solve. Defaults to true.\nrefine_at_similar_nodes::Bool: if SDDP can detect that two nodes have the same children, it can cheaply add a cut discovered at one to the other. In almost all cases this should be set to true.\ncut_deletion_minimum::Int: the minimum number of cuts to cache before deleting cuts from the subproblem. The impact on performance is solver specific; however, smaller values result in smaller subproblems (and therefore quicker solves), at the expense of more time spent performing cut selection.\nrisk_measure: the risk measure to use at each node. Defaults to Expectation.\nsampling_scheme: a sampling scheme to use on the forward pass of the algorithm. Defaults to InSampleMonteCarlo.\nbackward_sampling_scheme: a backward pass sampling scheme to use on the backward pass of the algorithm. Defaults to CompleteSampler.\ncut_type: choose between SDDP.SINGLE_CUT and SDDP.MULTI_CUT versions of SDDP.\ndashboard::Bool: open a visualization of the training over time. Defaults to false.\nparallel_scheme::AbstractParallelScheme: specify a scheme for solving in parallel. Defaults to Serial().\nforward_pass::AbstractForwardPass: specify a scheme to use for the forward passes.\nforward_pass_resampling_probability::Union{Nothing,Float64}: set to a value in (0, 1) to enable RiskAdjustedForwardPass. Defaults to nothing (disabled).\nadd_to_existing_cuts::Bool: set to true to allow training a model that was previously trained. Defaults to false.\nduality_handler::AbstractDualityHandler: specify a duality handler to use when creating cuts.\npost_iteration_callback::Function: a callback with the signature post_iteration_callback(::IterationResult) that is evaluated after each iteration of the algorithm.\n\nThere is also a special option for infinite horizon problems\n\ncycle_discretization_delta: the maximum distance between states allowed on the forward pass. This is for advanced users only and needs to be used in conjunction with a different sampling_scheme.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.termination_status","page":"API Reference","title":"SDDP.termination_status","text":"termination_status(model::PolicyGraph)::Symbol\n\nQuery the reason why the training stopped.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.write_cuts_to_file","page":"API Reference","title":"SDDP.write_cuts_to_file","text":"write_cuts_to_file(\n model::PolicyGraph{T},\n filename::String;\n node_name_parser::Function = string,\n) where {T}\n\nWrite the cuts that form the policy in model to filename in JSON format.\n\nnode_name_parser is a function which converts the name of each node into a string representation. 
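To make the long list of SDDP.train keyword arguments above concrete, the following is a minimal illustrative sketch (not taken from the SDDP.jl documentation); it assumes that model is an SDDP.PolicyGraph that has already been built with an optimizer, and the particular values are arbitrary:

```julia
# Illustrative only: `model` is assumed to be an existing SDDP.PolicyGraph.
SDDP.train(
    model;
    iteration_limit = 100,              # stop after 100 iterations
    log_frequency = 10,                 # print one log line every 10 iterations
    risk_measure = SDDP.Expectation(),  # the risk measure applied at each node
    cut_type = SDDP.SINGLE_CUT,         # use the single-cut variant of SDDP
)
```

Any keyword that is omitted keeps the default documented above.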
It has the signature: node_name_parser(::T)::String.\n\nSee also SDDP.read_cuts_from_file.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.read_cuts_from_file","page":"API Reference","title":"SDDP.read_cuts_from_file","text":"read_cuts_from_file(\n model::PolicyGraph{T},\n filename::String;\n node_name_parser::Function = _node_name_parser,\n) where {T}\n\nRead cuts (saved using SDDP.write_cuts_to_file) from filename into model.\n\nSince T can be an arbitrary Julia type, the conversion to JSON is lossy. When reading, read_cuts_from_file only supports T=Int, T=NTuple{N, Int}, and T=Symbol. If you have manually created a policy graph with a different node type T, provide a function node_name_parser with the signature node_name_parser(T, name::String)::T where {T} that returns the name of each node given the string name.\n\nIf node_name_parser returns nothing, those cuts are skipped.\n\nSee also SDDP.write_cuts_to_file.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.write_log_to_csv","page":"API Reference","title":"SDDP.write_log_to_csv","text":"write_log_to_csv(model::PolicyGraph, filename::String)\n\nWrite the log of the most recent training to a csv for post-analysis.\n\nAssumes that the model has been trained via SDDP.train.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#api_stopping_rules","page":"API Reference","title":"Stopping rules","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"SDDP.AbstractStoppingRule\nSDDP.stopping_rule_status\nSDDP.convergence_test\nSDDP.IterationLimit\nSDDP.TimeLimit\nSDDP.Statistical\nSDDP.BoundStalling\nSDDP.StoppingChain\nSDDP.SimulationStoppingRule\nSDDP.FirstStageStoppingRule","category":"page"},{"location":"apireference/#SDDP.AbstractStoppingRule","page":"API Reference","title":"SDDP.AbstractStoppingRule","text":"AbstractStoppingRule\n\nThe abstract type for the stopping-rule interface.\n\nYou need to define the following methods:\n\nSDDP.stopping_rule_status\nSDDP.convergence_test\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.stopping_rule_status","page":"API Reference","title":"SDDP.stopping_rule_status","text":"stopping_rule_status(::AbstractStoppingRule)::Symbol\n\nReturn a symbol describing the stopping rule.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.convergence_test","page":"API Reference","title":"SDDP.convergence_test","text":"convergence_test(\n model::PolicyGraph,\n log::Vector{Log},\n ::AbstractStoppingRule,\n)::Bool\n\nReturn a Bool indicating if the algorithm should terminate the training.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.IterationLimit","page":"API Reference","title":"SDDP.IterationLimit","text":"IterationLimit(limit::Int)\n\nTerminate the algorithm after limit number of iterations.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.TimeLimit","page":"API Reference","title":"SDDP.TimeLimit","text":"TimeLimit(limit::Float64)\n\nTerminate the algorithm after limit seconds of computation.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.Statistical","page":"API Reference","title":"SDDP.Statistical","text":"Statistical(;\n num_replications,\n iteration_period = 1,\n z_score = 1.96,\n verbose = true,\n)\n\nPerform an in-sample Monte Carlo simulation of the policy with num_replications replications every iteration_period iterations. 
Terminate if the deterministic bound (lower if minimizing) falls into the confidence interval for the mean of the simulated cost. If verbose = true, print the confidence interval.\n\nNote that this test assumes that the simulated values are normally distributed. In infinite horizon models, this is almost never the case. The distribution is usually closer to exponential or log-normal.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.BoundStalling","page":"API Reference","title":"SDDP.BoundStalling","text":"BoundStalling(num_previous_iterations::Int, tolerance::Float64)\n\nTerminate the algorithm once the deterministic bound (lower if minimizing, upper if maximizing) fails to improve by more than tolerance in absolute terms for more than num_previous_iterations consecutive iterations, provided it has improved relative to the bound after the first iteration.\n\nChecking for an improvement relative to the first iteration avoids early termination in a situation where the bound fails to improve for the first N iterations. This frequently happens in models with a large number of stages, where it takes time for the cuts to propagate backward enough to modify the bound of the root node.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.StoppingChain","page":"API Reference","title":"SDDP.StoppingChain","text":"StoppingChain(rules::AbstractStoppingRule...)\n\nTerminate once all of the rules are satisfied.\n\nThis stopping rule short-circuits, so subsequent rules are only tested if the previous rules pass.\n\nExamples\n\nA stopping rule that runs 100 iterations, then checks for the bound stalling:\n\nStoppingChain(IterationLimit(100), BoundStalling(5, 0.1))\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.SimulationStoppingRule","page":"API Reference","title":"SDDP.SimulationStoppingRule","text":"SimulationStoppingRule(;\n sampling_scheme::AbstractSamplingScheme = SDDP.InSampleMonteCarlo(),\n replications::Int = -1,\n period::Int = -1,\n distance_tol::Float64 = 1e-2,\n bound_tol::Float64 = 1e-4,\n)\n\nTerminate the algorithm using a mix of heuristics. Unless you know otherwise, this is typically a good default.\n\nTermination criteria\n\nFirst, we check that the deterministic bound has stabilized. That is, over the last five iterations, the deterministic bound has changed by less than an absolute or relative tolerance of bound_tol.\n\nThen, if we have not done one in the last period iterations, we perform a primal simulation of the policy using replications out-of-sample realizations from sampling_scheme. The realizations are stored and re-used in each simulation. From each simulation, we record the value of the stage objective. We terminate the policy if each of the trajectories in two consecutive simulations differ by less than distance_tol.\n\nBy default, replications and period are -1, and SDDP.jl will guess good values for these. 
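As an illustration of combining the stopping rules described above, here is a hedged sketch (not from the SDDP.jl documentation); it assumes model is an existing PolicyGraph and that training stops as soon as any rule in the stopping_rules vector is satisfied:

```julia
# Illustrative only: stop after one hour, or once the bound has stalled
# after an initial burn-in of 100 iterations.
SDDP.train(
    model;
    stopping_rules = [
        SDDP.TimeLimit(3600.0),            # one hour of wall-clock time
        SDDP.StoppingChain(
            SDDP.IterationLimit(100),      # run at least 100 iterations ...
            SDDP.BoundStalling(5, 1e-3),   # ... then stop if the bound stalls
        ),
    ],
)
```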
Over-ride the default behavior by setting an appropriate value.\n\nExample\n\nSDDP.train(model; stopping_rules = [SimulationStoppingRule()])\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.FirstStageStoppingRule","page":"API Reference","title":"SDDP.FirstStageStoppingRule","text":"FirstStageStoppingRule(; atol::Float64 = 1e-3, iterations::Int = 50)\n\nTerminate the algorithm when the outgoing values of the first-stage state variables have not changed by more than atol for iterations number of consecutive iterations.\n\nExample\n\nSDDP.train(model; stopping_rules = [FirstStageStoppingRule()])\n\n\n\n\n\n","category":"type"},{"location":"apireference/#Sampling-schemes","page":"API Reference","title":"Sampling schemes","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"SDDP.AbstractSamplingScheme\nSDDP.sample_scenario\nSDDP.InSampleMonteCarlo\nSDDP.OutOfSampleMonteCarlo\nSDDP.Historical\nSDDP.PSRSamplingScheme\nSDDP.SimulatorSamplingScheme","category":"page"},{"location":"apireference/#SDDP.AbstractSamplingScheme","page":"API Reference","title":"SDDP.AbstractSamplingScheme","text":"AbstractSamplingScheme\n\nThe abstract type for the sampling-scheme interface.\n\nYou need to define the following methods:\n\nSDDP.sample_scenario\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.sample_scenario","page":"API Reference","title":"SDDP.sample_scenario","text":"sample_scenario(graph::PolicyGraph{T}, ::AbstractSamplingScheme) where {T}\n\nSample a scenario from the policy graph graph based on the sampling scheme.\n\nReturns ::Tuple{Vector{Tuple{T, <:Any}}, Bool}, where the first element is the scenario, and the second element is a Boolean flag indicating if the scenario was terminated due to the detection of a cycle.\n\nThe scenario is a list of tuples (type Vector{Tuple{T, <:Any}}) where the first component of each tuple is the index of the node, and the second component is the stagewise-independent noise term observed in that node.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.InSampleMonteCarlo","page":"API Reference","title":"SDDP.InSampleMonteCarlo","text":"InSampleMonteCarlo(;\n max_depth::Int = 0,\n terminate_on_cycle::Function = false,\n terminate_on_dummy_leaf::Function = true,\n rollout_limit::Function = (i::Int) -> typemax(Int),\n initial_node::Any = nothing,\n)\n\nA Monte Carlo sampling scheme using the in-sample data from the policy graph definition.\n\nIf terminate_on_cycle, terminate the forward pass once a cycle is detected. If max_depth > 0, return once max_depth nodes have been sampled. If terminate_on_dummy_leaf, terminate the forward pass with 1 - probability of sampling a child node.\n\nNote that if terminate_on_cycle = false and terminate_on_dummy_leaf = false then max_depth must be set > 0.\n\nControl which node the trajectories start from using initial_node. If it is left as nothing, the root node is used as the starting node.\n\nYou can use rollout_limit to set iteration specific depth limits. 
For example:\n\nInSampleMonteCarlo(rollout_limit = i -> 2 * i)\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.OutOfSampleMonteCarlo","page":"API Reference","title":"SDDP.OutOfSampleMonteCarlo","text":"OutOfSampleMonteCarlo(\n f::Function,\n graph::PolicyGraph;\n use_insample_transition::Bool = false,\n max_depth::Int = 0,\n terminate_on_cycle::Bool = false,\n terminate_on_dummy_leaf::Bool = true,\n rollout_limit::Function = i -> typemax(Int),\n initial_node = nothing,\n)\n\nCreate a Monte Carlo sampler using out-of-sample probabilities and/or supports for the stagewise-independent noise terms, and out-of-sample probabilities for the node-transition matrix.\n\nf is a function that takes the name of a node and returns a tuple containing a vector of new SDDP.Noise terms for the children of that node, and a vector of new SDDP.Noise terms for the stagewise-independent noise.\n\nIf f is called with the name of the root node (e.g., 0 in a linear policy graph, (0, 1) in a Markovian Policy Graph), then return a vector of SDDP.Noise for the children of the root node.\n\nIf use_insample_transition, the in-sample transition probabilities will be used. Therefore, f should only return a vector of the stagewise-independent noise terms, and f will not be called for the root node.\n\nIf terminate_on_cycle, terminate the forward pass once a cycle is detected. If max_depth > 0, return once max_depth nodes have been sampled. If terminate_on_dummy_leaf, terminate the forward pass with 1 - probability of sampling a child node.\n\nNote that if terminate_on_cycle = false and terminate_on_dummy_leaf = false then max_depth must be set > 0.\n\nControl which node the trajectories start from using initial_node. If it is left as nothing, the root node is used as the starting node.\n\nIf a node is deterministic, pass [SDDP.Noise(nothing, 1.0)] as the vector of noise terms.\n\nYou can use rollout_limit to set iteration specific depth limits. For example:\n\nOutOfSampleMonteCarlo(rollout_limit = i -> 2 * i)\n\nExamples\n\nGiven linear policy graph graph with T stages:\n\nsampler = OutOfSampleMonteCarlo(graph) do node\n if node == 0\n return [SDDP.Noise(1, 1.0)]\n else\n noise_terms = [SDDP.Noise(node, 0.3), SDDP.Noise(node + 1, 0.7)]\n children = node < T ? 
[SDDP.Noise(node + 1, 0.9)] : SDDP.Noise{Int}[]\n return children, noise_terms\n end\nend\n\nGiven linear policy graph graph with T stages:\n\nsampler = OutOfSampleMonteCarlo(graph, use_insample_transition=true) do node\n return [SDDP.Noise(node, 0.3), SDDP.Noise(node + 1, 0.7)]\nend\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.Historical","page":"API Reference","title":"SDDP.Historical","text":"Historical(\n scenarios::Vector{Vector{Tuple{T,S}}},\n probability::Vector{Float64};\n terminate_on_cycle::Bool = false,\n) where {T,S}\n\nA sampling scheme that samples a scenario from the vector of scenarios scenarios according to probability.\n\nExamples\n\nHistorical(\n [\n [(1, 0.5), (2, 1.0), (3, 0.5)],\n [(1, 0.5), (2, 0.0), (3, 1.0)],\n [(1, 1.0), (2, 0.0), (3, 0.0)]\n ],\n [0.2, 0.5, 0.3],\n)\n\n\n\n\n\nHistorical(\n scenarios::Vector{Vector{Tuple{T,S}}};\n terminate_on_cycle::Bool = false,\n) where {T,S}\n\nA deterministic sampling scheme that iterates through the vector of provided scenarios.\n\nExamples\n\nHistorical([\n [(1, 0.5), (2, 1.0), (3, 0.5)],\n [(1, 0.5), (2, 0.0), (3, 1.0)],\n [(1, 1.0), (2, 0.0), (3, 0.0)],\n])\n\n\n\n\n\nHistorical(\n scenario::Vector{Tuple{T,S}};\n terminate_on_cycle::Bool = false,\n) where {T,S}\n\nA deterministic sampling scheme that always samples scenario.\n\nExamples\n\nHistorical([(1, 0.5), (2, 1.5), (3, 0.75)])\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.PSRSamplingScheme","page":"API Reference","title":"SDDP.PSRSamplingScheme","text":"PSRSamplingScheme(N::Int; sampling_scheme = InSampleMonteCarlo())\n\nA sampling scheme with N scenarios, similar to how PSR does it.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.SimulatorSamplingScheme","page":"API Reference","title":"SDDP.SimulatorSamplingScheme","text":"SimulatorSamplingScheme(simulator::Function)\n\nCreate a sampling scheme based on a univariate scenario generator simulator, which returns a Vector{Float64} when called with no arguments like simulator().\n\nThis sampling scheme must be used with a Markovian graph constructed from the same simulator.\n\nThe sample space for SDDP.parameterize must be a tuple in which the first element is the Markov state.\n\nThis sampling scheme generates a new scenario by calling simulator(), and then picking the sequence of nodes in the Markovian graph that is closest to the new trajectory.\n\nExample\n\njulia> using SDDP\n\njulia> import HiGHS\n\njulia> simulator() = cumsum(rand(10))\nsimulator (generic function with 1 method)\n\njulia> model = SDDP.PolicyGraph(\n SDDP.MarkovianGraph(simulator; budget = 20, scenarios = 100);\n sense = :Max,\n upper_bound = 12,\n optimizer = HiGHS.Optimizer,\n ) do sp, node\n t, markov_state = node\n @variable(sp, x >= 0, SDDP.State, initial_value = 1)\n @variable(sp, u >= 0)\n @constraint(sp, x.out == x.in - u)\n # Elements of Ω must be a tuple in which `markov_state` is the first\n # element.\n Ω = [(markov_state, (u = u_max,)) for u_max in (0.0, 0.5)]\n SDDP.parameterize(sp, Ω) do (markov_state, ω)\n set_upper_bound(u, ω.u)\n @stageobjective(sp, markov_state * u)\n end\n end;\n\njulia> SDDP.train(\n model;\n print_level = 0,\n iteration_limit = 10,\n sampling_scheme = SDDP.SimulatorSamplingScheme(simulator),\n )\n\n\n\n\n\n\n","category":"type"},{"location":"apireference/#Parallel-schemes","page":"API Reference","title":"Parallel schemes","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API 
Reference","text":"SDDP.AbstractParallelScheme\nSDDP.Serial\nSDDP.Asynchronous","category":"page"},{"location":"apireference/#SDDP.AbstractParallelScheme","page":"API Reference","title":"SDDP.AbstractParallelScheme","text":"AbstractParallelScheme\n\nAbstract type for different parallelism schemes.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.Serial","page":"API Reference","title":"SDDP.Serial","text":"Serial()\n\nRun SDDP in serial mode.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.Asynchronous","page":"API Reference","title":"SDDP.Asynchronous","text":"Asynchronous(\n [init_callback::Function,]\n slave_pids::Vector{Int} = workers();\n use_master::Bool = true,\n)\n\nRun SDDP in asynchronous mode workers with pid's slave_pids.\n\nAfter initializing the models on each worker, call init_callback(model). Note that init_callback is run locally on the worker and not on the master thread.\n\nIf use_master is true, iterations are also conducted on the master process.\n\n\n\n\n\nAsynchronous(\n solver::Any,\n slave_pids::Vector{Int} = workers();\n use_master::Bool = true,\n)\n\nRun SDDP in asynchronous mode workers with pid's slave_pids.\n\nSet the optimizer on each worker by calling JuMP.set_optimizer(model, solver).\n\n\n\n\n\n","category":"type"},{"location":"apireference/#Forward-passes","page":"API Reference","title":"Forward passes","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"SDDP.AbstractForwardPass\nSDDP.DefaultForwardPass\nSDDP.RevisitingForwardPass\nSDDP.RiskAdjustedForwardPass\nSDDP.AlternativeForwardPass\nSDDP.AlternativePostIterationCallback\nSDDP.RegularizedForwardPass","category":"page"},{"location":"apireference/#SDDP.AbstractForwardPass","page":"API Reference","title":"SDDP.AbstractForwardPass","text":"AbstractForwardPass\n\nAbstract type for different forward passes.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.DefaultForwardPass","page":"API Reference","title":"SDDP.DefaultForwardPass","text":"DefaultForwardPass(; include_last_node::Bool = true)\n\nThe default forward pass.\n\nIf include_last_node = false and the sample terminated due to a cycle, then the last node (which forms the cycle) is omitted. This can be useful option to set when training, but it comes at the cost of not knowing which node formed the cycle (if there are multiple possibilities).\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.RevisitingForwardPass","page":"API Reference","title":"SDDP.RevisitingForwardPass","text":"RevisitingForwardPass(\n period::Int = 500;\n sub_pass::AbstractForwardPass = DefaultForwardPass(),\n)\n\nA forward pass scheme that generate period new forward passes (using sub_pass), then revisits all previously explored forward passes. 
This can be useful to encourage convergence at a diversity of points in the state-space.\n\nSet period = typemax(Int) to disable.\n\nFor example, if period = 2, then the forward passes will be revisited as follows: 1, 2, 1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 1, 2, ....\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.RiskAdjustedForwardPass","page":"API Reference","title":"SDDP.RiskAdjustedForwardPass","text":"RiskAdjustedForwardPass(;\n forward_pass::AbstractForwardPass,\n risk_measure::AbstractRiskMeasure,\n resampling_probability::Float64,\n rejection_count::Int = 5,\n)\n\nA forward pass that resamples a previous forward pass with resampling_probability probability, and otherwise samples a new forward pass using forward_pass.\n\nThe forward pass to revisit is chosen based on the risk-adjusted (using risk_measure) probability of the cumulative stage objectives.\n\nNote that this objective corresponds to the first time we visited the trajectory. Subsequent visits may have improved things, but we don't have the mechanisms in-place to update it. Therefore, remove the forward pass from resampling consideration after rejection_count revisits.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.AlternativeForwardPass","page":"API Reference","title":"SDDP.AlternativeForwardPass","text":"AlternativeForwardPass(\n forward_model::SDDP.PolicyGraph{T};\n forward_pass::AbstractForwardPass = DefaultForwardPass(),\n)\n\nA forward pass that simulates using forward_model, which may be different to the model used in the backwards pass.\n\nWhen using this forward pass, you should almost always pass SDDP.AlternativePostIterationCallback to the post_iteration_callback argument of SDDP.train.\n\nThis forward pass is most useful when the forward_model is non-convex and we use a convex approximation of the model in the backward pass.\n\nFor example, in optimal power flow models, we can use an AC-OPF formulation as the forward_model and a DC-OPF formulation as the backward model.\n\nFor more details see the paper:\n\nRosemberg, A., and Street, A., and Garcia, J.D., and Valladão, D.M., and Silva, T., and Dowson, O. (2021). Assessing the cost of network simplifications in long-term hydrothermal dispatch planning models. IEEE Transactions on Sustainable Energy. 13(1), 196-206.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.AlternativePostIterationCallback","page":"API Reference","title":"SDDP.AlternativePostIterationCallback","text":"AlternativePostIterationCallback(forward_model::PolicyGraph)\n\nA post-iteration callback that should be used whenever SDDP.AlternativeForwardPass is used.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.RegularizedForwardPass","page":"API Reference","title":"SDDP.RegularizedForwardPass","text":"RegularizedForwardPass(;\n rho::Float64 = 0.05,\n forward_pass::AbstractForwardPass = DefaultForwardPass(),\n)\n\nA forward pass that regularizes the outgoing first-stage state variables with an L-infty trust-region constraint about the previous iteration's solution. Specifically, the bounds of the outgoing state variable x are updated from (l, u) to max(l, x^k - rho * (u - l)) <= x <= min(u, x^k + rho * (u - l)), where x^k is the optimal solution of x in the previous iteration. 
On the first iteration, the value of the state at the root node is used.\n\nBy default, rho is set to 5%, which seems to work well empirically.\n\nPass a different forward_pass to control the forward pass within the regularized forward pass.\n\nThis forward pass is largely intended to be used for investment problems in which the first stage makes a series of capacity decisions that then influence the rest of the graph. An error is thrown if the first stage problem is not deterministic, and states are silently skipped if they do not have finite bounds.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#Risk-Measures","page":"API Reference","title":"Risk Measures","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"SDDP.AbstractRiskMeasure\nSDDP.adjust_probability","category":"page"},{"location":"apireference/#SDDP.AbstractRiskMeasure","page":"API Reference","title":"SDDP.AbstractRiskMeasure","text":"AbstractRiskMeasure\n\nThe abstract type for the risk measure interface.\n\nYou need to define the following methods:\n\nSDDP.adjust_probability\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.adjust_probability","page":"API Reference","title":"SDDP.adjust_probability","text":"adjust_probability(\n measure::Expectation\n risk_adjusted_probability::Vector{Float64},\n original_probability::Vector{Float64},\n noise_support::Vector{Noise{T}},\n objective_realizations::Vector{Float64},\n is_minimization::Bool,\n) where {T}\n\n\n\n\n\n","category":"function"},{"location":"apireference/#Duality-handlers","page":"API Reference","title":"Duality handlers","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"SDDP.AbstractDualityHandler\nSDDP.ContinuousConicDuality\nSDDP.LagrangianDuality\nSDDP.StrengthenedConicDuality\nSDDP.BanditDuality","category":"page"},{"location":"apireference/#SDDP.AbstractDualityHandler","page":"API Reference","title":"SDDP.AbstractDualityHandler","text":"AbstractDualityHandler\n\nThe abstract type for the duality handler interface.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.ContinuousConicDuality","page":"API Reference","title":"SDDP.ContinuousConicDuality","text":"ContinuousConicDuality()\n\nCompute dual variables in the backward pass using conic duality, relaxing any binary or integer restrictions as necessary.\n\nTheory\n\nGiven the problem\n\nmin Cᵢ(x̄, u, w) + θᵢ\n st (x̄, x′, u) in Xᵢ(w) ∩ S\n x̄ - x == 0 [λ]\n\nwhere S ⊆ ℝ×ℤ, we relax integrality and using conic duality to solve for λ in the problem:\n\nmin Cᵢ(x̄, u, w) + θᵢ\n st (x̄, x′, u) in Xᵢ(w)\n x̄ - x == 0 [λ]\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.LagrangianDuality","page":"API Reference","title":"SDDP.LagrangianDuality","text":"LagrangianDuality(;\n method::LocalImprovementSearch.AbstractSearchMethod =\n LocalImprovementSearch.BFGS(100),\n)\n\nObtain dual variables in the backward pass using Lagrangian duality.\n\nArguments\n\nmethod: the LocalImprovementSearch method for maximizing the Lagrangian dual problem.\n\nTheory\n\nGiven the problem\n\nmin Cᵢ(x̄, u, w) + θᵢ\n st (x̄, x′, u) in Xᵢ(w) ∩ S\n x̄ - x == 0 [λ]\n\nwhere S ⊆ ℝ×ℤ, we solve the problem max L(λ), where:\n\nL(λ) = min Cᵢ(x̄, u, w) + θᵢ - λ' h(x̄)\n st (x̄, x′, u) in Xᵢ(w) ∩ S\n\nand where h(x̄) = x̄ - x.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.StrengthenedConicDuality","page":"API 
Reference","title":"SDDP.StrengthenedConicDuality","text":"StrengthenedConicDuality()\n\nObtain dual variables in the backward pass using strengthened conic duality.\n\nTheory\n\nGiven the problem\n\nmin Cᵢ(x̄, u, w) + θᵢ\n st (x̄, x′, u) in Xᵢ(w) ∩ S\n x̄ - x == 0 [λ]\n\nwe first obtain an estimate for λ using ContinuousConicDuality.\n\nThen, we evaluate the Lagrangian function:\n\nL(λ) = min Cᵢ(x̄, u, w) + θᵢ - λ' (x̄ - x`)\n st (x̄, x′, u) in Xᵢ(w) ∩ S\n\nto obtain a better estimate of the intercept.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.BanditDuality","page":"API Reference","title":"SDDP.BanditDuality","text":"BanditDuality()\n\nFormulates the problem of choosing a duality handler as a multi-armed bandit problem. The arms to choose between are:\n\nContinuousConicDuality\nStrengthenedConicDuality\nLagrangianDuality\n\nOur problem isn't a typical multi-armed bandit for a two reasons:\n\nThe reward distribution is non-stationary (each arm converges to 0 as it keeps getting pulled.\nThe distribution of rewards is dependent on the history of the arms that were chosen.\n\nWe choose a very simple heuristic: pick the arm with the best mean + 1 standard deviation. That should ensure we consistently pick the arm with the best likelihood of improving the value function.\n\nIn future, we should consider discounting the rewards of earlier iterations, and focus more on the more-recent rewards.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#Simulating-the-policy","page":"API Reference","title":"Simulating the policy","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"SDDP.simulate\nSDDP.calculate_bound\nSDDP.add_all_cuts","category":"page"},{"location":"apireference/#SDDP.simulate","page":"API Reference","title":"SDDP.simulate","text":"simulate(\n model::PolicyGraph,\n number_replications::Int = 1,\n variables::Vector{Symbol} = Symbol[];\n sampling_scheme::AbstractSamplingScheme =\n InSampleMonteCarlo(),\n custom_recorders = Dict{Symbol, Function}(),\n duality_handler::Union{Nothing,AbstractDualityHandler} = nothing,\n skip_undefined_variables::Bool = false,\n parallel_scheme::AbstractParallelScheme = Serial(),\n incoming_state::Dict{String,Float64} = _initial_state(model),\n )::Vector{Vector{Dict{Symbol,Any}}}\n\nPerform a simulation of the policy model with number_replications replications.\n\nReturn data structure\n\nReturns a vector with one element for each replication. Each element is a vector with one-element for each node in the scenario that was sampled. Each element in that vector is a dictionary containing information about the subproblem that was solved.\n\nIn that dictionary there are four special keys:\n\n:node_index, which records the index of the sampled node in the policy model\n:noise_term, which records the noise observed at the node\n:stage_objective, which records the stage-objective of the subproblem\n:bellman_term, which records the cost/value-to-go of the node.\n\nThe sum of :stage_objective + :bellman_term will equal the objective value of the solved subproblem.\n\nIn addition to the special keys, the dictionary will contain the result of key => JuMP.value(subproblem[key]) for each key in variables. 
This is useful to obtain the primal value of the state and control variables.\n\nPositional arguments\n\nmodel: the model to simulate\nnumber_replications::Int = 1: the number of simulation replications to conduct, that is, the length of the simulation vector that is returned by this function. If omitted, this defaults to 1.\nvariables::Vector{Symbol} = Symbol[]: a list of the variable names to record the value of in each stage.\n\nKeyword arguments\n\nsampling_scheme: the sampling scheme used when simulating.\ncustom_recorders: see Custom recorders section below.\nduality_handler: the SDDP.AbstractDualityHandler used to compute dual variables. If you do not require dual variables (or if they are not available), pass duality_handler = nothing.\nskip_undefined_variables: If you attempt to simulate the value of a variable that is only defined in some of the stage problems, an error will be thrown. To over-ride this (and return a NaN instead), pass skip_undefined_variables = true.\nparallel_scheme: Use parallel_scheme::[AbstractParallelScheme](@ref) to specify a scheme for simulating in parallel. Defaults to Serial.\nincoming_state: Use incoming_state to pass an initial value of the state variable, if it differs from that at the root node. Each key should be the string name of the state variable.\n\nCustom recorders\n\nFor more complicated data, the custom_recorders keyword argument can be used.\n\nFor example, to record the dual of a constraint named my_constraint, pass the following:\n\nsimulation_results = SDDP.simulate(model, 2;\n custom_recorders = Dict{Symbol, Function}(\n :constraint_dual => sp -> JuMP.dual(sp[:my_constraint])\n )\n)\n\nThe value of the dual in the first stage of the second replication can be accessed as:\n\nsimulation_results[2][1][:constraint_dual]\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.calculate_bound","page":"API Reference","title":"SDDP.calculate_bound","text":"SDDP.calculate_bound(\n model::PolicyGraph,\n state::Dict{Symbol,Float64} = model.initial_root_state;\n risk_measure::AbstractRiskMeasure = Expectation(),\n)\n\nCalculate the lower bound (if minimizing, otherwise upper bound) of the problem model at the point state, assuming the risk measure at the root node is risk_measure.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.add_all_cuts","page":"API Reference","title":"SDDP.add_all_cuts","text":"add_all_cuts(model::PolicyGraph)\n\nAdd all cuts that may have been deleted back into the model.\n\nExplanation\n\nDuring the solve, SDDP.jl may decide to remove cuts for a variety of reasons.\n\nThese can include cuts that define the optimal value function, particularly around the extremes of the state-space (e.g., reservoirs empty).\n\nThis function ensures that all cuts discovered are added back into the model.\n\nYou should call this after train and before simulate.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#Decision-rules","page":"API Reference","title":"Decision rules","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"SDDP.DecisionRule\nSDDP.evaluate","category":"page"},{"location":"apireference/#SDDP.DecisionRule","page":"API Reference","title":"SDDP.DecisionRule","text":"DecisionRule(model::PolicyGraph{T}; node::T)\n\nCreate a decision rule for node node in model.\n\nExample\n\nrule = SDDP.DecisionRule(model; node = 1)\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.evaluate","page":"API 
Reference","title":"SDDP.evaluate","text":"evaluate(\n rule::DecisionRule;\n incoming_state::Dict{Symbol,Float64},\n noise = nothing,\n controls_to_record = Symbol[],\n)\n\nEvalute the decision rule rule at the point described by the incoming_state and noise.\n\nIf the node is deterministic, omit the noise argument.\n\nPass a list of symbols to controls_to_record to save the optimal primal solution corresponding to the names registered in the model.\n\n\n\n\n\nevaluate(\n V::ValueFunction,\n point::Dict{Union{Symbol,String},<:Real}\n objective_state = nothing,\n belief_state = nothing\n)\n\nEvaluate the value function V at point in the state-space.\n\nReturns a tuple containing the height of the function, and the subgradient w.r.t. the convex state-variables.\n\nExamples\n\nevaluate(V, Dict(:volume => 1.0))\n\nIf the state variable is constructed like @variable(sp, volume[1:4] >= 0, SDDP.State, initial_value = 0.0), use [i] to index the state variable:\n\nevaluate(V, Dict(Symbol(\"volume[1]\") => 1.0))\n\nYou can also use strings or symbols for the keys.\n\nevaluate(V, Dict(\"volume[1]\" => 1))\n\n\n\n\n\nevalute(V::ValueFunction{Nothing, Nothing}; kwargs...)\n\nEvalute the value function V at the point in the state-space specified by kwargs.\n\nExamples\n\nevaluate(V; volume = 1)\n\n\n\n\n\nevaluate(\n model::PolicyGraph{T},\n validation_scenarios::ValidationScenarios{T,S},\n) where {T,S}\n\nEvaluate the performance of the policy contained in model after a call to train on the scenarios specified by validation_scenarios.\n\nExamples\n\nmodel, validation_scenarios = read_from_file(\"my_model.sof.json\")\ntrain(model; iteration_limit = 100)\nsimulations = evaluate(model, validation_scenarios)\n\n\n\n\n\n","category":"function"},{"location":"apireference/#Visualizing-the-policy","page":"API Reference","title":"Visualizing the policy","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"SDDP.SpaghettiPlot\nSDDP.add_spaghetti\nSDDP.publication_plot\nSDDP.ValueFunction\nSDDP.evaluate(::SDDP.ValueFunction, ::Dict{Symbol,Float64})\nSDDP.plot","category":"page"},{"location":"apireference/#SDDP.SpaghettiPlot","page":"API Reference","title":"SDDP.SpaghettiPlot","text":"SDDP.SpaghettiPlot(; stages, scenarios)\n\nInitialize a new SpaghettiPlot with stages stages and scenarios number of replications.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.add_spaghetti","page":"API Reference","title":"SDDP.add_spaghetti","text":"SDDP.add_spaghetti(data_function::Function, plt::SpaghettiPlot; kwargs...)\n\nDescription\n\nAdd a new figure to the SpaghettiPlot plt, where the y-value of the scenarioth line when x = stage is given by data_function(plt.simulations[scenario][stage]).\n\nKeyword arguments\n\nxlabel: set the xaxis label\nylabel: set the yaxis label\ntitle: set the title of the plot\nymin: set the minimum y value\nymax: set the maximum y value\ncumulative: plot the additive accumulation of the value across the stages\ninterpolate: interpolation method for lines between stages.\n\nDefaults to \"linear\" see the d3 docs \tfor all options.\n\nExamples\n\nsimulations = simulate(model, 10)\nplt = SDDP.spaghetti_plot(simulations)\nSDDP.add_spaghetti(plt; title = \"Stage objective\") do data\n return data[:stage_objective]\nend\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.publication_plot","page":"API Reference","title":"SDDP.publication_plot","text":"SDDP.publication_plot(\n data_function, simulations;\n 
quantile = [0.0, 0.1, 0.25, 0.5, 0.75, 0.9, 1.0],\n kwargs...)\n\nCreate a Plots.jl recipe plot of the simulations.\n\nSee Plots.jl for the list of keyword arguments.\n\nExamples\n\nSDDP.publication_plot(simulations; title = \"My title\") do data\n return data[:stage_objective]\nend\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.ValueFunction","page":"API Reference","title":"SDDP.ValueFunction","text":"ValueFunction\n\nA representation of the value function. SDDP.jl uses the following unique representation of the value function that is undocumented in the literature.\n\nIt supports three types of state variables:\n\nx - convex \"resource\" states\nb - concave \"belief\" states\ny - concave \"objective\" states\n\nIn addition, we have three types of cuts:\n\nSingle-cuts (also called \"average\" cuts in the literature), which involve the risk-adjusted expectation of the cost-to-go.\nMulti-cuts, which use a different cost-to-go term for each realization w.\nRisk-cuts, which correspond to the facets of the dual interpretation of a coherent risk measure.\n\nTherefore, ValueFunction returns a JuMP model of the following form:\n\nV(x, b, y) = min: μᵀb + νᵀy + θ\n s.t. # \"Single\" / \"Average\" cuts\n μᵀb(j) + νᵀy(j) + θ >= α(j) + xᵀβ(j), ∀ j ∈ J\n # \"Multi\" cuts\n μᵀb(k) + νᵀy(k) + φ(w) >= α(k, w) + xᵀβ(k, w), ∀w ∈ Ω, k ∈ K\n # \"Risk-set\" cuts\n θ ≥ Σ{p(k, w) * φ(w)}_w - μᵀb(k) - νᵀy(k), ∀ k ∈ K\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.evaluate-Tuple{SDDP.ValueFunction, Dict{Symbol, Float64}}","page":"API Reference","title":"SDDP.evaluate","text":"evaluate(\n V::ValueFunction,\n point::Dict{Union{Symbol,String},<:Real}\n objective_state = nothing,\n belief_state = nothing\n)\n\nEvaluate the value function V at point in the state-space.\n\nReturns a tuple containing the height of the function, and the subgradient w.r.t. the convex state-variables.\n\nExamples\n\nevaluate(V, Dict(:volume => 1.0))\n\nIf the state variable is constructed like @variable(sp, volume[1:4] >= 0, SDDP.State, initial_value = 0.0), use [i] to index the state variable:\n\nevaluate(V, Dict(Symbol(\"volume[1]\") => 1.0))\n\nYou can also use strings or symbols for the keys.\n\nevaluate(V, Dict(\"volume[1]\" => 1))\n\n\n\n\n\n","category":"method"},{"location":"apireference/#SDDP.plot","page":"API Reference","title":"SDDP.plot","text":"plot(plt::SpaghettiPlot[, filename::String]; open::Bool = true)\n\nSave the SpaghettiPlot plt to filename. If filename is not given, it will be saved to a temporary directory. If open = true, then a browser window will be opened to display the resulting HTML file.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#Debugging-the-model","page":"API Reference","title":"Debugging the model","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"SDDP.write_subproblem_to_file\nSDDP.deterministic_equivalent","category":"page"},{"location":"apireference/#SDDP.write_subproblem_to_file","page":"API Reference","title":"SDDP.write_subproblem_to_file","text":"write_subproblem_to_file(\n node::Node,\n filename::String;\n throw_error::Bool = false,\n)\n\nWrite the subproblem contained in node to the file filename.\n\nThe throw_error argument is used internally by SDDP.jl. 
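To tie the plotting functions above together, here is a hedged end-to-end sketch (not from the SDDP.jl documentation); it assumes model has already been trained and registers a state variable named volume:

```julia
# Illustrative only: simulate the trained policy, recording `volume`.
simulations = SDDP.simulate(model, 10, [:volume])

# Build a spaghetti plot with one line per replication.
plt = SDDP.SpaghettiPlot(simulations)
SDDP.add_spaghetti(plt; title = "Volume") do data
    return data[:volume].out   # outgoing value of the state in each stage
end

# Write the interactive HTML file without opening a browser window.
SDDP.plot(plt, "spaghetti.html"; open = false)
```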
If set, an error will be thrown.\n\nExample\n\nSDDP.write_subproblem_to_file(model[1], \"subproblem_1.lp\")\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.deterministic_equivalent","page":"API Reference","title":"SDDP.deterministic_equivalent","text":"deterministic_equivalent(\n pg::PolicyGraph{T},\n optimizer = nothing;\n time_limit::Union{Real,Nothing} = 60.0,\n)\n\nForm a JuMP model that represents the deterministic equivalent of the problem.\n\nExamples\n\ndeterministic_equivalent(model)\n\ndeterministic_equivalent(model, HiGHS.Optimizer)\n\n\n\n\n\n","category":"function"},{"location":"apireference/#StochOptFormat","page":"API Reference","title":"StochOptFormat","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"SDDP.write_to_file\nSDDP.read_from_file\nBase.write(::IO, ::SDDP.PolicyGraph)\nBase.read(::IO, ::Type{SDDP.PolicyGraph})\nSDDP.evaluate(::SDDP.PolicyGraph{T}, ::SDDP.ValidationScenarios{T}) where {T}\nSDDP.ValidationScenarios\nSDDP.ValidationScenario","category":"page"},{"location":"apireference/#SDDP.write_to_file","page":"API Reference","title":"SDDP.write_to_file","text":"write_to_file(\n model::PolicyGraph,\n filename::String;\n compression::MOI.FileFormats.AbstractCompressionScheme =\n MOI.FileFormats.AutomaticCompression(),\n kwargs...\n)\n\nWrite model to filename in the StochOptFormat file format.\n\nPass an argument to compression to override the default of automatically detecting the file compression to use based on the extension of filename.\n\nSee Base.write(::IO, ::PolicyGraph) for information on the keyword arguments that can be provided.\n\nwarning: Warning\nThis function is experimental. See the full warning in Base.write(::IO, ::PolicyGraph).\n\nExamples\n\nwrite_to_file(model, \"my_model.sof.json\"; validation_scenarios = 10)\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.read_from_file","page":"API Reference","title":"SDDP.read_from_file","text":"read_from_file(\n filename::String;\n compression::MOI.FileFormats.AbstractCompressionScheme =\n MOI.FileFormats.AutomaticCompression(),\n kwargs...\n)::Tuple{PolicyGraph, ValidationScenarios}\n\nReturn a tuple containing a PolicyGraph object and a ValidationScenarios read from filename in the StochOptFormat file format.\n\nPass an argument to compression to override the default of automatically detecting the file compression to use based on the extension of filename.\n\nSee Base.read(::IO, ::Type{PolicyGraph}) for information on the keyword arguments that can be provided.\n\nwarning: Warning\nThis function is experimental. See the full warning in Base.read(::IO, ::Type{PolicyGraph}).\n\nExamples\n\nmodel, validation_scenarios = read_from_file(\"my_model.sof.json\")\n\n\n\n\n\n","category":"function"},{"location":"apireference/#Base.write-Tuple{IO, SDDP.PolicyGraph}","page":"API Reference","title":"Base.write","text":"Base.write(\n io::IO,\n model::PolicyGraph;\n validation_scenarios::Union{Nothing,Int,ValidationScenarios} = nothing,\n sampling_scheme::AbstractSamplingScheme = InSampleMonteCarlo(),\n kwargs...\n)\n\nWrite model to io in the StochOptFormat file format.\n\nPass an Int to validation_scenarios (default nothing) to specify the number of test scenarios to generate using the sampling_scheme sampling scheme. 
Alternatively, pass a ValidationScenarios object to manually specify the test scenarios to use.\n\nAny additional kwargs passed to write will be stored in the top-level of the resulting StochOptFormat file. Valid arguments include name, author, date, and description.\n\nCompatibility\n\nwarning: Warning\nTHIS FUNCTION IS EXPERIMENTAL. THINGS MAY CHANGE BETWEEN COMMITS. YOU SHOULD NOT RELY ON THIS FUNCTIONALITY AS A LONG-TERM FILE FORMAT (YET).\n\nIn addition to potential changes to the underlying format, only a subset of possible modifications are supported. These include:\n\nJuMP.fix\nJuMP.set_lower_bound\nJuMP.set_upper_bound\nJuMP.set_normalized_rhs\nChanges to the constant or affine terms in a stage objective.\n\nIf your model uses something other than this, this function will silently write an incorrect formulation of the problem.\n\nExamples\n\nopen(\"my_model.sof.json\", \"w\") do io\n write(\n io,\n model;\n validation_scenarios = 10,\n name = \"MyModel\",\n author = \"@odow\",\n date = \"2020-07-20\",\n description = \"Example problem for the SDDP.jl documentation\",\n )\nend\n\n\n\n\n\n","category":"method"},{"location":"apireference/#Base.read-Tuple{IO, Type{SDDP.PolicyGraph}}","page":"API Reference","title":"Base.read","text":"Base.read(\n io::IO,\n ::Type{PolicyGraph};\n bound::Float64 = 1e6,\n)::Tuple{PolicyGraph,ValidationScenarios}\n\nReturn a tuple containing a PolicyGraph object and a ValidationScenarios read from io in the StochOptFormat file format.\n\nSee also: evaluate.\n\nCompatibility\n\nwarning: Warning\nThis function is experimental. Things may change between commits. You should not rely on this functionality as a long-term file format (yet).\n\nIn addition to potential changes to the underlying format, only a subset of possible modifications are supported. These include:\n\nAdditive random variables in the constraints or in the objective\nMultiplicative random variables in the objective\n\nIf your model uses something other than this, this function may throw an error or silently build a non-convex model.\n\nExamples\n\nopen(\"my_model.sof.json\", \"r\") do io\n model, validation_scenarios = read(io, PolicyGraph)\nend\n\n\n\n\n\n","category":"method"},{"location":"apireference/#SDDP.evaluate-Union{Tuple{T}, Tuple{SDDP.PolicyGraph{T}, SDDP.ValidationScenarios{T}}} where T","page":"API Reference","title":"SDDP.evaluate","text":"evaluate(\n model::PolicyGraph{T},\n validation_scenarios::ValidationScenarios{T,S},\n) where {T,S}\n\nEvaluate the performance of the policy contained in model after a call to train on the scenarios specified by validation_scenarios.\n\nExamples\n\nmodel, validation_scenarios = read_from_file(\"my_model.sof.json\")\ntrain(model; iteration_limit = 100)\nsimulations = evaluate(model, validation_scenarios)\n\n\n\n\n\n","category":"method"},{"location":"apireference/#SDDP.ValidationScenarios","page":"API Reference","title":"SDDP.ValidationScenarios","text":"ValidationScenario{T,S}(scenarios::Vector{ValidationScenario{T,S}})\n\nAn AbstractSamplingScheme based on a vector of scenarios.\n\nEach scenario is a vector of Tuple{T, S} where the first element is the node to visit and the second element is the realization of the stagewise-independent noise term. 
Pass nothing if the node is deterministic.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.ValidationScenario","page":"API Reference","title":"SDDP.ValidationScenario","text":"ValidationScenario{T,S}(scenario::Vector{Tuple{T,S}})\n\nA single scenario for testing.\n\nSee also: ValidationScenarios.\n\n\n\n\n\n","category":"type"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"EditURL = \"markov_uncertainty.jl\"","category":"page"},{"location":"tutorial/markov_uncertainty/#Markovian-policy-graphs","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"","category":"section"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"(Image: ) (Image: )","category":"page"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"In our previous tutorials (An introduction to SDDP.jl and Uncertainty in the objective function), we formulated a simple hydrothermal scheduling problem with stagewise-independent random variables in the right-hand side of the constraints and in the objective function. Now, in this tutorial, we introduce some stagewise-dependent uncertainty using a Markov chain.","category":"page"},{"location":"tutorial/markov_uncertainty/#Formulating-the-problem","page":"Markovian policy graphs","title":"Formulating the problem","text":"","category":"section"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"In this tutorial we consider a Markov chain with two climate states: wet and dry. Each Markov state is associated with an integer, in this case the wet climate state is Markov state 1 and the dry climate state is Markov state 2. In the wet climate state, the probability of the high inflow increases to 50%, and the probability of the low inflow decreases to 1/6. In the dry climate state, the converse happens. There is also persistence in the climate state: the probability of remaining in the current state is 75%, and the probability of transitioning to the other climate state is 25%. We assume that the first stage starts in the wet climate state.","category":"page"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"Here is a picture of the model we're going to implement.","category":"page"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"(Image: Markovian policy graph)","category":"page"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"There are five nodes in our graph. Each node is named by a tuple (t, i), where t is the stage for t=1,2,3, and i is the Markov state for i=1,2. As before, the wavy lines denote the stagewise-independent random variable.","category":"page"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"For each stage, we need to provide a Markov transition matrix. This is an MxN matrix, where the element A[i, j] gives the probability of transitioning from Markov state i in the previous stage to Markov state j in the current stage. The first stage is special because we assume there is a \"zero'th\" stage which has one Markov state (the round node in the graph above). 
Furthermore, the number of columns in the transition matrix of a stage (i.e. the number of Markov states) must equal the number of rows in the next stage's transition matrix. For our example, the vector of Markov transition matrices is given by:","category":"page"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"T = Array{Float64,2}[[1.0]', [0.75 0.25], [0.75 0.25; 0.25 0.75]]","category":"page"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"note: Note\nMake sure to add the ' after the first transition matrix so Julia can distinguish between a vector and a matrix.","category":"page"},{"location":"tutorial/markov_uncertainty/#Creating-a-model","page":"Markovian policy graphs","title":"Creating a model","text":"","category":"section"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"using SDDP, HiGHS\n\nΩ = [\n (inflow = 0.0, fuel_multiplier = 1.5),\n (inflow = 50.0, fuel_multiplier = 1.0),\n (inflow = 100.0, fuel_multiplier = 0.75),\n]\n\nmodel = SDDP.MarkovianPolicyGraph(\n transition_matrices = Array{Float64,2}[\n [1.0]',\n [0.75 0.25],\n [0.75 0.25; 0.25 0.75],\n ],\n sense = :Min,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n) do subproblem, node\n # Unpack the stage and Markov index.\n t, markov_state = node\n # Define the state variable.\n @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n # Define the control variables.\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n inflow\n end)\n # Define the constraints\n @constraints(\n subproblem,\n begin\n volume.out == volume.in + inflow - hydro_generation - hydro_spill\n thermal_generation + hydro_generation == 150.0\n end\n )\n # Note how we can use `markov_state` to dispatch an `if` statement.\n probability = if markov_state == 1 # wet climate state\n [1 / 6, 1 / 3, 1 / 2]\n else # dry climate state\n [1 / 2, 1 / 3, 1 / 6]\n end\n\n fuel_cost = [50.0, 100.0, 150.0]\n SDDP.parameterize(subproblem, Ω, probability) do ω\n JuMP.fix(inflow, ω.inflow)\n @stageobjective(\n subproblem,\n ω.fuel_multiplier * fuel_cost[t] * thermal_generation\n )\n end\nend","category":"page"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"tip: Tip\nFor more information on SDDP.MarkovianPolicyGraphs, read Create a general policy graph.","category":"page"},{"location":"tutorial/markov_uncertainty/#Training-and-simulating-the-policy","page":"Markovian policy graphs","title":"Training and simulating the policy","text":"","category":"section"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"As in the previous three tutorials, we train the policy:","category":"page"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"SDDP.train(model)","category":"page"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"Instead of performing a Monte Carlo simulation like the previous tutorials, we may want to simulate one particular sequence of noise realizations. 
This historical simulation can also be conducted by passing a SDDP.Historical sampling scheme to the sampling_scheme keyword of the SDDP.simulate function.","category":"page"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"We can confirm that the historical sequence of nodes was visited by querying the :node_index key of the simulation results.","category":"page"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"simulations = SDDP.simulate(\n model,\n sampling_scheme = SDDP.Historical([\n ((1, 1), Ω[1]),\n ((2, 2), Ω[3]),\n ((3, 1), Ω[2]),\n ]),\n)\n\n[stage[:node_index] for stage in simulations[1]]","category":"page"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"","category":"page"},{"location":"tutorial/markov_uncertainty/","page":"Markovian policy graphs","title":"Markovian policy graphs","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/FAST_hydro_thermal/","page":"FAST: the hydro-thermal problem","title":"FAST: the hydro-thermal problem","text":"EditURL = \"FAST_hydro_thermal.jl\"","category":"page"},{"location":"examples/FAST_hydro_thermal/#FAST:-the-hydro-thermal-problem","page":"FAST: the hydro-thermal problem","title":"FAST: the hydro-thermal problem","text":"","category":"section"},{"location":"examples/FAST_hydro_thermal/","page":"FAST: the hydro-thermal problem","title":"FAST: the hydro-thermal problem","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/FAST_hydro_thermal/","page":"FAST: the hydro-thermal problem","title":"FAST: the hydro-thermal problem","text":"An implementation of the Hydro-thermal example from FAST","category":"page"},{"location":"examples/FAST_hydro_thermal/","page":"FAST: the hydro-thermal problem","title":"FAST: the hydro-thermal problem","text":"using SDDP, HiGHS, Test\n\nfunction fast_hydro_thermal()\n model = SDDP.PolicyGraph(\n SDDP.LinearGraph(2),\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n ) do sp, t\n @variable(sp, 0 <= x <= 8, SDDP.State, initial_value = 0.0)\n @variables(sp, begin\n y >= 0\n p >= 0\n ξ\n end)\n @constraints(sp, begin\n p + y >= 6\n x.out <= x.in - y + ξ\n end)\n RAINFALL = (t == 1 ? 
[6] : [2, 10])\n SDDP.parameterize(sp, RAINFALL) do ω\n return JuMP.fix(ξ, ω)\n end\n @stageobjective(sp, 5 * p)\n end\n\n det = SDDP.deterministic_equivalent(model, HiGHS.Optimizer)\n set_silent(det)\n JuMP.optimize!(det)\n @test JuMP.objective_value(det) == 10\n SDDP.train(model)\n @test SDDP.calculate_bound(model) == 10\n return\nend\n\nfast_hydro_thermal()","category":"page"},{"location":"examples/FAST_hydro_thermal/","page":"FAST: the hydro-thermal problem","title":"FAST: the hydro-thermal problem","text":"","category":"page"},{"location":"examples/FAST_hydro_thermal/","page":"FAST: the hydro-thermal problem","title":"FAST: the hydro-thermal problem","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/StochDynamicProgramming.jl_multistock/","page":"StochDynamicProgramming: the multistock problem","title":"StochDynamicProgramming: the multistock problem","text":"EditURL = \"StochDynamicProgramming.jl_multistock.jl\"","category":"page"},{"location":"examples/StochDynamicProgramming.jl_multistock/#StochDynamicProgramming:-the-multistock-problem","page":"StochDynamicProgramming: the multistock problem","title":"StochDynamicProgramming: the multistock problem","text":"","category":"section"},{"location":"examples/StochDynamicProgramming.jl_multistock/","page":"StochDynamicProgramming: the multistock problem","title":"StochDynamicProgramming: the multistock problem","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/StochDynamicProgramming.jl_multistock/","page":"StochDynamicProgramming: the multistock problem","title":"StochDynamicProgramming: the multistock problem","text":"This example comes from StochDynamicProgramming.jl.","category":"page"},{"location":"examples/StochDynamicProgramming.jl_multistock/","page":"StochDynamicProgramming: the multistock problem","title":"StochDynamicProgramming: the multistock problem","text":"using SDDP, HiGHS, Test\n\nfunction test_multistock_example()\n model = SDDP.LinearPolicyGraph(\n stages = 5,\n lower_bound = -5.0,\n optimizer = HiGHS.Optimizer,\n ) do subproblem, stage\n @variable(\n subproblem,\n 0 <= stock[i = 1:3] <= 1,\n SDDP.State,\n initial_value = 0.5\n )\n @variables(subproblem, begin\n 0 <= control[i = 1:3] <= 0.5\n ξ[i = 1:3] # Dummy for RHS noise.\n end)\n @constraints(\n subproblem,\n begin\n sum(control) - 0.5 * 3 <= 0\n [i = 1:3], stock[i].out == stock[i].in + control[i] - ξ[i]\n end\n )\n Ξ = collect(\n Base.product((0.0, 0.15, 0.3), (0.0, 0.15, 0.3), (0.0, 0.15, 0.3)),\n )[:]\n SDDP.parameterize(subproblem, Ξ) do ω\n return JuMP.fix.(ξ, ω)\n end\n @stageobjective(subproblem, (sin(3 * stage) - 1) * sum(control))\n end\n SDDP.train(\n model;\n iteration_limit = 100,\n cut_type = SDDP.SINGLE_CUT,\n log_frequency = 10,\n )\n @test SDDP.calculate_bound(model) ≈ -4.349 atol = 0.01\n\n simulation_results = SDDP.simulate(model, 5000)\n @test length(simulation_results) == 5000\n μ = SDDP.Statistics.mean(\n sum(data[:stage_objective] for data in simulation) for\n simulation in simulation_results\n )\n @test μ ≈ -4.349 atol = 0.1\n return\nend\n\ntest_multistock_example()","category":"page"},{"location":"examples/StochDynamicProgramming.jl_multistock/","page":"StochDynamicProgramming: the multistock problem","title":"StochDynamicProgramming: the multistock problem","text":"","category":"page"},{"location":"examples/StochDynamicProgramming.jl_multistock/","page":"StochDynamicProgramming: the multistock problem","title":"StochDynamicProgramming: the multistock problem","text":"This 
page was generated using Literate.jl.","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"EditURL = \"plotting.jl\"","category":"page"},{"location":"tutorial/plotting/#Plotting-tools","page":"Plotting tools","title":"Plotting tools","text":"","category":"section"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"(Image: ) (Image: )","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"In our previous tutorials, we formulated, solved, and simulated multistage stochastic optimization problems. However, we haven't really investigated what the solution looks like. Luckily, SDDP.jl includes a number of plotting tools to help us do that. In this tutorial, we explain the tools and make some pretty pictures.","category":"page"},{"location":"tutorial/plotting/#Preliminaries","page":"Plotting tools","title":"Preliminaries","text":"","category":"section"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"The next two plot types help visualize the policy. Thus, we first need to create a policy and simulate some trajectories. So, let's take the model from Markovian policy graphs, train it for 20 iterations, and then simulate 100 Monte Carlo realizations of the policy.","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"using SDDP, HiGHS\n\nΩ = [\n (inflow = 0.0, fuel_multiplier = 1.5),\n (inflow = 50.0, fuel_multiplier = 1.0),\n (inflow = 100.0, fuel_multiplier = 0.75),\n]\n\nmodel = SDDP.MarkovianPolicyGraph(\n transition_matrices = Array{Float64,2}[\n [1.0]',\n [0.75 0.25],\n [0.75 0.25; 0.25 0.75],\n ],\n sense = :Min,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n) do subproblem, node\n t, markov_state = node\n @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n inflow\n end)\n @constraints(\n subproblem,\n begin\n volume.out == volume.in + inflow - hydro_generation - hydro_spill\n thermal_generation + hydro_generation == 150.0\n end\n )\n probability =\n markov_state == 1 ? [1 / 6, 1 / 3, 1 / 2] : [1 / 2, 1 / 3, 1 / 6]\n fuel_cost = [50.0, 100.0, 150.0]\n SDDP.parameterize(subproblem, Ω, probability) do ω\n JuMP.fix(inflow, ω.inflow)\n @stageobjective(\n subproblem,\n ω.fuel_multiplier * fuel_cost[t] * thermal_generation\n )\n end\nend\n\nSDDP.train(model, iteration_limit = 20, run_numerical_stability_report = false)\n\nsimulations = SDDP.simulate(\n model,\n 100,\n [:volume, :thermal_generation, :hydro_generation, :hydro_spill],\n)\n\nprintln(\"Completed $(length(simulations)) simulations.\")","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"Great! 
Now we have some data in simulations to visualize.","category":"page"},{"location":"tutorial/plotting/#Spaghetti-plots","page":"Plotting tools","title":"Spaghetti plots","text":"","category":"section"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"The first plotting utility we discuss is a spaghetti plot (you'll understand the name when you see the graph).","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"To create a spaghetti plot, begin by creating a new SDDP.SpaghettiPlot instance as follows:","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"plt = SDDP.SpaghettiPlot(simulations)","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"We can add plots to plt using the SDDP.add_spaghetti function.","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"SDDP.add_spaghetti(plt; title = \"Reservoir volume\") do data\n return data[:volume].out\nend","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"In addition to returning values from the simulation, you can compute things:","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"SDDP.add_spaghetti(plt; title = \"Fuel cost\", ymin = 0, ymax = 250) do data\n if data[:thermal_generation] > 0\n return data[:stage_objective] / data[:thermal_generation]\n else # No thermal generation, so return 0.0.\n return 0.0\n end\nend","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"Note that there are many keyword arguments in addition to title. For example, we fixed the minimum and maximum values of the y-axis using ymin and ymax. See the SDDP.add_spaghetti documentation for all the arguments.","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"Having built the plot, we now need to display it using SDDP.plot.","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"SDDP.plot(plt, \"spaghetti_plot.html\")","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"This should open a webpage that looks like this one.","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"Using the mouse, you can highlight individual trajectories by hovering over them. 
This makes it possible to visualize a single trajectory across multiple dimensions.","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"If you click on the plot, then trajectories that are close to the mouse pointer are shown darker and those further away are shown lighter.","category":"page"},{"location":"tutorial/plotting/#Publication-plots","page":"Plotting tools","title":"Publication plots","text":"","category":"section"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"Instead of the interactive Javascript plots, you can also create some publication ready plots using the SDDP.publication_plot function.","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"info: Info\nYou need to install the Plots.jl package for this to work. We used the GR backend (gr()), but any Plots.jl backend should work.","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"SDDP.publication_plot implements a plot recipe to create ribbon plots of each variable against the stages. The first argument is the vector of simulation dictionaries and the second argument is the dictionary key that you want to plot. Standard Plots.jl keyword arguments such as title and xlabel can be used to modify the look of each plot. By default, the plot displays ribbons of the 0-100, 10-90, and 25-75 percentiles. The dark, solid line in the middle is the median (i.e. 50'th percentile).","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"import Plots\nPlots.plot(\n SDDP.publication_plot(simulations, title = \"Outgoing volume\") do data\n return data[:volume].out\n end,\n SDDP.publication_plot(simulations, title = \"Thermal generation\") do data\n return data[:thermal_generation]\n end;\n xlabel = \"Stage\",\n ylims = (0, 200),\n layout = (1, 2),\n)","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"You can save this plot as a PDF using the Plots.jl function savefig:","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"Plots.savefig(\"my_picture.pdf\")","category":"page"},{"location":"tutorial/plotting/#Plotting-the-value-function","page":"Plotting tools","title":"Plotting the value function","text":"","category":"section"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"You can obtain an object representing the value function of a node using SDDP.ValueFunction.","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"V = SDDP.ValueFunction(model[(1, 1)])","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"The value function can be evaluated using SDDP.evaluate.","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"SDDP.evaluate(V; volume = 1)","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"evaluate returns the height of the value function, and a subgradient with respect to the convex state variables.","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"You can also plot the value function using 
SDDP.plot","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"SDDP.plot(V, volume = 0:200, filename = \"value_function.html\")","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"This should open a webpage that looks like this one.","category":"page"},{"location":"tutorial/plotting/#Convergence-dashboard","page":"Plotting tools","title":"Convergence dashboard","text":"","category":"section"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"If the text-based logging isn't to your liking, you can open a visualization of the training by passing dashboard = true to SDDP.train.","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"SDDP.train(model; dashboard = true)","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"By default, dashboard = false because there is an initial overhead associated with opening and preparing the plot.","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"warning: Warning\nThe dashboard is experimental. There are known bugs associated with it, e.g., SDDP.jl#226.","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"","category":"page"},{"location":"tutorial/plotting/","page":"Plotting tools","title":"Plotting tools","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"EditURL = \"the_farmers_problem.jl\"","category":"page"},{"location":"examples/the_farmers_problem/#The-farmer's-problem","page":"The farmer's problem","title":"The farmer's problem","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"This problem is taken from Section 1.1 of the book Birge, J. R., & Louveaux, F. (2011). Introduction to Stochastic Programming. New York, NY: Springer New York. Paragraphs in quotes are taken verbatim.","category":"page"},{"location":"examples/the_farmers_problem/#Problem-description","page":"The farmer's problem","title":"Problem description","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"Consider a European farmer who specializes in raising wheat, corn, and sugar beets on his 500 acres of land. During the winter, [they want] to decide how much land to devote to each crop. The farmer knows that at least 200 tons (T) of wheat and 240 T of corn are needed for cattle feed. These amounts can be raised on the farm or bought from a wholesaler. Any production in excess of the feeding requirement would be sold. Over the last decade, mean selling prices have been $170 and $150 per ton of wheat and corn, respectively. 
The purchase prices are 40% more than this due to the wholesaler’s margin and transportation costs. Another profitable crop is sugar beet, which [they expect] to sell at $36/T; however, the European Commission imposes a quota on sugar beet production. Any amount in excess of the quota can be sold only at $10/T. The farmer’s quota for next year is 6000 T.\" Based on past experience, the farmer knows that the mean yield on [their] land is roughly 2.5 T, 3 T, and 20 T per acre for wheat, corn, and sugar beets, respectively. [To introduce uncertainty,] assume some correlation among the yields of the different crops. A very simplified representation of this would be to assume that years are good, fair, or bad for all crops, resulting in above average, average, or below average yields for all crops. To fix these ideas, above and below average indicate a yield 20% above or below the mean yield.","category":"page"},{"location":"examples/the_farmers_problem/#Problem-data","page":"The farmer's problem","title":"Problem data","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"The area of the farm.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"MAX_AREA = 500.0","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"There are three crops:","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"CROPS = [:wheat, :corn, :sugar_beet]","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"Each of the crops has a different planting cost ($/acre).","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"PLANTING_COST = Dict(:wheat => 150.0, :corn => 230.0, :sugar_beet => 260.0)","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"The farmer requires a minimum quantity of wheat and corn, but not of sugar beet (tonnes).","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"MIN_QUANTITIES = Dict(:wheat => 200.0, :corn => 240.0, :sugar_beet => 0.0)","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"In Europe, there is a quota system for producing crops. 
The farmer owns the following quota for each crop (tonnes):","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"QUOTA_MAX = Dict(:wheat => Inf, :corn => Inf, :sugar_beet => 6_000.0)","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"The farmer can sell crops produced under the quota for the following amounts ($/tonne):","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"SELL_IN_QUOTA = Dict(:wheat => 170.0, :corn => 150.0, :sugar_beet => 36.0)","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"If they sell more than their allotted quota, the farmer earns the following on each tonne of crop above the quota ($/tonne):","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"SELL_NO_QUOTA = Dict(:wheat => 0.0, :corn => 0.0, :sugar_beet => 10.0)","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"The purchase prices for wheat and corn are 40% more than their sales price. However, the description does not address the purchase price of sugar beet. Therefore, we use a large value of $1,000/tonne.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"BUY_PRICE = Dict(:wheat => 238.0, :corn => 210.0, :sugar_beet => 1_000.0)","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"On average, each crop has the following yield in tonnes/acre:","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"MEAN_YIELD = Dict(:wheat => 2.5, :corn => 3.0, :sugar_beet => 20.0)","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"However, the yield is random. In good years, the yield is +20% above average, and in bad years, the yield is -20% below average.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"YIELD_MULTIPLIER = Dict(:good => 1.2, :fair => 1.0, :bad => 0.8)","category":"page"},{"location":"examples/the_farmers_problem/#Mathematical-formulation","page":"The farmer's problem","title":"Mathematical formulation","text":"","category":"section"},{"location":"examples/the_farmers_problem/#SDDP.jl-code","page":"The farmer's problem","title":"SDDP.jl code","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"note: Note\nIn what follows, we make heavy use of the fact that you can look up variables by their symbol name in a JuMP model as follows:@variable(model, x)\nmodel[:x]Read the JuMP documentation if this isn't familiar to you.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"First up, load SDDP.jl and a solver. 
For this example, we use HiGHS.jl.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"using SDDP, HiGHS","category":"page"},{"location":"examples/the_farmers_problem/#State-variables","page":"The farmer's problem","title":"State variables","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"State variables are the information that flows between stages. In our example, the state variables are the areas of land devoted to growing each crop.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"function add_state_variables(subproblem)\n @variable(subproblem, area[c = CROPS] >= 0, SDDP.State, initial_value = 0)\nend","category":"page"},{"location":"examples/the_farmers_problem/#First-stage-problem","page":"The farmer's problem","title":"First stage problem","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"We can only plant a maximum of 500 acres, and we want to minimize the planting cost","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"function create_first_stage_problem(subproblem)\n @constraint(\n subproblem,\n sum(subproblem[:area][c].out for c in CROPS) <= MAX_AREA\n )\n @stageobjective(\n subproblem,\n -sum(PLANTING_COST[c] * subproblem[:area][c].out for c in CROPS)\n )\nend","category":"page"},{"location":"examples/the_farmers_problem/#Second-stage-problem","page":"The farmer's problem","title":"Second stage problem","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"Now let's consider the second stage problem. This is more complicated than the first stage, so we've broken it down into four sections:","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"control variables\nconstraints\nthe objective\nthe uncertainty","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"First, let's add the second stage control variables.","category":"page"},{"location":"examples/the_farmers_problem/#Variables","page":"The farmer's problem","title":"Variables","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"We add four types of control variables. Technically, the yield isn't a control variable. 
However, we add it as a dummy \"helper\" variable because it will be used when we add uncertainty.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"function second_stage_variables(subproblem)\n @variables(subproblem, begin\n 0 <= yield[c = CROPS] # tonnes/acre\n 0 <= buy[c = CROPS] # tonnes\n 0 <= sell_in_quota[c = CROPS] <= QUOTA_MAX[c] # tonnes\n 0 <= sell_no_quota[c = CROPS] # tonnes\n end)\nend","category":"page"},{"location":"examples/the_farmers_problem/#Constraints","page":"The farmer's problem","title":"Constraints","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"We need to define the minimum quantity constraint. This ensures that MIN_QUANTITIES[c] of each crop is produced.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"function second_stage_constraint_min_quantity(subproblem)\n @constraint(\n subproblem,\n [c = CROPS],\n subproblem[:yield][c] + subproblem[:buy][c] -\n subproblem[:sell_in_quota][c] - subproblem[:sell_no_quota][c] >=\n MIN_QUANTITIES[c]\n )\nend","category":"page"},{"location":"examples/the_farmers_problem/#Objective","page":"The farmer's problem","title":"Objective","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"The objective of the second stage is to maximise revenue from selling crops, less the cost of buying corn and wheat if necessary to meet the minimum quantity constraint.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"function second_stage_objective(subproblem)\n @stageobjective(\n subproblem,\n sum(\n SELL_IN_QUOTA[c] * subproblem[:sell_in_quota][c] +\n SELL_NO_QUOTA[c] * subproblem[:sell_no_quota][c] -\n BUY_PRICE[c] * subproblem[:buy][c] for c in CROPS\n )\n )\nend","category":"page"},{"location":"examples/the_farmers_problem/#Random-variables","page":"The farmer's problem","title":"Random variables","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"We first add a constraint that ties the yield of each crop to the area planted, using a placeholder coefficient of 1.0 on the incoming state variable. Then, in the SDDP.parameterize function, we set the coefficient using JuMP.set_normalized_coefficient.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"function second_stage_uncertainty(subproblem)\n @constraint(\n subproblem,\n uncertainty[c = CROPS],\n 1.0 * subproblem[:area][c].in == subproblem[:yield][c]\n )\n SDDP.parameterize(subproblem, [:good, :fair, :bad]) do ω\n for c in CROPS\n JuMP.set_normalized_coefficient(\n uncertainty[c],\n subproblem[:area][c].in,\n MEAN_YIELD[c] * YIELD_MULTIPLIER[ω],\n )\n end\n end\nend","category":"page"},{"location":"examples/the_farmers_problem/#Putting-it-all-together","page":"The farmer's problem","title":"Putting it all together","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"Now we're ready to build the multistage stochastic programming model. 
In addition to the things already discussed, we need a few extra pieces of information.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"First, we are maximizing, so we set sense = :Max. Second, we need to provide a valid upper bound. (See Choosing an initial bound for more on this.) We know from Birge and Louveaux that the optimal solution is $108,390. So, let's choose $500,000 just to be safe.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"Here is the full model.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"model = SDDP.LinearPolicyGraph(\n stages = 2,\n sense = :Max,\n upper_bound = 500_000.0,\n optimizer = HiGHS.Optimizer,\n) do subproblem, stage\n add_state_variables(subproblem)\n if stage == 1\n create_first_stage_problem(subproblem)\n else\n second_stage_variables(subproblem)\n second_stage_constraint_min_quantity(subproblem)\n second_stage_uncertainty(subproblem)\n second_stage_objective(subproblem)\n end\nend","category":"page"},{"location":"examples/the_farmers_problem/#Training-a-policy","page":"The farmer's problem","title":"Training a policy","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"Now that we've built a model, we need to train it using SDDP.train. The keyword iteration_limit stops the training after 20 iterations. See Choose a stopping rule for other ways to stop the training.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"SDDP.train(model; iteration_limit = 20)","category":"page"},{"location":"examples/the_farmers_problem/#Checking-the-policy","page":"The farmer's problem","title":"Checking the policy","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"Birge and Louveaux report that the optimal objective value is $108,390. Check that we got the correct solution using SDDP.calculate_bound:","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"@assert isapprox(SDDP.calculate_bound(model), 108_390.0, atol = 0.1)","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"This page was generated using Literate.jl.","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"EditURL = \"warnings.jl\"","category":"page"},{"location":"tutorial/warnings/#Words-of-warning","page":"Words of warning","title":"Words of warning","text":"","category":"section"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"(Image: ) (Image: )","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"SDDP is a powerful solution technique for multistage stochastic programming. 
However, there are a number of subtle things to be aware of before creating your own models.","category":"page"},{"location":"tutorial/warnings/#Relatively-complete-recourse","page":"Words of warning","title":"Relatively complete recourse","text":"","category":"section"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"Models built in SDDP.jl need a property called relatively complete recourse.","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"One definition of relatively complete recourse is that all feasible decisions (not necessarily optimal) in a subproblem lead to feasible decisions in future subproblems.","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"For example, in the following problem, one feasible first stage decision is x.out = 0. But this causes an infeasibility in the second stage which requires x.in >= 1. This will throw an error about infeasibility if you try to solve.","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"using SDDP, HiGHS\n\nmodel = SDDP.LinearPolicyGraph(\n stages = 2,\n lower_bound = 0,\n optimizer = HiGHS.Optimizer,\n) do sp, t\n @variable(sp, x >= 0, SDDP.State, initial_value = 1)\n if t == 2\n @constraint(sp, x.in >= 1)\n end\n @stageobjective(sp, x.out)\nend\n\ntry #hide\n SDDP.train(model, iteration_limit = 1, print_level = 0)\ncatch err #hide\n showerror(stderr, err) #hide\nend #hide","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"warning: Warning\nThe actual constraints causing the infeasibilities can be deceptive! A good strategy to debug is to comment out all constraints. Then, one-by-one, un-comment the constraints and try resolving the model to check if it finds a feasible solution.","category":"page"},{"location":"tutorial/warnings/#Numerical-stability","page":"Words of warning","title":"Numerical stability","text":"","category":"section"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"If you aren't aware, SDDP builds an outer-approximation to a convex function using cutting planes. This results in a formulation that is particularly hard for solvers like HiGHS, Gurobi, and CPLEX to deal with. As a result, you may run into weird behavior. This behavior could include:","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"Iterations suddenly taking a long time (the solver stalled)\nSubproblems turning infeasible or unbounded after many iterations\nSolvers returning \"Numerical Error\" statuses","category":"page"},{"location":"tutorial/warnings/#Problem-scaling","page":"Words of warning","title":"Problem scaling","text":"","category":"section"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"In almost all cases, the cause of this is poor problem scaling. 
For our purpose, poor problem scaling means having variables with very large numbers and variables with very small numbers in the same model.","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"tip: Tip\nGurobi has an excellent set of articles on numerical issues and how to avoid them.","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"Consider, for example, the hydro-thermal scheduling problem we have been discussing in previous tutorials.","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"If we define the volume of the reservoir in terms of m³, then a lake might have a capacity of 10^10 m³: @variable(subproblem, 0 <= volume <= 10^10). Moreover, the cost per cubic meter might be around $0.05/m³. To calculate the value of water in our reservoir, we need to multiply a variable on the order of 10^10 by one on the order of 10⁻²! That is twelve orders of magnitude!","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"To improve the performance of the SDDP algorithm (and reduce the chance of weird behavior), try to re-scale the units of the problem in order to reduce the largest difference in magnitude. For example, if we talk in terms of million m³, then we have a capacity of 10⁴ million m³, and a price of $50,000 per million m³. Now things are only one order of magnitude apart.","category":"page"},{"location":"tutorial/warnings/#Numerical-stability-report","page":"Words of warning","title":"Numerical stability report","text":"","category":"section"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"To aid in the diagnosis of numerical issues, you can call SDDP.numerical_stability_report. By default, this aggregates all of the nodes into a single report. You can produce a stability report for each node by passing by_node=true.","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"using SDDP\n\nmodel = SDDP.LinearPolicyGraph(stages = 2, lower_bound = -1e10) do subproblem, t\n @variable(subproblem, x >= -1e7, SDDP.State, initial_value = 1e-5)\n @constraint(subproblem, 1e9 * x.out >= 1e-6 * x.in + 1e-8)\n @stageobjective(subproblem, 1e9 * x.out)\nend\n\nSDDP.numerical_stability_report(model)","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"The report analyses the magnitude (in absolute terms) of the coefficients in the constraint matrix, the objective function, any variable bounds, and in the RHS of the constraints. A warning will be thrown if SDDP.jl detects very large or small values. 
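To locate which node triggers such a warning, you can request the per-node breakdown mentioned above; a minimal sketch, reusing the model just defined, is:\n\nSDDP.numerical_stability_report(model; by_node = true)\n\n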
As discussed in Problem scaling, this is an indication that you should reformulate your model.","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"By default, a numerical stability check is run when you call SDDP.train, although it can be turned off by passing run_numerical_stability_report = false.","category":"page"},{"location":"tutorial/warnings/#Solver-specific-options","page":"Words of warning","title":"Solver-specific options","text":"","category":"section"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"If you have a particularly troublesome model, you should investigate setting solver-specific options to improve the numerical stability of each solver. For example, Gurobi has a NumericFocus option.","category":"page"},{"location":"tutorial/warnings/#Choosing-an-initial-bound","page":"Words of warning","title":"Choosing an initial bound","text":"","category":"section"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"One of the important requirements when building a SDDP model is to choose an appropriate bound on the objective (lower if minimizing, upper if maximizing). However, it can be hard to choose a bound if you don't know the solution! (Which is very likely.)","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"The bound should not be as large as possible (since this will help with convergence and the numerical issues discussed above), but if chosen too small, it may cut off the feasible region and lead to a sub-optimal solution.","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"Consider the following simple model, where we first set lower_bound to 0.0.","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"using SDDP, HiGHS\n\nmodel = SDDP.LinearPolicyGraph(\n stages = 3,\n sense = :Min,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n) do subproblem, t\n @variable(subproblem, x >= 0, SDDP.State, initial_value = 2)\n @variable(subproblem, u >= 0)\n @variable(subproblem, v >= 0)\n @constraint(subproblem, x.out == x.in - u)\n @constraint(subproblem, u + v == 1.5)\n @stageobjective(subproblem, t * v)\nend\n\nSDDP.train(model, iteration_limit = 5, run_numerical_stability_report = false)","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"Now consider the case when we set the lower_bound to 10.0:","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"using SDDP, HiGHS\n\nmodel = SDDP.LinearPolicyGraph(\n stages = 3,\n sense = :Min,\n lower_bound = 10.0,\n optimizer = HiGHS.Optimizer,\n) do subproblem, t\n @variable(subproblem, x >= 0, SDDP.State, initial_value = 2)\n @variable(subproblem, u >= 0)\n @variable(subproblem, v >= 0)\n @constraint(subproblem, x.out == x.in - u)\n @constraint(subproblem, u + v == 1.5)\n @stageobjective(subproblem, t * v)\nend\n\nSDDP.train(model, iteration_limit = 5, run_numerical_stability_report = false)","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"How do we tell which is more appropriate? 
There are a few clues that you should look out for.","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"The bound converges to a value above (if minimizing) the simulated cost of the policy. In this case, the problem is deterministic, so it is easy to tell. But you can also check by performing a Monte Carlo simulation like we did in An introduction to SDDP.jl.\nThe bound converges to different values when we change the bound. This is another clear give-away. The bound provided by the user is only used in the initial iterations. It should not change the value of the converged policy. Thus, if you don't know an appropriate value for the bound, choose an initial value, and then increase (or decrease) the value of the bound to confirm that the value of the policy doesn't change.\nThe bound converges to a value close to the bound provided by the user. This varies between models, but notice that 11.0 is quite close to 10.0 compared with 3.5 and 0.0.","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"","category":"page"},{"location":"tutorial/warnings/","page":"Words of warning","title":"Words of warning","text":"This page was generated using Literate.jl.","category":"page"},{"location":"guides/add_a_multidimensional_state_variable/#Add-a-multi-dimensional-state-variable","page":"Add a multi-dimensional state variable","title":"Add a multi-dimensional state variable","text":"","category":"section"},{"location":"guides/add_a_multidimensional_state_variable/","page":"Add a multi-dimensional state variable","title":"Add a multi-dimensional state variable","text":"DocTestSetup = quote\n using SDDP, HiGHS\nend","category":"page"},{"location":"guides/add_a_multidimensional_state_variable/","page":"Add a multi-dimensional state variable","title":"Add a multi-dimensional state variable","text":"Just like normal JuMP variables, it is possible to create containers of state variables.","category":"page"},{"location":"guides/add_a_multidimensional_state_variable/","page":"Add a multi-dimensional state variable","title":"Add a multi-dimensional state variable","text":"julia> model = SDDP.LinearPolicyGraph(\n stages=1, lower_bound = 0, optimizer = HiGHS.Optimizer\n ) do subproblem, t\n # A scalar state variable.\n @variable(subproblem, x >= 0, SDDP.State, initial_value = 0)\n println(\"Lower bound of outgoing x is: \", JuMP.lower_bound(x.out))\n # A vector of state variables.\n @variable(subproblem, y[i = 1:2] >= i, SDDP.State, initial_value = i)\n println(\"Lower bound of outgoing y[1] is: \", JuMP.lower_bound(y[1].out))\n # A JuMP.Containers.DenseAxisArray of state variables.\n @variable(subproblem,\n z[i = 3:4, j = [:A, :B]] >= i, SDDP.State, initial_value = i)\n println(\"Lower bound of outgoing z[3, :B] is: \", JuMP.lower_bound(z[3, :B].out))\n end;\nLower bound of outgoing x is: 0.0\nLower bound of outgoing y[1] is: 1.0\nLower bound of outgoing z[3, :B] is: 3.0","category":"page"},{"location":"tutorial/objective_uncertainty/","page":"Uncertainty in the objective function","title":"Uncertainty in the objective function","text":"EditURL = \"objective_uncertainty.jl\"","category":"page"},{"location":"tutorial/objective_uncertainty/#Uncertainty-in-the-objective-function","page":"Uncertainty in the objective function","title":"Uncertainty in the objective function","text":"","category":"section"},{"location":"tutorial/objective_uncertainty/","page":"Uncertainty in the objective 
function","title":"Uncertainty in the objective function","text":"(Image: ) (Image: )","category":"page"},{"location":"tutorial/objective_uncertainty/","page":"Uncertainty in the objective function","title":"Uncertainty in the objective function","text":"In the previous tutorial, An introduction to SDDP.jl, we created a stochastic hydro-thermal scheduling model. In this tutorial, we extend the problem by adding uncertainty to the fuel costs.","category":"page"},{"location":"tutorial/objective_uncertainty/","page":"Uncertainty in the objective function","title":"Uncertainty in the objective function","text":"Previously, we assumed that the fuel cost was deterministic: $50/MWh in the first stage, $100/MWh in the second stage, and $150/MWh in the third stage. For this tutorial, we assume that in addition to these base costs, the actual fuel cost is correlated with the inflows.","category":"page"},{"location":"tutorial/objective_uncertainty/","page":"Uncertainty in the objective function","title":"Uncertainty in the objective function","text":"Our new model for the uncertainty is given by the following table:","category":"page"},{"location":"tutorial/objective_uncertainty/","page":"Uncertainty in the objective function","title":"Uncertainty in the objective function","text":"ω 1 2 3\nP(ω) 1/3 1/3 1/3\ninflow 0 50 100\nfuel multiplier 1.5 1.0 0.75","category":"page"},{"location":"tutorial/objective_uncertainty/","page":"Uncertainty in the objective function","title":"Uncertainty in the objective function","text":"In stage t, the objective is now to minimize:","category":"page"},{"location":"tutorial/objective_uncertainty/","page":"Uncertainty in the objective function","title":"Uncertainty in the objective function","text":"fuel_multiplier * fuel_cost[t] * thermal_generation","category":"page"},{"location":"tutorial/objective_uncertainty/#Creating-a-model","page":"Uncertainty in the objective function","title":"Creating a model","text":"","category":"section"},{"location":"tutorial/objective_uncertainty/","page":"Uncertainty in the objective function","title":"Uncertainty in the objective function","text":"To add an uncertain objective, we can simply call @stageobjective from inside the SDDP.parameterize function.","category":"page"},{"location":"tutorial/objective_uncertainty/","page":"Uncertainty in the objective function","title":"Uncertainty in the objective function","text":"using SDDP, HiGHS\n\nmodel = SDDP.LinearPolicyGraph(\n stages = 3,\n sense = :Min,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n) do subproblem, t\n # Define the state variable.\n @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n # Define the control variables.\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n inflow\n end)\n # Define the constraints\n @constraints(\n subproblem,\n begin\n volume.out == volume.in + inflow - hydro_generation - hydro_spill\n thermal_generation + hydro_generation == 150.0\n end\n )\n fuel_cost = [50.0, 100.0, 150.0]\n # Parameterize the subproblem.\n Ω = [\n (inflow = 0.0, fuel_multiplier = 1.5),\n (inflow = 50.0, fuel_multiplier = 1.0),\n (inflow = 100.0, fuel_multiplier = 0.75),\n ]\n SDDP.parameterize(subproblem, Ω, [1 / 3, 1 / 3, 1 / 3]) do ω\n JuMP.fix(inflow, ω.inflow)\n @stageobjective(\n subproblem,\n ω.fuel_multiplier * fuel_cost[t] * thermal_generation\n )\n end\nend","category":"page"},{"location":"tutorial/objective_uncertainty/#Training-and-simulating-the-policy","page":"Uncertainty in the 
objective function","title":"Training and simulating the policy","text":"","category":"section"},{"location":"tutorial/objective_uncertainty/","page":"Uncertainty in the objective function","title":"Uncertainty in the objective function","text":"As in the previous two tutorials, we train and simulate the policy:","category":"page"},{"location":"tutorial/objective_uncertainty/","page":"Uncertainty in the objective function","title":"Uncertainty in the objective function","text":"SDDP.train(model)\n\nsimulations = SDDP.simulate(model, 500)\n\nobjective_values =\n [sum(stage[:stage_objective] for stage in sim) for sim in simulations]\n\nusing Statistics\n\nμ = round(mean(objective_values), digits = 2)\nci = round(1.96 * std(objective_values) / sqrt(500), digits = 2)\n\nprintln(\"Confidence interval: \", μ, \" ± \", ci)\nprintln(\"Lower bound: \", round(SDDP.calculate_bound(model), digits = 2))","category":"page"},{"location":"tutorial/objective_uncertainty/","page":"Uncertainty in the objective function","title":"Uncertainty in the objective function","text":"","category":"page"},{"location":"tutorial/objective_uncertainty/","page":"Uncertainty in the objective function","title":"Uncertainty in the objective function","text":"This page was generated using Literate.jl.","category":"page"},{"location":"guides/add_a_risk_measure/#Add-a-risk-measure","page":"Add a risk measure","title":"Add a risk measure","text":"","category":"section"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"DocTestSetup = quote\n using SDDP, HiGHS\nend","category":"page"},{"location":"guides/add_a_risk_measure/#Training-a-risk-averse-model","page":"Add a risk measure","title":"Training a risk-averse model","text":"","category":"section"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.jl supports a variety of risk measures. Two common ones are SDDP.Expectation and SDDP.WorstCase. Let's see how to train a policy using them. There are three possible ways.","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"If the same risk measure is used at every node in the policy graph, we can just pass an instance of one of the risk measures to the risk_measure keyword argument of the SDDP.train function.","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.train(\n model,\n risk_measure = SDDP.WorstCase(),\n iteration_limit = 10\n)","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"However, if you want different risk measures at different nodes, there are two options. First, you can pass risk_measure a dictionary of risk measures, with one entry for each node. 
The keys of the dictionary are the indices of the nodes.","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.train(\n model,\n risk_measure = Dict(\n 1 => SDDP.Expectation(),\n 2 => SDDP.WorstCase()\n ),\n iteration_limit = 10\n)","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"An alternative method is to pass risk_measure a function that takes one argument, the index of a node, and returns an instance of a risk measure:","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.train(\n model,\n risk_measure = (node_index) -> begin\n if node_index == 1\n return SDDP.Expectation()\n else\n return SDDP.WorstCase()\n end\n end,\n iteration_limit = 10\n)","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"note: Note\nIf you simulate the policy, the simulated value is the risk-neutral value of the policy.","category":"page"},{"location":"guides/add_a_risk_measure/#Risk-measures","page":"Add a risk measure","title":"Risk measures","text":"","category":"section"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"To illustrate the risk-measures included in SDDP.jl, we consider a discrete random variable with four outcomes.","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"The random variable is supported on the values 1, 2, 3, and 4:","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"noise_supports = [1, 2, 3, 4]","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"The associated probability of each outcome is as follows:","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"nominal_probability = [0.1, 0.2, 0.3, 0.4]","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"With each outcome ω, the agent observes a cost Z(ω):","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"cost_realizations = [5.0, 4.0, 6.0, 2.0]","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"We assume that we are minimizing:","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"is_minimization = true","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"Finally, we create a vector that will be used to store the risk-adjusted probabilities:","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"risk_adjusted_probability = zeros(4)","category":"page"},{"location":"guides/add_a_risk_measure/#Expectation","page":"Add a risk measure","title":"Expectation","text":"","category":"section"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk 
measure","text":"SDDP.Expectation","category":"page"},{"location":"guides/add_a_risk_measure/#SDDP.Expectation","page":"Add a risk measure","title":"SDDP.Expectation","text":"Expectation()\n\nThe Expectation risk measure. Identical to taking the expectation with respect to the nominal distribution.\n\n\n\n\n\n","category":"type"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"using SDDP\nSDDP.adjust_probability(\n SDDP.Expectation(),\n risk_adjusted_probability,\n nominal_probability,\n noise_supports,\n cost_realizations,\n is_minimization\n)\nrisk_adjusted_probability","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.Expectation is the default risk measure in SDDP.jl.","category":"page"},{"location":"guides/add_a_risk_measure/#Worst-case","page":"Add a risk measure","title":"Worst-case","text":"","category":"section"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.WorstCase","category":"page"},{"location":"guides/add_a_risk_measure/#SDDP.WorstCase","page":"Add a risk measure","title":"SDDP.WorstCase","text":"WorstCase()\n\nThe worst-case risk measure. Places all of the probability weight on the worst outcome.\n\n\n\n\n\n","category":"type"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.adjust_probability(\n SDDP.WorstCase(),\n risk_adjusted_probability,\n nominal_probability,\n noise_supports,\n cost_realizations,\n is_minimization\n)\nrisk_adjusted_probability","category":"page"},{"location":"guides/add_a_risk_measure/#Average-value-at-risk-(AV@R)","page":"Add a risk measure","title":"Average value at risk (AV@R)","text":"","category":"section"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.AVaR","category":"page"},{"location":"guides/add_a_risk_measure/#SDDP.AVaR","page":"Add a risk measure","title":"SDDP.AVaR","text":"AVaR(β)\n\nThe average value at risk (AV@R) risk measure.\n\nComputes the expectation of the β fraction of worst outcomes. β must be in [0, 1]. When β=1, this is equivalent to the Expectation risk measure. When β=0, this is equivalent to the WorstCase risk measure.\n\nAV@R is also known as the conditional value at risk (CV@R) or expected shortfall.\n\n\n\n\n\n","category":"type"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.adjust_probability(\n SDDP.AVaR(0.5),\n risk_adjusted_probability,\n nominal_probability,\n noise_supports,\n cost_realizations,\n is_minimization\n)\nrisk_adjusted_probability","category":"page"},{"location":"guides/add_a_risk_measure/#Convex-combination-of-risk-measures","page":"Add a risk measure","title":"Convex combination of risk measures","text":"","category":"section"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"Using the axioms of coherent risk measures, it is easy to show that any convex combination of coherent risk measures is also a coherent risk measure. 
Convex combinations of risk measures can be created directly:","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"cvx_comb_measure = 0.5 * SDDP.Expectation() + 0.5 * SDDP.WorstCase()\nSDDP.adjust_probability(\n cvx_comb_measure,\n risk_adjusted_probability,\n nominal_probability,\n noise_supports,\n cost_realizations,\n is_minimization\n)\nrisk_adjusted_probability","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"As a special case, the SDDP.EAVaR risk-measure is a convex combination of SDDP.Expectation and SDDP.AVaR:","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.EAVaR(beta=0.25, lambda=0.4)","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.EAVaR","category":"page"},{"location":"guides/add_a_risk_measure/#SDDP.EAVaR","page":"Add a risk measure","title":"SDDP.EAVaR","text":"EAVaR(;lambda=1.0, beta=1.0)\n\nA risk measure that is a convex combination of Expectation and Average Value @ Risk (also called Conditional Value @ Risk).\n\n λ * E[x] + (1 - λ) * AV@R(β)[x]\n\nKeyword Arguments\n\nlambda: Convex weight on the expectation ((1-lambda) weight is put on the AV@R component. Inreasing values of lambda are less risk averse (more weight on expectation).\nbeta: The quantile at which to calculate the Average Value @ Risk. Increasing values of beta are less risk averse. If beta=0, then the AV@R component is the worst case risk measure.\n\n\n\n\n\n","category":"function"},{"location":"guides/add_a_risk_measure/#Distributionally-robust","page":"Add a risk measure","title":"Distributionally robust","text":"","category":"section"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.jl supports two types of distributionally robust risk measures: the modified Χ² method of Philpott et al. (2018), and a method based on the Wasserstein distance metric.","category":"page"},{"location":"guides/add_a_risk_measure/#Modified-Chi-squard","page":"Add a risk measure","title":"Modified Chi-squard","text":"","category":"section"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.ModifiedChiSquared","category":"page"},{"location":"guides/add_a_risk_measure/#SDDP.ModifiedChiSquared","page":"Add a risk measure","title":"SDDP.ModifiedChiSquared","text":"ModifiedChiSquared(radius::Float64; minimum_std=1e-5)\n\nThe distributionally robust SDDP risk measure of Philpott, A., de Matos, V., Kapelevich, L. Distributionally robust SDDP. Computational Management Science (2018) 165:431-454.\n\nExplanation\n\nIn a Distributionally Robust Optimization (DRO) approach, we modify the probabilities we associate with all future scenarios so that the resulting probability distribution is the \"worst case\" probability distribution, in some sense.\n\nIn each backward pass we will compute a worst case probability distribution vector p. We compute p so that:\n\np ∈ argmax p'z\n s.t. [r; p - a] in SecondOrderCone()\n sum(p) == 1\n p >= 0\n\nwhere\n\nz is a vector of future costs. We assume that our aim is to minimize future cost p'z. 
If we maximize reward, we would have p ∈ argmin{p'z}.\na is the uniform distribution\nr is a user specified radius - the larger the radius, the more conservative the policy.\n\nNotes\n\nThe largest radius that will work with S scenarios is sqrt((S-1)/S).\n\nIf the uncorrected standard deviation of the objecive realizations is less than minimum_std, then the risk-measure will default to Expectation().\n\nThis code was contributed by Lea Kapelevich.\n\n\n\n\n\n","category":"type"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.adjust_probability(\n SDDP.ModifiedChiSquared(0.5),\n risk_adjusted_probability,\n [0.25, 0.25, 0.25, 0.25],\n noise_supports,\n cost_realizations,\n is_minimization\n)\nrisk_adjusted_probability","category":"page"},{"location":"guides/add_a_risk_measure/#Wasserstein","page":"Add a risk measure","title":"Wasserstein","text":"","category":"section"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.Wasserstein","category":"page"},{"location":"guides/add_a_risk_measure/#SDDP.Wasserstein","page":"Add a risk measure","title":"SDDP.Wasserstein","text":"Wasserstein(norm::Function, solver_factory; alpha::Float64)\n\nA distributionally-robust risk measure based on the Wasserstein distance.\n\nAs alpha increases, the measure becomes more risk-averse. When alpha=0, the measure is equivalent to the expectation operator. As alpha increases, the measure approaches the Worst-case risk measure.\n\n\n\n\n\n","category":"type"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"import HiGHS\nSDDP.adjust_probability(\n SDDP.Wasserstein(HiGHS.Optimizer; alpha=0.5) do x, y\n return abs(x - y)\n end,\n risk_adjusted_probability,\n nominal_probability,\n noise_supports,\n cost_realizations,\n is_minimization\n)\nrisk_adjusted_probability","category":"page"},{"location":"guides/add_a_risk_measure/#Entropic","page":"Add a risk measure","title":"Entropic","text":"","category":"section"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.Entropic","category":"page"},{"location":"guides/add_a_risk_measure/#SDDP.Entropic","page":"Add a risk measure","title":"SDDP.Entropic","text":"Entropic(γ::Float64)\n\nThe entropic risk measure as described by:\n\nDowson, O., Morton, D.P. & Pagnoncelli, B.K. Incorporating convex risk\nmeasures into multistage stochastic programming algorithms. Annals of\nOperations Research (2022). 
[doi](https://doi.org/10.1007/s10479-022-04977-w).\n\nAs γ increases, the measure becomes more risk-averse.\n\n\n\n\n\n","category":"type"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.adjust_probability(\n SDDP.Entropic(0.1),\n risk_adjusted_probability,\n nominal_probability,\n noise_supports,\n cost_realizations,\n is_minimization\n)\nrisk_adjusted_probability","category":"page"},{"location":"examples/infinite_horizon_trivial/","page":"Infinite horizon trivial","title":"Infinite horizon trivial","text":"EditURL = \"infinite_horizon_trivial.jl\"","category":"page"},{"location":"examples/infinite_horizon_trivial/#Infinite-horizon-trivial","page":"Infinite horizon trivial","title":"Infinite horizon trivial","text":"","category":"section"},{"location":"examples/infinite_horizon_trivial/","page":"Infinite horizon trivial","title":"Infinite horizon trivial","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/infinite_horizon_trivial/","page":"Infinite horizon trivial","title":"Infinite horizon trivial","text":"using SDDP, HiGHS, Test\n\nfunction infinite_trivial()\n graph = SDDP.Graph(\n :root_node,\n [:week],\n [(:root_node => :week, 1.0), (:week => :week, 0.9)],\n )\n model = SDDP.PolicyGraph(\n graph,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n ) do subproblem, node\n @variable(subproblem, state, SDDP.State, initial_value = 0)\n @constraint(subproblem, state.in == state.out)\n @stageobjective(subproblem, 2.0)\n end\n SDDP.train(model; log_frequency = 10)\n @test SDDP.calculate_bound(model) ≈ 2.0 / (1 - 0.9) atol = 1e-3\n return\nend\n\ninfinite_trivial()","category":"page"},{"location":"examples/infinite_horizon_trivial/","page":"Infinite horizon trivial","title":"Infinite horizon trivial","text":"","category":"page"},{"location":"examples/infinite_horizon_trivial/","page":"Infinite horizon trivial","title":"Infinite horizon trivial","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/air_conditioning/","page":"Air conditioning","title":"Air conditioning","text":"EditURL = \"air_conditioning.jl\"","category":"page"},{"location":"examples/air_conditioning/#Air-conditioning","page":"Air conditioning","title":"Air conditioning","text":"","category":"section"},{"location":"examples/air_conditioning/","page":"Air conditioning","title":"Air conditioning","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/air_conditioning/","page":"Air conditioning","title":"Air conditioning","text":"Taken from Anthony Papavasiliou's notes on SDDP","category":"page"},{"location":"examples/air_conditioning/","page":"Air conditioning","title":"Air conditioning","text":"Consider the following problem","category":"page"},{"location":"examples/air_conditioning/","page":"Air conditioning","title":"Air conditioning","text":"Produce air conditioners for 3 months\n200 units/month at 100 $/unit\nOvertime costs 300 $/unit\nKnown demand of 100 units for period 1\nEqually likely demand, 100 or 300 units, for periods 2, 3\nStorage cost is 50 $/unit\nAll demand must be met","category":"page"},{"location":"examples/air_conditioning/","page":"Air conditioning","title":"Air conditioning","text":"The known optimal solution is $62,500","category":"page"},{"location":"examples/air_conditioning/","page":"Air conditioning","title":"Air conditioning","text":"using SDDP, HiGHS, Test\n\nfunction air_conditioning_model(duality_handler)\n model = SDDP.LinearPolicyGraph(\n stages = 3,\n 
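# A valid lower bound on the optimal cost is required; every cost\n        # coefficient in this model is nonnegative, so 0.0 is safe.\n        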
lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n ) do sp, stage\n @variable(\n sp,\n 0 <= stored_production <= 100,\n Int,\n SDDP.State,\n initial_value = 0\n )\n @variable(sp, 0 <= production <= 200, Int)\n @variable(sp, overtime >= 0, Int)\n @variable(sp, demand)\n DEMAND = [[100.0], [100.0, 300.0], [100.0, 300.0]]\n SDDP.parameterize(ω -> JuMP.fix(demand, ω), sp, DEMAND[stage])\n @constraint(\n sp,\n stored_production.out ==\n stored_production.in + production + overtime - demand\n )\n @stageobjective(\n sp,\n 100 * production + 300 * overtime + 50 * stored_production.out\n )\n end\n SDDP.train(model; duality_handler = duality_handler)\n @test isapprox(SDDP.calculate_bound(model), 62_500.0, atol = 0.1)\n return\nend\n\nfor duality_handler in [SDDP.LagrangianDuality(), SDDP.ContinuousConicDuality()]\n air_conditioning_model(duality_handler)\nend","category":"page"},{"location":"examples/air_conditioning/","page":"Air conditioning","title":"Air conditioning","text":"","category":"page"},{"location":"examples/air_conditioning/","page":"Air conditioning","title":"Air conditioning","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/sldp_example_two/","page":"SLDP: example 2","title":"SLDP: example 2","text":"EditURL = \"sldp_example_two.jl\"","category":"page"},{"location":"examples/sldp_example_two/#SLDP:-example-2","page":"SLDP: example 2","title":"SLDP: example 2","text":"","category":"section"},{"location":"examples/sldp_example_two/","page":"SLDP: example 2","title":"SLDP: example 2","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/sldp_example_two/","page":"SLDP: example 2","title":"SLDP: example 2","text":"This example is derived from Section 4.3 of the paper: Ahmed, S., Cabral, F. G., & da Costa, B. F. P. (2019). Stochastic Lipschitz Dynamic Programming. Optimization Online. 
PDF","category":"page"},{"location":"examples/sldp_example_two/","page":"SLDP: example 2","title":"SLDP: example 2","text":"using SDDP\nimport HiGHS\nimport Test\n\nfunction sldp_example_two(; first_stage_integer::Bool = true, N = 2)\n model = SDDP.LinearPolicyGraph(\n stages = 2,\n lower_bound = -100.0,\n optimizer = HiGHS.Optimizer,\n ) do sp, t\n @variable(sp, 0 <= x[1:2] <= 5, SDDP.State, initial_value = 0.0)\n if t == 1\n if first_stage_integer\n @variable(sp, 0 <= u[1:2] <= 5, Int)\n @constraint(sp, [i = 1:2], u[i] == x[i].out)\n end\n @stageobjective(sp, -1.5 * x[1].out - 4 * x[2].out)\n else\n @variable(sp, 0 <= y[1:4] <= 1, Bin)\n @variable(sp, ω[1:2])\n @stageobjective(sp, -16 * y[1] - 19 * y[2] - 23 * y[3] - 28 * y[4])\n @constraint(\n sp,\n 2 * y[1] + 3 * y[2] + 4 * y[3] + 5 * y[4] <= ω[1] - x[1].in\n )\n @constraint(\n sp,\n 6 * y[1] + 1 * y[2] + 3 * y[3] + 2 * y[4] <= ω[2] - x[2].in\n )\n steps = range(5, stop = 15, length = N)\n SDDP.parameterize(sp, [[i, j] for i in steps for j in steps]) do φ\n return JuMP.fix.(ω, φ)\n end\n end\n end\n if get(ARGS, 1, \"\") == \"--write\"\n # Run `$ julia sldp_example_two.jl --write` to update the benchmark\n # model directory\n model_dir = joinpath(@__DIR__, \"..\", \"..\", \"..\", \"benchmarks\", \"models\")\n SDDP.write_to_file(\n model,\n joinpath(model_dir, \"sldp_example_two_$(N).sof.json.gz\");\n test_scenarios = 30,\n )\n return\n end\n SDDP.train(model; log_frequency = 10)\n bound = SDDP.calculate_bound(model)\n\n if N == 2\n Test.@test bound <= -57.0\n elseif N == 3\n Test.@test bound <= -59.33\n elseif N == 6\n Test.@test bound <= -61.22\n end\n return\nend\n\nsldp_example_two(N = 2)\nsldp_example_two(N = 3)\nsldp_example_two(N = 6)","category":"page"},{"location":"examples/sldp_example_two/","page":"SLDP: example 2","title":"SLDP: example 2","text":"","category":"page"},{"location":"examples/sldp_example_two/","page":"SLDP: example 2","title":"SLDP: example 2","text":"This page was generated using Literate.jl.","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"EditURL = \"objective_states.jl\"","category":"page"},{"location":"tutorial/objective_states/#Objective-states","page":"Objective states","title":"Objective states","text":"","category":"section"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"(Image: ) (Image: )","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"There are many applications in which we want to model a price process that follows some auto-regressive process. 
Common examples include stock prices on financial exchanges and spot-prices in energy markets.","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"However, it is well known that these cannot be incorporated into SDDP because they result in cost-to-go functions that are convex with respect to some state variables (e.g., the reservoir levels) and concave with respect to other state variables (e.g., the spot price in the current stage).","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"To overcome this problem, the approach in the literature has been to discretize the price process in order to model it using a Markovian policy graph like those discussed in Markovian policy graphs.","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"However, recent work offers a way to include stagewise-dependent objective uncertainty into the objective function of SDDP subproblems. Readers are directed to the following works for an introduction:","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"Downward, A., Dowson, O., and Baucke, R. (2017). Stochastic dual dynamic programming with stagewise dependent objective uncertainty. Optimization Online. link\nDowson, O. PhD Thesis. University of Auckland, 2018. link","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"The method discussed in the above works introduces the concept of an objective state into SDDP. Unlike normal state variables in SDDP (e.g., the volume of water in the reservoir), the cost-to-go function is concave with respect to the objective states. Thus, the method builds an outer approximation of the cost-to-go function in the normal state-space, and an inner approximation of the cost-to-go function in the objective state-space.","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"warning: Warning\nSupport for objective states in SDDP.jl is experimental. Models are considerably more computationally intensive, the interface is less user-friendly, and there are subtle gotchas to be aware of. Only use this if you have read and understood the theory behind the method.","category":"page"},{"location":"tutorial/objective_states/#One-dimensional-objective-states","page":"Objective states","title":"One-dimensional objective states","text":"","category":"section"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"Let's assume that the fuel cost is not fixed, but instead evolves according to a multiplicative auto-regressive process: fuel_cost[t] = ω * fuel_cost[t-1], where ω is drawn from the sample space [0.75, 0.9, 1.1, 1.25] with equal probability.","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"An objective state can be added to a subproblem using the SDDP.add_objective_state function. This can only be called once per subproblem. If you want to add a multi-dimensional objective state, read Multi-dimensional objective states. SDDP.add_objective_state takes a number of keyword arguments. 
The two required ones are","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"initial_value: the value of the objective state at the root node of the policy graph (i.e., identical to the initial_value when defining normal state variables.\nlipschitz: the Lipschitz constant of the cost-to-go function with respect to the objective state. In other words, this value is the maximum change in the cost-to-go function at any point in the state space, given a one-unit change in the objective state.","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"There are also two optional keyword arguments: lower_bound and upper_bound, which give SDDP.jl hints (importantly, not constraints) about the domain of the objective state. Setting these bounds appropriately can improve the speed of convergence.","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"Finally, SDDP.add_objective_state requires an update function. This function takes two arguments. The first is the incoming value of the objective state, and the second is the realization of the stagewise-independent noise term (set using SDDP.parameterize). The function should return the value of the objective state to be used in the current subproblem.","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"This connection with the stagewise-independent noise term means that SDDP.parameterize must be called in a subproblem that defines an objective state. Inside SDDP.parameterize, the value of the objective state to be used in the current subproblem (i.e., after the update function), can be queried using SDDP.objective_state.","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"Here is the full model with the objective state.","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"using SDDP, HiGHS\n\nmodel = SDDP.LinearPolicyGraph(\n stages = 3,\n sense = :Min,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n) do subproblem, t\n @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n inflow\n end)\n @constraints(\n subproblem,\n begin\n volume.out == volume.in + inflow - hydro_generation - hydro_spill\n demand_constraint, thermal_generation + hydro_generation == 150.0\n end\n )\n\n # Add an objective state. 
ω will be the same value that is called in\n # `SDDP.parameterize`.\n\n SDDP.add_objective_state(\n subproblem,\n initial_value = 50.0,\n lipschitz = 10_000.0,\n lower_bound = 50.0,\n upper_bound = 150.0,\n ) do fuel_cost, ω\n return ω.fuel * fuel_cost\n end\n\n # Create the cartesian product of a multi-dimensional random variable.\n\n Ω = [\n (fuel = f, inflow = w) for f in [0.75, 0.9, 1.1, 1.25] for\n w in [0.0, 50.0, 100.0]\n ]\n\n SDDP.parameterize(subproblem, Ω) do ω\n # Query the current fuel cost.\n fuel_cost = SDDP.objective_state(subproblem)\n @stageobjective(subproblem, fuel_cost * thermal_generation)\n return JuMP.fix(inflow, ω.inflow)\n end\nend","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"After creating our model, we can train and simulate as usual.","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"SDDP.train(model; run_numerical_stability_report = false)\n\nsimulations = SDDP.simulate(model, 1)\n\nprint(\"Finished training and simulating.\")","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"To demonstrate how the objective states are updated, consider the sequence of noise observations:","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"[stage[:noise_term] for stage in simulations[1]]","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"This, the fuel cost in the first stage should be 0.75 * 50 = 37.5. The fuel cost in the second stage should be 1.1 * 37.5 = 41.25. The fuel cost in the third stage should be 0.75 * 41.25 = 30.9375.","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"To confirm this, the values of the objective state in a simulation can be queried using the :objective_state key.","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"[stage[:objective_state] for stage in simulations[1]]","category":"page"},{"location":"tutorial/objective_states/#Multi-dimensional-objective-states","page":"Objective states","title":"Multi-dimensional objective states","text":"","category":"section"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"You can construct multi-dimensional price processes using NTuples. Just replace every scalar value associated with the objective state by a tuple. 
For example, initial_value = 1.0 becomes initial_value = (1.0, 2.0).","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"Here is an example:","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"model = SDDP.LinearPolicyGraph(\n stages = 3,\n sense = :Min,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n) do subproblem, t\n @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n inflow\n end)\n @constraints(\n subproblem,\n begin\n volume.out == volume.in + inflow - hydro_generation - hydro_spill\n demand_constraint, thermal_generation + hydro_generation == 150.0\n end\n )\n\n SDDP.add_objective_state(\n subproblem,\n initial_value = (50.0, 50.0),\n lipschitz = (10_000.0, 10_000.0),\n lower_bound = (50.0, 50.0),\n upper_bound = (150.0, 150.0),\n ) do fuel_cost, ω\n # fuel_cost is a tuple, containing the (fuel_cost[t-1], fuel_cost[t-2])\n # This function returns a new tuple containing\n # (fuel_cost[t], fuel_cost[t-1]). Thus, we need to compute the new\n # cost:\n new_cost = fuel_cost[1] + 0.5 * (fuel_cost[1] - fuel_cost[2]) + ω.fuel\n # And then return the appropriate tuple:\n return (new_cost, fuel_cost[1])\n end\n\n Ω = [\n (fuel = f, inflow = w) for f in [-10.0, -5.0, 5.0, 10.0] for\n w in [0.0, 50.0, 100.0]\n ]\n\n SDDP.parameterize(subproblem, Ω) do ω\n fuel_cost, _ = SDDP.objective_state(subproblem)\n @stageobjective(subproblem, fuel_cost * thermal_generation)\n return JuMP.fix(inflow, ω.inflow)\n end\nend\n\nSDDP.train(model; run_numerical_stability_report = false)\n\nsimulations = SDDP.simulate(model, 1)\n\nprint(\"Finished training and simulating.\")","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"This time, since our objective state is two-dimensional, the objective states are tuples with two elements:","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"[stage[:objective_state] for stage in simulations[1]]","category":"page"},{"location":"tutorial/objective_states/#objective_state_warnings","page":"Objective states","title":"Warnings","text":"","category":"section"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"There are number of things to be aware of when using objective states.","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"The key assumption is that price is independent of the states and actions in the model.\nThat means that the price cannot appear in any @constraints. Nor can you use any @variables in the update function.\nChoosing an appropriate Lipschitz constant is difficult.\nThe points discussed in Choosing an initial bound are relevant. The Lipschitz constant should not be chosen as large as possible (since this will help with convergence and the numerical issues discussed above), but if chosen to small, it may cut of the feasible region and lead to a sub-optimal solution.\nYou need to ensure that the cost-to-go function is concave with respect to the objective state before the update.\nIf the update function is linear, this is always the case. 
In some situations, the update function can be nonlinear (e.g., multiplicative as we have above). In general, placing constraints on the price (e.g., clamp(price, 0, 1)) will destroy concavity. Caveat emptor. It's up to you if this is a problem. If it isn't you'll get a good heuristic with no guarantee of global optimality.","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"","category":"page"},{"location":"tutorial/objective_states/","page":"Objective states","title":"Objective states","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/air_conditioning_forward/","page":"Training with a different forward model","title":"Training with a different forward model","text":"EditURL = \"air_conditioning_forward.jl\"","category":"page"},{"location":"examples/air_conditioning_forward/#Training-with-a-different-forward-model","page":"Training with a different forward model","title":"Training with a different forward model","text":"","category":"section"},{"location":"examples/air_conditioning_forward/","page":"Training with a different forward model","title":"Training with a different forward model","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/air_conditioning_forward/","page":"Training with a different forward model","title":"Training with a different forward model","text":"using SDDP\nimport HiGHS\nimport Test\n\nfunction create_air_conditioning_model(; convex::Bool)\n return SDDP.LinearPolicyGraph(\n stages = 3,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n ) do sp, t\n @variable(sp, 0 <= x <= 100, SDDP.State, initial_value = 0)\n @variable(sp, 0 <= u_production <= 200)\n @variable(sp, u_overtime >= 0)\n if !convex\n set_integer(x.out)\n set_integer(u_production)\n set_integer(u_overtime)\n end\n @constraint(sp, demand, x.in - x.out + u_production + u_overtime == 0)\n Ω = [[100.0], [100.0, 300.0], [100.0, 300.0]]\n SDDP.parameterize(ω -> JuMP.set_normalized_rhs(demand, ω), sp, Ω[t])\n @stageobjective(sp, 100 * u_production + 300 * u_overtime + 50 * x.out)\n end\nend\n\nconvex = create_air_conditioning_model(; convex = true)\nnon_convex = create_air_conditioning_model(; convex = false)\nSDDP.train(\n convex;\n forward_pass = SDDP.AlternativeForwardPass(non_convex),\n post_iteration_callback = SDDP.AlternativePostIterationCallback(non_convex),\n iteration_limit = 10,\n)\nTest.@test isapprox(SDDP.calculate_bound(non_convex), 62_500.0, atol = 0.1)\nTest.@test isapprox(SDDP.calculate_bound(convex), 62_500.0, atol = 0.1)","category":"page"},{"location":"examples/air_conditioning_forward/","page":"Training with a different forward model","title":"Training with a different forward model","text":"","category":"page"},{"location":"examples/air_conditioning_forward/","page":"Training with a different forward model","title":"Training with a different forward model","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/objective_state_newsvendor/","page":"Newsvendor","title":"Newsvendor","text":"EditURL = \"objective_state_newsvendor.jl\"","category":"page"},{"location":"examples/objective_state_newsvendor/#Newsvendor","page":"Newsvendor","title":"Newsvendor","text":"","category":"section"},{"location":"examples/objective_state_newsvendor/","page":"Newsvendor","title":"Newsvendor","text":"(Image: ) (Image: 
)","category":"page"},{"location":"examples/objective_state_newsvendor/","page":"Newsvendor","title":"Newsvendor","text":"This example is based on the classical newsvendor problem, but features an AR(1) spot-price.","category":"page"},{"location":"examples/objective_state_newsvendor/","page":"Newsvendor","title":"Newsvendor","text":" V(x[t-1], ω[t]) = max p[t] × u[t]\n subject to x[t] = x[t-1] - u[t] + ω[t]\n u[t] ∈ [0, 1]\n x[t] ≥ 0\n p[t] = p[t-1] + ϕ[t]","category":"page"},{"location":"examples/objective_state_newsvendor/","page":"Newsvendor","title":"Newsvendor","text":"The initial conditions are","category":"page"},{"location":"examples/objective_state_newsvendor/","page":"Newsvendor","title":"Newsvendor","text":"x[0] = 2.0\np[0] = 1.5\nω[t] ~ {0, 0.05, 0.10, ..., 0.45, 0.5} with uniform probability.\nϕ[t] ~ {-0.25, -0.125, 0.125, 0.25} with uniform probability.","category":"page"},{"location":"examples/objective_state_newsvendor/","page":"Newsvendor","title":"Newsvendor","text":"using SDDP, HiGHS, Statistics, Test\n\nfunction joint_distribution(; kwargs...)\n names = tuple([first(kw) for kw in kwargs]...)\n values = tuple([last(kw) for kw in kwargs]...)\n output_type = NamedTuple{names,Tuple{eltype.(values)...}}\n distribution = map(output_type, Base.product(values...))\n return distribution[:]\nend\n\nfunction newsvendor_example(; cut_type)\n model = SDDP.PolicyGraph(\n SDDP.LinearGraph(3),\n sense = :Max,\n upper_bound = 50.0,\n optimizer = HiGHS.Optimizer,\n ) do subproblem, stage\n @variables(subproblem, begin\n x >= 0, (SDDP.State, initial_value = 2)\n 0 <= u <= 1\n w\n end)\n @constraint(subproblem, x.out == x.in - u + w)\n SDDP.add_objective_state(\n subproblem,\n initial_value = 1.5,\n lower_bound = 0.75,\n upper_bound = 2.25,\n lipschitz = 100.0,\n ) do y, ω\n return y + ω.price_noise\n end\n noise_terms = joint_distribution(\n demand = 0:0.05:0.5,\n price_noise = [-0.25, -0.125, 0.125, 0.25],\n )\n SDDP.parameterize(subproblem, noise_terms) do ω\n JuMP.fix(w, ω.demand)\n price = SDDP.objective_state(subproblem)\n @stageobjective(subproblem, price * u)\n end\n end\n SDDP.train(\n model;\n log_frequency = 10,\n time_limit = 20.0,\n cut_type = cut_type,\n )\n @test SDDP.calculate_bound(model) ≈ 4.04 atol = 0.05\n results = SDDP.simulate(model, 500)\n objectives =\n [sum(s[:stage_objective] for s in simulation) for simulation in results]\n @test round(Statistics.mean(objectives); digits = 2) ≈ 4.04 atol = 0.1\n return\nend\n\nnewsvendor_example(cut_type = SDDP.SINGLE_CUT)\nnewsvendor_example(cut_type = SDDP.MULTI_CUT)","category":"page"},{"location":"examples/objective_state_newsvendor/","page":"Newsvendor","title":"Newsvendor","text":"","category":"page"},{"location":"examples/objective_state_newsvendor/","page":"Newsvendor","title":"Newsvendor","text":"This page was generated using Literate.jl.","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"EditURL = \"arma.jl\"","category":"page"},{"location":"tutorial/arma/#Auto-regressive-stochastic-processes","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"","category":"section"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"(Image: ) (Image: )","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"SDDP.jl 
assumes that the random variable in each node is independent of the random variables in all other nodes. However, a common request is to model the random variables by some auto-regressive process.","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"There are two ways to do this:","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"model the random variable as a Markov chain\nuse the \"state-space expansion\" trick","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"info: Info\nThis tutorial is in the context of a hydro-thermal scheduling example, but it should be apparent how the ideas transfer to other applications.","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"using SDDP\nimport HiGHS","category":"page"},{"location":"tutorial/arma/#state-space-expansion","page":"Auto-regressive stochastic processes","title":"The state-space expansion trick","text":"","category":"section"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"In An introduction to SDDP.jl, we assumed that the inflows were stagewise-independent. However, in many cases this is not correct, and inflow models are more accurately described by an auto-regressive process such as:","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"inflow_t = inflow_t-1 + varepsilon","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"Here varepsilon is a random variable, and the inflow in stage t is the inflow in stage t-1 plus varepsilon (which might be negative).","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"For simplicity, we omit any coefficients and other terms, but this could easily be extended to a model like","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"inflow_t = a times inflow_t-1 + b + varepsilon","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"In practice, you can estimate a distribution for varepsilon by fitting the chosen statistical model to historical data, and then using the empirical residuals.","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"To implement the auto-regressive model in SDDP.jl, we introduce inflow as a state variable.","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"tip: Tip\nOur rule of thumb for \"when is something a state variable?\" is: if you need the value of a variable from a previous stage to compute something in stage t, then that variable is a state variable.","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic 
processes","title":"Auto-regressive stochastic processes","text":"model = SDDP.LinearPolicyGraph(\n stages = 3,\n sense = :Min,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n) do sp, t\n @variable(sp, 0 <= x <= 200, SDDP.State, initial_value = 200)\n @variable(sp, g_t >= 0)\n @variable(sp, g_h >= 0)\n @variable(sp, s >= 0)\n @constraint(sp, g_h + g_t == 150)\n c = [50, 100, 150]\n @stageobjective(sp, c[t] * g_t)\n # =========================================================================\n # New stuff below Here\n # Add inflow as a state\n @variable(sp, inflow, SDDP.State, initial_value = 50.0)\n # Add the random variable as a control variable\n @variable(sp, ε)\n # The equation describing our statistical model\n @constraint(sp, inflow.out == inflow.in + ε)\n # The new water balance constraint using the state variable\n @constraint(sp, x.out == x.in - g_h - s + inflow.out)\n # Assume we have some empirical residuals:\n Ω = [-10.0, 0.1, 9.6]\n SDDP.parameterize(sp, Ω) do ω\n return JuMP.fix(ε, ω)\n end\nend","category":"page"},{"location":"tutorial/arma/#When-can-this-trick-be-used?","page":"Auto-regressive stochastic processes","title":"When can this trick be used?","text":"","category":"section"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"The state-space expansion trick should be used when:","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"The random variable appears additively in the objective or in the constraints. Something like inflow * decision_variable will not work.\nThe statistical model is linear, or can be written using the JuMP @constraint macro.\nThe dimension of the random variable is small (see Vector auto-regressive models for the multi-variate case).","category":"page"},{"location":"tutorial/arma/#The-Markov-chain-approach","page":"Auto-regressive stochastic processes","title":"The Markov chain approach","text":"","category":"section"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"In the Markov chain approach, we model the stochastic process for inflow by a discrete Markov chain. Markov chains are nodes with transition probabilities between the nodes. SDDP.jl has good support for solving problems in which the uncertainty is formulated as a Markov chain.","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"The first step of the Markov chain approach is to write a function which simulates the stochastic process. 
Here is a simulator for our inflow model:","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"function simulator()\n inflow = zeros(3)\n current = 50.0\n Ω = [-10.0, 0.1, 9.6]\n for t in 1:3\n current += rand(Ω)\n inflow[t] = current\n end\n return inflow\nend","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"When called with no arguments, it produces a vector of inflows:","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"simulator()","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"warning: Warning\nThe simulator must return a Vector{Float64}, so it is limited to a uni-variate random variable. It is possible to do something similar for multi-variate random variable, but you'll have to manually construct the Markov transition matrix, and solution times scale poorly, even in the two-dimensional case.","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"The next step is to call SDDP.MarkovianGraph with our simulator. This function will attempt to fit a Markov chain to the stochastic process produced by your simulator. There are two key arguments:","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"budget is the total number of nodes we want in the Markov chain\nscenarios is a limit on the number of times we can call simulator","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"graph = SDDP.MarkovianGraph(simulator; budget = 8, scenarios = 30)","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"Here we can see we have created a MarkovianGraph with nodes like (2, 59.7). 
The first element of each node is the stage, and the second element is the inflow.","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"Create a SDDP.PolicyGraph using graph as follows:","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"model = SDDP.PolicyGraph(\n graph, # <--- New stuff\n sense = :Min,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n) do sp, node\n t, inflow = node # <--- New stuff\n @variable(sp, 0 <= x <= 200, SDDP.State, initial_value = 200)\n @variable(sp, g_t >= 0)\n @variable(sp, g_h >= 0)\n @variable(sp, s >= 0)\n @constraint(sp, g_h + g_t == 150)\n c = [50, 100, 150]\n @stageobjective(sp, c[t] * g_t)\n # The new water balance constraint using the node:\n @constraint(sp, x.out == x.in - g_h - s + inflow)\nend","category":"page"},{"location":"tutorial/arma/#When-can-this-trick-be-used?-2","page":"Auto-regressive stochastic processes","title":"When can this trick be used?","text":"","category":"section"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"The Markov chain approach should be used when:","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"The random variable is uni-variate\nThe random variable appears in the objective function or as a variable coefficient in the constraint matrix\nIt's non-trivial to write the stochastic process as a series of constraints (for example, it uses nonlinear terms)\nThe number of nodes is modest (for example, a budget of hundreds, up to perhaps 1000)","category":"page"},{"location":"tutorial/arma/#Vector-auto-regressive-models","page":"Auto-regressive stochastic processes","title":"Vector auto-regressive models","text":"","category":"section"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"The state-space expansion section assumed that the random variable was uni-variate. However, the approach naturally extends to vector auto-regressive models. 
For example, if inflow is a 2-dimensional vector, then we can model a vector auto-regressive model to it as follows:","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"inflow_t = A times inflow_t-1 + b + varepsilon","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"Here A is a 2-by-2 matrix, and b and varepsilon are 2-by-1 vectors.","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"model = SDDP.LinearPolicyGraph(\n stages = 3,\n sense = :Min,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n) do sp, t\n @variable(sp, 0 <= x <= 200, SDDP.State, initial_value = 200)\n @variable(sp, g_t >= 0)\n @variable(sp, g_h >= 0)\n @variable(sp, s >= 0)\n @constraint(sp, g_h + g_t == 150)\n c = [50, 100, 150]\n @stageobjective(sp, c[t] * g_t)\n # =========================================================================\n # New stuff below Here\n # Add inflow as a state\n @variable(sp, inflow[1:2], SDDP.State, initial_value = 50.0)\n # Add the random variable as a control variable\n @variable(sp, ε[1:2])\n # The equation describing our statistical model\n A = [0.8 0.2; 0.2 0.8]\n @constraint(\n sp,\n [i = 1:2],\n inflow[i].out == sum(A[i, j] * inflow[j].in for j in 1:2) + ε[i],\n )\n # The new water balance constraint using the state variable\n @constraint(sp, x.out == x.in - g_h - s + inflow[1].out + inflow[2].out)\n # Assume we have some empirical residuals:\n Ω₁ = [-10.0, 0.1, 9.6]\n Ω₂ = [-10.0, 0.1, 9.6]\n Ω = [(ω₁, ω₂) for ω₁ in Ω₁ for ω₂ in Ω₂]\n SDDP.parameterize(sp, Ω) do ω\n JuMP.fix(ε[1], ω[1])\n JuMP.fix(ε[2], ω[2])\n return\n end\nend","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"","category":"page"},{"location":"tutorial/arma/","page":"Auto-regressive stochastic processes","title":"Auto-regressive stochastic processes","text":"This page was generated using Literate.jl.","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"EditURL = \"mdps.jl\"","category":"page"},{"location":"tutorial/mdps/#Example:-Markov-Decision-Processes","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"","category":"section"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"(Image: ) (Image: )","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"SDDP.jl can be used to solve a variety of Markov Decision processes. If the problem has continuous state and control spaces, and the objective and transition function are convex, then SDDP.jl can find a globally optimal policy. 
In other cases, SDDP.jl will find a locally optimal policy.","category":"page"},{"location":"tutorial/mdps/#A-simple-example","page":"Example: Markov Decision Processes","title":"A simple example","text":"","category":"section"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"A simple demonstration of this is the example taken from page 98 of the book \"Markov Decision Processes: Discrete stochastic Dynamic Programming\", by Martin L. Putterman.","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"The example, as described in Section 4.6.3 of the book, is to minimize a sum of squares of N non-negative variables, subject to a budget constraint that the variable values add up to M. Put mathematically, that is:","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"beginaligned\nmin sumlimits_i=1^N x_i^2 \nst sumlimits_i=1^N x_i = M \n x_i ge 0 quad i in 1ldotsN\nendaligned","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"The optimal objective value is M^2N, and the optimal solution is x_i = M N, which can be shown by induction.","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"This can be reformulated as a Markov Decision Process by introducing a state variable, s, which tracks the un-spent budget over N stages.","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"beginaligned\nV_t(s) = min x^2 + V_t+1(s^prime) \nst s^prime = s - x \n x le s \n x ge 0 \n s ge 0\nendaligned","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"and in the last stage V_N, there is an additional constraint that s^prime = 0.","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"The budget of M is computed by solving for V_1(M).","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"info: Info\nSince everything here is continuous and convex, SDDP.jl will find the globally optimal policy.","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"If the reformulation from the single problem into the recursive form of the Markov Decision Process is not obvious, consult Putterman's book.","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"We can model and solve this problem using SDDP.jl as follows:","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"using SDDP\nimport Ipopt\n\nM, N = 5, 3\n\nmodel = SDDP.LinearPolicyGraph(\n stages = N,\n lower_bound = 0.0,\n optimizer = Ipopt.Optimizer,\n) do subproblem, node\n @variable(subproblem, s >= 0, SDDP.State, initial_value = M)\n @variable(subproblem, x >= 0)\n @stageobjective(subproblem, x^2)\n 
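# Spend x out of the remaining budget s.in; the unspent amount is carried\n    # forward as s.out.\n    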
@constraint(subproblem, x <= s.in)\n @constraint(subproblem, s.out == s.in - x)\n if node == N\n fix(s.out, 0.0; force = true)\n end\n return\nend\n\nSDDP.train(model)","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"Check that we got the theoretical optimum:","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"SDDP.calculate_bound(model), M^2 / N","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"And check that we found the theoretical value for each x_i:","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"simulations = SDDP.simulate(model, 1, [:x])\nfor data in simulations[1]\n println(\"x_$(data[:node_index]) = $(data[:x])\")\nend","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"Close enough! We don't get exactly 5/3 because of numerical tolerances within our choice of optimization solver (in this case, Ipopt).","category":"page"},{"location":"tutorial/mdps/#A-more-complicated-policy","page":"Example: Markov Decision Processes","title":"A more complicated policy","text":"","category":"section"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"SDDP.jl is also capable of finding policies for other types of Markov Decision Processes. A classic example of a Markov Decision Process is the problem of finding a path through a maze.","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"Here's one example of a maze. Try changing the parameters to explore different mazes:","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"M, N = 3, 4\ninitial_square = (1, 1)\nreward, illegal_squares, penalties = (3, 4), [(2, 2)], [(3, 1), (2, 4)]\npath = fill(\"⋅\", M, N)\npath[initial_square...] = \"1\"\nfor (k, v) in (illegal_squares => \"▩\", penalties => \"†\", [reward] => \"*\")\n for (i, j) in k\n path[i, j] = v\n end\nend\nprint(join([join(path[i, :], ' ') for i in 1:size(path, 1)], '\\n'))","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"Our goal is to get from square 1 to square *. If we step on a †, we incur a penalty of 1. Squares with ▩ are blocked; we cannot move there.","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"There are a variety of ways that we can solve this problem. We're going to solve it using a stationary binary stochastic programming formulation.","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"Our state variable will be a matrix of binary variables x_ij, where each element is 1 if the agent is in the square and 0 otherwise. In each period, we incur a reward of 1 if we are in the reward square and a penalty of -1 if we are in a penalties square. 
We cannot move to the illegal_squares, so those x_ij = 0. Feasibility between moves is modelled by constraints of the form:","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"x^prime_ij le sumlimits_(ab)in P x_ab","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"where P is the set of squares from which it is valid to move from (a, b) to (i, j).","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"Because we are looking for a stationary policy, we need a unicyclic graph with a discount factor:","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"discount_factor = 0.9\ngraph = SDDP.UnicyclicGraph(discount_factor)","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"Then we can formulate our full model:","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"import HiGHS\n\nmodel = SDDP.PolicyGraph(\n graph;\n sense = :Max,\n upper_bound = 1 / (1 - discount_factor),\n optimizer = HiGHS.Optimizer,\n) do sp, _\n # Our state is a binary variable for each square\n @variable(\n sp,\n x[i = 1:M, j = 1:N],\n Bin,\n SDDP.State,\n initial_value = (i, j) == initial_square,\n )\n # Can only be in one square at a time\n @constraint(sp, sum(x[i, j].out for i in 1:M, j in 1:N) == 1)\n # Incur rewards and penalties\n @stageobjective(\n sp,\n x[reward...].out - sum(x[i, j].out for (i, j) in penalties)\n )\n # Some squares are illegal\n @constraint(sp, [(i, j) in illegal_squares], x[i, j].out <= 0)\n # Constraints on valid moves\n for i in 1:M, j in 1:N\n moves = [(i - 1, j), (i + 1, j), (i, j), (i, j + 1), (i, j - 1)]\n filter!(v -> 1 <= v[1] <= M && 1 <= v[2] <= N, moves)\n @constraint(sp, x[i, j].out <= sum(x[a, b].in for (a, b) in moves))\n end\n return\nend","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"The upper bound is obtained by assuming that we reach the reward square in one move and stay there.","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"warning: Warning\nSince there are discrete decisions here, SDDP.jl is not guaranteed to find the globally optimal policy.","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"SDDP.train(model)","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"Simulating a cyclic policy graph requires an explicit sampling_scheme that does not terminate early based on the cycle probability:","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"simulations = SDDP.simulate(\n model,\n 1,\n [:x];\n sampling_scheme = SDDP.InSampleMonteCarlo(\n max_depth = 5,\n terminate_on_dummy_leaf = false,\n 
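# max_depth = 5 caps each simulated trajectory at five visited nodes, and\n # terminate_on_dummy_leaf = false prevents the simulation from stopping\n # early when the dummy leaf of the cycle is sampled.\n 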
),\n);\n","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"Fill in the path with the time-step in which we visit the square:","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"for (t, data) in enumerate(simulations[1]), i in 1:M, j in 1:N\n if data[:x][i, j].in > 0.5\n path[i, j] = \"$t\"\n end\nend\n\nprint(join([join(path[i, :], ' ') for i in 1:size(path, 1)], '\\n'))","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"tip: Tip\nThis formulation will likely struggle as the number of cells in the maze increases. Can you think of an equivalent formulation that uses fewer state variables?","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"","category":"page"},{"location":"tutorial/mdps/","page":"Example: Markov Decision Processes","title":"Example: Markov Decision Processes","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"EditURL = \"Hydro_thermal.jl\"","category":"page"},{"location":"examples/Hydro_thermal/#Hydro-thermal-scheduling","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"","category":"section"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/Hydro_thermal/#Problem-Description","page":"Hydro-thermal scheduling","title":"Problem Description","text":"","category":"section"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"In a hydro-thermal problem, the agent controls a hydro-electric generator and reservoir. Each time period, they need to choose a generation quantity from thermal g_t, and hydro g_h, in order to meet demand w_d, which is a stagewise-independent random variable. The state variable, x, is the quantity of water in the reservoir at the start of each time period, and it has a minimum level of 5 units and a maximum level of 15 units. We assume that there are 10 units of water in the reservoir at the start of time, so that x_0 = 10. The state-variable is connected through time by the water balance constraint: x.out = x.in - g_h - s + w_i, where x.out is the quantity of water at the end of the time period, x.in is the quantity of water at the start of the time period, s is the quantity of water spilled from the reservoir, and w_i is a stagewise-independent random variable that represents the inflow into the reservoir during the time period.","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"We assume that there are three stages, t=1, 2, 3, representing summer-fall, winter, and spring, and that we are solving this problem in an infinite-horizon setting with a discount factor of 0.95.","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"In each stage, the agent incurs the cost of spillage, plus the cost of thermal generation. 
We assume that the cost of thermal generation is dependent on the stage t = 1, 2, 3, and that in each stage, w is drawn from the set (w_i, w_d) = {(0, 7.5), (3, 5), (10, 2.5)} with equal probability.","category":"page"},{"location":"examples/Hydro_thermal/#Importing-packages","page":"Hydro-thermal scheduling","title":"Importing packages","text":"","category":"section"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"For this example, in addition to SDDP, we need HiGHS as a solver and Statistics to compute the mean of our simulations.","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"using HiGHS\nusing SDDP\nusing Statistics","category":"page"},{"location":"examples/Hydro_thermal/#Constructing-the-policy-graph","page":"Hydro-thermal scheduling","title":"Constructing the policy graph","text":"","category":"section"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"There are three stages in our infinite-horizon problem, so we construct a unicyclic policy graph using SDDP.UnicyclicGraph:","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"graph = SDDP.UnicyclicGraph(0.95; num_nodes = 3)","category":"page"},{"location":"examples/Hydro_thermal/#Constructing-the-model","page":"Hydro-thermal scheduling","title":"Constructing the model","text":"","category":"section"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"Much of the macro code (i.e., lines starting with @) in the first part of the following should be familiar to users of JuMP.","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"Inside the do-end block, sp is a standard JuMP model, and t is an index for the node that will be called with t = 1, 2, 3.","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"The state variable x, constructed by passing the SDDP.State tag to @variable, is actually a Julia struct with two fields: x.in and x.out corresponding to the incoming and outgoing state variables respectively. Both x.in and x.out are standard JuMP variables. The initial_value keyword provides the value of the state variable in the root node (i.e., x_0).","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"Compared to a JuMP model, one key difference is that we use @stageobjective instead of @objective. The SDDP.parameterize function takes a list of supports for w and parameterizes the JuMP model sp by setting the right-hand sides of the appropriate constraints (note how the constraints initially have a right-hand side of 0). 
By default, it is assumed that the realizations have uniform probability, but a probability mass vector can also be provided.","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"model = SDDP.PolicyGraph(\n graph,\n sense = :Min,\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n) do sp, t\n @variable(sp, 5 <= x <= 15, SDDP.State, initial_value = 10)\n @variable(sp, g_t >= 0)\n @variable(sp, g_h >= 0)\n @variable(sp, s >= 0)\n @constraint(sp, balance, x.out - x.in + g_h + s == 0)\n @constraint(sp, demand, g_h + g_t == 0)\n @stageobjective(sp, s + t * g_t)\n SDDP.parameterize(sp, [[0, 7.5], [3, 5], [10, 2.5]]) do w\n set_normalized_rhs(balance, w[1])\n return set_normalized_rhs(demand, w[2])\n end\nend","category":"page"},{"location":"examples/Hydro_thermal/#Training-the-policy","page":"Hydro-thermal scheduling","title":"Training the policy","text":"","category":"section"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"Once a model has been constructed, the next step is to train the policy. This can be achieved using SDDP.train. There are many options that can be passed, but iteration_limit terminates the training after the prescribed number of SDDP iterations.","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"SDDP.train(model, iteration_limit = 100)","category":"page"},{"location":"examples/Hydro_thermal/#Simulating-the-policy","page":"Hydro-thermal scheduling","title":"Simulating the policy","text":"","category":"section"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"After training, we can simulate the policy using SDDP.simulate.","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"sims = SDDP.simulate(model, 100, [:g_t])\nmu = round(mean([s[1][:g_t] for s in sims]), digits = 2)\nprintln(\"On average, $(mu) units of thermal are used in the first stage.\")","category":"page"},{"location":"examples/Hydro_thermal/#Extracting-the-water-values","page":"Hydro-thermal scheduling","title":"Extracting the water values","text":"","category":"section"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"Finally, we can use SDDP.ValueFunction and SDDP.evaluate to obtain and evaluate the value function at different points in the state-space. 
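For instance, once V has been constructed as shown below, the value function can be evaluated over a sweep of reservoir levels; this is a small sketch that goes beyond the original example:","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"V = SDDP.ValueFunction(model[1])\nfor level in 5:5:15\n    cost, price = SDDP.evaluate(V, x = level)\n    println(\"x = $(level), cost = $(cost), price = $(price)\")\nend","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"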
Note that since we are minimizing, the price has a negative sign: each additional unit of water leads to a decrease in the expected long-run cost.","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"V = SDDP.ValueFunction(model[1])\ncost, price = SDDP.evaluate(V, x = 10)","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/hydro_valley/","page":"Hydro valleys","title":"Hydro valleys","text":"EditURL = \"hydro_valley.jl\"","category":"page"},{"location":"examples/hydro_valley/#Hydro-valleys","page":"Hydro valleys","title":"Hydro valleys","text":"","category":"section"},{"location":"examples/hydro_valley/","page":"Hydro valleys","title":"Hydro valleys","text":"(Image: ) (Image: )","category":"page"},{"location":"examples/hydro_valley/","page":"Hydro valleys","title":"Hydro valleys","text":"This problem is a version of the hydro-thermal scheduling problem. The goal is to operate two hydro-dams in a valley chain over time in the face of inflow and price uncertainty.","category":"page"},{"location":"examples/hydro_valley/","page":"Hydro valleys","title":"Hydro valleys","text":"Turbine response curves are modelled by piecewise linear functions which map the flow rate into a power. These can be controlled by specifying the breakpoints in the piecewise linear function as the knots in the Turbine struct.","category":"page"},{"location":"examples/hydro_valley/","page":"Hydro valleys","title":"Hydro valleys","text":"The model can be created using the hydro_valley_model function. It has a few keyword arguments to allow automated testing of the library. hasstagewiseinflows determines if the RHS noise constraint should be added. 
hasmarkovprice determines if the price uncertainty (modelled by a Markov chain) should be added.","category":"page"},{"location":"examples/hydro_valley/","page":"Hydro valleys","title":"Hydro valleys","text":"In the third stage, the Markov chain has some unreachable states to test some code-paths in the library.","category":"page"},{"location":"examples/hydro_valley/","page":"Hydro valleys","title":"Hydro valleys","text":"We can also set the sense to :Min or :Max (the objective and bound are flipped appropriately).","category":"page"},{"location":"examples/hydro_valley/","page":"Hydro valleys","title":"Hydro valleys","text":"using SDDP, HiGHS, Test, Random\n\nstruct Turbine\n flowknots::Vector{Float64}\n powerknots::Vector{Float64}\nend\n\nstruct Reservoir\n min::Float64\n max::Float64\n initial::Float64\n turbine::Turbine\n spill_cost::Float64\n inflows::Vector{Float64}\nend\n\nfunction hydro_valley_model(;\n hasstagewiseinflows::Bool = true,\n hasmarkovprice::Bool = true,\n sense::Symbol = :Max,\n)\n valley_chain = [\n Reservoir(\n 0,\n 200,\n 200,\n Turbine([50, 60, 70], [55, 65, 70]),\n 1000,\n [0, 20, 50],\n ),\n Reservoir(\n 0,\n 200,\n 200,\n Turbine([50, 60, 70], [55, 65, 70]),\n 1000,\n [0, 0, 20],\n ),\n ]\n\n turbine(i) = valley_chain[i].turbine\n\n # Prices[t, Markov state]\n prices = [\n 1 2 0\n 2 1 0\n 3 4 0\n ]\n\n # Transition matrix\n if hasmarkovprice\n transition =\n Array{Float64,2}[[1.0]', [0.6 0.4], [0.6 0.4 0.0; 0.3 0.7 0.0]]\n else\n transition = [ones(Float64, (1, 1)) for t in 1:3]\n end\n\n flipobj = (sense == :Max) ? 1.0 : -1.0\n lower = (sense == :Max) ? -Inf : -1e6\n upper = (sense == :Max) ? 1e6 : Inf\n\n N = length(valley_chain)\n\n # Initialise SDDP Model\n return m = SDDP.MarkovianPolicyGraph(\n sense = sense,\n lower_bound = lower,\n upper_bound = upper,\n transition_matrices = transition,\n optimizer = HiGHS.Optimizer,\n ) do subproblem, node\n t, markov_state = node\n\n # ------------------------------------------------------------------\n # SDDP State Variables\n # Level of upper reservoir\n @variable(\n subproblem,\n valley_chain[r].min <= reservoir[r = 1:N] <= valley_chain[r].max,\n SDDP.State,\n initial_value = valley_chain[r].initial\n )\n\n # ------------------------------------------------------------------\n # Additional variables\n @variables(\n subproblem,\n begin\n outflow[r = 1:N] >= 0\n spill[r = 1:N] >= 0\n inflow[r = 1:N] >= 0\n generation_quantity >= 0 # Total quantity of water\n # Proportion of levels to dispatch on\n 0 <=\n dispatch[r = 1:N, level = 1:length(turbine(r).flowknots)] <=\n 1\n rainfall[i = 1:N]\n end\n )\n\n # ------------------------------------------------------------------\n # Constraints\n @constraints(\n subproblem,\n begin\n # flow from upper reservoir\n reservoir[1].out ==\n reservoir[1].in + inflow[1] - outflow[1] - spill[1]\n\n # other flows\n flow[i = 2:N],\n reservoir[i].out ==\n reservoir[i].in + inflow[i] - outflow[i] - spill[i] +\n outflow[i-1] +\n spill[i-1]\n\n # Total quantity generated\n generation_quantity == sum(\n turbine(r).powerknots[level] * dispatch[r, level] for\n r in 1:N for level in 1:length(turbine(r).powerknots)\n )\n\n # ------------------------------------------------------------------\n # Flow out\n turbineflow[r = 1:N],\n outflow[r] == sum(\n turbine(r).flowknots[level] * dispatch[r, level] for\n level in 1:length(turbine(r).flowknots)\n )\n\n # Dispatch combination of levels\n dispatched[r = 1:N],\n sum(\n dispatch[r, level] for\n level in 1:length(turbine(r).flowknots)\n ) <= 1\n end\n 
)\n\n # rainfall noises\n if hasstagewiseinflows && t > 1 # in future stages random inflows\n @constraint(subproblem, inflow_noise[i = 1:N], inflow[i] <= rainfall[i])\n\n SDDP.parameterize(\n subproblem,\n [\n (valley_chain[1].inflows[i], valley_chain[2].inflows[i]) for i in 1:length(transition)\n ],\n ) do ω\n for i in 1:N\n JuMP.fix(rainfall[i], ω[i])\n end\n end\n else # in the first stage deterministic inflow\n @constraint(\n subproblem,\n initial_inflow_noise[i = 1:N],\n inflow[i] <= valley_chain[i].inflows[1]\n )\n end\n\n # ------------------------------------------------------------------\n # Objective Function\n if hasmarkovprice\n @stageobjective(\n subproblem,\n flipobj * (\n prices[t, markov_state] * generation_quantity -\n sum(valley_chain[i].spill_cost * spill[i] for i in 1:N)\n )\n )\n else\n @stageobjective(\n subproblem,\n flipobj * (\n prices[t, 1] * generation_quantity -\n sum(valley_chain[i].spill_cost * spill[i] for i in 1:N)\n )\n )\n end\n end\nend\n\nfunction test_hydro_valley_model()\n\n # For repeatability\n Random.seed!(11111)\n\n # deterministic\n deterministic_model =\n hydro_valley_model(hasmarkovprice = false, hasstagewiseinflows = false)\n SDDP.train(\n deterministic_model,\n iteration_limit = 10,\n cut_deletion_minimum = 1,\n print_level = 0,\n )\n @test SDDP.calculate_bound(deterministic_model) ≈ 835.0 atol = 1e-3\n\n # stagewise inflows\n stagewise_model = hydro_valley_model(hasmarkovprice = false)\n SDDP.train(stagewise_model, iteration_limit = 20, print_level = 0)\n @test SDDP.calculate_bound(stagewise_model) ≈ 838.33 atol = 1e-2\n\n # Markov prices\n markov_model = hydro_valley_model(hasstagewiseinflows = false)\n SDDP.train(markov_model, iteration_limit = 10, print_level = 0)\n @test SDDP.calculate_bound(markov_model) ≈ 851.8 atol = 1e-2\n\n # stagewise inflows and Markov prices\n markov_stagewise_model =\n hydro_valley_model(hasstagewiseinflows = true, hasmarkovprice = true)\n SDDP.train(markov_stagewise_model, iteration_limit = 10, print_level = 0)\n @test SDDP.calculate_bound(markov_stagewise_model) ≈ 855.0 atol = 1.0\n\n # risk averse stagewise inflows and Markov prices\n riskaverse_model = hydro_valley_model()\n SDDP.train(\n riskaverse_model,\n risk_measure = SDDP.EAVaR(lambda = 0.5, beta = 0.66),\n iteration_limit = 10,\n print_level = 0,\n )\n @test SDDP.calculate_bound(riskaverse_model) ≈ 828.157 atol = 1.0\n\n # stagewise inflows and Markov prices\n worst_case_model = hydro_valley_model(sense = :Min)\n SDDP.train(\n worst_case_model,\n risk_measure = SDDP.EAVaR(lambda = 0.5, beta = 0.0),\n iteration_limit = 10,\n print_level = 0,\n )\n @test SDDP.calculate_bound(worst_case_model) ≈ -780.867 atol = 1.0\n\n # stagewise inflows and Markov prices\n cutselection_model = hydro_valley_model()\n SDDP.train(\n cutselection_model,\n iteration_limit = 10,\n print_level = 0,\n cut_deletion_minimum = 2,\n )\n @test SDDP.calculate_bound(cutselection_model) ≈ 855.0 atol = 1.0\n\n # Distributionally robust Optimization\n dro_model = hydro_valley_model(hasmarkovprice = false)\n SDDP.train(\n dro_model,\n risk_measure = SDDP.ModifiedChiSquared(sqrt(2 / 3) - 1e-6),\n iteration_limit = 10,\n print_level = 0,\n )\n @test SDDP.calculate_bound(dro_model) ≈ 835.0 atol = 1.0\n\n dro_model = hydro_valley_model(hasmarkovprice = false)\n SDDP.train(\n dro_model,\n risk_measure = SDDP.ModifiedChiSquared(1 / 6),\n iteration_limit = 20,\n print_level = 0,\n )\n @test SDDP.calculate_bound(dro_model) ≈ 836.695 atol = 1.0\n # (Note) radius ≈ sqrt(2/3), will set all noise 
probabilities to zero except the worst case noise\n # (Why?):\n # The distance from the uniform distribution (the assumed \"true\" distribution)\n # to a corner of a unit simplex is sqrt(S-1)/sqrt(S) if we have S scenarios. The corner\n # of a unit simplex is just a unit vector, i.e.: [0 ... 0 1 0 ... 0]. With this probability\n # vector, only one noise has a non-zero probablity.\n # In the worst case rhsnoise (0 inflows) the profit is:\n # Reservoir1: 70 * $3 + 70 * $2 + 65 * $1 +\n # Reservoir2: 70 * $3 + 70 * $2 + 70 * $1\n ### = $835\nend\n\ntest_hydro_valley_model()","category":"page"},{"location":"examples/hydro_valley/","page":"Hydro valleys","title":"Hydro valleys","text":"","category":"page"},{"location":"examples/hydro_valley/","page":"Hydro valleys","title":"Hydro valleys","text":"This page was generated using Literate.jl.","category":"page"},{"location":"guides/add_noise_in_the_constraint_matrix/#Add-noise-in-the-constraint-matrix","page":"Add noise in the constraint matrix","title":"Add noise in the constraint matrix","text":"","category":"section"},{"location":"guides/add_noise_in_the_constraint_matrix/","page":"Add noise in the constraint matrix","title":"Add noise in the constraint matrix","text":"DocTestSetup = quote\n using SDDP, HiGHS\nend","category":"page"},{"location":"guides/add_noise_in_the_constraint_matrix/","page":"Add noise in the constraint matrix","title":"Add noise in the constraint matrix","text":"SDDP.jl supports coefficients in the constraint matrix through the JuMP.set_normalized_coefficient function.","category":"page"},{"location":"guides/add_noise_in_the_constraint_matrix/","page":"Add noise in the constraint matrix","title":"Add noise in the constraint matrix","text":"julia> model = SDDP.LinearPolicyGraph(\n stages=3, lower_bound = 0, optimizer = HiGHS.Optimizer\n ) do subproblem, t\n @variable(subproblem, x, SDDP.State, initial_value = 0.0)\n @constraint(subproblem, emissions, 1x.out <= 1)\n SDDP.parameterize(subproblem, [0.2, 0.5, 1.0]) do ω\n JuMP.set_normalized_coefficient(emissions, x.out, ω)\n println(emissions)\n end\n @stageobjective(subproblem, -x.out)\n end\nA policy graph with 3 nodes.\n Node indices: 1, 2, 3\n\njulia> SDDP.simulate(model, 1);\nemissions : x_out <= 1\nemissions : 0.2 x_out <= 1\nemissions : 0.5 x_out <= 1","category":"page"},{"location":"guides/add_noise_in_the_constraint_matrix/","page":"Add noise in the constraint matrix","title":"Add noise in the constraint matrix","text":"note: Note\nJuMP will normalize constraints by moving all variables to the left-hand side. Thus, @constraint(model, 0 <= 1 - x.out) becomes x.out <= 1. JuMP.set_normalized_coefficient sets the coefficient on the normalized constraint.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"EditURL = \"risk.jl\"","category":"page"},{"location":"explanation/risk/#Risk-aversion","page":"Risk aversion","title":"Risk aversion","text":"","category":"section"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"(Image: ) (Image: )","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"In Introductory theory, we implemented a basic version of the SDDP algorithm. 
This tutorial extends that implementation to add risk-aversion.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Packages","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"This tutorial uses the following packages. For clarity, we call import PackageName so that we must prefix PackageName. to all functions and structs provided by that package. Everything not prefixed is either part of base Julia, or we wrote it.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"import ForwardDiff\nimport HiGHS\nimport Ipopt\nimport JuMP\nimport Statistics","category":"page"},{"location":"explanation/risk/#Risk-aversion:-what-and-why?","page":"Risk aversion","title":"Risk aversion: what and why?","text":"","category":"section"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Often, the agents making decisions in complex systems are risk-averse, that is, they care more about avoiding very bad outcomes, than they do about having a good average outcome.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"As an example, consumers in a hydro-thermal problem may be willing to pay a slightly higher electricity price on average, if it means that there is a lower probability of blackouts.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Risk aversion in multistage stochastic programming has been well studied in the academic literature, and is widely used in production implementations around the world.","category":"page"},{"location":"explanation/risk/#Risk-measures","page":"Risk aversion","title":"Risk measures","text":"","category":"section"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"One way to add risk aversion to models is to use a risk measure. A risk measure is a function that maps a random variable to a real number.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"You are probably already familiar with lots of different risk measures. For example, the mean, median, mode, and maximum are all risk measures.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"We call the act of applying a risk measure to a random variable \"computing the risk\" of a random variable.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"To keep things simple, and because we need it for SDDP, we restrict our attention to random variables Z with a finite sample space Omega and positive probabilities p_omega for all omega in Omega. 
We denote the realizations of Z by Z(omega) = z_omega.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"A risk measure, mathbbFZ, is a convex risk measure if it satisfies the following axioms:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Axiom 1: monotonicity","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Given two random variables Z_1 and Z_2, with Z_1 le Z_2 almost surely, then mathbbFZ_1 le FZ_2.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Axiom 2: translation equivariance","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Given two random variables Z_1 and Z_2, then for all a in mathbbR, mathbbFZ + a = mathbbFZ + a.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Axiom 3: convexity","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Given two random variables Z_1 and Z_2, then for all a in 0 1,","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"mathbbFa Z_1 + (1 - a) Z_2 le a mathbbFZ_1 + (1-a)mathbbFZ_2","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Now we know what a risk measure is, let's see how we can use them to form risk-averse decision rules.","category":"page"},{"location":"explanation/risk/#Risk-averse-decision-rules:-Part-I","page":"Risk aversion","title":"Risk-averse decision rules: Part I","text":"","category":"section"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"We started this tutorial by explaining that we are interested in risk aversion because some agents are risk-averse. What that really means, is that they want a policy that is also risk-averse. The question then becomes, how do we create risk-averse decision rules and policies?","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Recall from Introductory theory that we can form an optimal decision rule using the recursive formulation:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"beginaligned\nV_i(x omega) = minlimits_barx x^prime u C_i(barx u omega) + mathbbE_j in i^+ varphi in Omega_jV_j(x^prime varphi)\n x^prime = T_i(barx u omega) \n u in U_i(barx omega) \n barx = x\nendaligned","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"where our decision rule, pi_i(x omega), solves this optimization problem and returns a u^* corresponding to an optimal solution.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"If we can replace the expectation operator mathbbE with another (more risk-averse) risk measure mathbbF, then our decision rule will attempt to choose a control decision now that minimizes the risk of the future costs, as opposed to the expectation of the future costs. 
This makes our decisions more risk-averse, because we care more about the worst outcomes than we do about the average.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Therefore, we can form a risk-averse decision rule using the formulation:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"beginaligned\nV_i(x omega) = minlimits_barx x^prime u C_i(barx u omega) + mathbbF_j in i^+ varphi in Omega_jV_j(x^prime varphi)\n x^prime = T_i(barx u omega) \n u in U_i(barx omega) \n barx = x\nendaligned","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"To convert this problem into a tractable equivalent, we apply Kelley's algorithm to the risk-averse cost-to-go term mathbbF_j in i^+ varphi in Omega_jV_j(x^prime varphi), to obtain the approximated problem:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"beginaligned\nV_i^K(x omega) = minlimits_barx x^prime u C_i(barx u omega) + theta\n x^prime = T_i(barx u omega) \n u in U_i(barx omega) \n barx = x \n theta ge mathbbF_j in i^+ varphi in Omega_jleftV_j^k(x^prime_k varphi)right + fracddx^primemathbbF_j in i^+ varphi in Omega_jleftV_j^k(x^prime_k varphi)right^top (x^prime - x^prime_k)quad k=1ldotsK\nendaligned","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"warning: Warning\nNote how we need to explicitly compute a risk-averse subgradient! (We need a subgradient because the function might not be differentiable.) When constructing cuts with the expectation operator in Introductory theory, we implicitly used the law of total expectation to combine the two expectations; we can't do that for a general risk measure.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"tip: Homework challenge\nIf it's not obvious why we can use Kelley's here, try to use the axioms of a convex risk measure to show that mathbbF_j in i^+ varphi in Omega_jV_j(x^prime varphi) is a convex function w.r.t. x^prime if V_j is also a convex function.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Our challenge is now to find a way to compute the risk-averse cost-to-go function mathbbF_j in i^+ varphi in Omega_jleftV_j^k(x^prime_k varphi)right, and a way to compute a subgradient of the risk-averse cost-to-go function with respect to x^prime.","category":"page"},{"location":"explanation/risk/#Primal-risk-measures","page":"Risk aversion","title":"Primal risk measures","text":"","category":"section"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Now we know what a risk measure is, and how we will use it, let's implement some code to see how we can compute the risk of some random variables.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"note: Note\nWe're going to start by implementing the primal version of each risk measure. 
We implement the dual version in the next section.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"First, we need some data:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Z = [1.0, 2.0, 3.0, 4.0]","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"with probabilities:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"p = [0.1, 0.2, 0.4, 0.3]","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"We're going to implement a number of different risk measures, so to leverage Julia's multiple dispatch, we create an abstract type:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"abstract type AbstractRiskMeasure end","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"and function to overload:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"\"\"\"\n primal_risk(F::AbstractRiskMeasure, Z::Vector{<:Real}, p::Vector{Float64})\n\nUse `F` to compute the risk of the random variable defined by a vector of costs\n`Z` and non-zero probabilities `p`.\n\"\"\"\nfunction primal_risk end","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"note: Note\nWe want Vector{<:Real} instead of Vector{Float64} because we're going to automatically differentiate this function in the next section.","category":"page"},{"location":"explanation/risk/#Expectation","page":"Risk aversion","title":"Expectation","text":"","category":"section"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"The expectation, mathbbE, also called the mean or the average, is the most widely used convex risk measure. The expectation of a random variable is just the sum of Z weighted by the probability:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"mathbbFZ = mathbbE_pZ = sumlimits_omegainOmega p_omega z_omega","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"struct Expectation <: AbstractRiskMeasure end\n\nfunction primal_risk(::Expectation, Z::Vector{<:Real}, p::Vector{Float64})\n return sum(p[i] * Z[i] for i in 1:length(p))\nend","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Let's try it out:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"primal_risk(Expectation(), Z, p)","category":"page"},{"location":"explanation/risk/#WorstCase","page":"Risk aversion","title":"WorstCase","text":"","category":"section"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"The worst-case risk measure, also called the maximum, is another widely used convex risk measure. 
This risk measure doesn't care about the probability vector p, only the cost vector Z:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"mathbbFZ = maxZ = maxlimits_omegainOmega z_omega","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"struct WorstCase <: AbstractRiskMeasure end\n\nfunction primal_risk(::WorstCase, Z::Vector{<:Real}, ::Vector{Float64})\n return maximum(Z)\nend","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Let's try it out:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"primal_risk(WorstCase(), Z, p)","category":"page"},{"location":"explanation/risk/#Entropic","page":"Risk aversion","title":"Entropic","text":"","category":"section"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"A more interesting, and less widely used risk measure is the entropic risk measure. The entropic risk measure is parameterized by a value gamma 0, and computes the risk of a random variable as:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"mathbbF_gammaZ = frac1gammalogleft(mathbbE_pe^gamma Zright) = frac1gammalogleft(sumlimits_omegainOmegap_omega e^gamma z_omegaright)","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"tip: Homework challenge\nProve that the entropic risk measure satisfies the three axioms of a convex risk measure.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"struct Entropic <: AbstractRiskMeasure\n γ::Float64\n function Entropic(γ)\n if !(γ > 0)\n throw(DomainError(γ, \"Entropic risk measure must have γ > 0.\"))\n end\n return new(γ)\n end\nend\n\nfunction primal_risk(F::Entropic, Z::Vector{<:Real}, p::Vector{Float64})\n return 1 / F.γ * log(sum(p[i] * exp(F.γ * Z[i]) for i in 1:length(p)))\nend","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"warning: Warning\nexp(x) overflows when x 709. Therefore, if we are passed a vector of Float64, use arbitrary precision arithmetic with big.(Z).","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"function primal_risk(F::Entropic, Z::Vector{Float64}, p::Vector{Float64})\n return Float64(primal_risk(F, big.(Z), p))\nend","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Let's try it out for different values of gamma:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"for γ in [0.001, 0.01, 0.1, 1.0, 10.0, 100.0, 1_000.0]\n println(\"γ = $(γ), F[Z] = \", primal_risk(Entropic(γ), Z, p))\nend","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"info: Info\nThe entropic has two extremes. As gamma rightarrow 0, the entropic acts like the expectation risk measure, and as gamma rightarrow infty, the entropic acts like the worst-case risk measure.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Computing risk measures this way works well for computing the primal value. 
However, there isn't an obvious way to compute a subgradient of the risk-averse cost-to-go function, which we need for our cut calculation.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"There is a nice solution to this problem, and that is to use the dual representation of a risk measure, instead of the primal.","category":"page"},{"location":"explanation/risk/#Dual-risk-measures","page":"Risk aversion","title":"Dual risk measures","text":"","category":"section"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Convex risk measures have a dual representation as follows:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"mathbbFZ = suplimits_q inmathcalM(p) mathbbE_qZ - alpha(p q)","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"where alpha is a concave function that maps the probability vectors p and q to a real number, and mathcalM(p) subseteq mathcalP is a convex subset of the probability simplex:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"mathcalP = p ge 0sumlimits_omegainOmegap_omega = 1","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"The dual of a convex risk measure can be interpreted as taking the expectation of the random variable Z with respect to the worst probability vector q that lies within the set mathcalM, less some concave penalty term alpha(p q).","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"If we define a function dual_risk_inner that computes q and α:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"\"\"\"\n dual_risk_inner(\n F::AbstractRiskMeasure, Z::Vector{Float64}, p::Vector{Float64}\n )::Tuple{Vector{Float64},Float64}\n\nReturn a tuple formed by the worst-case probability vector `q` and the\ncorresponding evaluation `α(p, q)`.\n\"\"\"\nfunction dual_risk_inner end","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"then we can write a generic dual_risk function as:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"function dual_risk(\n F::AbstractRiskMeasure,\n Z::Vector{Float64},\n p::Vector{Float64},\n)\n q, α = dual_risk_inner(F, Z, p)\n return sum(q[i] * Z[i] for i in 1:length(q)) - α\nend","category":"page"},{"location":"explanation/risk/#Expectation-2","page":"Risk aversion","title":"Expectation","text":"","category":"section"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"For the expectation risk measure, mathcalM(p) = p, and alpha(cdot cdot) = 0. 
Therefore:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"function dual_risk_inner(::Expectation, ::Vector{Float64}, p::Vector{Float64})\n return p, 0.0\nend","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"We can check we get the same result as the primal version:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"dual_risk(Expectation(), Z, p) == primal_risk(Expectation(), Z, p)","category":"page"},{"location":"explanation/risk/#Worst-case","page":"Risk aversion","title":"Worst-case","text":"","category":"section"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"For the worst-case risk measure, mathcalM(p) = mathcalP, and alpha(cdot cdot) = 0. Therefore, the dual representation just puts all of the probability weight on the maximum outcome:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"function dual_risk_inner(::WorstCase, Z::Vector{Float64}, ::Vector{Float64})\n q = zeros(length(Z))\n _, index = findmax(Z)\n q[index] = 1.0\n return q, 0.0\nend","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"We can check we get the same result as the primal version:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"dual_risk(WorstCase(), Z, p) == primal_risk(WorstCase(), Z, p)","category":"page"},{"location":"explanation/risk/#Entropic-2","page":"Risk aversion","title":"Entropic","text":"","category":"section"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"For the entropic risk measure, mathcalM(p) = mathcalP, and:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"alpha(p q) = frac1gammasumlimits_omegainOmega q_omega logleft(fracq_omegap_omegaright)","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"One way to solve the dual problem is to explicitly solve a nonlinear optimization problem:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"function dual_risk_inner(F::Entropic, Z::Vector{Float64}, p::Vector{Float64})\n N = length(p)\n model = JuMP.Model(Ipopt.Optimizer)\n JuMP.set_silent(model)\n # For this problem, the solve is more accurate if we turn off problem\n # scaling.\n JuMP.set_optimizer_attribute(model, \"nlp_scaling_method\", \"none\")\n JuMP.@variable(model, 0 <= q[1:N] <= 1)\n JuMP.@constraint(model, sum(q) == 1)\n JuMP.@NLexpression(\n model,\n α,\n 1 / F.γ * sum(q[i] * log(q[i] / p[i]) for i in 1:N),\n )\n JuMP.@NLobjective(model, Max, sum(q[i] * Z[i] for i in 1:N) - α)\n JuMP.optimize!(model)\n return JuMP.value.(q), JuMP.value(α)\nend","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"We can check we get the same result as the primal version:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"for γ in [0.001, 0.01, 0.1, 1.0, 10.0, 100.0]\n primal = primal_risk(Entropic(γ), Z, p)\n dual = dual_risk(Entropic(γ), Z, p)\n success = primal ≈ dual ? 
\"✓\" : \"×\"\n println(\"$(success) γ = $(γ), primal = $(primal), dual = $(dual)\")\nend","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"info: Info\nThis method of solving the dual problem \"on-the-side\" is used by SDDP.jl for a number of risk measures, including a distributionally robust risk measure with the Wasserstein distance. Check out all the risk measures that SDDP.jl supports in Add a risk measure.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"The \"on-the-side\" method is very general, and it lets us incorporate any convex risk measure into SDDP. However, this comes at an increased computational cost and potential numerical issues (e.g., not converging to the exact solution).","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"However, for the entropic risk measure, Dowson, Morton, and Pagnoncelli (2020) derive the following closed form solution for q^*:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"q_omega^* = fracp_omega e^gamma z_omegasumlimits_varphi in Omega p_varphi e^gamma z_varphi","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"This is faster because we don't need to use Ipopt, and it avoids some of the numerical issues associated with solving a nonlinear program.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"function dual_risk_inner(F::Entropic, Z::Vector{Float64}, p::Vector{Float64})\n q, α = zeros(length(p)), big(0.0)\n peγz = p .* exp.(F.γ .* big.(Z))\n sum_peγz = sum(peγz)\n for i in 1:length(q)\n big_q = peγz[i] / sum_peγz\n α += big_q * log(big_q / p[i])\n q[i] = Float64(big_q)\n end\n return q, Float64(α / F.γ)\nend","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"warning: Warning\nAgain, note that we use big to avoid introducing overflow errors, before explicitly casting back to Float64 for the values we return.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"We can check we get the same result as the primal version:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"for γ in [0.001, 0.01, 0.1, 1.0, 10.0, 100.0]\n primal = primal_risk(Entropic(γ), Z, p)\n dual = dual_risk(Entropic(γ), Z, p)\n success = primal ≈ dual ? 
\"✓\" : \"×\"\n println(\"$(success) γ = $(γ), primal = $(primal), dual = $(dual)\")\nend","category":"page"},{"location":"explanation/risk/#Risk-averse-subgradients","page":"Risk aversion","title":"Risk-averse subgradients","text":"","category":"section"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"We ended the section on primal risk measures by explaining how we couldn't use the primal risk measure in the cut calculation because we needed some way of computing a risk-averse subgradient:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"theta ge mathbbF_j in i^+ varphi in Omega_jleftV_j^k(x^prime_k varphi)right + fracddx^primemathbbF_j in i^+ varphi in Omega_jleftV_j^k(x^prime_k varphi)right^top (x^prime - x^prime_k)","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"The reason we use the dual representation is because of the following theorem, which explains how to compute a risk-averse gradient.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"info: The risk-averse subgradient theorem\nLet omega in Omega index a random vector with finite support and with nominal probability mass function, p in mathcalP, which satisfies p 0.Consider a convex risk measure, mathbbF, with a convex risk set, mathcalM(p), so that mathbbF can be expressed as the dual form.Let V(xomega) be convex with respect to x for all fixed omegainOmega, and let lambda(tildex omega) be a subgradient of V(xomega) with respect to x at x = tildex for each omega in Omega.Then, sum_omegainOmegaq^*_omega lambda(tildexomega) is a subgradient of mathbbFV(xomega) at tildex, whereq^* in argmax_q in mathcalM(p)leftmathbbE_qV(tildexomega) - alpha(p q)right","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"This theorem can be a little hard to unpack, so let's see an example:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"function dual_risk_averse_subgradient(\n V::Function,\n # Use automatic differentiation to compute the gradient of V w.r.t. x,\n # given a fixed ω.\n λ::Function = (x, ω) -> ForwardDiff.gradient(x -> V(x, ω), x);\n F::AbstractRiskMeasure,\n Ω::Vector,\n p::Vector{Float64},\n x̃::Vector{Float64},\n)\n # Evaluate the function at x=x̃ for all ω ∈ Ω.\n V_ω = [V(x̃, ω) for ω in Ω]\n # Solve the dual problem to obtain an optimal q^*.\n q, α = dual_risk_inner(F, V_ω, p)\n # Compute the risk-averse subgradient by taking the expectation of the\n # subgradients w.r.t. 
q^*.\n dVdx = sum(q[i] * λ(x̃, ω) for (i, ω) in enumerate(Ω))\n return dVdx\nend","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"We can compare the subgradient obtained with the dual form against the automatic differentiation of the primal_risk function.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"function primal_risk_averse_subgradient(\n V::Function;\n F::AbstractRiskMeasure,\n Ω::Vector,\n p::Vector{Float64},\n x̃::Vector{Float64},\n)\n inner(x) = primal_risk(F, [V(x, ω) for ω in Ω], p)\n return ForwardDiff.gradient(inner, x̃)\nend","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"As our example function, we use:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"V(x, ω) = ω * x[1]^2","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"with:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Ω = [1.0, 2.0, 3.0]","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"and:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"p = [0.3, 0.4, 0.3]","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"at the point:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"x̃ = [3.0]","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"If mathbbF is the expectation risk-measure, then:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"mathbbFV(x omega) = 2 x^2","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"The function evaluation x=3 is 18 and the subgradient is 12. Let's check we get it right with the dual form:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"dual_risk_averse_subgradient(V; F = Expectation(), Ω = Ω, p = p, x̃ = x̃)","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"and the primal form:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"primal_risk_averse_subgradient(V; F = Expectation(), Ω = Ω, p = p, x̃ = x̃)","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"If mathbbF is the worst-case risk measure, then:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"mathbbFV(x omega) = 3 x^2","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"The function evaluation at x=3 is 27, and the subgradient is 18. 
Let's check we get it right with the dual form:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"dual_risk_averse_subgradient(V; F = WorstCase(), Ω = Ω, p = p, x̃ = x̃)","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"and the primal form:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"primal_risk_averse_subgradient(V; F = WorstCase(), Ω = Ω, p = p, x̃ = x̃)","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"If mathbbF is the entropic risk measure, the math is a little more difficult to derive analytically. However, we can check against our primal version:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"for γ in [0.001, 0.01, 0.1, 1.0, 10.0, 100.0]\n dual =\n dual_risk_averse_subgradient(V; F = Entropic(γ), Ω = Ω, p = p, x̃ = x̃)\n primal = primal_risk_averse_subgradient(\n V;\n F = Entropic(γ),\n Ω = Ω,\n p = p,\n x̃ = x̃,\n )\n success = primal ≈ dual ? \"✓\" : \"×\"\n println(\"$(success) γ = $(γ), primal = $(primal), dual = $(dual)\")\nend","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Uh oh! What happened with the last line? It looks our primal_risk_averse_subgradient encountered an error and returned a subgradient of NaN. This is because of the overflow issue with exp(x). However, we can be confident that our dual method of computing the risk-averse subgradient is both correct and more numerically robust than the primal version.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"info: Info\nAs another sanity check, notice how as gamma rightarrow 0, we tend toward the solution of the expectation risk-measure [12], and as gamma rightarrow infty, we tend toward the solution of the worse-case risk measure [18].","category":"page"},{"location":"explanation/risk/#Risk-averse-decision-rules:-Part-II","page":"Risk aversion","title":"Risk-averse decision rules: Part II","text":"","category":"section"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Why is the risk-averse subgradient theorem helpful? 
Using the dual representation of a convex risk measure, we can re-write the cut:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"theta ge mathbbF_j in i^+ varphi in Omega_jleftV_j^k(x^prime_k varphi)right + fracddx^primemathbbF_j in i^+ varphi in Omega_jleftV_j^k(x^prime_k varphi)right^top (x^prime - x^prime_k)quad k=1ldotsK","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"as:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"theta ge mathbbE_q_kleftV_j^k(x^prime_k varphi) + fracddx^primeV_j^k(x^prime_k varphi)^top (x^prime - x^prime_k)right - alpha(p q_k)quad k=1ldotsK","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"where q_k = mathrmargsuplimits_q inmathcalM(p) mathbbE_qV_j^k(x_k^prime varphi) - alpha(p q).","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Therefore, we can formulate a risk-averse decision rule as:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"beginaligned\nV_i^K(x omega) = minlimits_barx x^prime u C_i(barx u omega) + theta\n x^prime = T_i(barx u omega) \n u in U_i(barx omega) \n barx = x \n theta ge mathbbE_q_kleftV_j^k(x^prime_k varphi) + fracddx^primeV_j^k(x^prime_k varphi)^top (x^prime - x^prime_k)right - alpha(p q_k)quad k=1ldotsK \n theta ge M\nendaligned","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"where q_k = mathrmargsuplimits_q inmathcalM(p) mathbbE_qV_j^k(x_k^prime varphi) - alpha(p q).","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Thus, to implement risk-averse SDDP, all we need to do is modify the backward pass to include this calculation of q_k, form the cut using q_k instead of p, and subtract the penalty term alpha(p q_k).","category":"page"},{"location":"explanation/risk/#Implementation","page":"Risk aversion","title":"Implementation","text":"","category":"section"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Now we're ready to implement our risk-averse version of SDDP.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"As a prerequisite, we need most of the code from Introductory theory.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"

      \nClick to view code from the tutorial \"Introductory theory\".","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"struct State\n in::JuMP.VariableRef\n out::JuMP.VariableRef\nend\n\nstruct Uncertainty\n parameterize::Function\n Ω::Vector{Any}\n P::Vector{Float64}\nend\n\nstruct Node\n subproblem::JuMP.Model\n states::Dict{Symbol,State}\n uncertainty::Uncertainty\n cost_to_go::JuMP.VariableRef\nend\n\nstruct PolicyGraph\n nodes::Vector{Node}\n arcs::Vector{Dict{Int,Float64}}\nend\n\nfunction Base.show(io::IO, model::PolicyGraph)\n println(io, \"A policy graph with $(length(model.nodes)) nodes\")\n println(io, \"Arcs:\")\n for (from, arcs) in enumerate(model.arcs)\n for (to, probability) in arcs\n println(io, \" $(from) => $(to) w.p. $(probability)\")\n end\n end\n return\nend\n\nfunction PolicyGraph(\n subproblem_builder::Function;\n graph::Vector{Dict{Int,Float64}},\n lower_bound::Float64,\n optimizer,\n)\n nodes = Node[]\n for t in 1:length(graph)\n model = JuMP.Model(optimizer)\n states, uncertainty = subproblem_builder(model, t)\n JuMP.@variable(model, cost_to_go >= lower_bound)\n obj = JuMP.objective_function(model)\n JuMP.@objective(model, Min, obj + cost_to_go)\n if length(graph[t]) == 0\n JuMP.fix(cost_to_go, 0.0; force = true)\n end\n push!(nodes, Node(model, states, uncertainty, cost_to_go))\n end\n return PolicyGraph(nodes, graph)\nend\n\nfunction sample_uncertainty(uncertainty::Uncertainty)\n r = rand()\n for (p, ω) in zip(uncertainty.P, uncertainty.Ω)\n r -= p\n if r < 0.0\n return ω\n end\n end\n return error(\"We should never get here because P should sum to 1.0.\")\nend\n\nfunction sample_next_node(model::PolicyGraph, current::Int)\n if length(model.arcs[current]) == 0\n return nothing\n else\n r = rand()\n for (to, probability) in model.arcs[current]\n r -= probability\n if r < 0.0\n return to\n end\n end\n return nothing\n end\nend\n\nfunction forward_pass(model::PolicyGraph, io::IO = stdout)\n incoming_state =\n Dict(k => JuMP.fix_value(v.in) for (k, v) in model.nodes[1].states)\n simulation_cost = 0.0\n trajectory = Tuple{Int,Dict{Symbol,Float64}}[]\n t = 1\n while t !== nothing\n node = model.nodes[t]\n ω = sample_uncertainty(node.uncertainty)\n node.uncertainty.parameterize(ω)\n for (k, v) in incoming_state\n JuMP.fix(node.states[k].in, v; force = true)\n end\n JuMP.optimize!(node.subproblem)\n if JuMP.termination_status(node.subproblem) != JuMP.MOI.OPTIMAL\n error(\"Something went terribly wrong!\")\n end\n outgoing_state = Dict(k => JuMP.value(v.out) for (k, v) in node.states)\n stage_cost =\n JuMP.objective_value(node.subproblem) - JuMP.value(node.cost_to_go)\n simulation_cost += stage_cost\n incoming_state = outgoing_state\n push!(trajectory, (t, outgoing_state))\n t = sample_next_node(model, t)\n end\n return trajectory, simulation_cost\nend\n\nfunction upper_bound(model::PolicyGraph; replications::Int)\n simulations = [forward_pass(model, devnull) for i in 1:replications]\n z = [s[2] for s in simulations]\n μ = Statistics.mean(z)\n tσ = 1.96 * Statistics.std(z) / sqrt(replications)\n return μ, tσ\nend\n\nfunction lower_bound(model::PolicyGraph)\n node = model.nodes[1]\n bound = 0.0\n for (p, ω) in zip(node.uncertainty.P, node.uncertainty.Ω)\n node.uncertainty.parameterize(ω)\n JuMP.optimize!(node.subproblem)\n bound += p * JuMP.objective_value(node.subproblem)\n end\n return bound\nend\n\nfunction evaluate_policy(\n model::PolicyGraph;\n node::Int,\n incoming_state::Dict{Symbol,Float64},\n 
random_variable,\n)\n the_node = model.nodes[node]\n the_node.uncertainty.parameterize(random_variable)\n for (k, v) in incoming_state\n JuMP.fix(the_node.states[k].in, v; force = true)\n end\n JuMP.optimize!(the_node.subproblem)\n return Dict(\n k => JuMP.value.(v) for\n (k, v) in JuMP.object_dictionary(the_node.subproblem)\n )\nend","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"

      ","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"First, we need to modify the backward pass to compute the cuts using the risk-averse subgradient theorem:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"function backward_pass(\n model::PolicyGraph,\n trajectory::Vector{Tuple{Int,Dict{Symbol,Float64}}},\n io::IO = stdout;\n risk_measure::AbstractRiskMeasure,\n)\n println(io, \"| Backward pass\")\n for i in reverse(1:length(trajectory))\n index, outgoing_states = trajectory[i]\n node = model.nodes[index]\n println(io, \"| | Visiting node $(index)\")\n if length(model.arcs[index]) == 0\n continue\n end\n # =====================================================================\n # New! Create vectors to store the cut expressions, V(x,ω) and p:\n cut_expressions, V_ω, p = JuMP.AffExpr[], Float64[], Float64[]\n # =====================================================================\n for (j, P_ij) in model.arcs[index]\n next_node = model.nodes[j]\n for (k, v) in outgoing_states\n JuMP.fix(next_node.states[k].in, v; force = true)\n end\n for (pφ, φ) in zip(next_node.uncertainty.P, next_node.uncertainty.Ω)\n next_node.uncertainty.parameterize(φ)\n JuMP.optimize!(next_node.subproblem)\n V = JuMP.objective_value(next_node.subproblem)\n dVdx = Dict(\n k => JuMP.reduced_cost(v.in) for (k, v) in next_node.states\n )\n # =============================================================\n # New! Construct and append the expression\n # `V_j^K(x_k, φ) + dVdx_j^K(x'_k, φ)ᵀ(x - x_k)` to the list of\n # cut expressions.\n push!(\n cut_expressions,\n JuMP.@expression(\n node.subproblem,\n V + sum(\n dVdx[k] * (x.out - outgoing_states[k]) for\n (k, x) in node.states\n ),\n )\n )\n # Add the objective value to Z:\n push!(V_ω, V)\n # Add the probability to p:\n push!(p, P_ij * pφ)\n # =============================================================\n end\n end\n # =====================================================================\n # New! Using the solutions in V_ω, compute q and α:\n q, α = dual_risk_inner(risk_measure, V_ω, p)\n println(io, \"| | | Z = \", Z)\n println(io, \"| | | p = \", p)\n println(io, \"| | | q = \", q)\n println(io, \"| | | α = \", α)\n # Then add the cut:\n c = JuMP.@constraint(\n node.subproblem,\n node.cost_to_go >=\n sum(q[i] * cut_expressions[i] for i in 1:length(q)) - α\n )\n # =====================================================================\n println(io, \"| | | Adding cut : \", c)\n end\n return nothing\nend","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"We also need to update the train loop of SDDP to pass a risk measure to the backward pass:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"function train(\n model::PolicyGraph;\n iteration_limit::Int,\n replications::Int,\n # =========================================================================\n # New! Add a risk_measure argument\n risk_measure::AbstractRiskMeasure,\n # =========================================================================\n io::IO = stdout,\n)\n for i in 1:iteration_limit\n println(io, \"Starting iteration $(i)\")\n outgoing_states, _ = forward_pass(model, io)\n # =====================================================================\n # New! 
Pass the risk measure to the backward pass.\n backward_pass(model, outgoing_states, io; risk_measure = risk_measure)\n # =====================================================================\n println(io, \"| Finished iteration\")\n println(io, \"| | lower_bound = \", lower_bound(model))\n end\n μ, tσ = upper_bound(model; replications = replications)\n println(io, \"Upper bound = $(μ) ± $(tσ)\")\n return\nend","category":"page"},{"location":"explanation/risk/#Risk-averse-bounds","page":"Risk aversion","title":"Risk-averse bounds","text":"","category":"section"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"warning: Warning\nThis section is important.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"When we had a risk-neutral policy (i.e., we only used the expectation risk measure), we discussed how we could form valid lower and upper bounds.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"The upper bound is still valid as a Monte Carlo simulation of the expected cost of the policy. (Although this upper bound doesn't capture the change in the policy we wanted to achieve, namely that the impact of the worst outcomes were reduced.)","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"However, if we use a different risk measure, the lower bound is no longer valid!","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"We can still calculate a \"lower bound\" as the objective of the first-stage approximated subproblem, and this will converge to a finite value. However, we can't meaningfully interpret it as a bound with respect to the optimal policy. Therefore, it's best to just ignore the lower bound when training a risk-averse policy.","category":"page"},{"location":"explanation/risk/#Example:-risk-averse-hydro-thermal-scheduling","page":"Risk aversion","title":"Example: risk-averse hydro-thermal scheduling","text":"","category":"section"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Now it's time for an example. 
We create the same problem as Introductory theory:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"model = PolicyGraph(\n graph = [Dict(2 => 1.0), Dict(3 => 1.0), Dict{Int,Float64}()],\n lower_bound = 0.0,\n optimizer = HiGHS.Optimizer,\n) do subproblem, t\n JuMP.set_silent(subproblem)\n JuMP.@variable(subproblem, volume_in == 200)\n JuMP.@variable(subproblem, 0 <= volume_out <= 200)\n states = Dict(:volume => State(volume_in, volume_out))\n JuMP.@variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n inflow\n end)\n JuMP.@constraints(\n subproblem,\n begin\n volume_out == volume_in + inflow - hydro_generation - hydro_spill\n demand_constraint, thermal_generation + hydro_generation == 150.0\n end\n )\n fuel_cost = [50.0, 100.0, 150.0]\n JuMP.@objective(subproblem, Min, fuel_cost[t] * thermal_generation)\n uncertainty =\n Uncertainty([0.0, 50.0, 100.0], [1 / 3, 1 / 3, 1 / 3]) do ω\n return JuMP.fix(inflow, ω)\n end\n return states, uncertainty\nend","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Then we train a risk-averse policy, passing a risk measure to train:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"train(\n model;\n iteration_limit = 3,\n replications = 100,\n risk_measure = Entropic(1.0),\n)","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"Finally, evaluate the decision rule:","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"evaluate_policy(\n model;\n node = 1,\n incoming_state = Dict(:volume => 150.0),\n random_variable = 75,\n)","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"info: Info\nFor this trivial example, the risk-averse policy isn't very different from the policy obtained using the expectation risk-measure. 
If you try it on some bigger/more interesting problems, you should see the expected cost increase, and the upper tail of the policy decrease.","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"","category":"page"},{"location":"explanation/risk/","page":"Risk aversion","title":"Risk aversion","text":"This page was generated using Literate.jl.","category":"page"}] } diff --git a/dev/tutorial/SDDP.log b/dev/tutorial/SDDP.log index 4f97c619e..817ebba94 100644 --- a/dev/tutorial/SDDP.log +++ b/dev/tutorial/SDDP.log @@ -4,7 +4,7 @@ problem nodes : 60 state variables : 9 - scenarios : 1.97983e+14 + scenarios : 4.73116e+14 existing cuts : false options solver : serial mode @@ -24,25 +24,25 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 -1.083408e+03 4.999236e+02 2.646229e-01 227 1 - 16 5.464957e+00 8.526075e+00 1.279864e+00 4642 1 - 34 8.729848e+00 6.782521e+00 2.317225e+00 8693 1 - 50 9.343050e+00 6.555701e+00 3.351536e+00 12335 1 - 65 9.810967e+00 6.455388e+00 4.381311e+00 15690 1 - 81 8.301378e+00 6.384775e+00 5.435284e+00 19182 1 - 91 9.613478e+00 6.351201e+00 6.449486e+00 22632 1 - 106 7.451472e+00 6.310884e+00 7.453968e+00 25812 1 - 120 6.109025e+00 6.248287e+00 8.468015e+00 28885 1 - 134 9.483938e+00 6.244512e+00 9.493798e+00 31968 1 - 193 8.369255e+00 6.216856e+00 1.459561e+01 45891 1 - 248 1.012527e+01 6.201280e+00 1.960202e+01 57846 1 - 253 8.853247e+00 6.196918e+00 2.007769e+01 58951 1 + 1 -8.247974e+02 5.824634e+02 2.382989e-01 252 1 + 24 7.629740e+00 8.046865e+00 1.239766e+00 6798 1 + 50 8.398914e+00 6.631298e+00 2.276382e+00 12715 1 + 74 9.910696e+00 6.365864e+00 3.318086e+00 18238 1 + 95 9.460788e+00 6.315605e+00 4.319570e+00 22920 1 + 110 7.639985e+00 6.309557e+00 5.335199e+00 27665 1 + 129 8.615365e+00 6.273265e+00 6.364477e+00 32128 1 + 148 7.973974e+00 6.272666e+00 7.416542e+00 36421 1 + 165 7.957840e+00 6.266964e+00 8.425059e+00 40470 1 + 181 9.355880e+00 6.260511e+00 9.426192e+00 44232 1 + 251 9.734387e+00 6.245928e+00 1.445950e+01 61932 1 + 313 8.952395e+00 6.240952e+00 1.946521e+01 77751 1 + 320 7.669271e+00 6.240801e+00 2.001882e+01 79390 1 ------------------------------------------------------------------- status : time_limit -total time (s) : 2.007769e+01 -total solves : 58951 -best bound : 6.196918e+00 -simulation ci : -3.164338e+01 ± 6.122435e+01 +total time (s) : 2.001882e+01 +total solves : 79390 +best bound : 6.240801e+00 +simulation ci : -3.262793e+01 ± 6.778629e+01 numeric issues : 0 ------------------------------------------------------------------- @@ -67,19 +67,19 @@ subproblem structure numerical stability report matrix range [1e+00, 1e+00] objective range [1e+00, 1e+00] - bounds range [1e+00, 3e+00] + bounds range [5e-01, 3e+00] rhs range [0e+00, 0e+00] ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 0.000000e+00 4.944698e-01 3.010988e-03 13 1 - 40 2.232250e-01 2.104120e-01 7.277203e-02 560 1 + 1 0.000000e+00 4.920979e-01 2.429008e-03 13 1 + 40 2.172068e-01 1.437196e-01 4.517603e-02 560 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 7.277203e-02 +total time (s) : 4.517603e-02 total solves : 560 -best bound : 2.104120e-01 -simulation 
ci : 2.046787e-01 ± 2.065867e-02 +best bound : 1.437196e-01 +simulation ci : 1.233057e-01 ± 8.897681e-02 numeric issues : 0 ------------------------------------------------------------------- @@ -109,11 +109,11 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 1.079600e+03 3.157700e+02 5.507493e-02 104 1 - 10 6.829100e+02 6.829100e+02 2.178199e-01 1092 1 + 1 1.079600e+03 3.157700e+02 3.940701e-02 104 1 + 10 6.829100e+02 6.829100e+02 1.439669e-01 1092 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 2.178199e-01 +total time (s) : 1.439669e-01 total solves : 1092 best bound : 6.829100e+02 simulation ci : 7.289889e+02 ± 7.726064e+01 @@ -145,18 +145,16 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 2.480000e+02 7.686378e+01 6.213808e-02 208 1 - 8 7.203679e+02 1.903452e+02 1.065958e+00 6864 1 - 33 4.757134e+02 2.371499e+02 2.103278e+00 12064 1 - 56 1.523666e+02 2.542035e+02 3.117099e+00 16848 1 - 79 1.739874e+02 2.637883e+02 4.146516e+00 21632 1 - 100 2.532259e+02 2.682606e+02 5.122473e+00 26000 1 + 1 0.000000e+00 0.000000e+00 3.858495e-02 208 1 + 31 1.618454e+02 2.349663e+02 1.061260e+00 11648 1 + 66 4.914984e+02 2.594031e+02 2.080874e+00 18928 1 + 100 8.794976e+01 2.686162e+02 3.109930e+00 26000 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 5.122473e+00 +total time (s) : 3.109930e+00 total solves : 26000 -best bound : 2.682606e+02 -simulation ci : 3.003115e+02 ± 3.861668e+01 +best bound : 2.686162e+02 +simulation ci : 2.982893e+02 ± 4.962906e+01 numeric issues : 0 ------------------------------------------------------------------- @@ -185,14 +183,14 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 1.750000e+04 2.500000e+03 4.281044e-03 12 1 - 10 5.000000e+03 8.333333e+03 3.327012e-02 201 1 + 1 1.750000e+04 3.437500e+03 3.243208e-03 12 1 + 10 1.500000e+04 8.333333e+03 2.025819e-02 201 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 3.327012e-02 +total time (s) : 2.025819e-02 total solves : 201 best bound : 8.333333e+03 -simulation ci : 1.062500e+04 ± 4.215930e+03 +simulation ci : 9.656250e+03 ± 3.622966e+03 numeric issues : 0 ------------------------------------------------------------------- @@ -221,14 +219,14 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 3.750000e+04 1.991887e+03 9.750128e-03 18 1 - 40 2.750000e+04 8.072917e+03 3.397300e-01 1320 1 + 1 1.562500e+04 1.991887e+03 4.980087e-03 18 1 + 40 1.875000e+03 8.072917e+03 1.440580e-01 1320 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 3.397300e-01 +total time (s) : 1.440580e-01 total solves : 1320 best bound : 8.072917e+03 -simulation ci : 9.772342e+03 ± 2.698713e+03 +simulation ci : 7.827991e+03 ± 2.451185e+03 
numeric issues : 0 ------------------------------------------------------------------- @@ -259,12 +257,12 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 2.499895e+01 1.562631e+00 3.831697e-02 6 1 - 24 8.333333e+00 8.333333e+00 1.041407e+00 150 1 - 40 8.333333e+00 8.333333e+00 1.723777e+00 246 1 + 1 2.499895e+01 1.562631e+00 2.783608e-02 6 1 + 29 8.333333e+00 8.333333e+00 1.030540e+00 180 1 + 40 8.333333e+00 8.333333e+00 1.413587e+00 246 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 1.723777e+00 +total time (s) : 1.413587e+00 total solves : 246 best bound : 8.333333e+00 simulation ci : 8.810723e+00 ± 8.167195e-01 @@ -297,15 +295,14 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 0.000000e+00 1.000000e+01 1.858091e-02 41 1 - 21 0.000000e+00 6.561000e+00 1.231539e+00 2577 1 - 60 2.500000e+01 6.561000e+00 2.008292e+00 4294 1 + 1 0.000000e+00 9.958140e+00 4.631996e-03 11 1 + 40 0.000000e+00 6.561000e+00 7.881789e-01 2736 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 2.008292e+00 -total solves : 4294 +total time (s) : 7.881789e-01 +total solves : 2736 best bound : 6.561000e+00 -simulation ci : 4.833333e+00 ± 1.799006e+00 +simulation ci : 6.500000e+00 ± 3.106175e+00 numeric issues : 0 ------------------------------------------------------------------- @@ -330,14 +327,14 @@ subproblem structure ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 9.112500e+03 3.261069e+03 7.368302e-02 39 1 - 96 9.281250e+03 5.092593e+03 9.007711e-01 4944 1 + 1 7.087500e+03 3.261069e+03 2.431607e-02 39 1 + 60 0.000000e+00 5.092593e+03 3.536880e-01 3240 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 9.007711e-01 -total solves : 4944 +total time (s) : 3.536880e-01 +total solves : 3240 best bound : 5.092593e+03 -simulation ci : 4.991782e+03 ± 7.052548e+02 +simulation ci : 5.274733e+03 ± 9.757726e+02 numeric issues : 0 ------------------------------------------------------------------- @@ -362,14 +359,14 @@ subproblem structure ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 9.125000e+03 3.477859e+03 8.873916e-02 39 1 - 80 7.125000e+03 5.135984e+03 9.182131e-01 4320 1 + 1 1.512500e+04 4.523098e+03 2.660584e-02 39 1 + 61 5.500000e+03 5.135984e+03 3.949859e-01 3279 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 9.182131e-01 -total solves : 4320 +total time (s) : 3.949859e-01 +total solves : 3279 best bound : 5.135984e+03 -simulation ci : 5.529798e+03 ± 7.601528e+02 +simulation ci : 5.834490e+03 ± 1.028638e+03 numeric issues : 0 ------------------------------------------------------------------- @@ -398,14 +395,14 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) 
solves pid ------------------------------------------------------------------- - 1 2.812500e+04 3.958333e+03 4.574060e-03 12 1 - 40 9.375000e+03 1.062500e+04 1.368001e-01 642 1 + 1 3.375000e+04 5.735677e+03 3.451824e-03 12 1 + 40 1.125000e+04 1.062500e+04 7.753396e-02 642 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 1.368001e-01 +total time (s) : 7.753396e-02 total solves : 642 best bound : 1.062500e+04 -simulation ci : 1.146154e+04 ± 2.573397e+03 +simulation ci : 1.373820e+04 ± 3.341683e+03 numeric issues : 0 ------------------------------------------------------------------- @@ -436,17 +433,16 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 1.371244e+06 8.466849e+04 9.043729e-01 67 1 - 2 2.473966e+06 1.120339e+05 2.813140e+01 1833 1 - 5 2.100208e+06 3.550252e+05 3.582787e+01 2214 1 - 7 2.777179e+06 3.603473e+05 4.138662e+01 2452 1 - 10 3.298793e+05 4.052698e+05 4.605443e+01 2701 1 + 1 1.234104e+06 5.017239e+04 2.624431e-01 35 1 + 2 2.507229e+06 1.429995e+05 2.269262e+01 2224 1 + 9 5.105361e+05 4.087944e+05 2.801921e+01 2649 1 + 10 4.827186e+05 4.254646e+05 2.911346e+01 2740 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 4.605443e+01 -total solves : 2701 -best bound : 4.052698e+05 -simulation ci : 1.253494e+06 ± 6.226140e+05 +total time (s) : 2.911346e+01 +total solves : 2740 +best bound : 4.254646e+05 +simulation ci : 7.571370e+05 ± 4.554385e+05 numeric issues : 0 ------------------------------------------------------------------- @@ -478,17 +474,16 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 1.717402e+06 1.512091e+05 1.765768e+00 63 1 - 2 1.733981e+06 1.907111e+05 8.241468e+01 2391 1 - 5 4.955408e+05 2.457192e+05 8.979027e+01 2600 1 - 8 4.534791e+05 3.367099e+05 9.589290e+01 2749 1 - 10 1.063543e+05 3.749701e+05 9.865890e+01 2819 1 + 1 5.694420e+04 4.857797e+04 2.946680e-01 15 1 + 2 3.984452e+06 1.346608e+05 5.208563e+01 2264 1 + 5 8.213595e+05 3.670880e+05 5.894946e+01 2533 1 + 10 2.560906e+05 4.061131e+05 6.420150e+01 2724 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 9.865890e+01 -total solves : 2819 -best bound : 3.749701e+05 -simulation ci : 5.754022e+05 ± 3.938386e+05 +total time (s) : 6.420150e+01 +total solves : 2724 +best bound : 4.061131e+05 +simulation ci : 6.357277e+05 ± 7.415943e+05 numeric issues : 0 ------------------------------------------------------------------- @@ -519,17 +514,16 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 5.740190e+05 4.874882e+04 5.277851e-01 27 1 - 2 5.943082e+04 6.694876e+04 3.211227e+01 2095 1 - 7 1.635773e+06 3.624599e+05 3.897763e+01 2314 1 - 9 8.573350e+05 4.046607e+05 4.439623e+01 2473 1 - 10 3.731937e+06 4.049763e+05 4.670413e+01 2536 1 + 1 1.885912e+06 6.803197e+04 9.583139e-01 72 1 + 2 1.251866e+05 1.645924e+05 2.313234e+01 2082 1 + 6 6.644346e+05 3.605045e+05 2.831640e+01 2310 1 + 10 3.856251e+05 4.126893e+05 3.301754e+01 2547 1 
------------------------------------------------------------------- status : iteration_limit -total time (s) : 4.670413e+01 -total solves : 2536 -best bound : 4.049763e+05 -simulation ci : 7.959406e+05 ± 7.054922e+05 +total time (s) : 3.301754e+01 +total solves : 2547 +best bound : 4.126893e+05 +simulation ci : 9.463587e+05 ± 5.532752e+05 numeric issues : 0 ------------------------------------------------------------------- @@ -553,14 +547,14 @@ subproblem structure ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 4.875000e+04 3.753370e+03 1.785588e-02 18 1 - 20 5.000000e+03 8.072917e+03 1.256318e-01 660 1 + 1 2.812500e+04 1.991887e+03 1.509690e-02 18 1 + 20 1.625000e+04 8.072917e+03 1.686609e-01 660 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 1.256318e-01 +total time (s) : 1.686609e-01 total solves : 660 best bound : 8.072917e+03 -simulation ci : 9.419861e+03 ± 4.758022e+03 +simulation ci : 1.003599e+04 ± 4.615369e+03 numeric issues : 0 ------------------------------------------------------------------- @@ -584,11 +578,11 @@ subproblem structure ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 6.500000e+00 3.000000e+00 3.839016e-03 6 1 - 5 3.500000e+00 3.500000e+00 9.364128e-03 33 1 + 1 6.500000e+00 3.000000e+00 2.931118e-03 6 1 + 5 3.500000e+00 3.500000e+00 6.483078e-03 33 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 9.364128e-03 +total time (s) : 6.483078e-03 total solves : 33 best bound : 3.500000e+00 simulation ci : 4.100000e+00 ± 1.176000e+00 @@ -615,11 +609,11 @@ subproblem structure ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 6.500000e+00 1.100000e+01 3.684044e-03 6 1 - 5 5.500000e+00 1.100000e+01 8.239031e-03 33 1 + 1 6.500000e+00 1.100000e+01 2.869129e-03 6 1 + 5 5.500000e+00 1.100000e+01 5.830050e-03 33 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 8.239031e-03 +total time (s) : 5.830050e-03 total solves : 33 best bound : 1.100000e+01 simulation ci : 5.700000e+00 ± 3.920000e-01 diff --git a/dev/tutorial/arma/index.html b/dev/tutorial/arma/index.html index 42c315e36..9945c8874 100644 --- a/dev/tutorial/arma/index.html +++ b/dev/tutorial/arma/index.html @@ -40,34 +40,36 @@ end return inflow end
      simulator (generic function with 1 method)

      When called with no arguments, it produces a vector of inflows:

      simulator()
      3-element Vector{Float64}:
      - 59.6
      - 49.6
      - 39.6
      + 50.1
      + 59.7
      + 49.7
      Warning

The simulator must return a Vector{Float64}, so it is limited to a uni-variate random variable. It is possible to do something similar for multi-variate random variables, but you'll have to manually construct the Markov transition matrix, and solution times scale poorly, even in the two-dimensional case.

The next step is to call SDDP.MarkovianGraph with our simulator. This function will attempt to fit a Markov chain to the stochastic process produced by your simulator. There are two key arguments: budget, the number of nodes to include in the fitted Markov chain, and scenarios, the number of simulator replications used to fit it:

      graph = SDDP.MarkovianGraph(simulator; budget = 8, scenarios = 30)
      Root
        (0, 0.0)
       Nodes
      - (1, 53.268362713207)
      - (1, 59.6)
      - (2, 50.97838503763715)
      + (1, 45.56476428430297)
      + (2, 45.647403671564405)
        (2, 59.7)
      - (3, 39.00579904539024)
      - (3, 51.01378868860214)
      - (3, 68.66382408556382)
      - (3, 69.3)
      + (2, 66.46365424683167)
      + (3, 38.416706823891026)
      + (3, 50.3323841079844)
      + (3, 68.66541689662692)
      + (3, 76.06365424683166)
       Arcs
      - (0, 0.0) => (1, 53.268362713207) w.p. 0.6666666666666666
      - (0, 0.0) => (1, 59.6) w.p. 0.3333333333333333
      - (1, 53.268362713207) => (2, 50.97838503763715) w.p. 0.85
      - (1, 53.268362713207) => (2, 59.7) w.p. 0.15
      - (1, 59.6) => (2, 50.97838503763715) w.p. 0.7
      - (1, 59.6) => (2, 59.7) w.p. 0.3
      - (2, 50.97838503763715) => (3, 68.66382408556382) w.p. 0.08333333333333333
      - (2, 50.97838503763715) => (3, 39.00579904539024) w.p. 0.4583333333333333
      - (2, 50.97838503763715) => (3, 51.01378868860214) w.p. 0.4583333333333333
      - (2, 50.97838503763715) => (3, 69.3) w.p. 0.0
      - (2, 59.7) => (3, 68.66382408556382) w.p. 0.3333333333333333
      - (2, 59.7) => (3, 39.00579904539024) w.p. 0.0
      - (2, 59.7) => (3, 51.01378868860214) w.p. 0.5
      - (2, 59.7) => (3, 69.3) w.p. 0.16666666666666666

      + (0, 0.0) => (1, 45.56476428430297) w.p. 1.0
      + (1, 45.56476428430297) => (2, 59.7) w.p. 0.3
      + (1, 45.56476428430297) => (2, 45.647403671564405) w.p. 0.6666666666666666
      + (1, 45.56476428430297) => (2, 66.46365424683167) w.p. 0.03333333333333333
      + (2, 45.647403671564405) => (3, 68.66541689662692) w.p. 0.0
      + (2, 45.647403671564405) => (3, 38.416706823891026) w.p. 0.65
      + (2, 45.647403671564405) => (3, 50.3323841079844) w.p. 0.35
      + (2, 45.647403671564405) => (3, 76.06365424683166) w.p. 0.0
      + (2, 59.7) => (3, 68.66541689662692) w.p. 0.5555555555555556
      + (2, 59.7) => (3, 38.416706823891026) w.p. 0.0
      + (2, 59.7) => (3, 50.3323841079844) w.p. 0.4444444444444444
      + (2, 59.7) => (3, 76.06365424683166) w.p. 0.0
      + (2, 66.46365424683167) => (3, 68.66541689662692) w.p. 0.0
      + (2, 66.46365424683167) => (3, 38.416706823891026) w.p. 0.0
      + (2, 66.46365424683167) => (3, 50.3323841079844) w.p. 0.0
      + (2, 66.46365424683167) => (3, 76.06365424683166) w.p. 1.0

      Here we can see we have created a MarkovianGraph with nodes like (2, 59.7). The first element of each node is the stage, and the second element is the inflow.

      Create a SDDP.PolicyGraph using graph as follows:

      model = SDDP.PolicyGraph(
           graph,  # <--- New stuff
           sense = :Min,
           lower_bound = 0.0,
      @@ -84,7 +86,7 @@
           # The new water balance constraint using the node:
           @constraint(sp, x.out == x.in - g_h - s + inflow)
       end
      A policy graph with 8 nodes.
      - Node indices: (1, 53.268362713207), (1, 59.6), (2, 50.97838503763715), (2, 59.7), (3, 39.00579904539024), (3, 51.01378868860214), (3, 68.66382408556382), (3, 69.3)
      + Node indices: (1, 45.56476428430297), (2, 45.647403671564405), (2, 59.7), (2, 66.46365424683167), (3, 38.416706823891026), (3, 50.3323841079844), (3, 68.66541689662692), (3, 76.06365424683166)
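To check the resulting policy, you can train and simulate the model with the usual SDDP.jl workflow. The following is a minimal sketch: the iteration limit, the number of replications, and the recorded variables (:x and :g_h, the names used in the water-balance constraint above) are illustrative choices, not part of this tutorial.

using SDDP

# Train the policy on the Markovian graph; 10 iterations is an arbitrary choice.
SDDP.train(model; iteration_limit = 10)

# Simulate 50 replications, recording the reservoir level and hydro generation.
simulations = SDDP.simulate(model, 50, [:x, :g_h])

# Each stage of a replication also records which (stage, inflow) node was visited.
println([stage[:node_index] for stage in simulations[1]])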
       

      When can this trick be used?

      The Markov chain approach should be used when:

      Vector auto-regressive models

The state-space expansion section assumed that the random variable was uni-variate. However, the approach naturally extends to vector auto-regressive models. For example, if inflow is a 2-dimensional vector, then we can fit a vector auto-regressive model to it as follows:

      \[inflow_{t} = A \times inflow_{t-1} + b + \varepsilon\]

Here $A$ is a 2-by-2 matrix, and $b$ and $\varepsilon$ are 2-by-1 vectors.

      model = SDDP.LinearPolicyGraph(
           stages = 3,
           sense = :Min,
      @@ -124,4 +126,4 @@
           end
       end
      A policy graph with 3 nodes.
        Node indices: 1, 2, 3
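As a concrete illustration of the expanded state space, here is a minimal, self-contained sketch of a hydro-thermal subproblem whose 2-dimensional inflow follows the VAR(1) dynamics above. The matrix A, the vector b, the noise support, the bounds, and the costs are made-up illustrative values, not the data used in this tutorial.

using SDDP, HiGHS
import JuMP

# Illustrative (made-up) VAR(1) coefficients:
A = [0.8 0.1; 0.2 0.7]
b = [5.0, 10.0]

model = SDDP.LinearPolicyGraph(
    stages = 3,
    sense = :Min,
    lower_bound = 0.0,
    optimizer = HiGHS.Optimizer,
) do sp, t
    # The reservoir volume and both lagged inflow components are state variables.
    @variable(sp, 0 <= x <= 200, SDDP.State, initial_value = 200)
    @variable(sp, inflow[1:2], SDDP.State, initial_value = 50.0)
    @variable(sp, g_t >= 0)
    @variable(sp, g_h >= 0)
    @variable(sp, s >= 0)
    @variable(sp, ε[1:2])
    # Meet demand with thermal and hydro generation:
    @constraint(sp, g_t + g_h == 150)
    # The water balance uses the first component of the inflow vector:
    @constraint(sp, x.out == x.in - g_h - s + inflow[1].out)
    # The VAR(1) dynamics: inflow_t = A * inflow_{t-1} + b + ε
    @constraint(
        sp,
        [i = 1:2],
        inflow[i].out == sum(A[i, j] * inflow[j].in for j in 1:2) + b[i] + ε[i],
    )
    # Three equally likely realizations of the 2-dimensional noise:
    SDDP.parameterize(sp, [[-2.0, -4.0], [0.0, 0.0], [2.0, 4.0]]) do ω
        return JuMP.fix.(ε, ω)
    end
    @stageobjective(sp, 50.0 * g_t)
end

The point of the expansion is that the lagged inflows enter the subproblem only through state variables, so the noise ε remains stagewise-independent.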
This page was generated using Literate.jl.

      diff --git a/dev/tutorial/convex.cuts.json b/dev/tutorial/convex.cuts.json index e1275bf77..416b1aca5 100644 --- a/dev/tutorial/convex.cuts.json +++ b/dev/tutorial/convex.cuts.json @@ -1 +1 @@ -[{"risk_set_cuts":[],"node":"1","single_cuts":[{"state":{"x":0.3349461006592547},"intercept":136942.1292824911,"coefficients":{"x":-317616.66663406935}},{"state":{"x":0.0},"intercept":321380.87547200953,"coefficients":{"x":-318249.99997748446}},{"state":{"x":2.009674979712126},"intercept":20520.215693455786,"coefficients":{"x":-1583.333385922418}},{"state":{"x":1.6747289444253708},"intercept":41506.84241237109,"coefficients":{"x":-2084.7222417758903}},{"state":{"x":1.3397828599947537},"intercept":85274.88839712719,"coefficients":{"x":-102389.32864985468}},{"state":{"x":1.0048367937075533},"intercept":119829.19657022016,"coefficients":{"x":-102389.32869556746}},{"state":{"x":0.6698921986092845},"intercept":160624.5749627731,"coefficients":{"x":-318276.828490851}},{"state":{"x":0.3349461006592547},"intercept":288146.80790651374,"coefficients":{"x":-349750.116063391}},{"state":{"x":0.0},"intercept":415256.1059336243,"coefficients":{"x":-349750.1161353031}},{"state":{"x":0.3349461006592547},"intercept":301263.25741897605,"coefficients":{"x":-349750.1160641953}},{"state":{"x":0.0},"intercept":419409.6482794747,"coefficients":{"x":-349750.1161366207}},{"state":{"x":2.149134087341046e-9},"intercept":419725.98273305735,"coefficients":{"x":-349750.11613673304}},{"state":{"x":2.339784601145899},"intercept":63989.87440068573,"coefficients":{"x":-34033.44933059884}},{"state":{"x":5.004837035142476},"intercept":20860.61670770407,"coefficients":{"x":-1583.3334860331213}},{"state":{"x":7.669890949778228},"intercept":48623.833851726646,"coefficients":{"x":-1980.486147760888}},{"state":{"x":7.33494488161516},"intercept":67815.76181308898,"coefficients":{"x":-1881.461918054299}},{"state":{"x":1.2284853662089397},"intercept":146329.67901676503,"coefficients":{"x":-112337.53670164582}},{"state":{"x":1.928485280284813},"intercept":117157.39647010295,"coefficients":{"x":-37119.34959359316}},{"state":{"x":1.9284854707847305},"intercept":117613.27746945353,"coefficients":{"x":-37156.886472180275}},{"state":{"x":1.2284855567796527},"intercept":152430.07828509514,"coefficients":{"x":-123153.8839496637}},{"state":{"x":1.928485470855527},"intercept":119549.67670153294,"coefficients":{"x":-40582.06321544427}},{"state":{"x":1.300000085933977},"intercept":149824.74419438874,"coefficients":{"x":-52483.050023046366}},{"state":{"x":0.0},"intercept":450887.4494490257,"coefficients":{"x":-334236.29923905403}},{"state":{"x":0.0},"intercept":460755.2469552457,"coefficients":{"x":-334236.29924299003}},{"state":{"x":0.528485644872456},"intercept":287722.1912255519,"coefficients":{"x":-330467.6533300426}},{"state":{"x":1.2284855561216683},"intercept":169121.43974469075,"coefficients":{"x":-118132.41019655067}},{"state":{"x":1.9284854701975414},"intercept":124815.92909977707,"coefficients":{"x":-38991.9298697983}},{"state":{"x":1.563433955934224},"intercept":142849.5352155579,"coefficients":{"x":-50389.374385511794}},{"state":{"x":2.2634338699894725},"intercept":116039.7752831945,"coefficients":{"x":-17539.968581451958}},{"state":{"x":1.9284854446275197},"intercept":132585.12442937362,"coefficients":{"x":-43912.91984834727}},{"state":{"x":2.933330840341916},"intercept":110398.52999279171,"coefficients":{"x":-20093.414599176882}},{"state":{"x":2.5983822713120563},"intercept":118286.00337616671,"coefficients":{"x":-20902.0058943669}},{"s
tate":{"x":2.2634337058694722},"intercept":125474.8181426826,"coefficients":{"x":-21474.72647143846}},{"state":{"x":1.9284852805075634},"intercept":134503.55251781366,"coefficients":{"x":-44977.56494898151}},{"state":{"x":1.2284853663568827},"intercept":177370.12113415267,"coefficients":{"x":-112398.42004437505}},{"state":{"x":1.9284852804327572},"intercept":137115.6278679765,"coefficients":{"x":-43161.80147990902}},{"state":{"x":1.300000085933977},"intercept":169333.57584067344,"coefficients":{"x":-112217.05951774173}},{"state":{"x":0.0},"intercept":473939.7462640453,"coefficients":{"x":-358821.03811642376}},{"state":{"x":0.0},"intercept":477603.6253374758,"coefficients":{"x":-358821.03813087137}},{"state":{"x":0.5284854537286539},"intercept":299332.6140515584,"coefficients":{"x":-331284.57046263316}},{"state":{"x":1.2284853649778664},"intercept":181046.7351821983,"coefficients":{"x":-112657.11052307887}},{"state":{"x":1.9284852790537406},"intercept":138279.88899345766,"coefficients":{"x":-43243.72016270678}},{"state":{"x":4.009690998690161},"intercept":101412.31731692393,"coefficients":{"x":-8202.301873100883}},{"state":{"x":3.674742451855502},"intercept":112427.98127826303,"coefficients":{"x":-11813.759577400975}},{"state":{"x":8.339793880928266},"intercept":75912.98263407225,"coefficients":{"x":-7792.186799750915}},{"state":{"x":8.004845337869945},"intercept":88545.42957649406,"coefficients":{"x":-7402.577478155595}},{"state":{"x":10.669896795487116},"intercept":79929.47806361246,"coefficients":{"x":-7032.448613594324}},{"state":{"x":10.33494835669888},"intercept":93298.21048231564,"coefficients":{"x":-6680.826193356874}},{"state":{"x":16.700000619636278},"intercept":63908.75731236918,"coefficients":{"x":-6346.784887520225}},{"state":{"x":15.400000550788462},"intercept":84742.99482620183,"coefficients":{"x":-6029.445647976816}},{"state":{"x":16.100000481942235},"intercept":93180.06449848262,"coefficients":{"x":-5727.973370538778}},{"state":{"x":14.800000413094724},"intercept":112746.69364523608,"coefficients":{"x":-5441.5747079564335}},{"state":{"x":15.500000344249582},"intercept":121086.69230484182,"coefficients":{"x":-5169.49597835885}},{"state":{"x":16.200000275409966},"intercept":129612.79888722773,"coefficients":{"x":-4911.021185089117}},{"state":{"x":14.900000206562234},"intercept":147616.49285554286,"coefficients":{"x":-4665.470132105047}},{"state":{"x":10.600000137716897},"intercept":178094.34984003945,"coefficients":{"x":-4432.196632140576}},{"state":{"x":9.30000006883099},"intercept":193825.5940450645,"coefficients":{"x":-4210.586808043118}},{"state":{"x":9.300000043897636},"intercept":203640.3776045465,"coefficients":{"x":-4000.0574751455547}},{"state":{"x":60.54430899779122},"intercept":21804.16736008844,"coefficients":{"x":-2216.684830073195}},{"state":{"x":60.209362897449054},"intercept":42530.94115658899,"coefficients":{"x":-2105.8505898652124}},{"state":{"x":60.20936291354831},"intercept":61477.42253248088,"coefficients":{"x":-2000.5580619199682}},{"state":{"x":59.509362952457124},"intercept":80770.27401964575,"coefficients":{"x":-1900.5301605165635}},{"state":{"x":60.20936291354831},"intercept":96283.72615552429,"coefficients":{"x":-1805.5036542688958}},{"state":{"x":59.50936293633894},"intercept":113275.52177201177,"coefficients":{"x":-1715.2284733532424}},{"state":{"x":60.209362897430125},"intercept":126876.46937545041,"coefficients":{"x":-1629.4670514378681}},{"state":{"x":60.10936303364453},"intercept":140902.82272900903,"coefficients":{"x":-1547.9937006105029}},{"s
tate":{"x":58.80936299132889},"intercept":155804.3572831814,"coefficients":{"x":-1470.594017355675}},{"state":{"x":59.509362953235915},"intercept":166899.64731666754,"coefficients":{"x":-1397.0643178293785}},{"state":{"x":60.2093629143271},"intercept":177326.1920807834,"coefficients":{"x":-1327.211103270859}},{"state":{"x":58.50936331908151},"intercept":190149.16863243742,"coefficients":{"x":-1260.8505494849326}},{"state":{"x":59.20936328102749},"intercept":199202.087955599,"coefficients":{"x":-1197.8080233730432}},{"state":{"x":57.90936324181699},"intercept":209980.4722969024,"coefficients":{"x":-1137.9176235972348}},{"state":{"x":56.609363203841234},"intercept":220013.30761167657,"coefficients":{"x":-1081.0217438405884}},{"state":{"x":57.30936316591449},"intercept":227294.2615297105,"coefficients":{"x":-1026.9706580515326}},{"state":{"x":58.009363127972264},"intercept":234127.38141316085,"coefficients":{"x":-975.6221265314313}},{"state":{"x":58.70936308998651},"intercept":240539.24828080914,"coefficients":{"x":-926.8410215667132}},{"state":{"x":59.40936305190519},"intercept":246554.90463422917,"coefficients":{"x":-880.4989718300168}},{"state":{"x":58.109363012457294},"intercept":253870.88990548946,"coefficients":{"x":-836.4740246056379}},{"state":{"x":58.80936297445084},"intercept":259079.88336885558,"coefficients":{"x":-794.650324721552}},{"state":{"x":59.50936293635786},"intercept":263963.5948881892,"coefficients":{"x":-754.9178098111072}},{"state":{"x":60.209362897449054},"intercept":268541.53017339314,"coefficients":{"x":-717.1719206262577}},{"state":{"x":60.209362897449054},"intercept":273308.9768690575,"coefficients":{"x":-681.3133258958653}},{"state":{"x":60.20936289793636},"intercept":277758.6196961197,"coefficients":{"x":-647.2476608974373}},{"state":{"x":59.50936292847143},"intercept":282340.7401021529,"coefficients":{"x":-614.8852791597275}},{"state":{"x":60.20936288956262},"intercept":285782.74916643824,"coefficients":{"x":-584.1410164886038}},{"state":{"x":60.879255456722646},"intercept":289021.70771291794,"coefficients":{"x":-554.933966932989}},{"state":{"x":60.54430899779122},"intercept":292582.34649985284,"coefficients":{"x":-527.187269858362}},{"state":{"x":60.209362897449054},"intercept":295894.66158472176,"coefficients":{"x":-500.82790764025503}},{"state":{"x":60.209362897449054},"intercept":298815.22108481,"coefficients":{"x":-475.7865135289224}},{"state":{"x":60.20936289656806},"intercept":301534.28261062107,"coefficients":{"x":-451.9971891206636}},{"state":{"x":60.54430899779122},"intercept":303920.8695980284,"coefficients":{"x":-429.39733092198253}},{"state":{"x":60.209362897449054},"intercept":306418.5242342302,"coefficients":{"x":-407.92746563684636}},{"state":{"x":60.209362889562605},"intercept":308607.10383519344,"coefficients":{"x":-387.5310936125363}},{"state":{"x":60.87925547282167},"intercept":310394.4497953674,"coefficients":{"x":-368.1545401714737}},{"state":{"x":60.54430901389048},"intercept":312413.2772752625,"coefficients":{"x":-349.7468144058305}},{"state":{"x":60.20936291354831},"intercept":314284.53039123217,"coefficients":{"x":-332.2594749311926}},{"state":{"x":59.50936293635785},"intercept":316133.1476017885,"coefficients":{"x":-315.64650244219723}},{"state":{"x":60.20936289744905},"intercept":317421.67657086515,"coefficients":{"x":-299.86417856297396}},{"state":{"x":60.209362914843176},"intercept":318820.7240608635,"coefficients":{"x":-284.8709708744099}},{"state":{"x":58.1093630115763},"intercept":320684.92480299406,"coefficients":{"x":-270.62742
36149551}},{"state":{"x":58.80936297356985},"intercept":321676.07934732956,"coefficients":{"x":-257.09605343035656}},{"state":{"x":59.509362935476865},"intercept":322596.7007784033,"coefficients":{"x":-244.24125200632722}},{"state":{"x":60.20936289656805},"intercept":323451.36449112784,"coefficients":{"x":-232.02919063578042}},{"state":{"x":60.544309009528725},"intercept":324324.8326990033,"coefficients":{"x":-220.42773232694913}},{"state":{"x":60.20936290918656},"intercept":325272.8999888703,"coefficients":{"x":-209.40634693828224}},{"state":{"x":59.80000136709824},"intercept":326160.44701275585,"coefficients":{"x":-198.9360308248812}},{"state":{"x":58.50000132805474},"intercept":327144.67279057705,"coefficients":{"x":-188.98923054559089}},{"state":{"x":59.20000129000199},"intercept":327686.2899568634,"coefficients":{"x":-179.5397702614997}},{"state":{"x":59.900001251655695},"intercept":328186.17835747474,"coefficients":{"x":-170.5627829757973}},{"state":{"x":55.60000121220227},"intercept":329457.33003648685,"coefficients":{"x":-162.0346451533204}},{"state":{"x":54.30000117429905},"intercept":330149.39699785714,"coefficients":{"x":-153.9329142549728}},{"state":{"x":50.00000113455484},"intercept":331217.61737014697,"coefficients":{"x":-146.23627002707133}},{"state":{"x":50.70000109669439},"intercept":331489.3146330467,"coefficients":{"x":-138.92445798432374}},{"state":{"x":49.400001058831165},"intercept":332000.04916011024,"coefficients":{"x":-131.97823658479848}},{"state":{"x":48.10000102097335},"intercept":332461.2815764951,"coefficients":{"x":-125.37932686649489}},{"state":{"x":43.800000982494645},"intercept":333234.0163407663,"coefficients":{"x":-119.11036354621976}},{"state":{"x":42.50000094467532},"intercept":333589.15453019703,"coefficients":{"x":-113.15484929143382}},{"state":{"x":41.200000906863764},"intercept":333905.9884980523,"coefficients":{"x":-107.49711177416516}},{"state":{"x":41.900000869058026},"intercept":333983.2163045458,"coefficients":{"x":-102.12226205477424}},{"state":{"x":42.60000083125093},"intercept":334048.25100011635,"coefficients":{"x":-97.01615472275887}},{"state":{"x":43.30000079344009},"intercept":334102.11881626165,"coefficients":{"x":-92.16535364994226}},{"state":{"x":39.00000075562737},"intercept":334583.5592808385,"coefficients":{"x":-87.55709520729505}},{"state":{"x":37.70000071782567},"intercept":334762.35736409354,"coefficients":{"x":-83.17925068668285}},{"state":{"x":38.40000068002623},"intercept":334759.07083545544,"coefficients":{"x":-79.02029884517445}},{"state":{"x":39.10000064222559},"intercept":334749.5017058001,"coefficients":{"x":-75.06929456157937}},{"state":{"x":37.800000604423715},"intercept":334876.9181134043,"coefficients":{"x":-71.31584180357781}},{"state":{"x":33.50000056662411},"intercept":335188.2639414154,"coefficients":{"x":-67.75006723482612}},{"state":{"x":29.200000528831833},"intercept":335463.22621739004,"coefficients":{"x":-73.63013864170121}},{"state":{"x":29.90000049104735},"intercept":335440.3131610871,"coefficients":{"x":-71.31583882504135}},{"state":{"x":25.600000453210296},"intercept":335758.6789827405,"coefficients":{"x":-78.2657037961941}},{"state":{"x":26.30000041540323},"intercept":335710.3885177224,"coefficients":{"x":-75.06937491136674}},{"state":{"x":22.000000377586975},"intercept":336073.51285189914,"coefficients":{"x":-87.78363445933047}},{"state":{"x":22.700000339816047},"intercept":336014.93407243525,"coefficients":{"x":-86.31800941003938}},{"state":{"x":23.400000302046532},"intercept":335955.89728814154,"coef
ficients":{"x":-85.38982283978487}},{"state":{"x":22.10000026427509},"intercept":336066.7009076423,"coefficients":{"x":-88.48358684053099}},{"state":{"x":22.800000226502853},"intercept":336008.83211801393,"coefficients":{"x":-85.78167510445252}},{"state":{"x":23.500000188713294},"intercept":335948.80257426447,"coefficients":{"x":-85.05015237692115}},{"state":{"x":19.200000150936212},"intercept":336326.3952993617,"coefficients":{"x":-90.22474294578436}},{"state":{"x":14.9000001131704},"intercept":336743.29083179653,"coefficients":{"x":-104.00334878165742}},{"state":{"x":10.600000075422402},"intercept":337203.886053104,"coefficients":{"x":-113.25406663260009}},{"state":{"x":9.30000003768237},"intercept":337352.75906500634,"coefficients":{"x":-113.28561501105881}},{"state":{"x":0.0},"intercept":609373.2910990788,"coefficients":{"x":-318249.99987846677}},{"state":{"x":0.41032975022810514},"intercept":521172.46569048543,"coefficients":{"x":-318249.99989345187}},{"state":{"x":0.0},"intercept":665381.5847814942,"coefficients":{"x":-318249.999985083}},{"state":{"x":0.04527797424972637},"intercept":655349.4317950493,"coefficients":{"x":-318249.99998414743}},{"state":{"x":0.745277885491313},"intercept":443779.19950260164,"coefficients":{"x":-101773.15988530831}},{"state":{"x":0.41032973676265144},"intercept":543922.4166775611,"coefficients":{"x":-349528.1659950379}},{"state":{"x":0.41032975022810514},"intercept":549491.0175426689,"coefficients":{"x":-349528.1660076839}},{"state":{"x":0.0},"intercept":694676.2148941597,"coefficients":{"x":-349528.1672077252}},{"state":{"x":0.41032973676265155},"intercept":551812.8215785245,"coefficients":{"x":-349528.16599506757}},{"state":{"x":0.4103297386651363},"intercept":551989.6498057761,"coefficients":{"x":-349528.165995135}},{"state":{"x":0.0},"intercept":695467.4471632504,"coefficients":{"x":-349528.16720804694}},{"state":{"x":0.41032975484437445},"intercept":552063.3721435834,"coefficients":{"x":-349528.16599516413}},{"state":{"x":0.7452778904251882},"intercept":452561.4017870642,"coefficients":{"x":-112267.25301329384}},{"state":{"x":0.4103297416965271},"intercept":552068.9917537717,"coefficients":{"x":-349528.16606111015}},{"state":{"x":2.6698962584869776},"intercept":342016.4742130089,"coefficients":{"x":-1625.1265625703186}},{"state":{"x":7.334948119258714},"intercept":339073.9497017015,"coefficients":{"x":-1066.9638847553845}},{"state":{"x":1.0151717245929266},"intercept":426454.82079364976,"coefficients":{"x":-112267.2527204439}},{"state":{"x":1.7151716364693914},"intercept":375150.5793061724,"coefficients":{"x":-36839.168623019425}},{"state":{"x":1.7151716439145377},"intercept":375799.39809452114,"coefficients":{"x":-37134.630012956324}},{"state":{"x":2.0501199941828268},"intercept":363531.0049598635,"coefficients":{"x":-13342.633722207604}},{"state":{"x":1.7151716308430982},"intercept":378591.579488222,"coefficients":{"x":-40409.79729223}},{"state":{"x":0.9151719969445737},"intercept":443973.5230485997,"coefficients":{"x":-115859.08681156671}},{"state":{"x":1.615171908821407},"intercept":384625.01705218677,"coefficients":{"x":-41547.211472922456}},{"state":{"x":0.3151718207883671},"intercept":597598.982701263,"coefficients":{"x":-330773.2835944105}},{"state":{"x":1.0151717320380724},"intercept":436273.0239142781,"coefficients":{"x":-109920.04062573492}},{"state":{"x":1.7151716439145375},"intercept":381700.67688021396,"coefficients":{"x":-39666.5134850962}},{"state":{"x":2.05011999980912},"intercept":368414.4433841178,"coefficients":{"x":-39666.513164
21095}},{"state":{"x":1.7151716364693916},"intercept":381700.6771758348,"coefficients":{"x":-39666.513474147134}},{"state":{"x":1.715171636661929},"intercept":381700.6771681976,"coefficients":{"x":-39666.513474147134}},{"state":{"x":1.0151717245929268},"intercept":436273.02473303245,"coefficients":{"x":-109920.04075556585}},{"state":{"x":1.7151716364693916},"intercept":381700.67718366964,"coefficients":{"x":-39666.51350039113}},{"state":{"x":1.7151716308503226},"intercept":381700.67740655853,"coefficients":{"x":-39666.51350039112}},{"state":{"x":0.31517180773238423},"intercept":599942.607696024,"coefficients":{"x":-330177.7292840487}},{"state":{"x":1.0151717189820906},"intercept":437015.17189735686,"coefficients":{"x":-109731.44855050495}},{"state":{"x":1.7151716308585554},"intercept":381935.6904719714,"coefficients":{"x":-39606.7926532522}},{"state":{"x":0.0},"intercept":707091.9885754088,"coefficients":{"x":-352364.9585723134}},{"state":{"x":0.0},"intercept":708069.4338504731,"coefficients":{"x":-352364.9585845846}},{"state":{"x":0.0},"intercept":708378.958187585,"coefficients":{"x":-352364.95858466154}},{"state":{"x":0.3151718077241525},"intercept":602082.8305952749,"coefficients":{"x":-330158.8176626934}},{"state":{"x":1.0151717189738583},"intercept":437692.90914799704,"coefficients":{"x":-109725.4598545268}},{"state":{"x":1.7151716308503229},"intercept":382150.3072677721,"coefficients":{"x":-39604.89627144595}},{"state":{"x":0.31517180771427117},"intercept":602150.7325318167,"coefficients":{"x":-330158.21714359516}},{"state":{"x":1.0151717189639775},"intercept":437714.41142794647,"coefficients":{"x":-109725.2697045374}},{"state":{"x":1.715171630840442},"intercept":382157.11632313684,"coefficients":{"x":-39604.83605729375}},{"state":{"x":1.0151717245929275},"intercept":437714.4108103082,"coefficients":{"x":-109725.26970507845}},{"state":{"x":1.7151716364693919},"intercept":382157.11610790936,"coefficients":{"x":-39604.836063289185}},{"state":{"x":1.7151716366619294},"intercept":382157.1161002839,"coefficients":{"x":-39604.83606328919}},{"state":{"x":1.0151717320380724},"intercept":437714.4099942934,"coefficients":{"x":-109725.26980903423}},{"state":{"x":1.7151716439145375},"intercept":382157.1158132016,"coefficients":{"x":-39604.83607006052}},{"state":{"x":2.050120007254266},"intercept":368891.54077959346,"coefficients":{"x":-39604.835545203525}},{"state":{"x":1.7151716439145375},"intercept":382157.11581354134,"coefficients":{"x":-39604.8360502405}},{"state":{"x":2.05011999980912},"intercept":368891.5410747054,"coefficients":{"x":-39604.83566777353}},{"state":{"x":1.7151716364693914},"intercept":382157.116108092,"coefficients":{"x":-39604.836059883186}},{"state":{"x":1.715171636469392},"intercept":382157.116108092,"coefficients":{"x":-39604.836059883186}},{"state":{"x":1.7151716366658858},"intercept":382157.1161003099,"coefficients":{"x":-39604.836059883186}},{"state":{"x":3.0048451074053517},"intercept":353687.4103814716,"coefficients":{"x":-5808.501234965626}},{"state":{"x":5.669896740059619},"intercept":345464.1348005382,"coefficients":{"x":-3127.2304313499026}},{"state":{"x":5.334948369823053},"intercept":350442.61528422975,"coefficients":{"x":-2930.5792351424507}},{"state":{"x":2.382613567446231},"intercept":369621.42104894295,"coefficients":{"x":-14419.548210341427}},{"state":{"x":2.4398007083321422},"intercept":368816.9504648176,"coefficients":{"x":-14419.548125362211}},{"state":{"x":5.139800335457808},"intercept":354843.0188712851,"coefficients":{"x":-2806.0335170324474}},{"stat
e":{"x":7.839799883628016},"intercept":351220.31864229596,"coefficients":{"x":-2665.7318187256506}},{"state":{"x":8.539798828832305},"intercept":353154.64745350496,"coefficients":{"x":-2532.4452223182607}},{"state":{"x":6.239798074674895},"intercept":362251.93528665625,"coefficients":{"x":-2405.823015610418}},{"state":{"x":6.9397975644983285},"intercept":363716.98623287503,"coefficients":{"x":-2285.531908690872}},{"state":{"x":9.639797195599103},"intercept":360804.36582378496,"coefficients":{"x":-2171.2553303366485}},{"state":{"x":7.339796822790145},"intercept":368604.1523184576,"coefficients":{"x":-2062.692597515912}},{"state":{"x":8.039796178424211},"intercept":369860.2505034268,"coefficients":{"x":-1959.5579902532425}},{"state":{"x":8.739795573682764},"intercept":371010.705805409,"coefficients":{"x":-1861.5801239277425}},{"state":{"x":9.439794725632556},"intercept":371951.76006161456,"coefficients":{"x":-1768.501145592536}},{"state":{"x":7.139794552780202},"intercept":377741.7039360395,"coefficients":{"x":-1680.0761498767063}},{"state":{"x":4.839793993074789},"intercept":382996.5783725663,"coefficients":{"x":-1697.3816320652304}},{"state":{"x":7.5397935682472035},"intercept":379858.783704872,"coefficients":{"x":-1612.5125871747157}},{"state":{"x":8.239792986754653},"intercept":379971.34582189587,"coefficients":{"x":-1531.8869869876828}},{"state":{"x":5.939792332282819},"intercept":384344.6505077289,"coefficients":{"x":-1603.5283230115456}},{"state":{"x":8.639791984516588},"intercept":381172.2883106789,"coefficients":{"x":-1523.3519307099568}},{"state":{"x":11.339791187124831},"intercept":378186.5959736722,"coefficients":{"x":-1447.184349700667}},{"state":{"x":11.004843541553612},"intercept":379549.35797008924,"coefficients":{"x":-1374.8251485611602}},{"state":{"x":10.669895767781565},"intercept":380660.6721099735,"coefficients":{"x":-1306.0839083768578}},{"state":{"x":10.334947898942149},"intercept":381542.2762267912,"coefficients":{"x":-1240.7797311940224}},{"state":{"x":1.4250979425068437},"intercept":418942.25079202896,"coefficients":{"x":-36109.92944954851}},{"state":{"x":0.025098108946302763},"intercept":719948.5152342843,"coefficients":{"x":-328734.8110774045}},{"state":{"x":0.7250980201935538},"intercept":501445.60638384835,"coefficients":{"x":-116167.50140400497}},{"state":{"x":1.4250979321020525},"intercept":429540.57089945296,"coefficients":{"x":-38369.70890469166}},{"state":{"x":0.72509798787327},"intercept":504425.1327319565,"coefficients":{"x":-116883.0983022717}},{"state":{"x":1.4250978997817687},"intercept":430665.59242840187,"coefficients":{"x":-38596.31458240223}},{"state":{"x":1.4250978997817687},"intercept":430683.8419021359,"coefficients":{"x":-38596.314585694614}},{"state":{"x":1.4250978981885558},"intercept":430684.2120144252,"coefficients":{"x":-38596.31458576909}},{"state":{"x":1.7600462754248252},"intercept":417756.4466212484,"coefficients":{"x":-38596.31442397807}},{"state":{"x":1.4250978988195058},"intercept":430684.21964894363,"coefficients":{"x":-38596.31458577064}},{"state":{"x":0.7250980306053558},"intercept":504749.64082859183,"coefficients":{"x":-116954.85677104325}},{"state":{"x":1.4250979425138535},"intercept":430786.98044146685,"coefficients":{"x":-38619.03809850157}},{"state":{"x":0.0},"intercept":739030.0076901517,"coefficients":{"x":-354652.37087301177}},{"state":{"x":0.36004645322143863},"intercept":623168.2810254502,"coefficients":{"x":-329529.3620918994}},{"state":{"x":1.0600463644890283},"intercept":469822.9223263139,"coefficients":{"x":-117213.
65988576249}},{"state":{"x":1.760046276387089},"intercept":419198.1319072207,"coefficients":{"x":-38700.99226821251}},{"state":{"x":1.425097899781769},"intercept":432187.0592390869,"coefficients":{"x":-38700.99258119783}},{"state":{"x":1.4250979425068435},"intercept":432187.5851905142,"coefficients":{"x":-38700.99258130903}},{"state":{"x":0.025098114377070584},"intercept":735782.4153011278,"coefficients":{"x":-354734.3253067276}},{"state":{"x":0.7250980256243217},"intercept":510228.0959075583,"coefficients":{"x":-125221.18423139154}},{"state":{"x":1.4250979375328194},"intercept":432550.06947009836,"coefficients":{"x":-41236.70859696359}},{"state":{"x":4.66989675859105},"intercept":389411.6988150315,"coefficients":{"x":-1976.2469020019405}},{"state":{"x":7.334948380142432},"intercept":386353.78033283446,"coefficients":{"x":-1644.536743066966}},{"state":{"x":2.3996876152552122},"intercept":404810.7436780995,"coefficients":{"x":-13726.084459432013}},{"state":{"x":1.6996877015746403},"intercept":424737.74071966234,"coefficients":{"x":-42097.58574161577}},{"state":{"x":2.399687615255212},"intercept":406242.83480350795,"coefficients":{"x":-14914.23549942127}},{"state":{"x":1.6996877058363418},"intercept":424969.00880496635,"coefficients":{"x":-42473.83358608228}},{"state":{"x":2.3996876195169126},"intercept":406333.01951182686,"coefficients":{"x":-15033.380648195945}},{"state":{"x":2.734636021387657},"intercept":401638.51337554003,"coefficients":{"x":-6343.904057691865}},{"state":{"x":2.3996876195169126},"intercept":407641.28038122656,"coefficients":{"x":-16092.283611000435}},{"state":{"x":2.734636017125957},"intercept":403129.4158950782,"coefficients":{"x":-7738.126183430908}},{"state":{"x":2.399687615255213},"intercept":407687.3257080362,"coefficients":{"x":-16533.787160386222}},{"state":{"x":1.6996877581604466},"intercept":426409.617719063,"coefficients":{"x":-40518.06563342573}},{"state":{"x":2.3996876718410176},"intercept":408143.51830095146,"coefficients":{"x":-15914.460637854505}},{"state":{"x":3.339793705838115},"intercept":398897.17717503593,"coefficients":{"x":-6622.912372104978}},{"state":{"x":3.004845279861981},"intercept":401472.8277538199,"coefficients":{"x":-7770.16810265536}},{"state":{"x":5.669896856628672},"intercept":390953.9048599489,"coefficients":{"x":-2104.1033487698496}},{"state":{"x":5.334948428305068},"intercept":393539.01106811175,"coefficients":{"x":-2282.598809963035}}],"multi_cuts":[]}] \ No newline at end of file 
+[{"risk_set_cuts":[],"node":"1","single_cuts":[{"state":{"x":0.0},"intercept":243326.59328732066,"coefficients":{"x":-317616.6666712048}},{"state":{"x":0.0},"intercept":321380.8754755208,"coefficients":{"x":-318249.99997748446}},{"state":{"x":0.33494610065925506},"intercept":239887.3841453035,"coefficients":{"x":-318249.9999574604}},{"state":{"x":0.0},"intercept":354558.1883954335,"coefficients":{"x":-318250.0000165849}},{"state":{"x":2.6812138430446143e-9},"intercept":357155.1901772344,"coefficients":{"x":-318250.00002266077}},{"state":{"x":1.0048367303169852},"intercept":104149.44115704585,"coefficients":{"x":-101729.16675297216}},{"state":{"x":5.669892135225995},"intercept":17254.291381194027,"coefficients":{"x":-633.3333467438426}},{"state":{"x":5.3349460650066005},"intercept":35106.20650764328,"coefficients":{"x":-1034.4444730304883}},{"state":{"x":1.9809704167390831},"intercept":69858.25642494485,"coefficients":{"x":-33491.810216234095}},{"state":{"x":1.6460220549813644},"intercept":82179.35277291868,"coefficients":{"x":-33797.575767946604}},{"state":{"x":1.9809704169447855},"intercept":76724.14425434978,"coefficients":{"x":-12285.899049868674}},{"state":{"x":1.6460220551870675},"intercept":92015.42364257542,"coefficients":{"x":-37054.770730002434}},{"state":{"x":1.2809705124686894},"intercept":113456.55571800526,"coefficients":{"x":-105619.70135434106}},{"state":{"x":1.9809704244406223},"intercept":91446.6803845803,"coefficients":{"x":-38286.773386679946}},{"state":{"x":1.6460220626829039},"intercept":104270.77243264552,"coefficients":{"x":-38286.77347935933}},{"state":{"x":0.9460221435449663},"intercept":151682.4895223297,"coefficients":{"x":-116793.84617774215}},{"state":{"x":1.6460220555174825},"intercept":105172.88862157828,"coefficients":{"x":-41825.25266027065}},{"state":{"x":1.2809705049648241},"intercept":120441.26195678097,"coefficients":{"x":-41825.25302902434}},{"state":{"x":1.9809704169367566},"intercept":93658.50409095317,"coefficients":{"x":-18085.198209709324}},{"state":{"x":1.6460220551790388},"intercept":108763.17175459271,"coefficients":{"x":-43661.6973356451}},{"state":{"x":1.3000000880281017},"intercept":119645.34483646575,"coefficients":{"x":-41825.252867727184}},{"state":{"x":0.0},"intercept":415304.8258753429,"coefficients":{"x":-334383.40556602017}},{"state":{"x":0.24602223943067753},"intercept":351453.12270556454,"coefficients":{"x":-334383.4055266088}},{"state":{"x":0.9460221507103888},"intercept":175072.10715549017,"coefficients":{"x":-123604.81718493038}},{"state":{"x":1.646022062682904},"intercept":112579.60083524587,"coefficients":{"x":-43982.06018701095}},{"state":{"x":0.9460221430088487},"intercept":176219.7751278543,"coefficients":{"x":-123706.26544265742}},{"state":{"x":1.6460220549813644},"intercept":112943.02906374943,"coefficients":{"x":-44014.1854738693}},{"state":{"x":1.9809704292363628},"intercept":98200.54921210898,"coefficients":{"x":-44014.1853271646}},{"state":{"x":1.6460220674786443},"intercept":112943.02851381642,"coefficients":{"x":-44014.18548229417}},{"state":{"x":0.24602223169633125},"intercept":358619.0250450039,"coefficients":{"x":-334495.0267840191}},{"state":{"x":0.9460221429760423},"intercept":178597.95842843936,"coefficients":{"x":-123751.78515354462}},{"state":{"x":1.646022054948558},"intercept":113696.12044254085,"coefficients":{"x":-44028.60006315346}},{"state":{"x":1.646022054948558},"intercept":113696.12044242029,"coefficients":{"x":-44028.600056070085}},{"state":{"x":1.6460220549485578},"intercept":113696.1204425578,"coef
ficients":{"x":-44028.600067040694}},{"state":{"x":1.6460220549813642},"intercept":113696.1204411134,"coefficients":{"x":-44028.6000670407}},{"state":{"x":1.980970416706276},"intercept":98948.81300252912,"coefficients":{"x":-44028.59985552081}},{"state":{"x":1.6460220549485578},"intercept":113696.12044255785,"coefficients":{"x":-44028.6000670407}},{"state":{"x":1.6460220679395579},"intercept":113696.1198705823,"coefficients":{"x":-44028.60006704068}},{"state":{"x":0.0},"intercept":445286.35871173296,"coefficients":{"x":-359745.266438794}},{"state":{"x":0.0},"intercept":446671.4970090058,"coefficients":{"x":-359745.2664530434}},{"state":{"x":0.27958706321563326},"intercept":351732.2011047319,"coefficients":{"x":-334499.5913951057}},{"state":{"x":0.9795869744953336},"intercept":176054.31190161692,"coefficients":{"x":-123757.79523041315}},{"state":{"x":1.6795868864677672},"intercept":112728.16064389031,"coefficients":{"x":-44030.50327271501}},{"state":{"x":4.344638554029036},"intercept":63107.09429365846,"coefficients":{"x":-5473.86806450855}},{"state":{"x":7.0096901892406525},"intercept":63638.87723376828,"coefficients":{"x":-5200.174676720077}},{"state":{"x":6.674741824135665},"intercept":80082.18644710619,"coefficients":{"x":-4940.165960191955}},{"state":{"x":6.3397934590773914},"intercept":96024.04206022434,"coefficients":{"x":-4693.157680709073}},{"state":{"x":6.004845094313728},"intercept":111473.48115746629,"coefficients":{"x":-4458.499811882806}},{"state":{"x":5.669896729588688},"intercept":126439.89075666116,"coefficients":{"x":-4235.57483750434}},{"state":{"x":5.334948364896324},"intercept":140932.95020864933,"coefficients":{"x":-4023.7961128855613}},{"state":{"x":39.99476805528168},"intercept":23382.919732051232,"coefficients":{"x":-2857.535002808005}},{"state":{"x":40.694768015324556},"intercept":41648.378237550496,"coefficients":{"x":-2714.6582536953565}},{"state":{"x":40.359821914946565},"intercept":61714.856747750935,"coefficients":{"x":-2578.925343250019}},{"state":{"x":39.659821954620526},"intercept":81581.9153136019,"coefficients":{"x":-2449.9790781444144}},{"state":{"x":40.359821914991166},"intercept":97066.48342449844,"coefficients":{"x":-2327.480126445027}},{"state":{"x":39.29476809298417},"intercept":115718.3356885525,"coefficients":{"x":-2211.106121937327}},{"state":{"x":39.99476805399842},"intercept":129571.7258867478,"coefficients":{"x":-2100.5508176859703}},{"state":{"x":40.6947680140413},"intercept":142767.4557590694,"coefficients":{"x":-1995.5232786604702}},{"state":{"x":40.35982191366332},"intercept":157290.29663778277,"coefficients":{"x":-1895.747115457241}},{"state":{"x":40.69476801532456},"intercept":169627.7805945736,"coefficients":{"x":-1800.9597618549767}},{"state":{"x":40.35982191494657},"intercept":182314.711508671,"coefficients":{"x":-1710.9117757985134}},{"state":{"x":39.65982195244316},"intercept":194732.52071017423,"coefficients":{"x":-1625.366189090054}},{"state":{"x":40.35982191281379},"intercept":204121.3199305698,"coefficients":{"x":-1544.0978816804964}},{"state":{"x":40.359821930798816},"intercept":213941.52756125966,"coefficients":{"x":-1466.8929896401887}},{"state":{"x":38.95982200911018},"intercept":225050.67335002864,"coefficients":{"x":-1393.5483422738764}},{"state":{"x":39.65982197024532},"intercept":232564.21658477257,"coefficients":{"x":-1323.8709272335614}},{"state":{"x":40.35982193061596},"intercept":239594.07344099478,"coefficients":{"x":-1257.6773829034064}},{"state":{"x":39.294768103395604},"intercept":248278.7035142014,"coefficients":
{"x":-1194.793515840286}},{"state":{"x":39.994768064409854},"intercept":254322.7489470407,"coefficients":{"x":-1135.0538420870332}},{"state":{"x":40.694768024452735},"intercept":259971.98770602804,"coefficients":{"x":-1078.3011519795575}},{"state":{"x":40.35982192407475},"intercept":266310.9746966137,"coefficients":{"x":-1024.386096388092}},{"state":{"x":40.00000038710287},"intercept":272220.63559684245,"coefficients":{"x":-973.1667935882383}},{"state":{"x":35.70000034712644},"intercept":281346.57593558606,"coefficients":{"x":-924.5084561728047}},{"state":{"x":36.4000003087112},"intercept":285318.25012157316,"coefficients":{"x":-878.2830355762336}},{"state":{"x":32.10000027003591},"intercept":293191.5294958454,"coefficients":{"x":-834.3688863145405}},{"state":{"x":27.800000231434023},"intercept":300394.4799474435,"coefficients":{"x":-792.650444928704}},{"state":{"x":23.500000192810077},"intercept":306974.45117240347,"coefficients":{"x":-753.0179261984649}},{"state":{"x":19.200000154191393},"intercept":312975.733721107,"coefficients":{"x":-715.367034307696}},{"state":{"x":19.900000115636352},"intercept":315041.75312585,"coefficients":{"x":-679.5986868235544}},{"state":{"x":15.600000077054567},"intercept":320177.1196471668,"coefficients":{"x":-645.6187581305956}},{"state":{"x":14.300000038522658},"intercept":323001.6263337289,"coefficients":{"x":-613.3378265147595}},{"state":{"x":0.339881032623446},"intercept":487068.9869045248,"coefficients":{"x":-317810.89032958035}},{"state":{"x":0.0},"intercept":642673.040169955,"coefficients":{"x":-318249.9999834007}},{"state":{"x":0.0},"intercept":657919.6291784071,"coefficients":{"x":-318249.9999986452}},{"state":{"x":0.3398810308957508},"intercept":554637.9322542254,"coefficients":{"x":-318249.9999579049}},{"state":{"x":3.0048449780152517},"intercept":333125.9718328692,"coefficients":{"x":-1777.5569654253748}},{"state":{"x":5.669896653127581},"intercept":332607.8335907602,"coefficients":{"x":-1390.4496499271074}},{"state":{"x":5.334948326286492},"intercept":336172.84713732003,"coefficients":{"x":-1513.9514246804406}},{"state":{"x":1.0057439853860277},"intercept":417635.4832503227,"coefficients":{"x":-102208.58465279969}},{"state":{"x":0.9795873403452164},"intercept":420725.96326312143,"coefficients":{"x":-102362.49998827364}},{"state":{"x":1.6795872516241395},"intercept":371177.99557243445,"coefficients":{"x":-33844.20967157056}},{"state":{"x":1.3446388457346277},"intercept":383883.8873662794,"coefficients":{"x":-102362.4993299617}},{"state":{"x":4.0096904400043805},"intercept":342327.0990298809,"coefficients":{"x":-2062.7513016675584}},{"state":{"x":3.674742033389473},"intercept":347358.8664608976,"coefficients":{"x":-2256.409182903229}},{"state":{"x":3.339793626841263},"intercept":352223.2283167776,"coefficients":{"x":-2379.059199800999}},{"state":{"x":3.004845220371589},"intercept":356901.3683763171,"coefficients":{"x":-2456.7375991871286}},{"state":{"x":7.669896813995722},"intercept":349363.786778978,"coefficients":{"x":-2333.9007345585815}},{"state":{"x":7.334948406984083},"intercept":353790.7128752026,"coefficients":{"x":-2217.2057145538306}},{"state":{"x":11.999999471314},"intercept":347386.7854103747,"coefficients":{"x":-2106.3454365643806}},{"state":{"x":9.699999522637814},"intercept":355693.0116546663,"coefficients":{"x":-2001.0281759050224}},{"state":{"x":10.399999575699052},"intercept":357614.1925360331,"coefficients":{"x":-1900.9767759455638}},{"state":{"x":13.09999962861968},"intercept":355674.2850165315,"coefficients":{"x":-1805.9279444
192912}},{"state":{"x":15.799999681328512},"intercept":353864.62739312765,"coefficients":{"x":-1715.6315523944975}},{"state":{"x":13.499999730749906},"intercept":360326.2943227255,"coefficients":{"x":-1629.8499821750981}},{"state":{"x":11.199999786149293},"intercept":366087.4276375979,"coefficients":{"x":-1548.3574927459215}},{"state":{"x":8.8999998409603},"intercept":371201.926552878,"coefficients":{"x":-1470.9396325124528}},{"state":{"x":6.599999894250447},"intercept":375720.0516982376,"coefficients":{"x":-1397.392686964547}},{"state":{"x":9.299999946830091},"intercept":373051.0390323821,"coefficients":{"x":-1327.5230657849922}},{"state":{"x":1.3473302461297312},"intercept":409121.99331624946,"coefficients":{"x":-102149.54782510176}},{"state":{"x":0.6473303406984046},"intercept":492263.01708258665,"coefficients":{"x":-318249.9998552065}},{"state":{"x":1.3473302519755095},"intercept":420488.44434173574,"coefficients":{"x":-102149.54889225427}},{"state":{"x":5.334948387667381},"intercept":379167.6876637666,"coefficients":{"x":-1474.097958355838}},{"state":{"x":1.6993785241704116},"intercept":409447.28143870714,"coefficients":{"x":-33764.154874227585}},{"state":{"x":1.699378523505987},"intercept":409892.9470806135,"coefficients":{"x":-33930.69046192327}},{"state":{"x":2.0343269492582894},"intercept":398680.7795216649,"coefficients":{"x":-12328.052805747753}},{"state":{"x":1.6993785492547173},"intercept":411621.6451450907,"coefficients":{"x":-36884.573832618684}},{"state":{"x":0.9993786372866662},"intercept":461364.01871737896,"coefficients":{"x":-105633.0498932618}},{"state":{"x":1.6993785492547167},"intercept":413310.2618786633,"coefficients":{"x":-37987.682485182486}},{"state":{"x":0.9993786119243647},"intercept":461364.0213964857,"coefficients":{"x":-105633.04989921045}},{"state":{"x":1.6993785238924157},"intercept":413310.2628499922,"coefficients":{"x":-37987.682489400526}},{"state":{"x":1.3343270122064812},"intercept":427177.72377975594,"coefficients":{"x":-37987.68301808363}},{"state":{"x":2.034326924173984},"intercept":400964.868472619,"coefficients":{"x":-16566.650251025887}},{"state":{"x":1.699378524170413},"intercept":413310.26283583743,"coefficients":{"x":-37987.68247470157}},{"state":{"x":1.6993785191072073},"intercept":413310.263032208,"coefficients":{"x":-37987.682459527095}},{"state":{"x":2.369275324671299},"intercept":396259.0117555039,"coefficients":{"x":-13612.766141667373}},{"state":{"x":2.0343269237622557},"intercept":401118.510175433,"coefficients":{"x":-16973.4761741962}},{"state":{"x":1.6993785237586847},"intercept":413600.170231026,"coefficients":{"x":-38394.508340029584}},{"state":{"x":2.669896802781309},"intercept":392245.30315810954,"coefficients":{"x":-13741.593987616112}},{"state":{"x":5.334948401829856},"intercept":380910.165675386,"coefficients":{"x":-2050.1310232421033}},{"state":{"x":4.772094358858579},"intercept":384345.3764232275,"coefficients":{"x":-2248.4163362530912}},{"state":{"x":2.472095179515127},"intercept":398162.89266590076,"coefficients":{"x":-13820.259448848421}},{"state":{"x":3.172095098759223},"intercept":393060.169633855,"coefficients":{"x":-6038.414016812903}},{"state":{"x":3.172095096718453},"intercept":393418.6161279571,"coefficients":{"x":-7000.578157824312}},{"state":{"x":4.0720933469199885},"intercept":389928.78057389834,"coefficients":{"x":-3878.8482279779278}},{"state":{"x":4.772094332162709},"intercept":388709.5511476443,"coefficients":{"x":-3168.6024112330592}},{"state":{"x":5.472095152819264},"intercept":388303.56323235587,"coeffic
ients":{"x":-2956.7815816466086}},{"state":{"x":3.172095100804358},"intercept":397112.25508611393,"coefficients":{"x":-4103.164269345684}},{"state":{"x":5.47209515282811},"intercept":390342.33113372227,"coefficients":{"x":-3185.649415772464}},{"state":{"x":3.1720951008132046},"intercept":398909.26011100656,"coefficients":{"x":-3316.913295498505}},{"state":{"x":4.772094360818698},"intercept":394814.83786425355,"coefficients":{"x":-3151.067660886331}},{"state":{"x":2.4720951814752454},"intercept":405180.3261729064,"coefficients":{"x":-14153.93711531144}},{"state":{"x":3.1720951007193405},"intercept":402001.1781964223,"coefficients":{"x":-6477.756293703705}},{"state":{"x":2.4720951815602543},"intercept":406582.6702723629,"coefficients":{"x":-7531.207490495066}},{"state":{"x":3.1720951008043494},"intercept":402445.2538429714,"coefficients":{"x":-4380.558396460619}},{"state":{"x":5.472095150774125},"intercept":394325.53355611104,"coefficients":{"x":-3382.8530405591046}},{"state":{"x":3.1720950987592196},"intercept":403458.93732136354,"coefficients":{"x":-4527.355764529125}},{"state":{"x":3.1720950975829467},"intercept":403473.3189536261,"coefficients":{"x":-3576.1365431923014}},{"state":{"x":5.199999641159109},"intercept":396900.71117928496,"coefficients":{"x":-3397.3297385555297}},{"state":{"x":5.899999589360851},"intercept":395564.41862720344,"coefficients":{"x":-3227.4632861545324}},{"state":{"x":6.599999539868806},"intercept":394671.4780027833,"coefficients":{"x":-3066.090150615348}},{"state":{"x":7.299999495029196},"intercept":394180.89485359826,"coefficients":{"x":-2912.7856600871105}},{"state":{"x":7.49959796774602},"intercept":395273.64273037773,"coefficients":{"x":-2767.146403371604}},{"state":{"x":8.199597672299491},"intercept":394973.188818968,"coefficients":{"x":-2628.78910616915}},{"state":{"x":10.899597376448638},"intercept":389736.87141512864,"coefficients":{"x":-2497.3496656524885}},{"state":{"x":11.599597111119127},"intercept":389798.69157103007,"coefficients":{"x":-2372.4821961296784}},{"state":{"x":9.299596849956522},"intercept":396658.53630911146,"coefficients":{"x":-2253.8581054556325}},{"state":{"x":6.999596553416902},"intercept":402874.87449778337,"coefficients":{"x":-2141.1652301776376}},{"state":{"x":4.699596258520763},"intercept":408494.9071415249,"coefficients":{"x":-2034.1070612989515}},{"state":{"x":5.399595966714386},"intercept":407765.5189911429,"coefficients":{"x":-1932.4017520642974}},{"state":{"x":6.099595674569965},"intercept":406986.3880035952,"coefficients":{"x":-1857.187744522982}},{"state":{"x":3.7995953616569276},"intercept":411778.04712568887,"coefficients":{"x":-2171.442774647716}},{"state":{"x":1.4995950771972564},"intercept":438107.8184744797,"coefficients":{"x":-35088.08936001597}},{"state":{"x":2.199594990883535},"intercept":423036.65793674556,"coefficients":{"x":-12748.85186403154}},{"state":{"x":4.4995952824796674},"intercept":411212.06916245166,"coefficients":{"x":-2270.9568977006043}},{"state":{"x":2.1995949916489876},"intercept":423872.4085335143,"coefficients":{"x":-12780.364668597538}},{"state":{"x":3.400001957787866},"intercept":415353.6037889313,"coefficients":{"x":-5716.251755850247}},{"state":{"x":6.100001705615746},"intercept":408514.05211616924,"coefficients":{"x":-2302.4696815146326}},{"state":{"x":8.800001392700942},"intercept":403474.1738725385,"coefficients":{"x":-2091.5641482528704}},{"state":{"x":11.500001096393188},"intercept":399003.6557557058,"coefficients":{"x":-1986.9859500834896}},{"state":{"x":9.200001055020557},"intercept":
404402.366637071,"coefficients":{"x":-1887.6366763243032}},{"state":{"x":11.90000075852391},"intercept":400127.7183135035,"coefficients":{"x":-1793.2548661496708}},{"state":{"x":14.60000051059244},"intercept":396099.82344144874,"coefficients":{"x":-1703.5921396168128}},{"state":{"x":12.300000214351932},"intercept":400396.7561217685,"coefficients":{"x":-1618.4125618477592}}],"multi_cuts":[]}] \ No newline at end of file diff --git a/dev/tutorial/decision_hazard/index.html b/dev/tutorial/decision_hazard/index.html index 89b21c9f5..452c444ec 100644 --- a/dev/tutorial/decision_hazard/index.html +++ b/dev/tutorial/decision_hazard/index.html @@ -70,4 +70,4 @@ end end -train_and_compute_cost(decision_hazard_2)
      Cost = $410.0

      Now we find that the cost of choosing the thermal generation before observing the inflow adds a much more reasonable cost of $10.

      Summary

      To summarize, the difference between here-and-now and wait-and-see variables is a modeling choice.

      To create a here-and-now decision, add it as a state variable to the previous stage

      In some cases, you'll need to add an additional "first-stage" problem to enable the model to choose an optimal value for the here-and-now decision variable. You do not need to do this if the first stage is deterministic. You must make sure that the subproblem is feasible for all possible incoming values of the here-and-now decision variable.


      This page was generated using Literate.jl.

      +train_and_compute_cost(decision_hazard_2)
      Cost = $410.0

      Now we find that the cost of choosing the thermal generation before observing the inflow adds a much more reasonable cost of $10.

      Summary

      To summarize, the difference between here-and-now and wait-and-see variables is a modeling choice.

      To create a here-and-now decision, add it as a state variable to the previous stage

      In some cases, you'll need to add an additional "first-stage" problem to enable the model to choose an optimal value for the here-and-now decision variable. You do not need to do this if the first stage is deterministic. You must make sure that the subproblem is feasible for all possible incoming values of the here-and-now decision variable.
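As a rough illustration of that pattern, here is a minimal, self-contained sketch (it is not the tutorial's decision_hazard_2 model, and the demand, costs, and inflow values are placeholders): the thermal decision is declared as a state variable, chosen in stage 1, and consumed via .in in stage 2.

using SDDP, HiGHS
import JuMP

sketch = SDDP.LinearPolicyGraph(
    stages = 2,
    sense = :Min,
    lower_bound = 0.0,
    optimizer = HiGHS.Optimizer,
) do sp, t
    # The here-and-now decision lives in the previous stage as a state variable.
    @variable(sp, 0 <= u_thermal <= 10, SDDP.State, initial_value = 0)
    if t == 1
        # Stage 1 chooses u_thermal.out before any uncertainty is revealed.
        @stageobjective(sp, 0)
    else
        @variable(sp, 0 <= u_hydro <= 10)
        @variable(sp, shortfall >= 0)
        SDDP.parameterize(sp, [2.0, 8.0]) do ω
            JuMP.set_upper_bound(u_hydro, ω)  # hydro is limited by the realized inflow
        end
        # The stage-1 decision arrives here as u_thermal.in.
        @constraint(sp, u_thermal.in + u_hydro + shortfall >= 10)
        @stageobjective(sp, 10 * u_thermal.in + 100 * shortfall)
    end
end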


      This page was generated using Literate.jl.

      diff --git a/dev/tutorial/example_milk_producer/index.html b/dev/tutorial/example_milk_producer/index.html index 85e2ff685..46f962a08 100644 --- a/dev/tutorial/example_milk_producer/index.html +++ b/dev/tutorial/example_milk_producer/index.html @@ -14,18 +14,18 @@ end simulator()
      12-element Vector{Float64}:
      - 4.273193758180922
      - 4.433686472508527
      - 4.746264095414407
      - 4.22102456166003
      - 3.883209354209702
      - 3.7147702786619523
      - 3.842637468368145
      - 3.9681857345802505
      - 3.887559255947527
      - 4.18907697596713
      - 4.137717759937826
      - 4.444756670062763

      It may be helpful to visualize a number of simulations of the price process:

      plot = Plots.plot(
      + 4.877152280220573
      + 5.079543517740338
      + 5.400806543689856
      + 5.724807135228363
      + 6.528364099349721
      + 6.63153661978813
      + 6.257825066296184
      + 6.436783959062588
      + 7.297381258609721
      + 7.226304331049737
      + 7.460551462104687
      + 7.6067487051031755

      It may be helpful to visualize a number of simulations of the price process:

      plot = Plots.plot(
           [simulator() for _ in 1:500];
           color = "gray",
           opacity = 0.2,
      @@ -37,546 +37,546 @@
       )
[plot: 500 simulated price trajectories from simulator]

      The prices gradually revert to the mean of $6/kg, and there is high volatility.
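That mean-reverting behaviour can be reproduced with very little code. The following is a minimal sketch, assuming an AR(1)-style process pulled toward a long-run mean of 6; the coefficients are illustrative and are not the tutorial's simulator:

function illustrative_simulator(; T = 12, x̄ = 6.0, ϕ = 0.75, σ = 0.4)
    price = x̄ + σ * randn()
    prices = Float64[]
    for _ in 1:T
        # Pull the price back toward the long-run mean x̄, plus noise.
        price = x̄ + ϕ * (price - x̄) + σ * randn()
        push!(prices, max(price, 0.0))
    end
    return prices
end

illustrative_simulator()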

      We can't incorporate this price process directly into SDDP.jl, but we can fit a SDDP.MarkovianGraph directly from the simulator:

      graph = SDDP.MarkovianGraph(simulator; budget = 60, scenarios = 10_000);

      Here budget is the number of nodes in the policy graph, and scenarios is the number of simulations to use when estimating the transition probabilities.
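To see what "estimating the transition probabilities" could mean in practice, here is an illustrative sketch (it is not SDDP.jl's internal algorithm): simulate many price paths, bucket the prices at each stage, and count the observed bucket-to-bucket transitions.

function illustrative_transition_matrices(simulator; scenarios = 10_000, edges = 0.0:1.0:12.0)
    paths = [simulator() for _ in 1:scenarios]
    T, K = length(first(paths)), length(edges)
    bucket(x) = clamp(searchsortedlast(edges, x), 1, K)
    counts = [zeros(K, K) for _ in 1:(T - 1)]
    for path in paths, t in 1:(T - 1)
        counts[t][bucket(path[t]), bucket(path[t + 1])] += 1
    end
    # Row-normalize so each row is a probability distribution.
    return [c ./ max.(sum(c; dims = 2), 1) for c in counts]
end

illustrative_transition_matrices(simulator; scenarios = 1_000)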

The graph contains too many nodes to be shown, but we can plot it:

      for ((t, price), edges) in graph.nodes
           for ((t′, price′), probability) in edges
      @@ -593,873 +593,870 @@
       plot
[plot: the nodes and arcs of the fitted Markovian policy graph]

      That looks okay. Try changing budget and scenarios to see how different Markovian policy graphs can be created.

      Model

      Now that we have a Markovian graph, we can build the model. See if you can work out how we arrived at this formulation by reading the background description. Do all the variables and constraints make sense?

      model = SDDP.PolicyGraph(
           graph;
      @@ -1514,7 +1511,7 @@
               )
           end
       end
      A policy graph with 60 nodes.
      - Node indices: (1, 4.585480195979982), ..., (12, 7.835650525845394)
      + Node indices: (1, 4.57529091712132), ..., (12, 7.889755827663258)
       

      Training a policy

Now that we have a model, we can train a policy. The SDDP.SimulatorSamplingScheme is used in the forward pass. It generates an out-of-sample sequence of prices using simulator and traverses the closest sequence of nodes in the policy graph. When calling SDDP.parameterize for each subproblem, it uses the new out-of-sample price instead of the price associated with the Markov node.
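The "closest sequence of nodes" idea can be pictured with a one-line helper (an illustration of the idea only, not SDDP.jl's implementation), mapping an out-of-sample price to the nearest Markov node price at that stage:

closest_node_price(price, node_prices) = node_prices[argmin(abs.(node_prices .- price))]

closest_node_price(5.7, [4.6, 5.9, 7.1, 8.4])  # returns 5.9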

      SDDP.train(
           model;
           time_limit = 20,
      @@ -1526,7 +1523,7 @@
       problem
         nodes           : 60
         state variables : 9
      -  scenarios       : 1.97983e+14
      +  scenarios       : 4.73116e+14
         existing cuts   : false
       options
         solver          : serial mode
      @@ -1546,32 +1543,32 @@
       -------------------------------------------------------------------
        iteration    simulation      bound        time (s)     solves  pid
       -------------------------------------------------------------------
      -         1  -1.083408e+03  4.999236e+02  2.646229e-01       227   1
      -        16   5.464957e+00  8.526075e+00  1.279864e+00      4642   1
      -        34   8.729848e+00  6.782521e+00  2.317225e+00      8693   1
      -        50   9.343050e+00  6.555701e+00  3.351536e+00     12335   1
      -        65   9.810967e+00  6.455388e+00  4.381311e+00     15690   1
      -        81   8.301378e+00  6.384775e+00  5.435284e+00     19182   1
      -        91   9.613478e+00  6.351201e+00  6.449486e+00     22632   1
      -       106   7.451472e+00  6.310884e+00  7.453968e+00     25812   1
      -       120   6.109025e+00  6.248287e+00  8.468015e+00     28885   1
      -       134   9.483938e+00  6.244512e+00  9.493798e+00     31968   1
      -       193   8.369255e+00  6.216856e+00  1.459561e+01     45891   1
      -       248   1.012527e+01  6.201280e+00  1.960202e+01     57846   1
      -       253   8.853247e+00  6.196918e+00  2.007769e+01     58951   1
      +         1  -8.247974e+02  5.824634e+02  2.382989e-01       252   1
      +        24   7.629740e+00  8.046865e+00  1.239766e+00      6798   1
      +        50   8.398914e+00  6.631298e+00  2.276382e+00     12715   1
      +        74   9.910696e+00  6.365864e+00  3.318086e+00     18238   1
      +        95   9.460788e+00  6.315605e+00  4.319570e+00     22920   1
      +       110   7.639985e+00  6.309557e+00  5.335199e+00     27665   1
      +       129   8.615365e+00  6.273265e+00  6.364477e+00     32128   1
      +       148   7.973974e+00  6.272666e+00  7.416542e+00     36421   1
      +       165   7.957840e+00  6.266964e+00  8.425059e+00     40470   1
      +       181   9.355880e+00  6.260511e+00  9.426192e+00     44232   1
      +       251   9.734387e+00  6.245928e+00  1.445950e+01     61932   1
      +       313   8.952395e+00  6.240952e+00  1.946521e+01     77751   1
      +       320   7.669271e+00  6.240801e+00  2.001882e+01     79390   1
       -------------------------------------------------------------------
       status         : time_limit
      -total time (s) : 2.007769e+01
      -total solves   : 58951
      -best bound     :  6.196918e+00
      -simulation ci  : -3.164338e+01 ± 6.122435e+01
      +total time (s) : 2.001882e+01
      +total solves   : 79390
      +best bound     :  6.240801e+00
      +simulation ci  : -3.262793e+01 ± 6.778629e+01
       numeric issues : 0
       -------------------------------------------------------------------
      Warning

      We're intentionally terminating the training early so that the documentation doesn't take too long to build. If you run this example locally, increase the time limit.

      Simulating the policy

      When simulating the policy, we can also use the SDDP.SimulatorSamplingScheme.

      simulations = SDDP.simulate(
           model,
           200,
           Symbol[:x_forward, :x_stock, :u_spot_sell, :u_spot_buy];
           sampling_scheme = SDDP.SimulatorSamplingScheme(simulator),
      -);

      To show how the sampling scheme uses the new out-of-sample price instead of the price associated with the Markov node, compare the index of the Markov state visited in stage 12 of the first simulation:

      simulations[1][12][:node_index]
      (12, 5.723626805703308)

      to the realization of the noise (price, ω) passed to SDDP.parameterize:

      simulations[1][12][:noise_term]
      (5.792123234793774, (production = 0.15,))

      Visualizing the policy

Finally, we can plot the policy to gain insight (although note that we terminated the training early, so we should re-train the policy for more iterations before making too many judgements).

      Plots.plot(
      +);

      To show how the sampling scheme uses the new out-of-sample price instead of the price associated with the Markov node, compare the index of the Markov state visited in stage 12 of the first simulation:

      simulations[1][12][:node_index]
      (12, 7.889755827663258)

      to the realization of the noise (price, ω) passed to SDDP.parameterize:

      simulations[1][12][:noise_term]
      (7.753879305610619, (production = 0.1,))

      Visualizing the policy

Finally, we can plot the policy to gain insight (although note that we terminated the training early, so we should re-train the policy for more iterations before making too many judgements).

      Plots.plot(
           SDDP.publication_plot(simulations; title = "sum(x_stock.out)") do data
               return sum(y.out for y in data[:x_stock])
           end,
      @@ -1588,147 +1585,145 @@
       )
[plot: publication plots of the simulated policy, including sum(x_stock.out)]

This page was generated using Literate.jl.

      diff --git a/dev/tutorial/example_newsvendor/index.html b/dev/tutorial/example_newsvendor/index.html index 314b56063..8aeebd276 100644 --- a/dev/tutorial/example_newsvendor/index.html +++ b/dev/tutorial/example_newsvendor/index.html @@ -2,16 +2,16 @@ Example: Two-stage Newsvendor · SDDP.jl

      Example: Two-stage Newsvendor

      This example is based on the classical newsvendor problem.

      using SDDP
       import HiGHS
       import Plots

      First, we need some discretized distribution of demand. For simplicity, we're going to sample 10 points from the uniform [0, 1) distribution three times and add them together. This is a rough approximation of a normal distribution.

      Ω = rand(10) .+ rand(10) .+ rand(10)
      10-element Vector{Float64}:
      - 1.1161248325957254
      - 1.5187654318250348
      - 1.2933554108874876
      - 1.0093503221189701
      - 1.790517367219246
      - 2.017602205750709
      - 2.0719819599876317
      - 1.4196518358122963
      - 1.6226579288974445
      - 2.472349140734798

      We also need some price data. We assume the agent can buy a newspaper for $1 and sell it for $1.20.

      buy_price, sales_price = 1.0, 1.2
      (1.0, 1.2)

      Now we can formulate and train a policy for the two-stage newsvendor problem:

      model = SDDP.LinearPolicyGraph(
      + 1.086034027079558
      + 1.587532071598321
      + 2.0532015328990445
      + 1.4999427131413632
      + 1.2094754777815795
      + 2.4604896375111007
      + 2.182070881220729
      + 1.6185298469706542
      + 0.4736404095780452
      + 1.8632222153948566
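Why is the sum of three uniforms a rough stand-in for a normal distribution? It is an Irwin–Hall(3) random variable with mean 3/2 and variance 3/12 = 1/4, so it behaves roughly like Normal(1.5, 0.5). A quick empirical check (illustrative only):

using Statistics

samples = [rand() + rand() + rand() for _ in 1:100_000]
(mean(samples), std(samples))  # ≈ (1.5, 0.5)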

      We also need some price data. We assume the agent can buy a newspaper for $1 and sell it for $1.20.

      buy_price, sales_price = 1.0, 1.2
      (1.0, 1.2)

      Now we can formulate and train a policy for the two-stage newsvendor problem:

      model = SDDP.LinearPolicyGraph(
           stages = 2,
           sense = :Max,
           upper_bound = maximum(Ω) * sales_price,
      @@ -53,25 +53,25 @@
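For reference, a condensed, self-contained version of this two-stage formulation might look as follows. The inventory state and buy control match the names used later on this page; the second-stage details are an illustrative assumption and may differ from the tutorial's exact model:

import JuMP

sketch_model = SDDP.LinearPolicyGraph(
    stages = 2,
    sense = :Max,
    upper_bound = maximum(Ω) * sales_price,
    optimizer = HiGHS.Optimizer,
) do subproblem, stage
    @variable(subproblem, inventory >= 0, SDDP.State, initial_value = 0)
    if stage == 1
        @variable(subproblem, buy >= 0)
        @constraint(subproblem, inventory.out == inventory.in + buy)
        @stageobjective(subproblem, -buy_price * buy)
    else
        @variable(subproblem, sell >= 0)
        @constraint(subproblem, sell <= inventory.in)
        SDDP.parameterize(subproblem, Ω) do ω
            JuMP.set_upper_bound(sell, ω)  # cannot sell more than the demand ω
        end
        @constraint(subproblem, inventory.out == inventory.in - sell)
        @stageobjective(subproblem, sales_price * sell)
    end
end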
       numerical stability report
         matrix range     [1e+00, 1e+00]
         objective range  [1e+00, 1e+00]
      -  bounds range     [1e+00, 3e+00]
      +  bounds range     [5e-01, 3e+00]
         rhs range        [0e+00, 0e+00]
       -------------------------------------------------------------------
        iteration    simulation      bound        time (s)     solves  pid
       -------------------------------------------------------------------
      -         1   0.000000e+00  4.944698e-01  3.010988e-03        13   1
      -        40   2.232250e-01  2.104120e-01  7.277203e-02       560   1
      +         1   0.000000e+00  4.920979e-01  2.429008e-03        13   1
      +        40   2.172068e-01  1.437196e-01  4.517603e-02       560   1
       -------------------------------------------------------------------
       status         : simulation_stopping
      -total time (s) : 7.277203e-02
      +total time (s) : 4.517603e-02
       total solves   : 560
      -best bound     :  2.104120e-01
      -simulation ci  :  2.046787e-01 ± 2.065867e-02
      +best bound     :  1.437196e-01
      +simulation ci  :  1.233057e-01 ± 8.897681e-02
       numeric issues : 0
       -------------------------------------------------------------------

      To check the first-stage buy decision, we need to obtain a decision rule for the first-stage node 1:

      first_stage_rule = SDDP.DecisionRule(model, node = 1)
      A decision rule for node 1

      Then we can evaluate it, passing in a starting point for the incoming state:

      solution = SDDP.evaluate(
           first_stage_rule;
           incoming_state = Dict(:inventory => 0.0),
           controls_to_record = [:buy],
      -)
      (stage_objective = -1.1161248325957265, outgoing_state = Dict(:inventory => 1.1161248325957265), controls = Dict(:buy => 1.1161248325957265))

      The optimal value of the buy variable is stored here:

      solution.controls[:buy]
      1.1161248325957265

      Introducing risk

      The solution of a single newsvendor problem offers little insight about how a decision-maker should act. In particular, they may be averse to bad outcomes, such as when they purchase a larger number of newspapers only for there to be little demand.

      We can explore how the optimal decision changes with risk by creating a function:

      function solve_risk_averse_newsvendor(Ω, risk_measure)
      +)
      (stage_objective = -1.0860340270795585, outgoing_state = Dict(:inventory => 1.0860340270795585), controls = Dict(:buy => 1.0860340270795585))

      The optimal value of the buy variable is stored here:

      solution.controls[:buy]
      1.0860340270795585

      Introducing risk

      The solution of a single newsvendor problem offers little insight about how a decision-maker should act. In particular, they may be averse to bad outcomes, such as when they purchase a larger number of newspapers only for there to be little demand.

      We can explore how the optimal decision changes with risk by creating a function:

      function solve_risk_averse_newsvendor(Ω, risk_measure)
           model = SDDP.LinearPolicyGraph(
               stages = 2,
               sense = :Max,
      @@ -100,7 +100,7 @@
               controls_to_record = [:buy],
           )
           return solution.controls[:buy]
      -end
      solve_risk_averse_newsvendor (generic function with 1 method)

      Now we can see how many units a risk-neutral decision maker would order:

      solve_risk_averse_newsvendor(Ω, SDDP.Expectation())
      1.1161248325957265

      as well as a decision-maker who cares only about the worst-case outcome:

      solve_risk_averse_newsvendor(Ω, SDDP.WorstCase())
      1.0093503221189701

In general, the decision-maker will be somewhere between the two extremes. The SDDP.Entropic risk measure has a single parameter that lets us explore the space of policies between the two extremes. When the parameter is small, the measure acts like SDDP.Expectation, and when it is large, it acts like SDDP.WorstCase.

      Here is what we get if we solve our problem multiple times for different values of the risk aversion parameter $\gamma$:

      Γ = [10^i for i in -3:0.2:3]
      +end
      solve_risk_averse_newsvendor (generic function with 1 method)

      Now we can see how many units a risk-neutral decision maker would order:

      solve_risk_averse_newsvendor(Ω, SDDP.Expectation())
      1.0860340270795585
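This matches the classical newsvendor critical-fractile rule (a quick check, assuming the same prices as above): with buy price c and sales price p, the optimal order is the smallest quantity whose cumulative demand probability is at least (p - c)/p, which for ten equally likely scenarios is the second-smallest element of Ω.

c, p = buy_price, sales_price
critical_ratio = (p - c) / p   # ≈ 0.167, so order at roughly the 17th percentile of demand
sort(Ω)[2]                     # the second-smallest demand scenario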

      as well as a decision-maker who cares only about the worst-case outcome:

      solve_risk_averse_newsvendor(Ω, SDDP.WorstCase())
      0.4736404095780452

In general, the decision-maker will be somewhere between the two extremes. The SDDP.Entropic risk measure has a single parameter that lets us explore the space of policies between the two extremes. When the parameter is small, the measure acts like SDDP.Expectation, and when it is large, it acts like SDDP.WorstCase.
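Concretely, in the minimization (loss) convention the entropic risk measure is F_γ[Z] = (1/γ) log E[exp(γ Z)]; as γ → 0 it approaches the expectation, and as γ → ∞ it approaches the worst case. A tiny numerical sketch with hypothetical losses and probabilities (SDDP.jl handles the sign conventions for :Max problems internally):

entropic(losses, probs, γ) = log(sum(pr * exp(γ * l) for (l, pr) in zip(losses, probs))) / γ

losses, probs = [1.0, 2.0, 4.0], [0.5, 0.3, 0.2]   # hypothetical loss distribution
entropic(losses, probs, 1e-4)    # ≈ 1.9, the expectation of the losses
entropic(losses, probs, 100.0)   # ≈ 4.0, the worst-case loss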

      Here is what we get if we solve our problem multiple times for different values of the risk aversion parameter $\gamma$:

      Γ = [10^i for i in -3:0.2:3]
       buy = [solve_risk_averse_newsvendor(Ω, SDDP.Entropic(γ)) for γ in Γ]
       Plots.plot(
           Γ,
      @@ -112,40 +112,44 @@
       )
[plot: optimal order quantity for each value of the risk-aversion parameter Γ]

      Things to try

      There are a number of things you can try next:

      • Experiment with different buy and sales prices
      • Experiment with different distributions of demand
      • Explore how the optimal policy changes if you use a different risk measure
      • What happens if you can only buy and sell integer numbers of newspapers? Try this by adding Int to the variable definitions: @variable(subproblem, buy >= 0, Int)

      This page was generated using Literate.jl.


diff --git a/dev/tutorial/example_reservoir/index.html index a6c4935d1..983544368 100644 --- a/dev/tutorial/example_reservoir/index.html +++ b/dev/tutorial/example_reservoir/index.html @@ -14,111 +14,111 @@ )
[plot omitted]

      The number of weeks will be useful later:

      T = size(data, 1)
      52

      Deterministic JuMP model

      To start, we construct a deterministic model in pure JuMP.

      Create a JuMP model, using HiGHS as the optimizer:

      model = Model(HiGHS.Optimizer)
       set_silent(model)

      x[t]: the amount of water in the reservoir at the start of stage t:

      reservoir_max = 320.0
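One possible declaration is sketched below. The 1:T+1 indexing, so that x[T+1] is the end-of-horizon volume, is an assumption; 300 is the starting volume that appears in the simulations further down the page.

reservoir_initial = 300.0
@variable(model, 0 <= x[1:T+1] <= reservoir_max)
fix(x[1], reservoir_initial; force = true)  # fix the known starting volume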
      @@ -284,7 +284,7 @@
         Dual objective value : 6.82910e+02
       
       * Work counters
      -  Solve time (sec)   : 1.00994e-03
      +  Solve time (sec)   : 1.04022e-03
         Simplex iterations : 53
         Barrier iterations : 0
         Node count         : -1
      @@ -293,101 +293,101 @@
       Plots.plot!(value.(u), label = "Hydro")
[plot: generation by week, including the Hydro series]

      And here's the storage over time:

      Plots.plot(value.(x); label = "Storage", xlabel = "Week")
[plot: reservoir storage by week]

      Deterministic SDDP model

      For the next step, we show how to decompose our JuMP model into SDDP.jl. It should obtain the same solution!

      model = SDDP.LinearPolicyGraph(
           stages = T,
           sense = :Min,
      @@ -436,11 +436,11 @@ 

[plot omitted]

      Perfect. That's the same as we got before.

      Now let's look at x. This is a little more complicated, because we need to grab the outgoing value of the state variable in each stage:

      x_sim = [sim[:x].out for sim in simulations[1]]
       
       Plots.plot(x_sim; label = "Storage", xlabel = "Week")
[plot: simulated storage by week]

      Stochastic SDDP model

      Now we add some randomness to our model. In each stage, we assume that the inflow could be: 2 units lower, with 30% probability; the same as before, with 40% probability; or 5 units higher, with 30% probability.
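Before the full model, here is a condensed, self-contained sketch of how such a three-outcome noise enters a subproblem through SDDP.parameterize. It is not the tutorial's model: the demand of 10, base inflow of 4, and thermal cost of 50 are placeholders, and only three stages are used.

using SDDP, HiGHS
import JuMP

sketch = SDDP.LinearPolicyGraph(
    stages = 3,
    sense = :Min,
    lower_bound = 0.0,
    optimizer = HiGHS.Optimizer,
) do sp, t
    @variable(sp, 0 <= x <= 320, SDDP.State, initial_value = 300)  # storage
    @variable(sp, i)                    # inflow, fixed by the sampled noise below
    @variable(sp, u >= 0)               # hydro generation
    @variable(sp, r >= 0)               # thermal generation
    @constraint(sp, x.out == x.in - u + i)
    @constraint(sp, u + r == 10)        # placeholder demand
    SDDP.parameterize(sp, [-2.0, 0.0, 5.0], [0.3, 0.4, 0.3]) do ω
        JuMP.fix(i, 4.0 + ω)            # placeholder base inflow of 4 units
    end
    @stageobjective(sp, 50 * r)         # placeholder thermal cost
end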

      model = SDDP.LinearPolicyGraph(
           stages = T,
           sense = :Min,
      @@ -616,192 +616,190 @@ 

      n_replications = 100 simulations = SDDP.simulate(model, n_replications, [:x, :u, :r, :i]);

      100-element Vector{Vector{Dict{Symbol, Any}}}:
      - [Dict(:i => 1.0, :bellman_term => 227.00362981688795, :noise_term => 1, :node_index => 1, :stage_objective => 71.39999999999999, :objective_state => nothing, :u => 0.0, :r => 7.0, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 301.0)…), Dict(:i => 2.0, :bellman_term => 162.52410114721442, :noise_term => 2, :node_index => 2, :stage_objective => 73.84, :objective_state => nothing, :u => 0.0, :r => 7.1, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(301.0, 303.0)…), Dict(:i => 8.0, :bellman_term => 120.85206054480932, :noise_term => 8, :node_index => 3, :stage_objective => 0.0, :objective_state => nothing, :u => 7.199999999999193, :r => 0.0, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(303.0, 303.8000000000008)…), Dict(:i => 7.0, :bellman_term => 84.27249963936174, :noise_term => 7, :node_index => 4, :stage_objective => 0.0, :objective_state => nothing, :u => 7.300000000000068, :r => 0.0, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(303.8000000000008, 303.50000000000074)…), Dict(:i => 1.0, :bellman_term => 108.22852972327928, :noise_term => 1, :node_index => 5, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(303.50000000000074, 297.10000000000076)…), Dict(:i => 2.0, :bellman_term => 115.16718427760816, :noise_term => 2, :node_index => 6, :stage_objective => 0.0, :objective_state => nothing, :u => 7.600000000006162, :r => 0.0, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(297.10000000000076, 291.4999999999946)…), Dict(:i => 1.0, :bellman_term => 142.6056336688548, :noise_term => 1, :node_index => 7, :stage_objective => 0.0, :objective_state => nothing, :u => 7.800000000000637, :r => 0.0, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(291.4999999999946, 284.69999999999396)…), Dict(:i => 2.0, :bellman_term => 151.539091109561, :noise_term => 2, :node_index => 8, :stage_objective => 0.0, :objective_state => nothing, :u => 8.100000000000364, :r => 0.0, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(284.69999999999396, 278.5999999999936)…), Dict(:i => 3.0, :bellman_term => 158.97919171404055, :noise_term => 3, :node_index => 9, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(278.5999999999936, 273.2999999999936)…), Dict(:i => 7.0, :bellman_term => 118.12292276234166, :noise_term => 7, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(273.2999999999936, 271.69999999999357)…)  …  Dict(:i => 2.0, :bellman_term => 188.11965627760122, :noise_term => 2, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(30.899999999993568, 24.599999999993567)…), Dict(:i => 3.0, :bellman_term => 201.03181080430642, :noise_term => 3, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(24.599999999993567, 19.49999999999357)…), Dict(:i => 0.0, :bellman_term => 248.48219977411, :noise_term => 0, :node_index => 45, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(45 => 1.0), :x => SDDP.State{Float64}(19.49999999999357, 11.699999999993569)…), Dict(:i => 8.0, :bellman_term => 178.98850620010384, :noise_term => 8, :node_index => 46, :stage_objective => 0.0, :objective_state => 
nothing, :u => 7.6, :r => 0.0, :belief => Dict(46 => 1.0), :x => SDDP.State{Float64}(11.699999999993569, 12.09999999999357)…), Dict(:i => 7.0, :bellman_term => 113.09370420009216, :noise_term => 7, :node_index => 47, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(12.09999999999357, 11.69999999999357)…), Dict(:i => 1.0, :bellman_term => 156.33465400010647, :noise_term => 1, :node_index => 48, :stage_objective => 0.0, :objective_state => nothing, :u => 7.3, :r => 0.0, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(11.69999999999357, 5.399999999993571)…), Dict(:i => 7.0, :bellman_term => 88.9862200000922, :noise_term => 7, :node_index => 49, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2, :r => 0.0, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(5.399999999993571, 5.1999999999935715)…), Dict(:i => 3.0, :bellman_term => 99.22120000010368, :noise_term => 3, :node_index => 50, :stage_objective => 0.0, :objective_state => nothing, :u => 7.1, :r => 0.0, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(5.1999999999935715, 1.0999999999935728)…), Dict(:i => 1.0, :bellman_term => 60.52000000000001, :noise_term => 1, :node_index => 51, :stage_objective => 86.73000000011376, :objective_state => nothing, :u => 2.099999999993573, :r => 4.900000000006427, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(1.0999999999935728, 0.0)…), Dict(:i => 8.0, :bellman_term => 0.0, :noise_term => 8, :node_index => 52, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(0.0, 0.0)…)]
      - [Dict(:i => 8.0, :bellman_term => 157.70849961329745, :noise_term => 8, :node_index => 1, :stage_objective => 67.95478050807554, :objective_state => nothing, :u => 0.3377666168553374, :r => 6.662233383144662, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 307.66223338314467)…), Dict(:i => 7.0, :bellman_term => 116.42764471359253, :noise_term => 7, :node_index => 2, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0999999999999135, :r => 0.0, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(307.66223338314467, 307.56223338314476)…), Dict(:i => 8.0, :bellman_term => 76.07381623692208, :noise_term => 8, :node_index => 3, :stage_objective => 0.0, :objective_state => nothing, :u => 7.199999999999989, :r => 0.0, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(307.56223338314476, 308.36223338314477)…), Dict(:i => 2.0, :bellman_term => 88.00902471844347, :noise_term => 2, :node_index => 4, :stage_objective => 0.0, :objective_state => nothing, :u => 7.299999999999386, :r => 0.0, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(308.36223338314477, 303.0622333831454)…), Dict(:i => 3.0, :bellman_term => 94.14152235664187, :noise_term => 3, :node_index => 5, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(303.0622333831454, 298.6622333831454)…), Dict(:i => 0.0, :bellman_term => 119.53704086936159, :noise_term => 0, :node_index => 6, :stage_objective => 0.0, :objective_state => nothing, :u => 7.599999999995589, :r => 0.0, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(298.6622333831454, 291.0622333831498)…), Dict(:i => 1.0, :bellman_term => 147.22382460477002, :noise_term => 1, :node_index => 7, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8000000000014325, :r => 0.0, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(291.0622333831498, 284.2622333831484)…), Dict(:i => 7.0, :bellman_term => 107.0322551515037, :noise_term => 7, :node_index => 8, :stage_objective => 0.0, :objective_state => nothing, :u => 8.100000000000307, :r => 0.0, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(284.2622333831484, 283.1622333831481)…), Dict(:i => 1.0, :bellman_term => 133.36098393047314, :noise_term => 1, :node_index => 9, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(283.1622333831481, 275.86223338314807)…), Dict(:i => 0.0, :bellman_term => 161.89681504223518, :noise_term => 0, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(275.86223338314807, 267.26223338314804)…)  …  Dict(:i => 0.0, :bellman_term => 44.16085795108853, :noise_term => 0, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(44.46223338314806, 36.16223338314806)…), Dict(:i => 1.0, :bellman_term => 69.17079898249358, :noise_term => 1, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(36.16223338314806, 29.06223338314806)…), Dict(:i => 7.0, :bellman_term => 30.375028678022375, :noise_term => 7, :node_index => 45, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(45 => 1.0), :x => SDDP.State{Float64}(29.06223338314806, 28.26223338314806)…), Dict(:i => 1.0, :bellman_term => 
50.0967858497973, :noise_term => 1, :node_index => 46, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(46 => 1.0), :x => SDDP.State{Float64}(28.26223338314806, 21.662233383148063)…), Dict(:i => 7.0, :bellman_term => 16.345755234354385, :noise_term => 7, :node_index => 47, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(21.662233383148063, 21.262233383148065)…), Dict(:i => 3.0, :bellman_term => 17.043120397016537, :noise_term => 3, :node_index => 48, :stage_objective => 0.0, :objective_state => nothing, :u => 7.3, :r => 0.0, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(21.262233383148065, 16.962233383148064)…), Dict(:i => 0.0, :bellman_term => 32.78572760850619, :noise_term => 0, :node_index => 49, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2, :r => 0.0, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(16.962233383148064, 9.762233383148065)…), Dict(:i => 3.0, :bellman_term => 35.87296955387383, :noise_term => 3, :node_index => 50, :stage_objective => 0.0, :objective_state => nothing, :u => 7.1, :r => 0.0, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(9.762233383148065, 5.662233383148065)…), Dict(:i => 1.0, :bellman_term => 60.52000000000001, :noise_term => 1, :node_index => 51, :stage_objective => 5.978469118279251, :objective_state => nothing, :u => 6.662233383148065, :r => 0.3377666168519351, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(5.662233383148065, 0.0)…), Dict(:i => 1.0, :bellman_term => 0.0, :noise_term => 1, :node_index => 52, :stage_objective => 106.80000000000001, :objective_state => nothing, :u => 1.0, :r => 6.0, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(0.0, 0.0)…)]
      - [Dict(:i => 8.0, :bellman_term => 157.70849961329745, :noise_term => 8, :node_index => 1, :stage_objective => 67.95478050807554, :objective_state => nothing, :u => 0.3377666168553374, :r => 6.662233383144662, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 307.66223338314467)…), Dict(:i => 2.0, :bellman_term => 132.07724725626557, :noise_term => 2, :node_index => 2, :stage_objective => 34.99466634322792, :objective_state => nothing, :u => 3.7351282362280847, :r => 3.364871763771915, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(307.66223338314467, 305.9271051469166)…), Dict(:i => 1.0, :bellman_term => 161.35646369814867, :noise_term => 1, :node_index => 3, :stage_objective => 0.1140545784889241, :objective_state => nothing, :u => 7.189240134104807, :r => 0.01075986589518152, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(305.9271051469166, 299.73786501281177)…), Dict(:i => 0.0, :bellman_term => 192.25084559533616, :noise_term => 0, :node_index => 4, :stage_objective => 0.0, :objective_state => nothing, :u => 7.299999999999443, :r => 0.0, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(299.73786501281177, 292.4378650128123)…), Dict(:i => 1.0, :bellman_term => 224.2950214013199, :noise_term => 1, :node_index => 5, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(292.4378650128123, 286.03786501281235)…), Dict(:i => 2.0, :bellman_term => 235.28740400238758, :noise_term => 2, :node_index => 6, :stage_objective => 0.0, :objective_state => nothing, :u => 7.599999999999852, :r => 0.0, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(286.03786501281235, 280.4378650128125)…), Dict(:i => 8.0, :bellman_term => 187.04256946027408, :noise_term => 8, :node_index => 7, :stage_objective => 0.0, :objective_state => nothing, :u => 7.799999999992906, :r => 0.0, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(280.4378650128125, 280.6378650128196)…), Dict(:i => 2.0, :bellman_term => 196.77608766302637, :noise_term => 2, :node_index => 8, :stage_objective => 0.0, :objective_state => nothing, :u => 8.100000000000534, :r => 0.0, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(280.6378650128196, 274.53786501281905)…), Dict(:i => 8.0, :bellman_term => 148.7226835655565, :noise_term => 8, :node_index => 9, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(274.53786501281905, 274.23786501281904)…), Dict(:i => 2.0, :bellman_term => 158.03326175467737, :noise_term => 2, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(274.23786501281904, 267.637865012819)…)  …  Dict(:i => 0.0, :bellman_term => 40.85677500665827, :noise_term => 0, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(44.83786501281905, 36.537865012819054)…), Dict(:i => 3.0, :bellman_term => 46.6440031256293, :noise_term => 3, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(36.537865012819054, 31.437865012819053)…), Dict(:i => 2.0, :bellman_term => 51.41274353312377, :noise_term => 2, :node_index => 45, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(45 => 1.0), :x => SDDP.State{Float64}(31.437865012819053, 
25.637865012819052)…), Dict(:i => 1.0, :bellman_term => 78.45532698183376, :noise_term => 1, :node_index => 46, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(46 => 1.0), :x => SDDP.State{Float64}(25.637865012819052, 19.037865012819054)…), Dict(:i => 0.0, :bellman_term => 113.98442253812613, :noise_term => 0, :node_index => 47, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(19.037865012819054, 11.637865012819054)…), Dict(:i => 1.0, :bellman_term => 157.36355843702697, :noise_term => 1, :node_index => 48, :stage_objective => 0.0, :objective_state => nothing, :u => 7.3, :r => 0.0, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(11.637865012819054, 5.337865012819054)…), Dict(:i => 7.0, :bellman_term => 89.87772036907474, :noise_term => 7, :node_index => 49, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2, :r => 0.0, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(5.337865012819054, 5.137865012819055)…), Dict(:i => 1.0, :bellman_term => 116.96200000000002, :noise_term => 1, :node_index => 50, :stage_objective => 16.93357577438463, :objective_state => nothing, :u => 6.137865012819055, :r => 0.9621349871809448, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(5.137865012819055, 0.0)…), Dict(:i => 1.0, :bellman_term => 60.52000000000001, :noise_term => 1, :node_index => 51, :stage_objective => 106.19999999999999, :objective_state => nothing, :u => 1.0, :r => 6.0, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(0.0, 0.0)…), Dict(:i => 3.0, :bellman_term => 0.0, :noise_term => 3, :node_index => 52, :stage_objective => 71.2, :objective_state => nothing, :u => 3.0, :r => 4.0, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(0.0, 0.0)…)]
      - [Dict(:i => 1.0, :bellman_term => 227.00362981688795, :noise_term => 1, :node_index => 1, :stage_objective => 71.39999999999999, :objective_state => nothing, :u => 0.0, :r => 7.0, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 301.0)…), Dict(:i => 7.0, :bellman_term => 132.07724725626605, :noise_term => 7, :node_index => 2, :stage_objective => 52.281893527931814, :objective_state => nothing, :u => 2.0728948530834788, :r => 5.02710514691652, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(301.0, 305.9271051469165)…), Dict(:i => 3.0, :bellman_term => 141.4545253914589, :noise_term => 3, :node_index => 3, :stage_objective => 0.0, :objective_state => nothing, :u => 7.200000000000216, :r => 0.0, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(305.9271051469165, 301.7271051469163)…), Dict(:i => 7.0, :bellman_term => 101.9655427052985, :noise_term => 7, :node_index => 4, :stage_objective => 0.0, :objective_state => nothing, :u => 7.299999999999784, :r => 0.0, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(301.7271051469163, 301.4271051469165)…), Dict(:i => 1.0, :bellman_term => 127.96323718853137, :noise_term => 1, :node_index => 5, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(301.4271051469165, 295.02710514691654)…), Dict(:i => 0.0, :bellman_term => 155.82347403671884, :noise_term => 0, :node_index => 6, :stage_objective => 0.0, :objective_state => nothing, :u => 7.600000000001671, :r => 0.0, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(295.02710514691654, 287.42710514691487)…), Dict(:i => 1.0, :bellman_term => 187.1704280158133, :noise_term => 1, :node_index => 7, :stage_objective => 0.0, :objective_state => nothing, :u => 7.7999999999923375, :r => 0.0, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(287.42710514691487, 280.62710514692253)…), Dict(:i => 7.0, :bellman_term => 141.4246413255578, :noise_term => 7, :node_index => 8, :stage_objective => 0.0, :objective_state => nothing, :u => 8.09999999999917, :r => 0.0, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(280.62710514692253, 279.52710514692336)…), Dict(:i => 1.0, :bellman_term => 171.43747481944365, :noise_term => 1, :node_index => 9, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(279.52710514692336, 272.22710514692335)…), Dict(:i => 0.0, :bellman_term => 205.17463317868032, :noise_term => 0, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(272.22710514692335, 263.6271051469233)…)  …  Dict(:i => 2.0, :bellman_term => 6.627197536617359, :noise_term => 2, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(50.827105146923344, 44.52710514692335)…), Dict(:i => 3.0, :bellman_term => 7.024764391756207, :noise_term => 3, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(44.52710514692335, 39.427105146923346)…), Dict(:i => 2.0, :bellman_term => 6.568780552915769, :noise_term => 2, :node_index => 45, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(45 => 1.0), :x => SDDP.State{Float64}(39.427105146923346, 33.62710514692335)…), Dict(:i => 8.0, :bellman_term => 0.7097970555323627, :noise_term => 8, 
:node_index => 46, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(46 => 1.0), :x => SDDP.State{Float64}(33.62710514692335, 34.02710514692335)…), Dict(:i => 7.0, :bellman_term => 0.0, :noise_term => 7, :node_index => 47, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(34.02710514692335, 33.62710514692335)…), Dict(:i => 8.0, :bellman_term => 0.0, :noise_term => 8, :node_index => 48, :stage_objective => 0.0, :objective_state => nothing, :u => 7.3, :r => 0.0, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(33.62710514692335, 34.32710514692335)…), Dict(:i => 0.0, :bellman_term => 0.0, :noise_term => 0, :node_index => 49, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2, :r => 0.0, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(34.32710514692335, 18.10000000000001)…), Dict(:i => 3.0, :bellman_term => 0.0, :noise_term => 3, :node_index => 50, :stage_objective => 0.0, :objective_state => nothing, :u => 7.100000000000001, :r => 0.0, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(18.10000000000001, 11.999999999999986)…), Dict(:i => 3.0, :bellman_term => 0.0, :noise_term => 3, :node_index => 51, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(11.999999999999986, 7.999999999999986)…), Dict(:i => 3.0, :bellman_term => 0.0, :noise_term => 3, :node_index => 52, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(7.999999999999986, 0.0)…)]
      - [Dict(:i => 1.0, :bellman_term => 227.00362981688795, :noise_term => 1, :node_index => 1, :stage_objective => 71.39999999999999, :objective_state => nothing, :u => 0.0, :r => 7.0, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 301.0)…), Dict(:i => 0.0, :bellman_term => 183.32749040914223, :noise_term => 0, :node_index => 2, :stage_objective => 73.83999999999905, :objective_state => nothing, :u => 0.0, :r => 7.099999999999909, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(301.0, 301.0)…), Dict(:i => 3.0, :bellman_term => 161.35646369814867, :noise_term => 3, :node_index => 3, :stage_objective => 31.14136913580057, :objective_state => nothing, :u => 4.262134987188233, :r => 2.9378650128113746, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(301.0, 299.73786501281177)…), Dict(:i => 2.0, :bellman_term => 171.09290872752126, :noise_term => 2, :node_index => 4, :stage_objective => 0.0, :objective_state => nothing, :u => 7.300000000000068, :r => 0.0, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(299.73786501281177, 294.4378650128117)…), Dict(:i => 1.0, :bellman_term => 202.22970679455875, :noise_term => 1, :node_index => 5, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(294.4378650128117, 288.0378650128117)…), Dict(:i => 0.0, :bellman_term => 235.28740400238758, :noise_term => 0, :node_index => 6, :stage_objective => 0.0, :objective_state => nothing, :u => 7.599999999999227, :r => 0.0, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(288.0378650128117, 280.4378650128125)…), Dict(:i => 3.0, :bellman_term => 238.6951536992001, :noise_term => 3, :node_index => 7, :stage_objective => 7.773144179519477, :objective_state => nothing, :u => 7.146794606756316, :r => 0.6532053932369308, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(280.4378650128125, 276.2910704060562)…), Dict(:i => 0.0, :bellman_term => 208.82638083212854, :noise_term => 0, :node_index => 8, :stage_objective => 65.33975450645048, :objective_state => nothing, :u => 2.7878248368743925, :r => 5.312175163126055, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(276.2910704060562, 273.5032455691818)…), Dict(:i => 3.0, :bellman_term => 219.324755802043, :noise_term => 3, :node_index => 9, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(273.5032455691818, 268.2032455691818)…), Dict(:i => 0.0, :bellman_term => 255.8715246094166, :noise_term => 0, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(268.2032455691818, 259.60324556918175)…)  …  Dict(:i => 2.0, :bellman_term => 225.07004362978313, :noise_term => 2, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(28.591396448163408, 22.291396448163407)…), Dict(:i => 3.0, :bellman_term => 239.0735968890448, :noise_term => 3, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(22.291396448163407, 17.19139644816341)…), Dict(:i => 2.0, :bellman_term => 253.72364713397587, :noise_term => 2, :node_index => 45, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(45 => 1.0), :x => SDDP.State{Float64}(17.19139644816341, 11.391396448163409)…), Dict(:i => 1.0, 
:bellman_term => 303.48743674475355, :noise_term => 1, :node_index => 46, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(46 => 1.0), :x => SDDP.State{Float64}(11.391396448163409, 4.791396448163409)…), Dict(:i => 2.0, :bellman_term => 308.44481200000234, :noise_term => 2, :node_index => 47, :stage_objective => 10.650562157140344, :objective_state => nothing, :u => 6.791396448163409, :r => 0.6086035518365911, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(4.791396448163409, 0.0)…), Dict(:i => 1.0, :bellman_term => 245.4294399999935, :noise_term => 1, :node_index => 48, :stage_objective => 113.75000000000651, :objective_state => nothing, :u => 0.7999999999996394, :r => 6.500000000000372, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(0.0, 0.20000000000037232)…), Dict(:i => 2.0, :bellman_term => 173.67944000000003, :noise_term => 2, :node_index => 49, :stage_objective => 87.49999999999349, :objective_state => nothing, :u => 2.2000000000003723, :r => 4.999999999999628, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(0.20000000000037232, 0.0)…), Dict(:i => 1.0, :bellman_term => 116.962, :noise_term => 1, :node_index => 50, :stage_objective => 107.36, :objective_state => nothing, :u => 1.0, :r => 6.1, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(0.0, 0.0)…), Dict(:i => 3.0, :bellman_term => 60.519999999999996, :noise_term => 3, :node_index => 51, :stage_objective => 70.8, :objective_state => nothing, :u => 3.0, :r => 4.0, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(0.0, 0.0)…), Dict(:i => 3.0, :bellman_term => 0.0, :noise_term => 3, :node_index => 52, :stage_objective => 71.2, :objective_state => nothing, :u => 3.0, :r => 4.0, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(0.0, 0.0)…)]
      - [Dict(:i => 3.0, :bellman_term => 206.2012573335387, :noise_term => 3, :node_index => 1, :stage_objective => 71.39999999999999, :objective_state => nothing, :u => 0.0, :r => 7.0, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 303.0)…), Dict(:i => 0.0, :bellman_term => 162.52410114721442, :noise_term => 0, :node_index => 2, :stage_objective => 73.83999999999905, :objective_state => nothing, :u => 0.0, :r => 7.099999999999909, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(303.0, 303.0)…), Dict(:i => 8.0, :bellman_term => 120.85206054481887, :noise_term => 8, :node_index => 3, :stage_objective => 0.0, :objective_state => nothing, :u => 7.200000000000102, :r => 0.0, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(303.0, 303.7999999999999)…), Dict(:i => 2.0, :bellman_term => 128.61705906616226, :noise_term => 2, :node_index => 4, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2999999999995, :r => 0.0, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(303.7999999999999, 298.5000000000004)…), Dict(:i => 1.0, :bellman_term => 157.41356356082042, :noise_term => 1, :node_index => 5, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(298.5000000000004, 292.1000000000004)…), Dict(:i => 2.0, :bellman_term => 165.69323816902715, :noise_term => 2, :node_index => 6, :stage_objective => 0.0, :objective_state => nothing, :u => 7.600000000004172, :r => 0.0, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(292.1000000000004, 286.49999999999625)…), Dict(:i => 1.0, :bellman_term => 198.18713826594058, :noise_term => 1, :node_index => 7, :stage_objective => 0.0, :objective_state => nothing, :u => 7.799999999991769, :r => 0.0, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(286.49999999999625, 279.7000000000045)…), Dict(:i => 2.0, :bellman_term => 207.69947444738864, :noise_term => 2, :node_index => 8, :stage_objective => 0.0, :objective_state => nothing, :u => 8.100000000000989, :r => 0.0, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(279.7000000000045, 273.6000000000035)…), Dict(:i => 3.0, :bellman_term => 218.17329747563917, :noise_term => 3, :node_index => 9, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(273.6000000000035, 268.3000000000035)…), Dict(:i => 7.0, :bellman_term => 168.27959617613897, :noise_term => 7, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(268.3000000000035, 266.70000000000346)…)  …  Dict(:i => 7.0, :bellman_term => 8.57419943651098, :noise_term => 7, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(44.90000000000346, 43.60000000000346)…), Dict(:i => 8.0, :bellman_term => 1.494730729798448, :noise_term => 8, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(43.60000000000346, 43.50000000000346)…), Dict(:i => 7.0, :bellman_term => 0.03931788599987862, :noise_term => 7, :node_index => 45, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(45 => 1.0), :x => SDDP.State{Float64}(43.50000000000346, 42.70000000000346)…), Dict(:i => 8.0, :bellman_term => 0.0, :noise_term => 8, :node_index => 46, :stage_objective => 0.0, :objective_state 
=> nothing, :u => 7.6, :r => 0.0, :belief => Dict(46 => 1.0), :x => SDDP.State{Float64}(42.70000000000346, 38.99999999999999)…), Dict(:i => 7.0, :bellman_term => 0.0, :noise_term => 7, :node_index => 47, :stage_objective => 0.0, :objective_state => nothing, :u => 7.399999999999999, :r => 0.0, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(38.99999999999999, 31.60000000000001)…), Dict(:i => 3.0, :bellman_term => 0.0, :noise_term => 3, :node_index => 48, :stage_objective => 0.0, :objective_state => nothing, :u => 7.299999999999016, :r => 0.0, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(31.60000000000001, 27.300000000000008)…), Dict(:i => 2.0, :bellman_term => 0.0, :noise_term => 2, :node_index => 49, :stage_objective => 0.0, :objective_state => nothing, :u => 7.199999999999999, :r => 0.0, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(27.300000000000008, 22.10000000000001)…), Dict(:i => 8.0, :bellman_term => 0.0, :noise_term => 8, :node_index => 50, :stage_objective => 0.0, :objective_state => nothing, :u => 7.100000000000001, :r => 0.0, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(22.10000000000001, 23.000000000000007)…), Dict(:i => 3.0, :bellman_term => 0.0, :noise_term => 3, :node_index => 51, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(23.000000000000007, 6.0000000000000036)…), Dict(:i => 8.0, :bellman_term => 0.0, :noise_term => 8, :node_index => 52, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(6.0000000000000036, 7.0000000000000036)…)]
      - [Dict(:i => 1.0, :bellman_term => 227.00362981688795, :noise_term => 1, :node_index => 1, :stage_objective => 71.39999999999999, :objective_state => nothing, :u => 0.0, :r => 7.0, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 301.0)…), Dict(:i => 0.0, :bellman_term => 183.32749040914223, :noise_term => 0, :node_index => 2, :stage_objective => 73.83999999999905, :objective_state => nothing, :u => 0.0, :r => 7.099999999999909, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(301.0, 301.0)…), Dict(:i => 1.0, :bellman_term => 161.35646369814867, :noise_term => 1, :node_index => 3, :stage_objective => 52.34136913580573, :objective_state => nothing, :u => 2.262134987188233, :r => 4.937865012811861, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(301.0, 299.73786501281177)…), Dict(:i => 2.0, :bellman_term => 171.09290872752126, :noise_term => 2, :node_index => 4, :stage_objective => 0.0, :objective_state => nothing, :u => 7.300000000000068, :r => 0.0, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(299.73786501281177, 294.4378650128117)…), Dict(:i => 3.0, :bellman_term => 180.16439218779124, :noise_term => 3, :node_index => 5, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(294.4378650128117, 290.0378650128117)…), Dict(:i => 2.0, :bellman_term => 189.35592297262974, :noise_term => 2, :node_index => 6, :stage_objective => 0.0, :objective_state => nothing, :u => 7.599999999989279, :r => 0.0, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(290.0378650128117, 284.43786501282244)…), Dict(:i => 8.0, :bellman_term => 143.26112274346497, :noise_term => 8, :node_index => 7, :stage_objective => 0.0, :objective_state => nothing, :u => 7.80000000000291, :r => 0.0, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(284.43786501282244, 284.63786501281953)…), Dict(:i => 2.0, :bellman_term => 152.21696590045576, :noise_term => 2, :node_index => 8, :stage_objective => 0.0, :objective_state => nothing, :u => 8.100000000001046, :r => 0.0, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(284.63786501281953, 278.5378650128185)…), Dict(:i => 3.0, :bellman_term => 159.69132250057828, :noise_term => 3, :node_index => 9, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(278.5378650128185, 273.2378650128185)…), Dict(:i => 2.0, :bellman_term => 169.0025559849746, :noise_term => 2, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(273.2378650128185, 266.63786501281845)…)  …  Dict(:i => 2.0, :bellman_term => 409.417455354939, :noise_term => 2, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(17.83786501281847, 11.537865012818468)…), Dict(:i => 3.0, :bellman_term => 425.09857995534134, :noise_term => 3, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(11.537865012818468, 6.4378650128184685)…), Dict(:i => 7.0, :bellman_term => 353.3649597799044, :noise_term => 7, :node_index => 45, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(45 => 1.0), :x => SDDP.State{Float64}(6.4378650128184685, 5.637865012818469)…), Dict(:i => 1.0, :bellman_term => 380.19481199434637, :noise_term => 1, 
:node_index => 46, :stage_objective => 23.837362281332695, :objective_state => nothing, :u => 6.237865012495274, :r => 1.3621349875047255, :belief => Dict(46 => 1.0), :x => SDDP.State{Float64}(5.637865012818469, 0.4000000003231945)…), Dict(:i => 2.0, :bellman_term => 308.4448120000029, :noise_term => 2, :node_index => 47, :stage_objective => 87.49999999434411, :objective_state => nothing, :u => 2.4000000003231943, :r => 4.999999999676806, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(0.4000000003231945, 0.0)…), Dict(:i => 8.0, :bellman_term => 236.73068000000018, :noise_term => 8, :node_index => 48, :stage_objective => 0.0, :objective_state => nothing, :u => 7.300000000000402, :r => 0.0, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(0.0, 0.7000000000000002)…), Dict(:i => 2.0, :bellman_term => 173.67943999999986, :noise_term => 2, :node_index => 49, :stage_objective => 78.75, :objective_state => nothing, :u => 2.7, :r => 4.5, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(0.7000000000000002, 0.0)…), Dict(:i => 8.0, :bellman_term => 102.44680000000002, :noise_term => 8, :node_index => 50, :stage_objective => 0.0, :objective_state => nothing, :u => 7.1, :r => 0.0, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(0.0, 0.9000000000000004)…), Dict(:i => 3.0, :bellman_term => 60.52000000000001, :noise_term => 3, :node_index => 51, :stage_objective => 54.86999999999999, :objective_state => nothing, :u => 3.9000000000000004, :r => 3.0999999999999996, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(0.9000000000000004, 0.0)…), Dict(:i => 1.0, :bellman_term => 0.0, :noise_term => 1, :node_index => 52, :stage_objective => 106.80000000000001, :objective_state => nothing, :u => 1.0, :r => 6.0, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(0.0, 0.0)…)]
      - [Dict(:i => 3.0, :bellman_term => 206.2012573335387, :noise_term => 3, :node_index => 1, :stage_objective => 71.39999999999999, :objective_state => nothing, :u => 0.0, :r => 7.0, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 303.0)…), Dict(:i => 2.0, :bellman_term => 141.72071188528662, :noise_term => 2, :node_index => 2, :stage_objective => 73.83999999999905, :objective_state => nothing, :u => 0.0, :r => 7.099999999999909, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(303.0, 305.0)…), Dict(:i => 8.0, :bellman_term => 100.97409633839652, :noise_term => 8, :node_index => 3, :stage_objective => 0.0, :objective_state => nothing, :u => 7.200000000000614, :r => 0.0, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(305.0, 305.7999999999994)…), Dict(:i => 7.0, :bellman_term => 67.20164528358282, :noise_term => 7, :node_index => 4, :stage_objective => 0.0, :objective_state => nothing, :u => 7.300000000000296, :r => 0.0, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(305.7999999999994, 305.4999999999991)…), Dict(:i => 3.0, :bellman_term => 75.79089298684812, :noise_term => 3, :node_index => 5, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(305.4999999999991, 301.0999999999991)…), Dict(:i => 2.0, :bellman_term => 80.98166051594171, :noise_term => 2, :node_index => 6, :stage_objective => 0.0, :objective_state => nothing, :u => 7.59999999999053, :r => 0.0, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(301.0999999999991, 295.5000000000086)…), Dict(:i => 1.0, :bellman_term => 105.09379799851695, :noise_term => 1, :node_index => 7, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8000000000049, :r => 0.0, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(295.5000000000086, 288.7000000000037)…), Dict(:i => 7.0, :bellman_term => 70.45035192635487, :noise_term => 7, :node_index => 8, :stage_objective => 0.0, :objective_state => nothing, :u => 8.100000000000534, :r => 0.0, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(288.7000000000037, 287.60000000000315)…), Dict(:i => 1.0, :bellman_term => 92.32024460621233, :noise_term => 1, :node_index => 9, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(287.60000000000315, 280.30000000000314)…), Dict(:i => 0.0, :bellman_term => 118.12292276225071, :noise_term => 0, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(280.30000000000314, 271.7000000000031)…)  …  Dict(:i => 2.0, :bellman_term => 390.8019022486176, :noise_term => 2, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(18.900000000003132, 12.600000000003131)…), Dict(:i => 3.0, :bellman_term => 406.4963598257397, :noise_term => 3, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(12.600000000003131, 7.500000000003132)…), Dict(:i => 0.0, :bellman_term => 451.9448120000024, :noise_term => 0, :node_index => 45, :stage_objective => 5.279999999944879, :objective_state => nothing, :u => 7.500000000003132, :r => 0.2999999999968681, :belief => Dict(45 => 1.0), :x => SDDP.State{Float64}(7.500000000003132, 0.0)…), Dict(:i => 8.0, :bellman_term => 380.1948119944564, :noise_term => 8, :node_index => 46, 
:stage_objective => 5.546019800819097e-9, :objective_state => nothing, :u => 7.599999999683084, :r => 3.1691541718966265e-10, :belief => Dict(46 => 1.0), :x => SDDP.State{Float64}(0.0, 0.4000000003169158)…), Dict(:i => 2.0, :bellman_term => 308.4448120000029, :noise_term => 2, :node_index => 47, :stage_objective => 87.49999999445399, :objective_state => nothing, :u => 2.4000000003169157, :r => 4.999999999683085, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(0.4000000003169158, 0.0)…), Dict(:i => 3.0, :bellman_term => 245.42943999999414, :noise_term => 3, :node_index => 48, :stage_objective => 78.75000000000598, :objective_state => nothing, :u => 2.7999999999996397, :r => 4.500000000000342, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(0.0, 0.2000000000003425)…), Dict(:i => 2.0, :bellman_term => 173.67943999999986, :noise_term => 2, :node_index => 49, :stage_objective => 87.499999999994, :objective_state => nothing, :u => 2.200000000000343, :r => 4.999999999999657, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(0.2000000000003425, 0.0)…), Dict(:i => 3.0, :bellman_term => 116.96200000000002, :noise_term => 3, :node_index => 50, :stage_objective => 72.16, :objective_state => nothing, :u => 3.0, :r => 4.1, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(0.0, 0.0)…), Dict(:i => 1.0, :bellman_term => 60.51999999999998, :noise_term => 1, :node_index => 51, :stage_objective => 106.19999999999999, :objective_state => nothing, :u => 1.0, :r => 6.0, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(0.0, 0.0)…), Dict(:i => 1.0, :bellman_term => 0.0, :noise_term => 1, :node_index => 52, :stage_objective => 106.80000000000001, :objective_state => nothing, :u => 1.0, :r => 6.0, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(0.0, 0.0)…)]
      - [Dict(:i => 8.0, :bellman_term => 157.70849961329745, :noise_term => 8, :node_index => 1, :stage_objective => 67.95478050807554, :objective_state => nothing, :u => 0.3377666168553374, :r => 6.662233383144662, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 307.66223338314467)…), Dict(:i => 2.0, :bellman_term => 132.07724725626557, :noise_term => 2, :node_index => 2, :stage_objective => 34.99466634322792, :objective_state => nothing, :u => 3.7351282362280847, :r => 3.364871763771915, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(307.66223338314467, 305.9271051469166)…), Dict(:i => 3.0, :bellman_term => 141.45452539145845, :noise_term => 3, :node_index => 3, :stage_objective => 0.0, :objective_state => nothing, :u => 7.200000000000273, :r => 0.0, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(305.9271051469166, 301.7271051469163)…), Dict(:i => 0.0, :bellman_term => 171.20673700918678, :noise_term => 0, :node_index => 4, :stage_objective => 0.0, :objective_state => nothing, :u => 7.300000000000466, :r => 0.0, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(301.7271051469163, 294.42710514691584)…), Dict(:i => 8.0, :bellman_term => 127.96323718853773, :noise_term => 8, :node_index => 5, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(294.42710514691584, 295.02710514691586)…), Dict(:i => 7.0, :bellman_term => 89.62973187431771, :noise_term => 7, :node_index => 6, :stage_objective => 0.0, :objective_state => nothing, :u => 7.59999999999161, :r => 0.0, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(295.02710514691586, 294.42710514692425)…), Dict(:i => 1.0, :bellman_term => 114.18748857498258, :noise_term => 1, :node_index => 7, :stage_objective => 0.0, :objective_state => nothing, :u => 7.800000000003649, :r => 0.0, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(294.42710514692425, 287.6271051469206)…), Dict(:i => 2.0, :bellman_term => 121.90317865453471, :noise_term => 2, :node_index => 8, :stage_objective => 0.0, :objective_state => nothing, :u => 8.100000000000534, :r => 0.0, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(287.6271051469206, 281.52710514692006)…), Dict(:i => 1.0, :bellman_term => 148.8330347632982, :noise_term => 1, :node_index => 9, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(281.52710514692006, 274.22710514692005)…), Dict(:i => 2.0, :bellman_term => 158.14393218811256, :noise_term => 2, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(274.22710514692005, 267.62710514692003)…)  …  Dict(:i => 0.0, :bellman_term => 116.5279045958286, :noise_term => 0, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(37.82710514692006, 29.52710514692006)…), Dict(:i => 3.0, :bellman_term => 126.1024770533939, :noise_term => 3, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(29.52710514692006, 24.427105146920063)…), Dict(:i => 2.0, :bellman_term => 136.88368566293474, :noise_term => 2, :node_index => 45, :stage_objective => 0.0, :objective_state => nothing, :u => 7.800000000000001, :r => 0.0, :belief => Dict(45 => 1.0), :x => SDDP.State{Float64}(24.427105146920063, 
18.627105146920062)…), Dict(:i => 3.0, :bellman_term => 148.1512174275854, :noise_term => 3, :node_index => 46, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(46 => 1.0), :x => SDDP.State{Float64}(18.627105146920062, 14.027105146920062)…), Dict(:i => 0.0, :bellman_term => 193.80078841097827, :noise_term => 0, :node_index => 47, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(14.027105146920062, 6.627105146920062)…), Dict(:i => 8.0, :bellman_term => 124.8801072285065, :noise_term => 8, :node_index => 48, :stage_objective => 0.0, :objective_state => nothing, :u => 7.3, :r => 0.0, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(6.627105146920062, 7.327105146920062)…), Dict(:i => 2.0, :bellman_term => 137.19908802523318, :noise_term => 2, :node_index => 49, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2, :r => 0.0, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(7.327105146920062, 2.1271051469200613)…), Dict(:i => 1.0, :bellman_term => 116.96200000000002, :noise_term => 1, :node_index => 50, :stage_objective => 69.92294941420693, :objective_state => nothing, :u => 3.1271051469200613, :r => 3.9728948530799384, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(2.1271051469200613, 0.0)…), Dict(:i => 1.0, :bellman_term => 60.51999999999998, :noise_term => 1, :node_index => 51, :stage_objective => 106.19999999999999, :objective_state => nothing, :u => 1.0, :r => 6.0, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(0.0, 0.0)…), Dict(:i => 3.0, :bellman_term => 0.0, :noise_term => 3, :node_index => 52, :stage_objective => 71.2, :objective_state => nothing, :u => 3.0, :r => 4.0, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(0.0, 0.0)…)]
      - [Dict(:i => 1.0, :bellman_term => 227.00362981688795, :noise_term => 1, :node_index => 1, :stage_objective => 71.39999999999999, :objective_state => nothing, :u => 0.0, :r => 7.0, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 301.0)…), Dict(:i => 0.0, :bellman_term => 183.32749040914223, :noise_term => 0, :node_index => 2, :stage_objective => 73.83999999999905, :objective_state => nothing, :u => 0.0, :r => 7.099999999999909, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(301.0, 301.0)…), Dict(:i => 3.0, :bellman_term => 161.35646369814867, :noise_term => 3, :node_index => 3, :stage_objective => 31.141369135796356, :objective_state => nothing, :u => 4.262134987188233, :r => 2.937865012810977, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(301.0, 299.73786501281177)…), Dict(:i => 2.0, :bellman_term => 171.09290872752126, :noise_term => 2, :node_index => 4, :stage_objective => 0.0, :objective_state => nothing, :u => 7.300000000000068, :r => 0.0, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(299.73786501281177, 294.4378650128117)…), Dict(:i => 8.0, :bellman_term => 127.86079938012472, :noise_term => 8, :node_index => 5, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(294.4378650128117, 295.0378650128117)…), Dict(:i => 2.0, :bellman_term => 135.75174650801137, :noise_term => 2, :node_index => 6, :stage_objective => 0.0, :objective_state => nothing, :u => 7.599999999994964, :r => 0.0, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(295.0378650128117, 289.43786501281676)…), Dict(:i => 1.0, :bellman_term => 164.3599948740257, :noise_term => 1, :node_index => 7, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8000000000025125, :r => 0.0, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(289.43786501281676, 282.63786501281425)…), Dict(:i => 7.0, :bellman_term => 121.80532141064032, :noise_term => 7, :node_index => 8, :stage_objective => 0.0, :objective_state => nothing, :u => 8.100000000000364, :r => 0.0, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(282.63786501281425, 281.5378650128139)…), Dict(:i => 8.0, :bellman_term => 84.20857084032286, :noise_term => 8, :node_index => 9, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(281.5378650128139, 281.23786501281387)…), Dict(:i => 7.0, :bellman_term => 53.57002836461061, :noise_term => 7, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(281.23786501281387, 279.63786501281385)…)  …  Dict(:i => 0.0, :bellman_term => 23.559114642764314, :noise_term => 0, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(47.83786501281389, 39.537865012813896)…), Dict(:i => 3.0, :bellman_term => 25.293830052521685, :noise_term => 3, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(39.537865012813896, 34.437865012813894)…), Dict(:i => 7.0, :bellman_term => 6.551705241628724, :noise_term => 7, :node_index => 45, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(45 => 1.0), :x => SDDP.State{Float64}(34.437865012813894, 33.6378650128139)…), Dict(:i => 1.0, :bellman_term => 14.211517923669163, :noise_term => 1, 
:node_index => 46, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(46 => 1.0), :x => SDDP.State{Float64}(33.6378650128139, 27.037865012813896)…), Dict(:i => 2.0, :bellman_term => 14.66068835969682, :noise_term => 2, :node_index => 47, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(27.037865012813896, 21.637865012813897)…), Dict(:i => 3.0, :bellman_term => 15.18786660788544, :noise_term => 3, :node_index => 48, :stage_objective => 0.0, :objective_state => nothing, :u => 7.3, :r => 0.0, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(21.637865012813897, 17.337865012813896)…), Dict(:i => 7.0, :bellman_term => 0.4624020748416397, :noise_term => 7, :node_index => 49, :stage_objective => 0.0, :objective_state => nothing, :u => 7.199999999999999, :r => 0.0, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(17.337865012813896, 17.137865012813897)…), Dict(:i => 3.0, :bellman_term => 0.0, :noise_term => 3, :node_index => 50, :stage_objective => 0.0, :objective_state => nothing, :u => 7.1, :r => 0.0, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(17.137865012813897, 13.037865012813898)…), Dict(:i => 3.0, :bellman_term => 0.0, :noise_term => 3, :node_index => 51, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(13.037865012813898, 9.037865012813896)…), Dict(:i => 8.0, :bellman_term => 0.0, :noise_term => 8, :node_index => 52, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(9.037865012813896, 10.037865012813896)…)]
      + [Dict(:i => 3.0, :bellman_term => 206.46856062955294, :noise_term => 3, :node_index => 1, :stage_objective => 71.39999999999999, :objective_state => nothing, :u => 0.0, :r => 7.0, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 303.0)…), Dict(:i => 7.0, :bellman_term => 163.98989397035484, :noise_term => 7, :node_index => 2, :stage_objective => 0.0, :objective_state => nothing, :u => 7.100000000001046, :r => 0.0, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(303.0, 302.89999999999895)…), Dict(:i => 3.0, :bellman_term => 172.64831994319593, :noise_term => 3, :node_index => 3, :stage_objective => 0.0, :objective_state => nothing, :u => 7.200000000000102, :r => 0.0, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(302.89999999999895, 298.69999999999885)…), Dict(:i => 7.0, :bellman_term => 130.4847776053757, :noise_term => 7, :node_index => 4, :stage_objective => 0.0, :objective_state => nothing, :u => 7.300000000003308, :r => 0.0, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(298.69999999999885, 298.39999999999554)…), Dict(:i => 3.0, :bellman_term => 137.75015123106914, :noise_term => 3, :node_index => 5, :stage_objective => 0.0, :objective_state => nothing, :u => 7.400000000000489, :r => 0.0, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(298.39999999999554, 293.99999999999505)…), Dict(:i => 0.0, :bellman_term => 166.65544755364454, :noise_term => 0, :node_index => 6, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(293.99999999999505, 286.39999999999503)…), Dict(:i => 3.0, :bellman_term => 175.6774675239385, :noise_term => 3, :node_index => 7, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(286.39999999999503, 281.599999999995)…), Dict(:i => 0.0, :bellman_term => 208.37072794574624, :noise_term => 0, :node_index => 8, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(281.599999999995, 273.499999999995)…), Dict(:i => 8.0, :bellman_term => 160.37983926042898, :noise_term => 8, :node_index => 9, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(273.499999999995, 273.199999999995)…), Dict(:i => 0.0, :bellman_term => 191.64170776556148, :noise_term => 0, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(273.199999999995, 264.59999999999496)…)  …  Dict(:i => 0.0, :bellman_term => 288.77913358608714, :noise_term => 0, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(26.79999999999498, 18.49999999999498)…), Dict(:i => 3.0, :bellman_term => 303.8021302253028, :noise_term => 3, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(18.49999999999498, 13.39999999999498)…), Dict(:i => 2.0, :bellman_term => 319.1531303920869, :noise_term => 2, :node_index => 45, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(45 => 1.0), :x => SDDP.State{Float64}(13.39999999999498, 7.5999999999949805)…), Dict(:i => 8.0, :bellman_term => 247.82863016008588, :noise_term => 8, :node_index => 46, :stage_objective => 0.0, :objective_state 
=> nothing, :u => 7.6, :r => 0.0, :belief => Dict(46 => 1.0), :x => SDDP.State{Float64}(7.5999999999949805, 7.999999999994982)…), Dict(:i => 0.0, :bellman_term => 297.9632584000883, :noise_term => 0, :node_index => 47, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(7.999999999994982, 0.5999999999949814)…), Dict(:i => 1.0, :bellman_term => 245.4294399999983, :noise_term => 1, :node_index => 48, :stage_objective => 103.25000000008963, :objective_state => nothing, :u => 1.3999999999948969, :r => 5.900000000005122, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(0.5999999999949814, 0.2000000000001032)…), Dict(:i => 0.0, :bellman_term => 173.67944000000006, :noise_term => 0, :node_index => 49, :stage_objective => 122.4999999999982, :objective_state => nothing, :u => 0.2000000000001032, :r => 6.999999999999897, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(0.2000000000001032, 0.0)…), Dict(:i => 3.0, :bellman_term => 116.96200000000002, :noise_term => 3, :node_index => 50, :stage_objective => 72.16, :objective_state => nothing, :u => 3.0, :r => 4.1, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(0.0, 0.0)…), Dict(:i => 3.0, :bellman_term => 60.519999999999996, :noise_term => 3, :node_index => 51, :stage_objective => 70.8, :objective_state => nothing, :u => 3.0, :r => 4.0, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(0.0, 0.0)…), Dict(:i => 1.0, :bellman_term => 0.0, :noise_term => 1, :node_index => 52, :stage_objective => 106.80000000000001, :objective_state => nothing, :u => 1.0, :r => 6.0, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(0.0, 0.0)…)]
      + [Dict(:i => 3.0, :bellman_term => 206.46856062955294, :noise_term => 3, :node_index => 1, :stage_objective => 71.39999999999999, :objective_state => nothing, :u => 0.0, :r => 7.0, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 303.0)…), Dict(:i => 2.0, :bellman_term => 179.36444292143096, :noise_term => 2, :node_index => 2, :stage_objective => 36.39497484777858, :objective_state => nothing, :u => 3.600483187713621, :r => 3.4995168122864015, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(303.0, 301.3995168122864)…), Dict(:i => 1.0, :bellman_term => 190.8347832533277, :noise_term => 1, :node_index => 3, :stage_objective => 18.833572492301062, :objective_state => nothing, :u => 5.423247878084851, :r => 1.7767521219151945, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(301.3995168122864, 296.9762689342015)…), Dict(:i => 2.0, :bellman_term => 200.1377558625859, :noise_term => 2, :node_index => 4, :stage_objective => 0.0, :objective_state => nothing, :u => 7.299999999999898, :r => 0.0, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(296.9762689342015, 291.67626893420163)…), Dict(:i => 3.0, :bellman_term => 209.5679311672925, :noise_term => 3, :node_index => 5, :stage_objective => 0.0, :objective_state => nothing, :u => 7.400000000000432, :r => 0.0, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(291.67626893420163, 287.2762689342012)…), Dict(:i => 0.0, :bellman_term => 235.28844479107056, :noise_term => 0, :node_index => 6, :stage_objective => 7.707058932886767, :objective_state => nothing, :u => 6.929820962357672, :r => 0.6701790376423276, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(287.2762689342012, 280.34644797184353)…), Dict(:i => 8.0, :bellman_term => 186.7346180444806, :noise_term => 8, :node_index => 7, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(280.34644797184353, 280.5464479718435)…), Dict(:i => 2.0, :bellman_term => 197.160440426494, :noise_term => 2, :node_index => 8, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(280.5464479718435, 274.4464479718435)…), Dict(:i => 3.0, :bellman_term => 207.46337121565693, :noise_term => 3, :node_index => 9, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(274.4464479718435, 269.1464479718435)…), Dict(:i => 7.0, :bellman_term => 159.09460766177313, :noise_term => 7, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(269.1464479718435, 267.54644797184346)…)  …  Dict(:i => 7.0, :bellman_term => 104.94450346677365, :noise_term => 7, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(31.74644797184348, 30.44644797184348)…), Dict(:i => 1.0, :bellman_term => 141.54133037164803, :noise_term => 1, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(30.44644797184348, 23.34644797184348)…), Dict(:i => 0.0, :bellman_term => 184.4998180765866, :noise_term => 0, :node_index => 45, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(45 => 1.0), :x => SDDP.State{Float64}(23.34644797184348, 15.546447971843481)…), Dict(:i => 8.0, :bellman_term => 
119.58930318366669, :noise_term => 8, :node_index => 46, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(46 => 1.0), :x => SDDP.State{Float64}(15.546447971843481, 15.946447971843481)…), Dict(:i => 2.0, :bellman_term => 130.4613033173631, :noise_term => 2, :node_index => 47, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(15.946447971843481, 10.546447971843483)…), Dict(:i => 3.0, :bellman_term => 142.45207313266528, :noise_term => 3, :node_index => 48, :stage_objective => 0.0, :objective_state => nothing, :u => 7.29999999999957, :r => 0.0, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(10.546447971843483, 6.246447971843483)…), Dict(:i => 7.0, :bellman_term => 76.84155378958408, :noise_term => 7, :node_index => 49, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2, :r => 0.0, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(6.246447971843483, 6.046447971843484)…), Dict(:i => 8.0, :bellman_term => 24.141080789581142, :noise_term => 8, :node_index => 50, :stage_objective => 0.0, :objective_state => nothing, :u => 7.1, :r => 0.0, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(6.046447971843484, 6.946447971843485)…), Dict(:i => 1.0, :bellman_term => 48.72725827083019, :noise_term => 1, :node_index => 51, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(6.946447971843485, 0.9464479718434848)…), Dict(:i => 3.0, :bellman_term => 0.0, :noise_term => 3, :node_index => 52, :stage_objective => 54.353226101185975, :objective_state => nothing, :u => 3.946447971843485, :r => 3.053552028156515, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(0.9464479718434848, 0.0)…)]
      + [Dict(:i => 1.0, :bellman_term => 227.17639983640765, :noise_term => 1, :node_index => 1, :stage_objective => 71.39999999999999, :objective_state => nothing, :u => 0.0, :r => 7.0, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 301.0)…), Dict(:i => 2.0, :bellman_term => 179.36444292143096, :noise_term => 2, :node_index => 2, :stage_objective => 57.194974847778575, :objective_state => nothing, :u => 1.6004831877136212, :r => 5.4995168122864015, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(301.0, 301.3995168122864)…), Dict(:i => 3.0, :bellman_term => 188.47937560049604, :noise_term => 3, :node_index => 3, :stage_objective => 0.0, :objective_state => nothing, :u => 7.200000000002888, :r => 0.0, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(301.3995168122864, 297.1995168122835)…), Dict(:i => 2.0, :bellman_term => 197.71117185922685, :noise_term => 2, :node_index => 4, :stage_objective => 0.0, :objective_state => nothing, :u => 7.30000000000291, :r => 0.0, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(297.1995168122835, 291.8995168122806)…), Dict(:i => 8.0, :bellman_term => 153.42860909207047, :noise_term => 8, :node_index => 5, :stage_objective => 0.0, :objective_state => nothing, :u => 7.399999999998954, :r => 0.0, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(291.8995168122806, 292.4995168122816)…), Dict(:i => 7.0, :bellman_term => 113.4564969391131, :noise_term => 7, :node_index => 6, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(292.4995168122816, 291.8995168122816)…), Dict(:i => 1.0, :bellman_term => 140.22515102972875, :noise_term => 1, :node_index => 7, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(291.8995168122816, 285.0995168122816)…), Dict(:i => 7.0, :bellman_term => 101.85666655562727, :noise_term => 7, :node_index => 8, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(285.0995168122816, 283.99951681228157)…), Dict(:i => 1.0, :bellman_term => 126.15285530112214, :noise_term => 1, :node_index => 9, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(283.99951681228157, 276.69951681228156)…), Dict(:i => 7.0, :bellman_term => 90.59157642941, :noise_term => 7, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(276.69951681228156, 275.09951681228154)…)  …  Dict(:i => 0.0, :bellman_term => 1.609441838918201, :noise_term => 0, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(56.299516812281546, 47.99951681228154)…), Dict(:i => 1.0, :bellman_term => 3.3208712465709596, :noise_term => 1, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(47.99951681228154, 40.89951681228154)…), Dict(:i => 2.0, :bellman_term => 3.600831810909426, :noise_term => 2, :node_index => 45, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(45 => 1.0), :x => SDDP.State{Float64}(40.89951681228154, 35.09951681228154)…), Dict(:i => 8.0, :bellman_term => 0.20118752946416407, :noise_term => 8, :node_index => 46, 
:stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(46 => 1.0), :x => SDDP.State{Float64}(35.09951681228154, 35.49951681228154)…), Dict(:i => 2.0, :bellman_term => 0.06490189980161176, :noise_term => 2, :node_index => 47, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(35.49951681228154, 30.099516812281543)…), Dict(:i => 8.0, :bellman_term => 0.0, :noise_term => 8, :node_index => 48, :stage_objective => 0.0, :objective_state => nothing, :u => 7.300000000000125, :r => 0.0, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(30.099516812281543, 30.799516812281542)…), Dict(:i => 2.0, :bellman_term => 0.0, :noise_term => 2, :node_index => 49, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2, :r => 0.0, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(30.799516812281542, 18.1)…), Dict(:i => 1.0, :bellman_term => 2.842170943040401e-14, :noise_term => 1, :node_index => 50, :stage_objective => 0.0, :objective_state => nothing, :u => 7.1, :r => 0.0, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(18.1, 12.000000000000002)…), Dict(:i => 1.0, :bellman_term => 0.0, :noise_term => 1, :node_index => 51, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(12.000000000000002, 6.000000000000002)…), Dict(:i => 1.0, :bellman_term => 0.0, :noise_term => 1, :node_index => 52, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => -0.0, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(6.000000000000002, 0.0)…)]
      + [Dict(:i => 8.0, :bellman_term => 169.54614924602208, :noise_term => 8, :node_index => 1, :stage_objective => 56.7735291113664, :objective_state => nothing, :u => 1.433967734179764, :r => 5.566032265820236, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 306.56603226582024)…), Dict(:i => 0.0, :bellman_term => 179.36444292143096, :noise_term => 0, :node_index => 2, :stage_objective => 20.10823928324808, :objective_state => nothing, :u => 5.1665154535338615, :r => 1.9334845464661612, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(306.56603226582024, 301.3995168122864)…), Dict(:i => 8.0, :bellman_term => 136.93379961364963, :noise_term => 8, :node_index => 3, :stage_objective => 0.0, :objective_state => nothing, :u => 7.200000000001694, :r => 0.0, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(301.3995168122864, 302.1995168122847)…), Dict(:i => 2.0, :bellman_term => 145.1995965663168, :noise_term => 2, :node_index => 4, :stage_objective => 0.0, :objective_state => nothing, :u => 7.299999999997624, :r => 0.0, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(302.1995168122847, 296.89951681228706)…), Dict(:i => 8.0, :bellman_term => 105.10068859052762, :noise_term => 8, :node_index => 5, :stage_objective => 0.0, :objective_state => nothing, :u => 7.400000000000091, :r => 0.0, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(296.89951681228706, 297.49951681228697)…), Dict(:i => 2.0, :bellman_term => 113.45649693906262, :noise_term => 2, :node_index => 6, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(297.49951681228697, 291.89951681228695)…), Dict(:i => 8.0, :bellman_term => 78.03458976656066, :noise_term => 8, :node_index => 7, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(291.89951681228695, 292.09951681228694)…), Dict(:i => 7.0, :bellman_term => 46.44462110094719, :noise_term => 7, :node_index => 8, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(292.09951681228694, 290.9995168122869)…), Dict(:i => 3.0, :bellman_term => 54.127877365953736, :noise_term => 3, :node_index => 9, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(290.9995168122869, 285.6995168122869)…), Dict(:i => 7.0, :bellman_term => 25.30321908236101, :noise_term => 7, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(285.6995168122869, 284.0995168122869)…)  …  Dict(:i => 7.0, :bellman_term => 0.0, :noise_term => 7, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(66.2995168122869, 58.65384615384616)…), Dict(:i => 1.0, :bellman_term => 0.0021560455384615346, :noise_term => 1, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(58.65384615384616, 51.55384615384616)…), Dict(:i => 7.0, :bellman_term => 0.0, :noise_term => 7, :node_index => 45, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(45 => 1.0), :x => SDDP.State{Float64}(51.55384615384616, 45.6)…), Dict(:i => 8.0, :bellman_term => 0.0, :noise_term => 8, :node_index => 46, 
:stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(46 => 1.0), :x => SDDP.State{Float64}(45.6, 46.0)…), Dict(:i => 7.0, :bellman_term => 0.0, :noise_term => 7, :node_index => 47, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(46.0, 31.599999999998936)…), Dict(:i => 1.0, :bellman_term => 0.0, :noise_term => 1, :node_index => 48, :stage_objective => 0.0, :objective_state => nothing, :u => 7.29999999999957, :r => 0.0, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(31.599999999998936, 25.299999999998935)…), Dict(:i => 0.0, :bellman_term => 0.0, :noise_term => 0, :node_index => 49, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2, :r => 0.0, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(25.299999999998935, 18.099999999999998)…), Dict(:i => 3.0, :bellman_term => 0.0, :noise_term => 3, :node_index => 50, :stage_objective => 0.0, :objective_state => nothing, :u => 7.1, :r => 0.0, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(18.099999999999998, 11.999999999999993)…), Dict(:i => 1.0, :bellman_term => 2.842170943040401e-14, :noise_term => 1, :node_index => 51, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(11.999999999999993, 5.999999999999993)…), Dict(:i => 1.0, :bellman_term => 0.0, :noise_term => 1, :node_index => 52, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => -0.0, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(5.999999999999993, 0.0)…)]
      + [Dict(:i => 3.0, :bellman_term => 206.46856062955294, :noise_term => 3, :node_index => 1, :stage_objective => 71.39999999999999, :objective_state => nothing, :u => 0.0, :r => 7.0, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 303.0)…), Dict(:i => 2.0, :bellman_term => 179.36444292143096, :noise_term => 2, :node_index => 2, :stage_objective => 36.39497484777858, :objective_state => nothing, :u => 3.600483187713621, :r => 3.4995168122864015, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(303.0, 301.3995168122864)…), Dict(:i => 3.0, :bellman_term => 188.47937560049604, :noise_term => 3, :node_index => 3, :stage_objective => 0.0, :objective_state => nothing, :u => 7.200000000002888, :r => 0.0, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(301.3995168122864, 297.1995168122835)…), Dict(:i => 7.0, :bellman_term => 145.19959656634182, :noise_term => 7, :node_index => 4, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2999999999988745, :r => 0.0, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(297.1995168122835, 296.8995168122846)…), Dict(:i => 1.0, :bellman_term => 174.81772466777284, :noise_term => 1, :node_index => 5, :stage_objective => 0.0, :objective_state => nothing, :u => 7.39999999999992, :r => 0.0, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(296.8995168122846, 290.4995168122847)…), Dict(:i => 0.0, :bellman_term => 205.95628472382987, :noise_term => 0, :node_index => 6, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(290.4995168122847, 282.8995168122847)…), Dict(:i => 1.0, :bellman_term => 210.88449940143528, :noise_term => 1, :node_index => 7, :stage_objective => 28.431209079776384, :objective_state => nothing, :u => 5.410822766405346, :r => 2.389177233594654, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(282.8995168122847, 278.4886940458793)…), Dict(:i => 7.0, :bellman_term => 164.7478110252332, :noise_term => 7, :node_index => 8, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(278.4886940458793, 277.3886940458793)…), Dict(:i => 8.0, :bellman_term => 122.59771632659431, :noise_term => 8, :node_index => 9, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(277.3886940458793, 277.08869404587927)…), Dict(:i => 2.0, :bellman_term => 129.9608391067868, :noise_term => 2, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(277.08869404587927, 270.48869404587924)…)  …  Dict(:i => 2.0, :bellman_term => 0.05635080172894458, :noise_term => 2, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(60.688694045879224, 54.38869404587922)…), Dict(:i => 8.0, :bellman_term => 0.0, :noise_term => 8, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(54.38869404587922, 54.28869404587922)…), Dict(:i => 2.0, :bellman_term => 0.0, :noise_term => 2, :node_index => 45, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(45 => 1.0), :x => SDDP.State{Float64}(54.28869404587922, 45.599999999999994)…), Dict(:i => 3.0, :bellman_term => 0.0, :noise_term => 3, :node_index => 46, 
:stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(46 => 1.0), :x => SDDP.State{Float64}(45.599999999999994, 40.99999999999999)…), Dict(:i => 7.0, :bellman_term => 0.0, :noise_term => 7, :node_index => 47, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(40.99999999999999, 31.600000000000527)…), Dict(:i => 3.0, :bellman_term => 0.0, :noise_term => 3, :node_index => 48, :stage_objective => 0.0, :objective_state => nothing, :u => 7.300000000000679, :r => 0.0, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(31.600000000000527, 27.300000000000527)…), Dict(:i => 0.0, :bellman_term => 0.0, :noise_term => 0, :node_index => 49, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2, :r => 0.0, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(27.300000000000527, 18.099999999999998)…), Dict(:i => 8.0, :bellman_term => 0.0, :noise_term => 8, :node_index => 50, :stage_objective => 0.0, :objective_state => nothing, :u => 7.1, :r => 0.0, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(18.099999999999998, 12.00000000000002)…), Dict(:i => 1.0, :bellman_term => -1.1368683772161603e-13, :noise_term => 1, :node_index => 51, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(12.00000000000002, 6.0000000000000195)…), Dict(:i => 1.0, :bellman_term => 0.0, :noise_term => 1, :node_index => 52, :stage_objective => -3.4781066915456906e-13, :objective_state => nothing, :u => 7.0000000000000195, :r => -1.9539925233402755e-14, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(6.0000000000000195, 0.0)…)]
      + [Dict(:i => 3.0, :bellman_term => 206.46856062955294, :noise_term => 3, :node_index => 1, :stage_objective => 71.39999999999999, :objective_state => nothing, :u => 0.0, :r => 7.0, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 303.0)…), Dict(:i => 2.0, :bellman_term => 179.36444292143096, :noise_term => 2, :node_index => 2, :stage_objective => 36.39497484777858, :objective_state => nothing, :u => 3.600483187713621, :r => 3.4995168122864015, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(303.0, 301.3995168122864)…), Dict(:i => 1.0, :bellman_term => 190.8347832533277, :noise_term => 1, :node_index => 3, :stage_objective => 18.833572492301062, :objective_state => nothing, :u => 5.423247878084851, :r => 1.7767521219151945, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(301.3995168122864, 296.9762689342015)…), Dict(:i => 7.0, :bellman_term => 147.38892606786067, :noise_term => 7, :node_index => 4, :stage_objective => 0.0, :objective_state => nothing, :u => 7.299999999998306, :r => 0.0, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(296.9762689342015, 296.6762689342032)…), Dict(:i => 1.0, :bellman_term => 177.21023314195781, :noise_term => 1, :node_index => 5, :stage_objective => 0.0, :objective_state => nothing, :u => 7.400000000000432, :r => 0.0, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(296.6762689342032, 290.2762689342028)…), Dict(:i => 0.0, :bellman_term => 208.5211754117904, :noise_term => 0, :node_index => 6, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(290.2762689342028, 282.67626893420277)…), Dict(:i => 1.0, :bellman_term => 210.88449940146484, :noise_term => 1, :node_index => 7, :stage_objective => 31.08785882892303, :objective_state => nothing, :u => 5.187574888325796, :r => 2.612425111674204, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(282.67626893420277, 278.48869404587697)…), Dict(:i => 2.0, :bellman_term => 221.5912223595742, :noise_term => 2, :node_index => 8, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(278.48869404587697, 272.38869404587695)…), Dict(:i => 8.0, :bellman_term => 172.67852433216558, :noise_term => 8, :node_index => 9, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(272.38869404587695, 272.08869404587693)…), Dict(:i => 0.0, :bellman_term => 205.67089127182453, :noise_term => 0, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(272.08869404587693, 263.4886940458769)…)  …  Dict(:i => 7.0, :bellman_term => 464.6163804541162, :noise_term => 7, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(9.688694045876888, 8.388694045876885)…), Dict(:i => 1.0, :bellman_term => 515.5139067678912, :noise_term => 1, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(8.388694045876885, 1.2886940458768859)…), Dict(:i => 2.0, :bellman_term => 451.94481200000064, :noise_term => 2, :node_index => 45, :stage_objective => 79.39898479256681, :objective_state => nothing, :u => 3.288694045876886, :r => 4.511305954123114, :belief => Dict(45 => 1.0), :x => SDDP.State{Float64}(1.2886940458768859, 0.0)…), 
Dict(:i => 8.0, :bellman_term => 380.19481200000064, :noise_term => 8, :node_index => 46, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(46 => 1.0), :x => SDDP.State{Float64}(0.0, 0.40000000000000036)…), Dict(:i => 0.0, :bellman_term => 308.44481200000075, :noise_term => 0, :node_index => 47, :stage_objective => 122.5, :objective_state => nothing, :u => 0.40000000000000036, :r => 7.0, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(0.40000000000000036, 0.0)…), Dict(:i => 8.0, :bellman_term => 236.73067999999992, :noise_term => 8, :node_index => 48, :stage_objective => 0.0, :objective_state => nothing, :u => 7.300000000000125, :r => 0.0, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(0.0, 0.7000000000000002)…), Dict(:i => 2.0, :bellman_term => 173.67944000000006, :noise_term => 2, :node_index => 49, :stage_objective => 78.75, :objective_state => nothing, :u => 2.7, :r => 4.5, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(0.7000000000000002, 0.0)…), Dict(:i => 1.0, :bellman_term => 116.962, :noise_term => 1, :node_index => 50, :stage_objective => 107.36, :objective_state => nothing, :u => 1.0, :r => 6.1, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(0.0, 0.0)…), Dict(:i => 8.0, :bellman_term => 48.06000000000001, :noise_term => 8, :node_index => 51, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(0.0, 1.0)…), Dict(:i => 8.0, :bellman_term => 0.0, :noise_term => 8, :node_index => 52, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(1.0, 2.0)…)]
      + [Dict(:i => 1.0, :bellman_term => 227.17639983640765, :noise_term => 1, :node_index => 1, :stage_objective => 71.39999999999999, :objective_state => nothing, :u => 0.0, :r => 7.0, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 301.0)…), Dict(:i => 7.0, :bellman_term => 179.36444292143096, :noise_term => 7, :node_index => 2, :stage_objective => 5.194974847778576, :objective_state => nothing, :u => 6.600483187713621, :r => 0.49951681228640155, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(301.0, 301.3995168122864)…), Dict(:i => 1.0, :bellman_term => 190.8347832533277, :noise_term => 1, :node_index => 3, :stage_objective => 18.833572492301062, :objective_state => nothing, :u => 5.423247878084851, :r => 1.7767521219151945, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(301.3995168122864, 296.9762689342015)…), Dict(:i => 0.0, :bellman_term => 200.50899488738787, :noise_term => 0, :node_index => 4, :stage_objective => 21.427717921032134, :objective_state => nothing, :u => 5.334154319171375, :r => 1.965845680828636, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(296.9762689342015, 291.64211461503015)…), Dict(:i => 8.0, :bellman_term => 156.14260871153783, :noise_term => 8, :node_index => 5, :stage_objective => 0.0, :objective_state => nothing, :u => 7.400000000000659, :r => 0.0, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(291.64211461503015, 292.2421146150295)…), Dict(:i => 2.0, :bellman_term => 163.9792497012495, :noise_term => 2, :node_index => 6, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(292.2421146150295, 286.64211461502947)…), Dict(:i => 3.0, :bellman_term => 173.19151059154865, :noise_term => 3, :node_index => 7, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(286.64211461502947, 281.84211461502946)…), Dict(:i => 0.0, :bellman_term => 205.49044560431867, :noise_term => 0, :node_index => 8, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(281.84211461502946, 273.74211461502944)…), Dict(:i => 3.0, :bellman_term => 215.79038648060578, :noise_term => 3, :node_index => 9, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(273.74211461502944, 268.4421146150294)…), Dict(:i => 2.0, :bellman_term => 226.70157501742779, :noise_term => 2, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(268.4421146150294, 261.8421146150294)…)  …  Dict(:i => 7.0, :bellman_term => 101.09570983678734, :noise_term => 7, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(32.04211461502938, 30.742114615029376)…), Dict(:i => 1.0, :bellman_term => 137.4398073740834, :noise_term => 1, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(30.742114615029376, 23.642114615029378)…), Dict(:i => 2.0, :bellman_term => 148.50984837181247, :noise_term => 2, :node_index => 45, :stage_objective => 0.0, :objective_state => nothing, :u => 7.800000000000001, :r => 0.0, :belief => Dict(45 => 1.0), :x => SDDP.State{Float64}(23.642114615029378, 17.842114615029377)…), Dict(:i => 8.0, 
:bellman_term => 88.16635122395226, :noise_term => 8, :node_index => 46, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(46 => 1.0), :x => SDDP.State{Float64}(17.842114615029377, 18.24211461502938)…), Dict(:i => 0.0, :bellman_term => 125.90626537777426, :noise_term => 0, :node_index => 47, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(18.24211461502938, 10.842114615029379)…), Dict(:i => 8.0, :bellman_term => 65.67628232722134, :noise_term => 8, :node_index => 48, :stage_objective => 0.0, :objective_state => nothing, :u => 7.300000000000679, :r => 0.0, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(10.842114615029379, 11.54211461502938)…), Dict(:i => 2.0, :bellman_term => 72.70630574047841, :noise_term => 2, :node_index => 49, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2, :r => 0.0, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(11.54211461502938, 6.34211461502938)…), Dict(:i => 3.0, :bellman_term => 80.80117548880617, :noise_term => 3, :node_index => 50, :stage_objective => 0.0, :objective_state => nothing, :u => 7.1, :r => 0.0, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(6.34211461502938, 2.242114615029381)…), Dict(:i => 8.0, :bellman_term => 20.12325189673392, :noise_term => 8, :node_index => 51, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(2.242114615029381, 3.242114615029381)…), Dict(:i => 3.0, :bellman_term => 0.0, :noise_term => 3, :node_index => 52, :stage_objective => 13.490359852477017, :objective_state => nothing, :u => 6.242114615029381, :r => 0.7578853849706189, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(3.242114615029381, 0.0)…)]
      + [Dict(:i => 8.0, :bellman_term => 169.54614924602208, :noise_term => 8, :node_index => 1, :stage_objective => 56.7735291113664, :objective_state => nothing, :u => 1.433967734179764, :r => 5.566032265820236, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 306.56603226582024)…), Dict(:i => 0.0, :bellman_term => 179.36444292143096, :noise_term => 0, :node_index => 2, :stage_objective => 20.10823928324808, :objective_state => nothing, :u => 5.1665154535338615, :r => 1.9334845464661612, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(306.56603226582024, 301.3995168122864)…), Dict(:i => 1.0, :bellman_term => 190.8347832533277, :noise_term => 1, :node_index => 3, :stage_objective => 18.833572492301062, :objective_state => nothing, :u => 5.423247878084851, :r => 1.7767521219151945, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(301.3995168122864, 296.9762689342015)…), Dict(:i => 7.0, :bellman_term => 147.38892606786067, :noise_term => 7, :node_index => 4, :stage_objective => 0.0, :objective_state => nothing, :u => 7.299999999998306, :r => 0.0, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(296.9762689342015, 296.6762689342032)…), Dict(:i => 8.0, :bellman_term => 106.95547866059133, :noise_term => 8, :node_index => 5, :stage_objective => 0.0, :objective_state => nothing, :u => 7.3999999999995225, :r => 0.0, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(296.6762689342032, 297.2762689342037)…), Dict(:i => 2.0, :bellman_term => 115.57642699735106, :noise_term => 2, :node_index => 6, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(297.2762689342037, 291.6762689342037)…), Dict(:i => 3.0, :bellman_term => 122.69477569021865, :noise_term => 3, :node_index => 7, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(291.6762689342037, 286.87626893420367)…), Dict(:i => 2.0, :bellman_term => 129.500931208554, :noise_term => 2, :node_index => 8, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(286.87626893420367, 280.77626893420364)…), Dict(:i => 3.0, :bellman_term => 137.32724021223703, :noise_term => 3, :node_index => 9, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(280.77626893420364, 275.47626893420363)…), Dict(:i => 0.0, :bellman_term => 165.73064029864327, :noise_term => 0, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(275.47626893420363, 266.8762689342036)…)  …  Dict(:i => 2.0, :bellman_term => 457.82222642548925, :noise_term => 2, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(15.076268934203627, 8.776268934203625)…), Dict(:i => 3.0, :bellman_term => 473.57356385139434, :noise_term => 3, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(8.776268934203625, 3.676268934203625)…), Dict(:i => 0.0, :bellman_term => 451.94481200000075, :noise_term => 0, :node_index => 45, :stage_objective => 72.5776667580162, :objective_state => nothing, :u => 3.676268934203625, :r => 4.123731065796375, :belief => Dict(45 => 1.0), :x => 
SDDP.State{Float64}(3.676268934203625, 0.0)…), Dict(:i => 8.0, :bellman_term => 380.19481200000064, :noise_term => 8, :node_index => 46, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(46 => 1.0), :x => SDDP.State{Float64}(0.0, 0.40000000000000036)…), Dict(:i => 2.0, :bellman_term => 308.4448120000006, :noise_term => 2, :node_index => 47, :stage_objective => 87.5, :objective_state => nothing, :u => 2.4000000000000004, :r => 5.0, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(0.40000000000000036, 0.0)…), Dict(:i => 3.0, :bellman_term => 245.4294400002313, :noise_term => 3, :node_index => 48, :stage_objective => 78.74999999976866, :objective_state => nothing, :u => 2.8000000000130902, :r => 4.49999999998678, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(0.0, 0.19999999998678053)…), Dict(:i => 7.0, :bellman_term => 173.679440000227, :noise_term => 7, :node_index => 49, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2, :r => 0.0, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(0.19999999998678053, -1.3219647598816664e-11)…), Dict(:i => 8.0, :bellman_term => 102.44680000000001, :noise_term => 8, :node_index => 50, :stage_objective => 0.0, :objective_state => nothing, :u => 7.1, :r => 0.0, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(0.0, 0.9000000000000004)…), Dict(:i => 3.0, :bellman_term => 60.52000000000001, :noise_term => 3, :node_index => 51, :stage_objective => 54.86999999999999, :objective_state => nothing, :u => 3.9000000000000004, :r => 3.0999999999999996, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(0.9000000000000004, 0.0)…), Dict(:i => 3.0, :bellman_term => 0.0, :noise_term => 3, :node_index => 52, :stage_objective => 71.2, :objective_state => nothing, :u => 3.0, :r => 4.0, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(0.0, 0.0)…)]
      + [Dict(:i => 1.0, :bellman_term => 227.17639983640765, :noise_term => 1, :node_index => 1, :stage_objective => 71.39999999999999, :objective_state => nothing, :u => 0.0, :r => 7.0, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 301.0)…), Dict(:i => 0.0, :bellman_term => 183.55081136963372, :noise_term => 0, :node_index => 2, :stage_objective => 73.84000000000378, :objective_state => nothing, :u => 0.0, :r => 7.100000000000364, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(301.0, 300.99999999999864)…), Dict(:i => 1.0, :bellman_term => 190.8347832533277, :noise_term => 1, :node_index => 3, :stage_objective => 23.068450702551136, :objective_state => nothing, :u => 5.023731065797108, :r => 2.1762689342029375, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(300.99999999999864, 296.9762689342015)…), Dict(:i => 2.0, :bellman_term => 200.1377558625859, :noise_term => 2, :node_index => 4, :stage_objective => 0.0, :objective_state => nothing, :u => 7.299999999999898, :r => 0.0, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(296.9762689342015, 291.67626893420163)…), Dict(:i => 3.0, :bellman_term => 209.5679311672925, :noise_term => 3, :node_index => 5, :stage_objective => 0.0, :objective_state => nothing, :u => 7.400000000000432, :r => 0.0, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(291.67626893420163, 287.2762689342012)…), Dict(:i => 2.0, :bellman_term => 220.01015667334514, :noise_term => 2, :node_index => 6, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(287.2762689342012, 281.6762689342012)…), Dict(:i => 3.0, :bellman_term => 210.8844994013525, :noise_term => 3, :node_index => 7, :stage_objective => 19.18785882905121, :objective_state => nothing, :u => 6.187574888315025, :r => 1.6124251116849755, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(281.6762689342012, 278.4886940458861)…), Dict(:i => 2.0, :bellman_term => 221.59122235946597, :noise_term => 2, :node_index => 8, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(278.4886940458861, 272.3886940458861)…), Dict(:i => 8.0, :bellman_term => 172.6785243320578, :noise_term => 8, :node_index => 9, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(272.3886940458861, 272.0886940458861)…), Dict(:i => 2.0, :bellman_term => 181.51855331364914, :noise_term => 2, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(272.0886940458861, 265.48869404588606)…)  …  Dict(:i => 2.0, :bellman_term => 464.6163804539545, :noise_term => 2, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(14.688694045886082, 8.38869404588608)…), Dict(:i => 3.0, :bellman_term => 480.3817371997298, :noise_term => 3, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(8.38869404588608, 3.2886940458860803)…), Dict(:i => 7.0, :bellman_term => 408.4035981501528, :noise_term => 7, :node_index => 45, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(45 => 1.0), :x => SDDP.State{Float64}(3.2886940458860803, 2.4886940458860805)…), Dict(:i => 3.0, :bellman_term => 
380.1948119999279, :noise_term => 3, :node_index => 46, :stage_objective => 43.94785419706639, :objective_state => nothing, :u => 5.088694045881921, :r => 2.5113059541180793, :belief => Dict(46 => 1.0), :x => SDDP.State{Float64}(2.4886940458860805, 0.4000000000041601)…), Dict(:i => 0.0, :bellman_term => 308.44481200000064, :noise_term => 0, :node_index => 47, :stage_objective => 122.49999999992721, :objective_state => nothing, :u => 0.4000000000041597, :r => 6.999999999995841, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(0.4000000000041601, 0.0)…), Dict(:i => 3.0, :bellman_term => 245.4294400002313, :noise_term => 3, :node_index => 48, :stage_objective => 78.74999999976866, :objective_state => nothing, :u => 2.8000000000130902, :r => 4.49999999998678, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(0.0, 0.19999999998678053)…), Dict(:i => 7.0, :bellman_term => 173.679440000227, :noise_term => 7, :node_index => 49, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2, :r => 0.0, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(0.19999999998678053, -1.3219647598816664e-11)…), Dict(:i => 1.0, :bellman_term => 116.962, :noise_term => 1, :node_index => 50, :stage_objective => 107.36, :objective_state => nothing, :u => 1.0, :r => 6.1, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(0.0, 0.0)…), Dict(:i => 8.0, :bellman_term => 48.06000000000001, :noise_term => 8, :node_index => 51, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(0.0, 1.0)…), Dict(:i => 3.0, :bellman_term => 0.0, :noise_term => 3, :node_index => 52, :stage_objective => 53.400000000000006, :objective_state => nothing, :u => 4.0, :r => 3.0, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(1.0, 0.0)…)]
      + [Dict(:i => 8.0, :bellman_term => 169.54614924602208, :noise_term => 8, :node_index => 1, :stage_objective => 56.7735291113664, :objective_state => nothing, :u => 1.433967734179764, :r => 5.566032265820236, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 306.56603226582024)…), Dict(:i => 7.0, :bellman_term => 127.73781496054971, :noise_term => 7, :node_index => 2, :stage_objective => 0.0, :objective_state => nothing, :u => 7.100000000000657, :r => 0.0, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(306.56603226582024, 306.4660322658202)…), Dict(:i => 3.0, :bellman_term => 136.27624482014016, :noise_term => 3, :node_index => 3, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2000000000032855, :r => 0.0, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(306.4660322658202, 302.26603226581693)…), Dict(:i => 2.0, :bellman_term => 144.54729811685047, :noise_term => 2, :node_index => 4, :stage_objective => 0.0, :objective_state => nothing, :u => 7.300000000000068, :r => 0.0, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(302.26603226581693, 296.96603226581686)…), Dict(:i => 3.0, :bellman_term => 152.73359315061134, :noise_term => 3, :node_index => 5, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4000000000002615, :r => 0.0, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(296.96603226581686, 292.5660322658166)…), Dict(:i => 2.0, :bellman_term => 160.39884741423384, :noise_term => 2, :node_index => 6, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(292.5660322658166, 286.9660322658166)…), Dict(:i => 1.0, :bellman_term => 191.11205724889487, :noise_term => 1, :node_index => 7, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(286.9660322658166, 280.16603226581657)…), Dict(:i => 0.0, :bellman_term => 225.42972267988443, :noise_term => 0, :node_index => 8, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(280.16603226581657, 272.06603226581655)…), Dict(:i => 8.0, :bellman_term => 176.493208873208, :noise_term => 8, :node_index => 9, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(272.06603226581655, 271.76603226581653)…), Dict(:i => 2.0, :bellman_term => 185.19400838221281, :noise_term => 2, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(271.76603226581653, 265.1660322658165)…)  …  Dict(:i => 2.0, :bellman_term => 505.4206691736645, :noise_term => 2, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(12.36603226581653, 6.0660322658165295)…), Dict(:i => 3.0, :bellman_term => 521.1826441216299, :noise_term => 3, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(6.0660322658165295, 0.9660322658165299)…), Dict(:i => 7.0, :bellman_term => 449.0392473482114, :noise_term => 7, :node_index => 45, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(45 => 1.0), :x => SDDP.State{Float64}(0.9660322658165299, 0.16603226581653008)…), Dict(:i => 3.0, :bellman_term => 380.19481199999524, :noise_term => 
3, :node_index => 46, :stage_objective => 84.59443534821612, :objective_state => nothing, :u => 2.7660322658162215, :r => 4.833967734183778, :belief => Dict(46 => 1.0), :x => SDDP.State{Float64}(0.16603226581653008, 0.40000000000030816)…), Dict(:i => 0.0, :bellman_term => 308.44481200000064, :noise_term => 0, :node_index => 47, :stage_objective => 122.4999999999946, :objective_state => nothing, :u => 0.40000000000030855, :r => 6.999999999999692, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(0.40000000000030816, 0.0)…), Dict(:i => 3.0, :bellman_term => 245.4294400002313, :noise_term => 3, :node_index => 48, :stage_objective => 78.74999999976866, :objective_state => nothing, :u => 2.8000000000130902, :r => 4.49999999998678, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(0.0, 0.19999999998678053)…), Dict(:i => 0.0, :bellman_term => 173.67944000000017, :noise_term => 0, :node_index => 49, :stage_objective => 122.50000000023134, :objective_state => nothing, :u => 0.19999999998678053, :r => 7.00000000001322, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(0.19999999998678053, 0.0)…), Dict(:i => 8.0, :bellman_term => 102.44680000000001, :noise_term => 8, :node_index => 50, :stage_objective => 0.0, :objective_state => nothing, :u => 7.1, :r => 0.0, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(0.0, 0.9000000000000004)…), Dict(:i => 3.0, :bellman_term => 60.52000000000001, :noise_term => 3, :node_index => 51, :stage_objective => 54.86999999999999, :objective_state => nothing, :u => 3.9000000000000004, :r => 3.0999999999999996, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(0.9000000000000004, 0.0)…), Dict(:i => 3.0, :bellman_term => 0.0, :noise_term => 3, :node_index => 52, :stage_objective => 71.2, :objective_state => nothing, :u => 3.0, :r => 4.0, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(0.0, 0.0)…)]
        ⋮
      - [Dict(:i => 8.0, :bellman_term => 157.70849961329745, :noise_term => 8, :node_index => 1, :stage_objective => 67.95478050807554, :objective_state => nothing, :u => 0.3377666168553374, :r => 6.662233383144662, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 307.66223338314467)…), Dict(:i => 0.0, :bellman_term => 132.07724725626247, :noise_term => 0, :node_index => 2, :stage_objective => 55.79466634323103, :objective_state => nothing, :u => 1.7351282362277851, :r => 5.364871763772214, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(307.66223338314467, 305.92710514691686)…), Dict(:i => 1.0, :bellman_term => 161.3564636981473, :noise_term => 1, :node_index => 3, :stage_objective => 0.1140545784872408, :objective_state => nothing, :u => 7.189240134104978, :r => 0.010759865895022718, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(305.92710514691686, 299.7378650128119)…), Dict(:i => 7.0, :bellman_term => 119.86532525256371, :noise_term => 7, :node_index => 4, :stage_objective => 0.0, :objective_state => nothing, :u => 7.3, :r => 0.0, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(299.7378650128119, 299.43786501281187)…), Dict(:i => 8.0, :bellman_term => 83.78309967960467, :noise_term => 8, :node_index => 5, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(299.43786501281187, 300.0378650128119)…), Dict(:i => 0.0, :bellman_term => 106.4797064417885, :noise_term => 0, :node_index => 6, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(300.0378650128119, 292.43786501281187)…), Dict(:i => 8.0, :bellman_term => 71.71706415035351, :noise_term => 8, :node_index => 7, :stage_objective => 0.0, :objective_state => nothing, :u => 7.799999999998363, :r => 0.0, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(292.43786501281187, 292.6378650128135)…), Dict(:i => 0.0, :bellman_term => 94.52136317382201, :noise_term => 0, :node_index => 8, :stage_objective => 0.0, :objective_state => nothing, :u => 8.100000000000193, :r => 0.0, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(292.6378650128135, 284.5378650128133)…), Dict(:i => 1.0, :bellman_term => 120.4413690641627, :noise_term => 1, :node_index => 9, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(284.5378650128133, 277.2378650128133)…), Dict(:i => 0.0, :bellman_term => 148.03454333013133, :noise_term => 0, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(277.2378650128133, 268.6378650128133)…)  …  Dict(:i => 7.0, :bellman_term => 339.7612312829289, :noise_term => 7, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(16.83786501281331, 15.53786501281331)…), Dict(:i => 1.0, :bellman_term => 390.0786068161812, :noise_term => 1, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(15.53786501281331, 8.43786501281331)…), Dict(:i => 0.0, :bellman_term => 440.7839392227174, :noise_term => 0, :node_index => 45, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(45 => 1.0), :x => SDDP.State{Float64}(8.43786501281331, 0.6378650128133101)…), Dict(:i => 8.0, 
:bellman_term => 369.0380574323556, :noise_term => 8, :node_index => 46, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(46 => 1.0), :x => SDDP.State{Float64}(0.6378650128133101, 1.0378650128133113)…), Dict(:i => 7.0, :bellman_term => 297.30178479772104, :noise_term => 7, :node_index => 47, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(1.0378650128133113, 0.637865012813311)…), Dict(:i => 1.0, :bellman_term => 245.42944000000114, :noise_term => 1, :node_index => 48, :stage_objective => 102.5873622757676, :objective_state => nothing, :u => 1.4378650128132797, :r => 5.86213498718672, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(0.637865012813311, 0.20000000000003032)…), Dict(:i => 7.0, :bellman_term => 173.67944000000003, :noise_term => 7, :node_index => 49, :stage_objective => -5.284661597215715e-13, :objective_state => nothing, :u => 7.20000000000003, :r => -3.019806626980409e-14, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(0.20000000000003032, 0.0)…), Dict(:i => 1.0, :bellman_term => 116.962, :noise_term => 1, :node_index => 50, :stage_objective => 107.36, :objective_state => nothing, :u => 1.0, :r => 6.1, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(0.0, 0.0)…), Dict(:i => 8.0, :bellman_term => 48.06, :noise_term => 8, :node_index => 51, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(0.0, 1.0)…), Dict(:i => 1.0, :bellman_term => 0.0, :noise_term => 1, :node_index => 52, :stage_objective => 89.0, :objective_state => nothing, :u => 2.0, :r => 5.0, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(1.0, 0.0)…)]
      - [Dict(:i => 8.0, :bellman_term => 157.70849961329745, :noise_term => 8, :node_index => 1, :stage_objective => 67.95478050807554, :objective_state => nothing, :u => 0.3377666168553374, :r => 6.662233383144662, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 307.66223338314467)…), Dict(:i => 7.0, :bellman_term => 116.42764471359253, :noise_term => 7, :node_index => 2, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0999999999999135, :r => 0.0, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(307.66223338314467, 307.56223338314476)…), Dict(:i => 8.0, :bellman_term => 76.07381623692208, :noise_term => 8, :node_index => 3, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2, :r => 0.0, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(307.56223338314476, 308.36223338314477)…), Dict(:i => 7.0, :bellman_term => 45.331888828966385, :noise_term => 7, :node_index => 4, :stage_objective => 0.0, :objective_state => nothing, :u => 7.3, :r => 0.0, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(308.36223338314477, 308.06223338314476)…), Dict(:i => 1.0, :bellman_term => 71.56027685813251, :noise_term => 1, :node_index => 5, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(308.06223338314476, 301.6622333831448)…), Dict(:i => 0.0, :bellman_term => 92.6556623155534, :noise_term => 0, :node_index => 6, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(301.6622333831448, 294.06223338314476)…), Dict(:i => 3.0, :bellman_term => 100.32839493655865, :noise_term => 3, :node_index => 7, :stage_objective => 0.0, :objective_state => nothing, :u => 7.800000000001603, :r => 0.0, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(294.06223338314476, 289.26223338314315)…), Dict(:i => 2.0, :bellman_term => 107.032255151551, :noise_term => 2, :node_index => 8, :stage_objective => 0.0, :objective_state => nothing, :u => 8.10000000000042, :r => 0.0, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(289.26223338314315, 283.16223338314273)…), Dict(:i => 3.0, :bellman_term => 114.70748054419437, :noise_term => 3, :node_index => 9, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(283.16223338314273, 277.8622333831427)…), Dict(:i => 2.0, :bellman_term => 122.2985212328258, :noise_term => 2, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(277.8622333831427, 271.2622333831427)…)  …  Dict(:i => 7.0, :bellman_term => 503.70633604516837, :noise_term => 7, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(7.462233383142733, 6.162233383142732)…), Dict(:i => 3.0, :bellman_term => 519.4975891349607, :noise_term => 3, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(6.162233383142732, 1.0622333831427326)…), Dict(:i => 7.0, :bellman_term => 447.35645338425843, :noise_term => 7, :node_index => 45, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(45 => 1.0), :x => SDDP.State{Float64}(1.0622333831427326, 0.2622333831427328)…), Dict(:i => 3.0, :bellman_term => 380.19481199997017, :noise_term => 3, :node_index => 
46, :stage_objective => 82.91091579503453, :objective_state => nothing, :u => 2.862233383140884, :r => 4.7377666168591155, :belief => Dict(46 => 1.0), :x => SDDP.State{Float64}(0.2622333831427328, 0.40000000000184893)…), Dict(:i => 2.0, :bellman_term => 308.4448120000001, :noise_term => 2, :node_index => 47, :stage_objective => 87.49999999996764, :objective_state => nothing, :u => 2.4000000000018495, :r => 4.999999999998151, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(0.40000000000184893, 0.0)…), Dict(:i => 8.0, :bellman_term => 236.7306800000041, :noise_term => 8, :node_index => 48, :stage_objective => 0.0, :objective_state => nothing, :u => 7.3, :r => 0.0, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(0.0, 0.6999999999993632)…), Dict(:i => 7.0, :bellman_term => 165.10024000001096, :noise_term => 7, :node_index => 49, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2, :r => 0.0, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(0.6999999999993632, 0.4999999999993632)…), Dict(:i => 1.0, :bellman_term => 116.96200000000003, :noise_term => 1, :node_index => 50, :stage_objective => 98.56000000001121, :objective_state => nothing, :u => 1.4999999999993632, :r => 5.6000000000006365, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(0.4999999999993632, 0.0)…), Dict(:i => 1.0, :bellman_term => 60.52000000000001, :noise_term => 1, :node_index => 51, :stage_objective => 106.19999999999999, :objective_state => nothing, :u => 1.0, :r => 6.0, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(0.0, 0.0)…), Dict(:i => 3.0, :bellman_term => 0.0, :noise_term => 3, :node_index => 52, :stage_objective => 71.2, :objective_state => nothing, :u => 3.0, :r => 4.0, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(0.0, 0.0)…)]
      - [Dict(:i => 3.0, :bellman_term => 206.2012573335387, :noise_term => 3, :node_index => 1, :stage_objective => 71.39999999999999, :objective_state => nothing, :u => 0.0, :r => 7.0, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 303.0)…), Dict(:i => 2.0, :bellman_term => 141.72071188528662, :noise_term => 2, :node_index => 2, :stage_objective => 73.83999999999905, :objective_state => nothing, :u => 0.0, :r => 7.099999999999909, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(303.0, 305.0)…), Dict(:i => 1.0, :bellman_term => 161.35646369815868, :noise_term => 1, :node_index => 3, :stage_objective => 9.941369135794806, :objective_state => nothing, :u => 6.262134987189169, :r => 0.9378650128108308, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(305.0, 299.73786501281086)…), Dict(:i => 7.0, :bellman_term => 119.86532525257326, :noise_term => 7, :node_index => 4, :stage_objective => 0.0, :objective_state => nothing, :u => 7.3, :r => 0.0, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(299.73786501281086, 299.43786501281085)…), Dict(:i => 8.0, :bellman_term => 83.7830996796124, :noise_term => 8, :node_index => 5, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(299.43786501281085, 300.03786501281087)…), Dict(:i => 2.0, :bellman_term => 89.54300195134465, :noise_term => 2, :node_index => 6, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(300.03786501281087, 294.43786501281085)…), Dict(:i => 3.0, :bellman_term => 97.14459943124666, :noise_term => 3, :node_index => 7, :stage_objective => 0.0, :objective_state => nothing, :u => 7.800000000001603, :r => 0.0, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(294.43786501281085, 289.63786501280924)…), Dict(:i => 0.0, :bellman_term => 121.80532141068261, :noise_term => 0, :node_index => 8, :stage_objective => 0.0, :objective_state => nothing, :u => 8.100000000000023, :r => 0.0, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(289.63786501280924, 281.5378650128092)…), Dict(:i => 3.0, :bellman_term => 129.814862722199, :noise_term => 3, :node_index => 9, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(281.5378650128092, 276.2378650128092)…), Dict(:i => 0.0, :bellman_term => 158.03326175477832, :noise_term => 0, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(276.2378650128092, 267.6378650128092)…)  …  Dict(:i => 0.0, :bellman_term => 189.08975078546558, :noise_term => 0, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(32.837865012809175, 24.537865012809174)…), Dict(:i => 1.0, :bellman_term => 234.9532859433008, :noise_term => 1, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(24.537865012809174, 17.437865012809176)…), Dict(:i => 2.0, :bellman_term => 249.53536182079102, :noise_term => 2, :node_index => 45, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(45 => 1.0), :x => SDDP.State{Float64}(17.437865012809176, 11.637865012809176)…), Dict(:i => 1.0, :bellman_term => 299.192075321581, :noise_term => 1, :node_index => 46, :stage_objective 
=> 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(46 => 1.0), :x => SDDP.State{Float64}(11.637865012809176, 5.037865012813519)…), Dict(:i => 0.0, :bellman_term => 308.44481200000007, :noise_term => 0, :node_index => 47, :stage_objective => 41.33736227576342, :objective_state => nothing, :u => 5.037865012813519, :r => 2.362134987186481, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(5.037865012813519, 0.0)…), Dict(:i => 1.0, :bellman_term => 245.42943999999622, :noise_term => 1, :node_index => 48, :stage_objective => 113.75000000000537, :objective_state => nothing, :u => 0.7999999999996925, :r => 6.500000000000307, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(0.0, 0.20000000000030746)…), Dict(:i => 2.0, :bellman_term => 173.67944000000003, :noise_term => 2, :node_index => 49, :stage_objective => 87.49999999999463, :objective_state => nothing, :u => 2.2000000000003075, :r => 4.999999999999693, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(0.20000000000030746, 0.0)…), Dict(:i => 8.0, :bellman_term => 102.4468, :noise_term => 8, :node_index => 50, :stage_objective => 0.0, :objective_state => nothing, :u => 7.1, :r => 0.0, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(0.0, 0.9000000000000004)…), Dict(:i => 8.0, :bellman_term => 36.846, :noise_term => 8, :node_index => 51, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(0.9000000000000004, 1.9000000000000004)…), Dict(:i => 1.0, :bellman_term => 0.0, :noise_term => 1, :node_index => 52, :stage_objective => 72.97999999999999, :objective_state => nothing, :u => 2.9000000000000004, :r => 4.1, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(1.9000000000000004, 0.0)…)]
      - [Dict(:i => 3.0, :bellman_term => 206.2012573335387, :noise_term => 3, :node_index => 1, :stage_objective => 71.39999999999999, :objective_state => nothing, :u => 0.0, :r => 7.0, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 303.0)…), Dict(:i => 7.0, :bellman_term => 132.0772472562634, :noise_term => 7, :node_index => 2, :stage_objective => 31.4818935279344, :objective_state => nothing, :u => 4.0728948530832305, :r => 3.027105146916769, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(303.0, 305.92710514691674)…), Dict(:i => 8.0, :bellman_term => 91.75961487538962, :noise_term => 8, :node_index => 3, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2, :r => 0.0, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(305.92710514691674, 306.72710514691676)…), Dict(:i => 2.0, :bellman_term => 101.96554270529668, :noise_term => 2, :node_index => 4, :stage_objective => 0.0, :objective_state => nothing, :u => 7.3, :r => 0.0, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(306.72710514691676, 301.42710514691674)…), Dict(:i => 3.0, :bellman_term => 108.9225150449156, :noise_term => 3, :node_index => 5, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(301.42710514691674, 297.02710514691677)…), Dict(:i => 2.0, :bellman_term => 115.89483239413266, :noise_term => 2, :node_index => 6, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(297.02710514691677, 291.42710514691674)…), Dict(:i => 1.0, :bellman_term => 143.374633260883, :noise_term => 1, :node_index => 7, :stage_objective => 0.0, :objective_state => nothing, :u => 7.800000000000409, :r => 0.0, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(291.42710514691674, 284.62710514691634)…), Dict(:i => 0.0, :bellman_term => 174.15377615191483, :noise_term => 0, :node_index => 8, :stage_objective => 0.0, :objective_state => nothing, :u => 8.099999999999682, :r => 0.0, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(284.62710514691634, 276.52710514691665)…), Dict(:i => 3.0, :bellman_term => 183.33830785243845, :noise_term => 3, :node_index => 9, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(276.52710514691665, 271.22710514691664)…), Dict(:i => 7.0, :bellman_term => 138.2743112863468, :noise_term => 7, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(271.22710514691664, 269.6271051469166)…)  …  Dict(:i => 7.0, :bellman_term => 221.2501277930652, :noise_term => 7, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(23.827105146916637, 22.527105146916636)…), Dict(:i => 8.0, :bellman_term => 155.2257192741593, :noise_term => 8, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(22.527105146916636, 22.427105146916638)…), Dict(:i => 7.0, :bellman_term => 95.2973182870118, :noise_term => 7, :node_index => 45, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(45 => 1.0), :x => SDDP.State{Float64}(22.427105146916638, 21.627105146916637)…), Dict(:i => 3.0, :bellman_term => 103.71533695457161, :noise_term => 3, :node_index => 46, :stage_objective 
=> 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(46 => 1.0), :x => SDDP.State{Float64}(21.627105146916637, 17.027105146916185)…), Dict(:i => 7.0, :bellman_term => 51.46582330465339, :noise_term => 7, :node_index => 47, :stage_objective => 0.0, :objective_state => nothing, :u => 7.399999999999999, :r => 0.0, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(17.027105146916185, 16.627105146916186)…), Dict(:i => 3.0, :bellman_term => 56.41368243014006, :noise_term => 3, :node_index => 48, :stage_objective => 0.0, :objective_state => nothing, :u => 7.3, :r => 0.0, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(16.627105146916186, 12.327105146916185)…), Dict(:i => 7.0, :bellman_term => 15.564990419384854, :noise_term => 7, :node_index => 49, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2, :r => 0.0, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(12.327105146916185, 12.127105146916186)…), Dict(:i => 1.0, :bellman_term => 32.15958890859703, :noise_term => 1, :node_index => 50, :stage_objective => 0.0, :objective_state => nothing, :u => 7.1, :r => 0.0, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(12.127105146916186, 6.0271051469161865)…), Dict(:i => 3.0, :bellman_term => 35.262269869424316, :noise_term => 3, :node_index => 51, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(6.0271051469161865, 2.0271051469161865)…), Dict(:i => 3.0, :bellman_term => 0.0, :noise_term => 3, :node_index => 52, :stage_objective => 35.117528384891884, :objective_state => nothing, :u => 5.0271051469161865, :r => 1.9728948530838135, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(2.0271051469161865, 0.0)…)]
      - [Dict(:i => 3.0, :bellman_term => 206.2012573335387, :noise_term => 3, :node_index => 1, :stage_objective => 71.39999999999999, :objective_state => nothing, :u => 0.0, :r => 7.0, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 303.0)…), Dict(:i => 2.0, :bellman_term => 141.72071188528662, :noise_term => 2, :node_index => 2, :stage_objective => 73.83999999999905, :objective_state => nothing, :u => 0.0, :r => 7.099999999999909, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(303.0, 305.0)…), Dict(:i => 3.0, :bellman_term => 150.66900685445626, :noise_term => 3, :node_index => 3, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2, :r => 0.0, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(305.0, 300.8)…), Dict(:i => 2.0, :bellman_term => 159.85661622550424, :noise_term => 2, :node_index => 4, :stage_objective => 0.0, :objective_state => nothing, :u => 7.3, :r => 0.0, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(300.8, 295.5)…), Dict(:i => 1.0, :bellman_term => 190.51153547097647, :noise_term => 1, :node_index => 5, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(295.5, 289.1)…), Dict(:i => 0.0, :bellman_term => 223.0910457486716, :noise_term => 0, :node_index => 6, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(289.1, 281.5)…), Dict(:i => 3.0, :bellman_term => 233.83587871385862, :noise_term => 3, :node_index => 7, :stage_objective => 0.0, :objective_state => nothing, :u => 7.799999999998249, :r => 0.0, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(281.5, 276.70000000000175)…), Dict(:i => 0.0, :bellman_term => 208.8263808321285, :noise_term => 0, :node_index => 8, :stage_objective => 60.309920500918835, :objective_state => nothing, :u => 3.196754430819965, :r => 4.903245569180393, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(276.70000000000175, 273.5032455691818)…), Dict(:i => 1.0, :bellman_term => 225.33166301838537, :noise_term => 1, :node_index => 9, :stage_objective => 18.989715826064774, :objective_state => nothing, :u => 6.804746785349231, :r => 1.4952532146507698, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(273.5032455691818, 267.6984987838325)…), Dict(:i => 2.0, :bellman_term => 236.29371607381108, :noise_term => 2, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(267.6984987838325, 261.0984987838325)…)  …  Dict(:i => 7.0, :bellman_term => 97.95385141376576, :noise_term => 7, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(32.298498783832514, 30.998498783832513)…), Dict(:i => 8.0, :bellman_term => 51.04850246434015, :noise_term => 8, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(30.998498783832513, 30.898498783832515)…), Dict(:i => 2.0, :bellman_term => 56.55326507353334, :noise_term => 2, :node_index => 45, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(45 => 1.0), :x => SDDP.State{Float64}(30.898498783832515, 25.098498783832515)…), Dict(:i => 8.0, :bellman_term => 21.567750894909576, :noise_term => 8, :node_index => 46, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => 
Dict(46 => 1.0), :x => SDDP.State{Float64}(25.098498783832515, 25.49849878383221)…), Dict(:i => 0.0, :bellman_term => 37.591579394930164, :noise_term => 0, :node_index => 47, :stage_objective => 0.0, :objective_state => nothing, :u => 7.399999999999999, :r => 0.0, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(25.49849878383221, 18.098498783832213)…), Dict(:i => 8.0, :bellman_term => 8.831101129876174, :noise_term => 8, :node_index => 48, :stage_objective => 0.0, :objective_state => nothing, :u => 7.3, :r => 0.0, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(18.098498783832213, 18.798498783832212)…), Dict(:i => 0.0, :bellman_term => 18.761465525190832, :noise_term => 0, :node_index => 49, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2, :r => 0.0, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(18.798498783832212, 11.598498783832213)…), Dict(:i => 3.0, :bellman_term => 19.32609360741546, :noise_term => 3, :node_index => 50, :stage_objective => 0.0, :objective_state => nothing, :u => 7.1, :r => 0.0, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(11.598498783832213, 7.498498783832213)…), Dict(:i => 8.0, :bellman_term => 0.0, :noise_term => 8, :node_index => 51, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(7.498498783832213, 8.498498783832213)…), Dict(:i => 1.0, :bellman_term => 0.0, :noise_term => 1, :node_index => 52, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(8.498498783832213, 0.0)…)]
      - [Dict(:i => 8.0, :bellman_term => 157.70849961329745, :noise_term => 8, :node_index => 1, :stage_objective => 67.95478050807554, :objective_state => nothing, :u => 0.3377666168553374, :r => 6.662233383144662, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 307.66223338314467)…), Dict(:i => 0.0, :bellman_term => 132.07724725626247, :noise_term => 0, :node_index => 2, :stage_objective => 55.79466634323103, :objective_state => nothing, :u => 1.7351282362277851, :r => 5.364871763772214, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(307.66223338314467, 305.92710514691686)…), Dict(:i => 1.0, :bellman_term => 161.3564636981523, :noise_term => 1, :node_index => 3, :stage_objective => 0.11405457848262505, :objective_state => nothing, :u => 7.189240134105413, :r => 0.01075986589458727, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(305.92710514691686, 299.7378650128115)…), Dict(:i => 2.0, :bellman_term => 171.092908727524, :noise_term => 2, :node_index => 4, :stage_objective => 0.0, :objective_state => nothing, :u => 7.3, :r => 0.0, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(299.7378650128115, 294.43786501281147)…), Dict(:i => 1.0, :bellman_term => 202.22970679456103, :noise_term => 1, :node_index => 5, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(294.43786501281147, 288.0378650128115)…), Dict(:i => 0.0, :bellman_term => 235.2874040024194, :noise_term => 0, :node_index => 6, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(288.0378650128115, 280.43786501281147)…), Dict(:i => 3.0, :bellman_term => 238.695153699186, :noise_term => 3, :node_index => 7, :stage_objective => 7.773144179626354, :objective_state => nothing, :u => 7.146794606754099, :r => 0.6532053932459121, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(280.43786501281147, 276.2910704060574)…), Dict(:i => 7.0, :bellman_term => 189.16815361871613, :noise_term => 7, :node_index => 8, :stage_objective => 0.0, :objective_state => nothing, :u => 8.100000000000591, :r => 0.0, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(276.2910704060574, 275.1910704060568)…), Dict(:i => 8.0, :bellman_term => 142.52917254350587, :noise_term => 8, :node_index => 9, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(275.1910704060568, 274.89107040605677)…), Dict(:i => 0.0, :bellman_term => 173.03761857962854, :noise_term => 0, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(274.89107040605677, 266.29107040605675)…)  …  Dict(:i => 7.0, :bellman_term => 16.10212624592633, :noise_term => 7, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(42.49107040605675, 41.19107040605675)…), Dict(:i => 8.0, :bellman_term => 3.806782527829693, :noise_term => 8, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(41.19107040605675, 41.09107040605675)…), Dict(:i => 0.0, :bellman_term => 7.3547075466143355, :noise_term => 0, :node_index => 45, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(45 => 1.0), :x => SDDP.State{Float64}(41.09107040605675, 
33.291070406056754)…), Dict(:i => 1.0, :bellman_term => 15.647827351965576, :noise_term => 1, :node_index => 46, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(46 => 1.0), :x => SDDP.State{Float64}(33.291070406056754, 26.691070406056706)…), Dict(:i => 2.0, :bellman_term => 16.216174284019036, :noise_term => 2, :node_index => 47, :stage_objective => 0.0, :objective_state => nothing, :u => 7.399999999999999, :r => 0.0, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(26.691070406056706, 21.291070406056704)…), Dict(:i => 8.0, :bellman_term => 1.4835959694133862, :noise_term => 8, :node_index => 48, :stage_objective => 0.0, :objective_state => nothing, :u => 7.3, :r => 0.0, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(21.291070406056704, 21.991070406056703)…), Dict(:i => 0.0, :bellman_term => 4.106557814245746, :noise_term => 0, :node_index => 49, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2, :r => 0.0, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(21.991070406056703, 14.791070406056704)…), Dict(:i => 8.0, :bellman_term => 0.0, :noise_term => 8, :node_index => 50, :stage_objective => 0.0, :objective_state => nothing, :u => 7.1, :r => 0.0, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(14.791070406056704, 15.691070406056705)…), Dict(:i => 8.0, :bellman_term => 0.0, :noise_term => 8, :node_index => 51, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(15.691070406056705, 16.691070406056703)…), Dict(:i => 1.0, :bellman_term => 0.0, :noise_term => 1, :node_index => 52, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(16.691070406056703, 0.0)…)]
      - [Dict(:i => 8.0, :bellman_term => 157.70849961329745, :noise_term => 8, :node_index => 1, :stage_objective => 67.95478050807554, :objective_state => nothing, :u => 0.3377666168553374, :r => 6.662233383144662, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 307.66223338314467)…), Dict(:i => 7.0, :bellman_term => 116.42764471359253, :noise_term => 7, :node_index => 2, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0999999999999135, :r => 0.0, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(307.66223338314467, 307.56223338314476)…), Dict(:i => 1.0, :bellman_term => 145.0809793215517, :noise_term => 1, :node_index => 3, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2, :r => 0.0, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(307.56223338314476, 301.36223338314477)…), Dict(:i => 7.0, :bellman_term => 105.07987907424376, :noise_term => 7, :node_index => 4, :stage_objective => 0.0, :objective_state => nothing, :u => 7.3, :r => 0.0, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(301.36223338314477, 301.06223338314476)…), Dict(:i => 1.0, :bellman_term => 131.43694812454532, :noise_term => 1, :node_index => 5, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(301.06223338314476, 294.6622333831448)…), Dict(:i => 0.0, :bellman_term => 159.46568251199096, :noise_term => 0, :node_index => 6, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(294.6622333831448, 287.06223338314476)…), Dict(:i => 3.0, :bellman_term => 168.37771122605682, :noise_term => 3, :node_index => 7, :stage_objective => 0.0, :objective_state => nothing, :u => 7.799999999999102, :r => 0.0, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(287.06223338314476, 282.26223338314566)…), Dict(:i => 0.0, :bellman_term => 201.15109848365864, :noise_term => 0, :node_index => 8, :stage_objective => 0.0, :objective_state => nothing, :u => 8.099999999999795, :r => 0.0, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(282.26223338314566, 274.16223338314586)…), Dict(:i => 1.0, :bellman_term => 225.33166301837537, :noise_term => 1, :node_index => 9, :stage_objective => 10.620570588731692, :objective_state => nothing, :u => 7.463734599312466, :r => 0.8362654006875349, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(274.16223338314586, 267.6984987838334)…), Dict(:i => 0.0, :bellman_term => 262.480328941479, :noise_term => 0, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(267.6984987838334, 259.09849878383335)…)  …  Dict(:i => 2.0, :bellman_term => 471.46261672038986, :noise_term => 2, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(14.298498783833368, 7.998498783833366)…), Dict(:i => 3.0, :bellman_term => 487.2367065411063, :noise_term => 3, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(7.998498783833366, 2.8984987838333662)…), Dict(:i => 0.0, :bellman_term => 451.9448120000023, :noise_term => 0, :node_index => 45, :stage_objective => 86.26642140453276, :objective_state => nothing, :u => 2.8984987838333662, :r => 4.901501216166634, :belief => Dict(45 => 1.0), :x => SDDP.State{Float64}(2.8984987838333662, 0.0)…), Dict(:i 
=> 1.0, :bellman_term => 380.19481203568444, :noise_term => 1, :node_index => 46, :stage_objective => 122.49999996429908, :objective_state => nothing, :u => 0.6000000020401025, :r => 6.999999997959947, :belief => Dict(46 => 1.0), :x => SDDP.State{Float64}(0.0, 0.399999997961027)…), Dict(:i => 7.0, :bellman_term => 308.44481203561935, :noise_term => 7, :node_index => 47, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(0.399999997961027, -2.0389734345371835e-9)…), Dict(:i => 3.0, :bellman_term => 245.4294399999986, :noise_term => 3, :node_index => 48, :stage_objective => 78.75000000000145, :objective_state => nothing, :u => 2.799999999999917, :r => 4.500000000000083, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(0.0, 0.2000000000000832)…), Dict(:i => 2.0, :bellman_term => 173.67944000000006, :noise_term => 2, :node_index => 49, :stage_objective => 87.49999999999854, :objective_state => nothing, :u => 2.2000000000000837, :r => 4.9999999999999165, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(0.2000000000000832, 0.0)…), Dict(:i => 1.0, :bellman_term => 116.962, :noise_term => 1, :node_index => 50, :stage_objective => 107.36, :objective_state => nothing, :u => 1.0, :r => 6.1, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(0.0, 0.0)…), Dict(:i => 8.0, :bellman_term => 48.06, :noise_term => 8, :node_index => 51, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(0.0, 1.0)…), Dict(:i => 3.0, :bellman_term => 0.0, :noise_term => 3, :node_index => 52, :stage_objective => 53.400000000000006, :objective_state => nothing, :u => 4.0, :r => 3.0, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(1.0, 0.0)…)]
      - [Dict(:i => 3.0, :bellman_term => 206.2012573335387, :noise_term => 3, :node_index => 1, :stage_objective => 71.39999999999999, :objective_state => nothing, :u => 0.0, :r => 7.0, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 303.0)…), Dict(:i => 7.0, :bellman_term => 132.0772472562634, :noise_term => 7, :node_index => 2, :stage_objective => 31.4818935279344, :objective_state => nothing, :u => 4.0728948530832305, :r => 3.027105146916769, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(303.0, 305.92710514691674)…), Dict(:i => 3.0, :bellman_term => 141.45452539145435, :noise_term => 3, :node_index => 3, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2, :r => 0.0, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(305.92710514691674, 301.72710514691676)…), Dict(:i => 7.0, :bellman_term => 101.96554270529668, :noise_term => 7, :node_index => 4, :stage_objective => 0.0, :objective_state => nothing, :u => 7.3, :r => 0.0, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(301.72710514691676, 301.42710514691674)…), Dict(:i => 1.0, :bellman_term => 127.96323718852909, :noise_term => 1, :node_index => 5, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(301.42710514691674, 295.02710514691677)…), Dict(:i => 2.0, :bellman_term => 135.8591532154187, :noise_term => 2, :node_index => 6, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(295.02710514691677, 289.42710514691674)…), Dict(:i => 3.0, :bellman_term => 143.374633260883, :noise_term => 3, :node_index => 7, :stage_objective => 0.0, :objective_state => nothing, :u => 7.800000000000409, :r => 0.0, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(289.42710514691674, 284.62710514691634)…), Dict(:i => 2.0, :bellman_term => 152.33435293439152, :noise_term => 2, :node_index => 8, :stage_objective => 0.0, :objective_state => nothing, :u => 8.099999999999682, :r => 0.0, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(284.62710514691634, 278.52710514691665)…), Dict(:i => 8.0, :bellman_term => 111.35667977179128, :noise_term => 8, :node_index => 9, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(278.52710514691665, 278.22710514691664)…), Dict(:i => 0.0, :bellman_term => 138.2743112863468, :noise_term => 0, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(278.22710514691664, 269.6271051469166)…)  …  Dict(:i => 0.0, :bellman_term => 0.7053743478671777, :noise_term => 0, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(58.82710514691663, 50.527105146916625)…), Dict(:i => 3.0, :bellman_term => 0.6173164630248027, :noise_term => 3, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(50.527105146916625, 45.427105146916624)…), Dict(:i => 2.0, :bellman_term => 0.45432785815387433, :noise_term => 2, :node_index => 45, :stage_objective => 0.0, :objective_state => nothing, :u => 7.799999999999997, :r => 0.0, :belief => Dict(45 => 1.0), :x => SDDP.State{Float64}(45.427105146916624, 39.62710514691663)…), Dict(:i => 3.0, :bellman_term => 0.25635850373313224, :noise_term => 3, :node_index => 46, 
:stage_objective => 0.0, :objective_state => nothing, :u => 7.599999999994067, :r => 0.0, :belief => Dict(46 => 1.0), :x => SDDP.State{Float64}(39.62710514691663, 35.02710514692305)…), Dict(:i => 7.0, :bellman_term => 0.0, :noise_term => 7, :node_index => 47, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(35.02710514692305, 31.600000000000005)…), Dict(:i => 3.0, :bellman_term => 0.0, :noise_term => 3, :node_index => 48, :stage_objective => 0.0, :objective_state => nothing, :u => 7.3, :r => 0.0, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(31.600000000000005, 25.299999999999848)…), Dict(:i => 0.0, :bellman_term => 7.171223614932385e-14, :noise_term => 0, :node_index => 49, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2, :r => 0.0, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(25.299999999999848, 18.09999999999985)…), Dict(:i => 3.0, :bellman_term => 0.0, :noise_term => 3, :node_index => 50, :stage_objective => 0.0, :objective_state => nothing, :u => 7.1, :r => 0.0, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(18.09999999999985, 13.999999999999849)…), Dict(:i => 1.0, :bellman_term => 0.0, :noise_term => 1, :node_index => 51, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(13.999999999999849, 7.999999999999849)…), Dict(:i => 8.0, :bellman_term => 0.0, :noise_term => 8, :node_index => 52, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(7.999999999999849, 0.0)…)]
      - [Dict(:i => 8.0, :bellman_term => 157.70849961329745, :noise_term => 8, :node_index => 1, :stage_objective => 67.95478050807554, :objective_state => nothing, :u => 0.3377666168553374, :r => 6.662233383144662, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 307.66223338314467)…), Dict(:i => 0.0, :bellman_term => 132.07724725626247, :noise_term => 0, :node_index => 2, :stage_objective => 55.79466634323103, :objective_state => nothing, :u => 1.7351282362277851, :r => 5.364871763772214, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(307.66223338314467, 305.92710514691686)…), Dict(:i => 3.0, :bellman_term => 141.454525391453, :noise_term => 3, :node_index => 3, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2, :r => 0.0, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(305.92710514691686, 301.72710514691687)…), Dict(:i => 2.0, :bellman_term => 150.0488001413546, :noise_term => 2, :node_index => 4, :stage_objective => 0.0, :objective_state => nothing, :u => 7.3, :r => 0.0, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(301.72710514691687, 296.42710514691686)…), Dict(:i => 3.0, :bellman_term => 158.21778749407167, :noise_term => 3, :node_index => 5, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(296.42710514691686, 292.0271051469169)…), Dict(:i => 0.0, :bellman_term => 189.47947711681172, :noise_term => 0, :node_index => 6, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(292.0271051469169, 284.42710514691686)…), Dict(:i => 8.0, :bellman_term => 143.37463326088346, :noise_term => 8, :node_index => 7, :stage_objective => 0.0, :objective_state => nothing, :u => 7.800000000000523, :r => 0.0, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(284.42710514691686, 284.62710514691634)…), Dict(:i => 2.0, :bellman_term => 152.33435293439152, :noise_term => 2, :node_index => 8, :stage_objective => 0.0, :objective_state => nothing, :u => 8.099999999999682, :r => 0.0, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(284.62710514691634, 278.52710514691665)…), Dict(:i => 3.0, :bellman_term => 159.8146416178115, :noise_term => 3, :node_index => 9, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(278.52710514691665, 273.22710514691664)…), Dict(:i => 2.0, :bellman_term => 169.1277503552942, :noise_term => 2, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(273.22710514691664, 266.6271051469166)…)  …  Dict(:i => 2.0, :bellman_term => 23.607648355196886, :noise_term => 2, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(45.82710514691661, 39.52710514691661)…), Dict(:i => 8.0, :bellman_term => 7.02476439176913, :noise_term => 8, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(39.52710514691661, 39.42710514691661)…), Dict(:i => 7.0, :bellman_term => 0.9488357658774476, :noise_term => 7, :node_index => 45, :stage_objective => 0.0, :objective_state => nothing, :u => 7.799999999999997, :r => 0.0, :belief => Dict(45 => 1.0), :x => SDDP.State{Float64}(39.42710514691661, 38.62710514691661)…), Dict(:i => 1.0, :bellman_term => 
2.2163898122110934, :noise_term => 1, :node_index => 46, :stage_objective => 0.0, :objective_state => nothing, :u => 7.599999999994067, :r => 0.0, :belief => Dict(46 => 1.0), :x => SDDP.State{Float64}(38.62710514691661, 32.02710514692295)…), Dict(:i => 2.0, :bellman_term => 1.8204787800304147, :noise_term => 2, :node_index => 47, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(32.02710514692295, 26.62710514692295)…), Dict(:i => 3.0, :bellman_term => 1.1767492061388247, :noise_term => 3, :node_index => 48, :stage_objective => 0.0, :objective_state => nothing, :u => 7.3, :r => 0.0, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(26.62710514692295, 22.32710514692249)…), Dict(:i => 2.0, :bellman_term => 0.4675732663890491, :noise_term => 2, :node_index => 49, :stage_objective => 0.0, :objective_state => nothing, :u => 7.200000000000004, :r => 0.0, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(22.32710514692249, 17.127105146922492)…), Dict(:i => 8.0, :bellman_term => 0.0, :noise_term => 8, :node_index => 50, :stage_objective => 0.0, :objective_state => nothing, :u => 7.1, :r => 0.0, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(17.127105146922492, 18.027105146922494)…), Dict(:i => 8.0, :bellman_term => 0.0, :noise_term => 8, :node_index => 51, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(18.027105146922494, 19.027105146922494)…), Dict(:i => 8.0, :bellman_term => 0.0, :noise_term => 8, :node_index => 52, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(19.027105146922494, 0.0)…)]
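      Each replication printed above is a vector of per-stage dictionaries keyed by the recorded symbols (for example `:x` for the reservoir state and `:stage_objective` for the stage cost). As a minimal sketch of how these records might be queried, assuming the results are stored in a vector called `simulations` and the key names are as shown, the outgoing reservoir level at the final week of every replication could be collected with:

      # Hypothetical accessor: `:x` is an SDDP.State with fields `in` and `out`.
      final_levels = [replication[end][:x].out for replication in simulations]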

      And let's plot the use of thermal generation in each replication:

      plot = Plots.plot(data[!, :demand]; label = "Demand", xlabel = "Week")
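      Continuing from the `Plots.plot` call above, a minimal sketch of the per-replication overlay, assuming the simulation results are stored in a vector called `simulations` and that thermal generation is the `:r` value shown in the dictionaries above:

      for simulation in simulations
          # Overlay this replication's thermal generation on the demand plot.
          Plots.plot!(plot, [sim[:r] for sim in simulation]; label = "")
      end
      plot
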
      + [Dict(:i => 1.0, :bellman_term => 227.17639983640765, :noise_term => 1, :node_index => 1, :stage_objective => 71.39999999999999, :objective_state => nothing, :u => 0.0, :r => 7.0, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 301.0)…), Dict(:i => 2.0, :bellman_term => 179.3644429214168, :noise_term => 2, :node_index => 2, :stage_objective => 57.19497484807526, :objective_state => nothing, :u => 1.6004831876855408, :r => 5.499516812314929, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(301.0, 301.3995168122878)…), Dict(:i => 3.0, :bellman_term => 188.47937560045057, :noise_term => 3, :node_index => 3, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2, :r => 0.0, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(301.3995168122878, 297.1995168122878)…), Dict(:i => 7.0, :bellman_term => 145.19959656631136, :noise_term => 7, :node_index => 4, :stage_objective => 0.0, :objective_state => nothing, :u => 7.3, :r => 0.0, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(297.1995168122878, 296.8995168122878)…), Dict(:i => 3.0, :bellman_term => 153.42860909209867, :noise_term => 3, :node_index => 5, :stage_objective => 0.0, :objective_state => nothing, :u => 7.400000000008447, :r => 0.0, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(296.8995168122878, 292.49951681227935)…), Dict(:i => 0.0, :bellman_term => 183.24093903233188, :noise_term => 0, :node_index => 6, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(292.49951681227935, 284.89951681227933)…), Dict(:i => 8.0, :bellman_term => 140.22515102975103, :noise_term => 8, :node_index => 7, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(284.89951681227933, 285.0995168122793)…), Dict(:i => 2.0, :bellman_term => 147.39405309358108, :noise_term => 2, :node_index => 8, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(285.0995168122793, 278.9995168122793)…), Dict(:i => 8.0, :bellman_term => 108.30164938344342, :noise_term => 8, :node_index => 9, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(278.9995168122793, 278.6995168122793)…), Dict(:i => 2.0, :bellman_term => 115.0506415523596, :noise_term => 2, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(278.6995168122793, 272.09951681227926)…)  …  Dict(:i => 2.0, :bellman_term => 26.592238984095275, :noise_term => 2, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(45.29951681227928, 38.99951681227928)…), Dict(:i => 8.0, :bellman_term => 7.884838950576182, :noise_term => 8, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(38.99951681227928, 38.89951681227928)…), Dict(:i => 7.0, :bellman_term => 1.1221492316754875, :noise_term => 7, :node_index => 45, :stage_objective => 0.0, :objective_state => nothing, :u => 7.799999999999997, :r => 0.0, :belief => Dict(45 => 1.0), :x => SDDP.State{Float64}(38.89951681227928, 38.09951681227928)…), Dict(:i => 1.0, :bellman_term => 2.6139183823481855, :noise_term => 1, :node_index => 46, :stage_objective => 0.0, 
:objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(46 => 1.0), :x => SDDP.State{Float64}(38.09951681227928, 31.499516812279253)…), Dict(:i => 2.0, :bellman_term => 2.054985318233605, :noise_term => 2, :node_index => 47, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(31.499516812279253, 26.099516812279255)…), Dict(:i => 1.0, :bellman_term => 5.792414322094811, :noise_term => 1, :node_index => 48, :stage_objective => 0.0, :objective_state => nothing, :u => 7.300000000000679, :r => 0.0, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(26.099516812279255, 19.799516812278853)…), Dict(:i => 7.0, :bellman_term => 0.0, :noise_term => 7, :node_index => 49, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2, :r => 0.0, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(19.799516812278853, 18.09999999999999)…), Dict(:i => 8.0, :bellman_term => 0.0, :noise_term => 8, :node_index => 50, :stage_objective => 0.0, :objective_state => nothing, :u => 7.100000000000001, :r => 0.0, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(18.09999999999999, 12.000000000000004)…), Dict(:i => 3.0, :bellman_term => 0.0, :noise_term => 3, :node_index => 51, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(12.000000000000004, 6.0)…), Dict(:i => 3.0, :bellman_term => 0.0, :noise_term => 3, :node_index => 52, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(6.0, 2.0)…)]
      + [Dict(:i => 8.0, :bellman_term => 169.54614924602208, :noise_term => 8, :node_index => 1, :stage_objective => 56.7735291113664, :objective_state => nothing, :u => 1.433967734179764, :r => 5.566032265820236, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 306.56603226582024)…), Dict(:i => 2.0, :bellman_term => 178.68289906655264, :noise_term => 2, :node_index => 2, :stage_objective => 0.0, :objective_state => nothing, :u => 7.100000000002243, :r => 0.0, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(306.56603226582024, 301.46603226579253)…), Dict(:i => 3.0, :bellman_term => 187.77759509726002, :noise_term => 3, :node_index => 3, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2, :r => 0.0, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(301.46603226579253, 297.26603226579255)…), Dict(:i => 7.0, :bellman_term => 144.5472981170883, :noise_term => 7, :node_index => 4, :stage_objective => 0.0, :objective_state => nothing, :u => 7.3, :r => 0.0, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(297.26603226579255, 296.96603226579253)…), Dict(:i => 8.0, :bellman_term => 104.54806428920347, :noise_term => 8, :node_index => 5, :stage_objective => 0.0, :objective_state => nothing, :u => 7.40000000000839, :r => 0.0, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(296.96603226579253, 297.56603226578414)…), Dict(:i => 2.0, :bellman_term => 112.82487566489817, :noise_term => 2, :node_index => 6, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(297.56603226578414, 291.9660322657841)…), Dict(:i => 3.0, :bellman_term => 119.83581753868066, :noise_term => 3, :node_index => 7, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(291.9660322657841, 287.1660322657841)…), Dict(:i => 0.0, :bellman_term => 146.72419650406982, :noise_term => 0, :node_index => 8, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(287.1660322657841, 279.0660322657841)…), Dict(:i => 1.0, :bellman_term => 176.4932088735909, :noise_term => 1, :node_index => 9, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(279.0660322657841, 271.7660322657841)…), Dict(:i => 2.0, :bellman_term => 185.19400838258252, :noise_term => 2, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(271.7660322657841, 265.16603226578405)…)  …  Dict(:i => 7.0, :bellman_term => 165.5581982445113, :noise_term => 7, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(27.366032265784042, 26.06603226578404)…), Dict(:i => 3.0, :bellman_term => 177.63176084967492, :noise_term => 3, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(26.06603226578404, 20.966032265784044)…), Dict(:i => 0.0, :bellman_term => 223.61911045000213, :noise_term => 0, :node_index => 45, :stage_objective => 0.0, :objective_state => nothing, :u => 7.800000000000001, :r => 0.0, :belief => Dict(45 => 1.0), :x => SDDP.State{Float64}(20.966032265784044, 13.166032265784043)…), Dict(:i => 1.0, :bellman_term => 272.6417566463875, :noise_term => 1, :node_index => 
46, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(46 => 1.0), :x => SDDP.State{Float64}(13.166032265784043, 6.566032265781217)…), Dict(:i => 0.0, :bellman_term => 308.4448120000007, :noise_term => 0, :node_index => 47, :stage_objective => 14.594435348828704, :objective_state => nothing, :u => 6.566032265781217, :r => 0.833967734218783, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(6.566032265781217, 0.0)…), Dict(:i => 8.0, :bellman_term => 236.7306800000001, :noise_term => 8, :node_index => 48, :stage_objective => 0.0, :objective_state => nothing, :u => 7.300000000000125, :r => 0.0, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(0.0, 0.6999999999998181)…), Dict(:i => 2.0, :bellman_term => 173.6794400000001, :noise_term => 2, :node_index => 49, :stage_objective => 78.75000000000318, :objective_state => nothing, :u => 2.699999999999818, :r => 4.500000000000182, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(0.6999999999998181, 0.0)…), Dict(:i => 1.0, :bellman_term => 116.962, :noise_term => 1, :node_index => 50, :stage_objective => 107.36, :objective_state => nothing, :u => 1.0, :r => 6.1, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(0.0, 0.0)…), Dict(:i => 8.0, :bellman_term => 48.06000000000001, :noise_term => 8, :node_index => 51, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(0.0, 1.0)…), Dict(:i => 3.0, :bellman_term => 0.0, :noise_term => 3, :node_index => 52, :stage_objective => 53.400000000000006, :objective_state => nothing, :u => 4.0, :r => 3.0, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(1.0, 0.0)…)]
      + [Dict(:i => 8.0, :bellman_term => 169.54614924602208, :noise_term => 8, :node_index => 1, :stage_objective => 56.7735291113664, :objective_state => nothing, :u => 1.433967734179764, :r => 5.566032265820236, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 306.56603226582024)…), Dict(:i => 0.0, :bellman_term => 179.364442921398, :noise_term => 0, :node_index => 2, :stage_objective => 20.10823928354444, :objective_state => nothing, :u => 5.16651545350489, :r => 1.9334845464946575, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(306.56603226582024, 301.3995168122896)…), Dict(:i => 1.0, :bellman_term => 190.8347832533371, :noise_term => 1, :node_index => 3, :stage_objective => 18.833572492256692, :objective_state => nothing, :u => 5.423247878088992, :r => 1.7767521219110087, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(301.3995168122896, 296.9762689342006)…), Dict(:i => 2.0, :bellman_term => 200.13775586260044, :noise_term => 2, :node_index => 4, :stage_objective => 0.0, :objective_state => nothing, :u => 7.3, :r => 0.0, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(296.9762689342006, 291.6762689342006)…), Dict(:i => 8.0, :bellman_term => 155.77658284489053, :noise_term => 8, :node_index => 5, :stage_objective => 0.0, :objective_state => nothing, :u => 7.399999999997817, :r => 0.0, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(291.6762689342006, 292.2762689342028)…), Dict(:i => 0.0, :bellman_term => 185.70859465805734, :noise_term => 0, :node_index => 6, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(292.2762689342028, 284.67626893420277)…), Dict(:i => 1.0, :bellman_term => 210.88449940121518, :noise_term => 1, :node_index => 7, :stage_objective => 7.287858829166876, :objective_state => nothing, :u => 7.1875748883053046, :r => 0.6124251116946955, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(284.67626893420277, 278.48869404589743)…), Dict(:i => 0.0, :bellman_term => 231.7881013670535, :noise_term => 0, :node_index => 8, :stage_objective => 14.057142857149174, :objective_state => nothing, :u => 6.957142857142343, :r => 1.1428571428576564, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(278.48869404589743, 271.5315511887551)…), Dict(:i => 1.0, :bellman_term => 240.38849915744365, :noise_term => 1, :node_index => 9, :stage_objective => 27.76713106166587, :objective_state => nothing, :u => 6.113611727427885, :r => 2.186388272572116, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(271.5315511887551, 266.4179394613272)…), Dict(:i => 7.0, :bellman_term => 189.15914941104893, :noise_term => 7, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(266.4179394613272, 264.81793946132717)…)  …  Dict(:i => 7.0, :bellman_term => 58.66533335820549, :noise_term => 7, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(36.017939461327174, 34.71793946132718)…), Dict(:i => 1.0, :bellman_term => 85.31101622263878, :noise_term => 1, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(34.71793946132718, 27.617939461327175)…), Dict(:i => 0.0, :bellman_term => 119.61343570202689, :noise_term => 0, :node_index => 45, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, 
:belief => Dict(45 => 1.0), :x => SDDP.State{Float64}(27.617939461327175, 19.817939461327175)…), Dict(:i => 3.0, :bellman_term => 130.00725812815878, :noise_term => 3, :node_index => 46, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(46 => 1.0), :x => SDDP.State{Float64}(19.817939461327175, 15.217939461327136)…), Dict(:i => 7.0, :bellman_term => 71.04877748714983, :noise_term => 7, :node_index => 47, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(15.217939461327136, 14.817939461327134)…), Dict(:i => 8.0, :bellman_term => 25.918096687150893, :noise_term => 8, :node_index => 48, :stage_objective => 0.0, :objective_state => nothing, :u => 7.3, :r => 0.0, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(14.817939461327134, 15.517939461327135)…), Dict(:i => 2.0, :bellman_term => 28.17192875262381, :noise_term => 2, :node_index => 49, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2, :r => 0.0, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(15.517939461327135, 10.317939461327136)…), Dict(:i => 1.0, :bellman_term => 51.99339216805239, :noise_term => 1, :node_index => 50, :stage_objective => 0.0, :objective_state => nothing, :u => 7.1, :r => 0.0, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(10.317939461327136, 4.217939461327136)…), Dict(:i => 1.0, :bellman_term => 60.52000000000002, :noise_term => 1, :node_index => 51, :stage_objective => 31.542471534509694, :objective_state => nothing, :u => 5.217939461327136, :r => 1.782060538672864, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(4.217939461327136, 0.0)…), Dict(:i => 8.0, :bellman_term => 0.0, :noise_term => 8, :node_index => 52, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(0.0, 1.0)…)]
      + [Dict(:i => 3.0, :bellman_term => 206.46856062955294, :noise_term => 3, :node_index => 1, :stage_objective => 71.39999999999999, :objective_state => nothing, :u => 0.0, :r => 7.0, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 303.0)…), Dict(:i => 2.0, :bellman_term => 179.364442921398, :noise_term => 2, :node_index => 2, :stage_objective => 36.39497484807772, :objective_state => nothing, :u => 3.600483187685235, :r => 3.4995168123151656, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(303.0, 301.3995168122896)…), Dict(:i => 8.0, :bellman_term => 136.9337996135987, :noise_term => 8, :node_index => 3, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2, :r => 0.0, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(301.3995168122896, 302.19951681228963)…), Dict(:i => 2.0, :bellman_term => 145.19959656629408, :noise_term => 2, :node_index => 4, :stage_objective => 0.0, :objective_state => nothing, :u => 7.3, :r => 0.0, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(302.19951681228963, 296.8995168122896)…), Dict(:i => 8.0, :bellman_term => 105.10068859059311, :noise_term => 8, :node_index => 5, :stage_objective => 0.0, :objective_state => nothing, :u => 7.400000000008163, :r => 0.0, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(296.8995168122896, 297.49951681228146)…), Dict(:i => 2.0, :bellman_term => 113.45649693911446, :noise_term => 2, :node_index => 6, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(297.49951681228146, 291.89951681228143)…), Dict(:i => 1.0, :bellman_term => 140.22515102973284, :noise_term => 1, :node_index => 7, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(291.89951681228143, 285.0995168122814)…), Dict(:i => 7.0, :bellman_term => 101.85666655562818, :noise_term => 7, :node_index => 8, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(285.0995168122814, 283.9995168122814)…), Dict(:i => 8.0, :bellman_term => 68.96716737307133, :noise_term => 8, :node_index => 9, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(283.9995168122814, 283.6995168122814)…), Dict(:i => 0.0, :bellman_term => 90.59157642941136, :noise_term => 0, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(283.6995168122814, 275.09951681228137)…)  …  Dict(:i => 2.0, :bellman_term => 55.958246169739255, :noise_term => 2, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(41.29951681228138, 34.999516812281385)…), Dict(:i => 3.0, :bellman_term => 61.2532527964442, :noise_term => 3, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(34.999516812281385, 29.899516812281384)…), Dict(:i => 2.0, :bellman_term => 66.78074400244668, :noise_term => 2, :node_index => 45, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(45 => 1.0), :x => SDDP.State{Float64}(29.899516812281384, 24.099516812281383)…), Dict(:i => 1.0, :bellman_term => 98.01353105551004, :noise_term => 1, :node_index => 46, :stage_objective => 0.0, 
:objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(46 => 1.0), :x => SDDP.State{Float64}(24.099516812281383, 17.499516812280305)…), Dict(:i => 7.0, :bellman_term => 46.865007418233205, :noise_term => 7, :node_index => 47, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(17.499516812280305, 17.099516812280307)…), Dict(:i => 3.0, :bellman_term => 51.03012740831247, :noise_term => 3, :node_index => 48, :stage_objective => 0.0, :objective_state => nothing, :u => 7.3, :r => 0.0, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(17.099516812280307, 12.799516812280306)…), Dict(:i => 2.0, :bellman_term => 55.22061930506759, :noise_term => 2, :node_index => 49, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2, :r => 0.0, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(12.799516812280306, 7.599516812280306)…), Dict(:i => 8.0, :bellman_term => 12.017838244665477, :noise_term => 8, :node_index => 50, :stage_objective => 0.0, :objective_state => nothing, :u => 7.1, :r => 0.0, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(7.599516812280306, 8.499516812280307)…), Dict(:i => 1.0, :bellman_term => 29.376020518987385, :noise_term => 1, :node_index => 51, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(8.499516812280307, 2.499516812280307)…), Dict(:i => 8.0, :bellman_term => 0.0, :noise_term => 8, :node_index => 52, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(2.499516812280307, 3.499516812280307)…)]
      + [Dict(:i => 3.0, :bellman_term => 206.46856062955294, :noise_term => 3, :node_index => 1, :stage_objective => 71.39999999999999, :objective_state => nothing, :u => 0.0, :r => 7.0, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 303.0)…), Dict(:i => 7.0, :bellman_term => 163.98989397062522, :noise_term => 7, :node_index => 2, :stage_objective => 0.0, :objective_state => nothing, :u => 7.100000000002243, :r => 0.0, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(303.0, 302.8999999999726)…), Dict(:i => 1.0, :bellman_term => 190.83478325333073, :noise_term => 1, :node_index => 3, :stage_objective => 2.9284507028237043, :objective_state => nothing, :u => 6.9237310657713484, :r => 0.27626893422865134, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(302.8999999999726, 296.97626893420124)…), Dict(:i => 2.0, :bellman_term => 200.1377558625968, :noise_term => 2, :node_index => 4, :stage_objective => 0.0, :objective_state => nothing, :u => 7.3, :r => 0.0, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(296.97626893420124, 291.67626893420123)…), Dict(:i => 1.0, :bellman_term => 210.70294646481034, :noise_term => 1, :node_index => 5, :stage_objective => 21.237685551916957, :objective_state => nothing, :u => 5.503778075731759, :r => 1.8962219242782998, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(291.67626893420123, 287.1724908584695)…), Dict(:i => 0.0, :bellman_term => 235.28844479107102, :noise_term => 0, :node_index => 6, :stage_objective => 8.90050680380128, :objective_state => nothing, :u => 6.8260428866259755, :r => 0.7739571133740243, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(287.1724908584695, 280.3464479718435)…), Dict(:i => 8.0, :bellman_term => 186.73461804448152, :noise_term => 8, :node_index => 7, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(280.3464479718435, 280.54644797184346)…), Dict(:i => 2.0, :bellman_term => 197.16044042649446, :noise_term => 2, :node_index => 8, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(280.54644797184346, 274.44644797184344)…), Dict(:i => 1.0, :bellman_term => 231.36974764022125, :noise_term => 1, :node_index => 9, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(274.44644797184344, 267.14644797184343)…), Dict(:i => 2.0, :bellman_term => 243.2502788953493, :noise_term => 2, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(267.14644797184343, 260.5464479718434)…)  …  Dict(:i => 0.0, :bellman_term => 222.46131666629628, :noise_term => 0, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(30.74644797184341, 22.44644797184341)…), Dict(:i => 1.0, :bellman_term => 270.2459705671971, :noise_term => 1, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(22.44644797184341, 15.346447971843409)…), Dict(:i => 7.0, :bellman_term => 200.63262139850866, :noise_term => 7, :node_index => 45, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(45 => 1.0), :x => SDDP.State{Float64}(15.346447971843409, 14.54644797184341)…), Dict(:i => 8.0, :bellman_term => 
133.99656559361358, :noise_term => 8, :node_index => 46, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(46 => 1.0), :x => SDDP.State{Float64}(14.54644797184341, 14.94644797184294)…), Dict(:i => 7.0, :bellman_term => 74.19905884944365, :noise_term => 7, :node_index => 47, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(14.94644797184294, 14.54644797184294)…), Dict(:i => 3.0, :bellman_term => 82.16561668870209, :noise_term => 3, :node_index => 48, :stage_objective => 0.0, :objective_state => nothing, :u => 7.3, :r => 0.0, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(14.54644797184294, 10.246447971842937)…), Dict(:i => 2.0, :bellman_term => 91.18935378959192, :noise_term => 2, :node_index => 49, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2, :r => 0.0, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(10.246447971842937, 5.0464479718429365)…), Dict(:i => 3.0, :bellman_term => 101.69768711011712, :noise_term => 3, :node_index => 50, :stage_objective => 0.0, :objective_state => nothing, :u => 7.1, :r => 0.0, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(5.0464479718429365, 0.9464479718429377)…), Dict(:i => 1.0, :bellman_term => 60.519999999999996, :noise_term => 1, :node_index => 51, :stage_objective => 89.44787089838, :objective_state => nothing, :u => 1.9464479718429377, :r => 5.053552028157062, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(0.9464479718429377, 0.0)…), Dict(:i => 3.0, :bellman_term => 0.0, :noise_term => 3, :node_index => 52, :stage_objective => 71.2, :objective_state => nothing, :u => 3.0, :r => 4.0, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(0.0, 0.0)…)]
      + [Dict(:i => 1.0, :bellman_term => 227.17639983640765, :noise_term => 1, :node_index => 1, :stage_objective => 71.39999999999999, :objective_state => nothing, :u => 0.0, :r => 7.0, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 301.0)…), Dict(:i => 2.0, :bellman_term => 179.3644429214168, :noise_term => 2, :node_index => 2, :stage_objective => 57.19497484807899, :objective_state => nothing, :u => 1.6004831876855408, :r => 5.499516812315288, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(301.0, 301.3995168122878)…), Dict(:i => 1.0, :bellman_term => 190.83478325333127, :noise_term => 1, :node_index => 3, :stage_objective => 18.833572492281693, :objective_state => nothing, :u => 5.423247878086633, :r => 1.7767521219133673, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(301.3995168122878, 296.9762689342012)…), Dict(:i => 7.0, :bellman_term => 147.38892606788158, :noise_term => 7, :node_index => 4, :stage_objective => 0.0, :objective_state => nothing, :u => 7.3, :r => 0.0, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(296.9762689342012, 296.6762689342012)…), Dict(:i => 3.0, :bellman_term => 155.77658284483732, :noise_term => 3, :node_index => 5, :stage_objective => 0.0, :objective_state => nothing, :u => 7.39999999999344, :r => 0.0, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(296.6762689342012, 292.27626893420774)…), Dict(:i => 0.0, :bellman_term => 185.70859465800277, :noise_term => 0, :node_index => 6, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(292.27626893420774, 284.6762689342077)…), Dict(:i => 3.0, :bellman_term => 194.44636061146684, :noise_term => 3, :node_index => 7, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(284.6762689342077, 279.8762689342077)…), Dict(:i => 2.0, :bellman_term => 205.0841335673581, :noise_term => 2, :node_index => 8, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(279.8762689342077, 273.7762689342077)…), Dict(:i => 3.0, :bellman_term => 215.38659538593492, :noise_term => 3, :node_index => 9, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(273.7762689342077, 268.47626893420767)…), Dict(:i => 2.0, :bellman_term => 226.26534420357984, :noise_term => 2, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(268.47626893420767, 261.87626893420764)…)  …  Dict(:i => 7.0, :bellman_term => 182.98435376132306, :noise_term => 7, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(26.23204975944006, 24.93204975944006)…), Dict(:i => 8.0, :bellman_term => 120.93288488926271, :noise_term => 8, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(24.93204975944006, 24.83204975944006)…), Dict(:i => 7.0, :bellman_term => 67.51825812189287, :noise_term => 7, :node_index => 45, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(45 => 1.0), :x => SDDP.State{Float64}(24.83204975944006, 24.03204975944006)…), Dict(:i => 3.0, :bellman_term => 73.56706878970937, :noise_term => 3, :node_index => 
46, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(46 => 1.0), :x => SDDP.State{Float64}(24.03204975944006, 19.43204975943931)…), Dict(:i => 2.0, :bellman_term => 80.61156160236649, :noise_term => 2, :node_index => 47, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(19.43204975943931, 14.032049759439309)…), Dict(:i => 3.0, :bellman_term => 89.0241600704294, :noise_term => 3, :node_index => 48, :stage_objective => 0.0, :objective_state => nothing, :u => 7.3, :r => 0.0, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(14.032049759439309, 9.73204975943931)…), Dict(:i => 0.0, :bellman_term => 130.51037489348525, :noise_term => 0, :node_index => 49, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2, :r => 0.0, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(9.73204975943931, 2.5320497594393094)…), Dict(:i => 3.0, :bellman_term => 116.96200000000002, :noise_term => 3, :node_index => 50, :stage_objective => 27.59592423386815, :objective_state => nothing, :u => 5.532049759439309, :r => 1.5679502405606902, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(2.5320497594393094, 0.0)…), Dict(:i => 8.0, :bellman_term => 48.06000000000001, :noise_term => 8, :node_index => 51, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(0.0, 1.0)…), Dict(:i => 3.0, :bellman_term => 0.0, :noise_term => 3, :node_index => 52, :stage_objective => 53.400000000000006, :objective_state => nothing, :u => 4.0, :r => 3.0, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(1.0, 0.0)…)]
      + [Dict(:i => 8.0, :bellman_term => 169.54614924602208, :noise_term => 8, :node_index => 1, :stage_objective => 56.7735291113664, :objective_state => nothing, :u => 1.433967734179764, :r => 5.566032265820236, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 306.56603226582024)…), Dict(:i => 7.0, :bellman_term => 127.7378149608162, :noise_term => 7, :node_index => 2, :stage_objective => 0.0, :objective_state => nothing, :u => 7.099999999998326, :r => 0.0, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(306.56603226582024, 306.4660322657949)…), Dict(:i => 3.0, :bellman_term => 136.2762448203457, :noise_term => 3, :node_index => 3, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2, :r => 0.0, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(306.4660322657949, 302.26603226579493)…), Dict(:i => 0.0, :bellman_term => 165.07941579646103, :noise_term => 0, :node_index => 4, :stage_objective => 0.0, :objective_state => nothing, :u => 7.3, :r => 0.0, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(302.26603226579493, 294.9660322657949)…), Dict(:i => 3.0, :bellman_term => 174.10489018277985, :noise_term => 3, :node_index => 5, :stage_objective => 0.0, :objective_state => nothing, :u => 7.399999999995487, :r => 0.0, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(294.9660322657949, 290.56603226579944)…), Dict(:i => 2.0, :bellman_term => 182.5057148748133, :noise_term => 2, :node_index => 6, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(290.56603226579944, 284.9660322657994)…), Dict(:i => 3.0, :bellman_term => 191.11205724909405, :noise_term => 3, :node_index => 7, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(284.9660322657994, 280.1660322657994)…), Dict(:i => 2.0, :bellman_term => 201.63700499538845, :noise_term => 2, :node_index => 8, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(280.1660322657994, 274.0660322657994)…), Dict(:i => 1.0, :bellman_term => 236.0791980811341, :noise_term => 1, :node_index => 9, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(274.0660322657994, 266.76603226579937)…), Dict(:i => 2.0, :bellman_term => 248.10908027218602, :noise_term => 2, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(266.76603226579937, 260.16603226579934)…)  …  Dict(:i => 7.0, :bellman_term => 12.466592011384364, :noise_term => 7, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(43.36603226579936, 42.066032265799365)…), Dict(:i => 8.0, :bellman_term => 2.3085742448515134, :noise_term => 8, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(42.066032265799365, 41.96603226579936)…), Dict(:i => 7.0, :bellman_term => 0.1387317206090577, :noise_term => 7, :node_index => 45, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(45 => 1.0), :x => SDDP.State{Float64}(41.96603226579936, 41.166032265799366)…), Dict(:i => 1.0, :bellman_term => 0.43033864741353123, :noise_term => 1, :node_index => 46, 
:stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(46 => 1.0), :x => SDDP.State{Float64}(41.166032265799366, 34.56603226579937)…), Dict(:i => 2.0, :bellman_term => 0.2304177762092081, :noise_term => 2, :node_index => 47, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(34.56603226579937, 29.166032265799373)…), Dict(:i => 1.0, :bellman_term => 0.6846332968079878, :noise_term => 1, :node_index => 48, :stage_objective => 0.0, :objective_state => nothing, :u => 7.3, :r => 0.0, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(29.166032265799373, 22.866032265799372)…), Dict(:i => 0.0, :bellman_term => 2.004024465284115, :noise_term => 0, :node_index => 49, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2, :r => 0.0, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(22.866032265799372, 15.666032265799373)…), Dict(:i => 3.0, :bellman_term => 0.6952163101894087, :noise_term => 3, :node_index => 50, :stage_objective => 0.0, :objective_state => nothing, :u => 7.1, :r => 0.0, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(15.666032265799373, 11.566032265799373)…), Dict(:i => 8.0, :bellman_term => 0.0, :noise_term => 8, :node_index => 51, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(11.566032265799373, 5.999999999999998)…), Dict(:i => 3.0, :bellman_term => 0.0, :noise_term => 3, :node_index => 52, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(5.999999999999998, 0.0)…)]
      + [Dict(:i => 3.0, :bellman_term => 206.46856062955294, :noise_term => 3, :node_index => 1, :stage_objective => 71.39999999999999, :objective_state => nothing, :u => 0.0, :r => 7.0, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 303.0)…), Dict(:i => 2.0, :bellman_term => 179.36444292143565, :noise_term => 2, :node_index => 2, :stage_objective => 36.394974848063605, :objective_state => nothing, :u => 3.600483187685235, :r => 3.499516812313808, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(303.0, 301.39951681228604)…), Dict(:i => 1.0, :bellman_term => 190.83478325332504, :noise_term => 1, :node_index => 3, :stage_objective => 18.83357249230669, :objective_state => nothing, :u => 5.4232478780842746, :r => 1.7767521219157256, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(301.39951681228604, 296.97626893420176)…), Dict(:i => 7.0, :bellman_term => 147.38892606787567, :noise_term => 7, :node_index => 4, :stage_objective => 0.0, :objective_state => nothing, :u => 7.3, :r => 0.0, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(296.97626893420176, 296.67626893420174)…), Dict(:i => 1.0, :bellman_term => 177.21023314192053, :noise_term => 1, :node_index => 5, :stage_objective => 0.0, :objective_state => nothing, :u => 7.399999999995487, :r => 0.0, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(296.67626893420174, 290.27626893420626)…), Dict(:i => 7.0, :bellman_term => 134.56814218395812, :noise_term => 7, :node_index => 6, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(290.27626893420626, 289.67626893420623)…), Dict(:i => 1.0, :bellman_term => 162.57313838567234, :noise_term => 1, :node_index => 7, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(289.67626893420623, 282.8762689342062)…), Dict(:i => 2.0, :bellman_term => 171.49446228457464, :noise_term => 2, :node_index => 8, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(282.8762689342062, 276.7762689342062)…), Dict(:i => 3.0, :bellman_term => 179.91894991216077, :noise_term => 3, :node_index => 9, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(276.7762689342062, 271.4762689342062)…), Dict(:i => 7.0, :bellman_term => 136.02499927958706, :noise_term => 7, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(271.4762689342062, 269.87626893420617)…)  …  Dict(:i => 2.0, :bellman_term => 88.72293042034033, :noise_term => 2, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(38.07626893420617, 31.77626893420617)…), Dict(:i => 3.0, :bellman_term => 96.67021622669938, :noise_term => 3, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(31.77626893420617, 26.67626893420617)…), Dict(:i => 2.0, :bellman_term => 105.15810314175172, :noise_term => 2, :node_index => 45, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(45 => 1.0), :x => SDDP.State{Float64}(26.67626893420617, 20.87626893420617)…), Dict(:i => 3.0, :bellman_term => 114.87273518748997, :noise_term => 3, 
:node_index => 46, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(46 => 1.0), :x => SDDP.State{Float64}(20.87626893420617, 16.2762689342046)…), Dict(:i => 0.0, :bellman_term => 156.555890628988, :noise_term => 0, :node_index => 47, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(16.2762689342046, 8.876268934204601)…), Dict(:i => 3.0, :bellman_term => 169.9715590034566, :noise_term => 3, :node_index => 48, :stage_objective => 0.0, :objective_state => nothing, :u => 7.3, :r => 0.0, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(8.876268934204601, 4.5762689342046015)…), Dict(:i => 7.0, :bellman_term => 100.85081815820993, :noise_term => 7, :node_index => 49, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2, :r => 0.0, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(4.5762689342046015, 4.376268934204602)…), Dict(:i => 3.0, :bellman_term => 112.50633462914817, :noise_term => 3, :node_index => 50, :stage_objective => 0.0, :objective_state => nothing, :u => 7.1, :r => 0.0, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(4.376268934204602, 0.2762689342046025)…), Dict(:i => 8.0, :bellman_term => 44.617689079810674, :noise_term => 8, :node_index => 51, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(0.2762689342046025, 1.2762689342046016)…), Dict(:i => 1.0, :bellman_term => 0.0, :noise_term => 1, :node_index => 52, :stage_objective => 84.08241297115809, :objective_state => nothing, :u => 2.2762689342046016, :r => 4.723731065795398, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(1.2762689342046016, 0.0)…)]
      + [Dict(:i => 1.0, :bellman_term => 227.17639983640765, :noise_term => 1, :node_index => 1, :stage_objective => 71.39999999999999, :objective_state => nothing, :u => 0.0, :r => 7.0, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}(300.0, 301.0)…), Dict(:i => 2.0, :bellman_term => 179.3644429214545, :noise_term => 2, :node_index => 2, :stage_objective => 57.19497484808695, :objective_state => nothing, :u => 1.6004831876855408, :r => 5.4995168123160525, :belief => Dict(2 => 1.0), :x => SDDP.State{Float64}(301.0, 301.3995168122842)…), Dict(:i => 3.0, :bellman_term => 188.4793756004883, :noise_term => 3, :node_index => 3, :stage_objective => 0.0, :objective_state => nothing, :u => 7.2, :r => 0.0, :belief => Dict(3 => 1.0), :x => SDDP.State{Float64}(301.3995168122842, 297.19951681228423)…), Dict(:i => 0.0, :bellman_term => 200.50899488731602, :noise_term => 0, :node_index => 4, :stage_objective => 18.994316050006102, :objective_state => nothing, :u => 5.557402197247146, :r => 1.7425978027528535, :belief => Dict(4 => 1.0), :x => SDDP.State{Float64}(297.19951681228423, 291.6421146150371)…), Dict(:i => 1.0, :bellman_term => 210.70294646482898, :noise_term => 1, :node_index => 5, :stage_objective => 21.62021392653377, :objective_state => nothing, :u => 5.469623756569263, :r => 1.9303762434405152, :belief => Dict(5 => 1.0), :x => SDDP.State{Float64}(291.6421146150371, 287.1724908584678)…), Dict(:i => 2.0, :bellman_term => 221.20246104080434, :noise_term => 2, :node_index => 6, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(6 => 1.0), :x => SDDP.State{Float64}(287.1724908584678, 281.5724908584678)…), Dict(:i => 3.0, :bellman_term => 210.884499401192, :noise_term => 3, :node_index => 7, :stage_objective => 20.422817930436253, :objective_state => nothing, :u => 6.083796812568382, :r => 1.716203187431618, :belief => Dict(7 => 1.0), :x => SDDP.State{Float64}(281.5724908584678, 278.4886940458994)…), Dict(:i => 2.0, :bellman_term => 221.59122235930772, :noise_term => 2, :node_index => 8, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(8 => 1.0), :x => SDDP.State{Float64}(278.4886940458994, 272.3886940458994)…), Dict(:i => 3.0, :bellman_term => 232.08472666889975, :noise_term => 3, :node_index => 9, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(9 => 1.0), :x => SDDP.State{Float64}(272.3886940458994, 267.0886940458994)…), Dict(:i => 2.0, :bellman_term => 243.98793207432254, :noise_term => 2, :node_index => 10, :stage_objective => 0.0, :objective_state => nothing, :u => 8.6, :r => 0.0, :belief => Dict(10 => 1.0), :x => SDDP.State{Float64}(267.0886940458994, 260.48869404589936)…)  …  Dict(:i => 2.0, :bellman_term => 160.59978457572458, :noise_term => 2, :node_index => 43, :stage_objective => 0.0, :objective_state => nothing, :u => 8.3, :r => 0.0, :belief => Dict(43 => 1.0), :x => SDDP.State{Float64}(32.68869404589937, 26.388694045899367)…), Dict(:i => 3.0, :bellman_term => 172.69526648558167, :noise_term => 3, :node_index => 44, :stage_objective => 0.0, :objective_state => nothing, :u => 8.1, :r => 0.0, :belief => Dict(44 => 1.0), :x => SDDP.State{Float64}(26.388694045899367, 21.28869404589937)…), Dict(:i => 0.0, :bellman_term => 218.2461916666241, :noise_term => 0, :node_index => 45, :stage_objective => 0.0, :objective_state => nothing, :u => 7.8, :r => 0.0, :belief => Dict(45 => 1.0), :x => SDDP.State{Float64}(21.28869404589937, 13.488694045899368)…), Dict(:i => 
1.0, :bellman_term => 267.038388102336, :noise_term => 1, :node_index => 46, :stage_objective => 0.0, :objective_state => nothing, :u => 7.6, :r => 0.0, :belief => Dict(46 => 1.0), :x => SDDP.State{Float64}(13.488694045899368, 6.888694045899369)…), Dict(:i => 2.0, :bellman_term => 282.4384346065088, :noise_term => 2, :node_index => 47, :stage_objective => 0.0, :objective_state => nothing, :u => 7.4, :r => 0.0, :belief => Dict(47 => 1.0), :x => SDDP.State{Float64}(6.888694045899369, 1.4886940458993685)…), Dict(:i => 8.0, :bellman_term => 210.83109556258486, :noise_term => 8, :node_index => 48, :stage_objective => 0.0, :objective_state => nothing, :u => 7.3, :r => 0.0, :belief => Dict(48 => 1.0), :x => SDDP.State{Float64}(1.4886940458993685, 2.1886940458993687)…), Dict(:i => 2.0, :bellman_term => 173.67944000000003, :noise_term => 2, :node_index => 49, :stage_objective => 52.69785419676105, :objective_state => nothing, :u => 4.188694045899369, :r => 3.0113059541006315, :belief => Dict(49 => 1.0), :x => SDDP.State{Float64}(2.1886940458993687, 0.0)…), Dict(:i => 1.0, :bellman_term => 116.962, :noise_term => 1, :node_index => 50, :stage_objective => 107.36, :objective_state => nothing, :u => 1.0, :r => 6.1, :belief => Dict(50 => 1.0), :x => SDDP.State{Float64}(0.0, 0.0)…), Dict(:i => 8.0, :bellman_term => 48.06000000000001, :noise_term => 8, :node_index => 51, :stage_objective => 0.0, :objective_state => nothing, :u => 7.0, :r => 0.0, :belief => Dict(51 => 1.0), :x => SDDP.State{Float64}(0.0, 1.0)…), Dict(:i => 1.0, :bellman_term => 0.0, :noise_term => 1, :node_index => 52, :stage_objective => 89.0, :objective_state => nothing, :u => 2.0, :r => 5.0, :belief => Dict(52 => 1.0), :x => SDDP.State{Float64}(1.0, 0.0)…)]

      And let's plot the use of thermal generation in each replication:

      plot = Plots.plot(data[!, :demand]; label = "Demand", xlabel = "Week")
       for simulation in simulations
           Plots.plot!(plot, [sim[:r] for sim in simulation], label = "")
       end
       plot
[Plot: demand and the simulated thermal generation (:r) of each replication, x-axis "Week".]

Viewing and interpreting static plots like this is difficult, particularly as the number of simulations grows. SDDP.jl includes an interactive SpaghettiPlot that makes things easier:

      plot = SDDP.SpaghettiPlot(simulations)
       SDDP.add_spaghetti(plot; title = "Storage") do sim
           return sim[:x].out
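end
# Hedged sketch of how this truncated example might continue: the extra
# spaghetti series for thermal generation and the SDDP.plot call that writes an
# interactive HTML file are assumptions based on the surrounding prose, not the
# verbatim tutorial code.
SDDP.add_spaghetti(plot; title = "Thermal generation") do sim
    return sim[:r]
end
SDDP.plot(plot, "spaghetti_plot.html")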
      @@ -823,4 +821,4 @@ 

      Higher fidelity modeling

      Our model is very basic. There are many aspects that we could improve:

      • Instead of hard-coding a terminal value function, can you solve an infinite horizon model? What are the differences?

      • Can you add a second reservoir to make a river chain?

      • Can you modify the problem and data to use proper units, including a conversion between the volume of water flowing through the turbine and the electrical power output?

      • Can you add random demand or cost data as well as inflows?

      Algorithmic considerations

      The algorithm implemented by SDDP.jl has a number of tuneable parameters:

      • Try using a different lower bound. What happens if it is too low, or too high?

      • Was our stopping rule correct? What happens if we use fewer or more iterations? What other stopping rules could you try?

      • Can you add a risk measure to make the policy risk-averse?


      This page was generated using Literate.jl.

      +end


      diff --git a/dev/tutorial/first_steps/index.html b/dev/tutorial/first_steps/index.html index 6e81bc673..8ebf36a2d 100644 --- a/dev/tutorial/first_steps/index.html +++ b/dev/tutorial/first_steps/index.html @@ -224,14 +224,14 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 1.750000e+04 2.500000e+03 4.281044e-03 12 1 - 10 5.000000e+03 8.333333e+03 3.327012e-02 201 1 + 1 1.750000e+04 3.437500e+03 3.243208e-03 12 1 + 10 1.500000e+04 8.333333e+03 2.025819e-02 201 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 3.327012e-02 +total time (s) : 2.025819e-02 total solves : 201 best bound : 8.333333e+03 -simulation ci : 1.062500e+04 ± 4.215930e+03 +simulation ci : 9.656250e+03 ± 3.622966e+03 numeric issues : 0 -------------------------------------------------------------------

      There's a lot going on in this printout! Let's break it down.

      The first section, "problem," gives some problem statistics. In this example there are 3 nodes, 1 state variable, and 27 scenarios ($3^3$). We haven't solved this problem before so there are no existing cuts.

      The "options" section lists some options we are using to solve the problem. For more information on the numerical stability report, read the Numerical stability report section.

      The "subproblem structure" section also needs explaining. This looks at all of the nodes in the policy graph and reports the minimum and maximum number of variables and each constraint type in the corresponding subproblem. In this case each subproblem has 7 variables and various numbers of different constraint types. Note that the exact numbers may not correspond to the formulation as you wrote it, because SDDP.jl adds some extra variables for the cost-to-go function.

      Then comes the iteration log, which is the main part of the printout. It has the following columns:

      • iteration: the SDDP iteration
      • simulation: the cost of the single forward pass simulation for that iteration. This value is stochastic and is not guaranteed to improve over time. However, it's useful to check that the units are reasonable, and that it is not deterministic if you intended for the problem to be stochastic, etc.
      • bound: this is a lower bound (upper if maximizing) for the value of the optimal policy. This should be monotonically improving (increasing if minimizing, decreasing if maximizing).
      • time (s): the total number of seconds spent solving so far
      • solves: the total number of subproblem solves to date. This can be very large!
      • pid: the ID of the processor used to solve that iteration. This should be 1 unless you are using parallel computation.

      In addition, if the first character of a line is , then SDDP.jl experienced numerical issues during the solve, but successfully recovered.

      The printout finishes with some summary statistics:

      • status: why did the solver stop?
      • total time (s), best bound, and total solves are the values from the last iteration of the solve.
      • simulation ci: a confidence interval that estimates the quality of the policy from the Simulation column.
      • numeric issues: the number of iterations that experienced numerical issues.
      Warning

      The simulation ci result can be misleading if you run a small number of iterations, or if the initial simulations are very bad. On a more technical note, it is an in-sample simulation, which may not reflect the true performance of the policy. See Obtaining bounds for more details.

      Obtaining the decision rule

      After training a policy, we can create a decision rule using SDDP.DecisionRule:

      rule = SDDP.DecisionRule(model; node = 1)
      A decision rule for node 1

      Then, to evaluate the decision rule, we use SDDP.evaluate:

      solution = SDDP.evaluate(
           rule;
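     # Hedged sketch of how this truncated call might be completed; the keyword
     # names and example values below are assumptions, not the verbatim tutorial
     # code.
     incoming_state = Dict(:volume => 150.0),
     noise = 50.0,
)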
      @@ -250,10 +250,10 @@
       replication = 1
       stage = 2
       simulations[replication][stage]
      Dict{Symbol, Any} with 10 entries:
      -  :volume             => State{Float64}(200.0, 100.0)
      +  :volume             => State{Float64}(200.0, 150.0)
         :hydro_spill        => 0.0
      -  :bellman_term       => 2500.0
      -  :noise_term         => 50.0
      +  :bellman_term       => 0.0
      +  :noise_term         => 100.0
         :node_index         => 2
         :stage_objective    => 0.0
         :objective_state    => nothing
      @@ -263,18 +263,18 @@
           return node[:volume].out
       end
      3-element Vector{Float64}:
        200.0
- 100.0
-   0.0
+ 150.0
+  50.0

Another is :thermal_generation.

thermal_generation = map(simulations[1]) do node
           return node[:thermal_generation]
       end
      3-element Vector{Float64}:
      - 100.0
      + 150.0
          0.0
          0.0

      Obtaining bounds

      Because the optimal policy is stochastic, one common approach to quantify the quality of the policy is to construct a confidence interval for the expected cost by summing the stage objectives along each simulation.

      objectives = map(simulations) do simulation
           return sum(stage[:stage_objective] for stage in simulation)
       end
       
       μ, ci = SDDP.confidence_interval(objectives)
      -println("Confidence interval: ", μ, " ± ", ci)
      Confidence interval: 8250.0 ± 943.4339811389439

      This confidence interval is an estimate for an upper bound of the policy's quality. We can calculate the lower bound using SDDP.calculate_bound.

      println("Lower bound: ", SDDP.calculate_bound(model))
      Lower bound: 8333.333333333332
      Tip

      The upper- and lower-bounds are reversed if maximizing, i.e., SDDP.calculate_bound. returns an upper bound.

      Custom recorders

      In addition to simulating the primal values of variables, we can also pass custom recorder functions. Each of these functions takes one argument, the JuMP subproblem corresponding to each node. This function gets called after we have solved each node as we traverse the policy graph in the simulation.

      For example, the dual of the demand constraint (which we named demand_constraint) corresponds to the price we should charge for electricity, since it represents the cost of each additional unit of demand. To calculate this, we can go:

      simulations = SDDP.simulate(
      +println("Confidence interval: ", μ, " ± ", ci)
      Confidence interval: 7650.0 ± 907.5862315376802

      This confidence interval is an estimate for an upper bound of the policy's quality. We can calculate the lower bound using SDDP.calculate_bound.

      println("Lower bound: ", SDDP.calculate_bound(model))
      Lower bound: 8333.333333333332
      Tip

The upper- and lower-bounds are reversed if maximizing, i.e., SDDP.calculate_bound returns an upper bound.

      Custom recorders

      In addition to simulating the primal values of variables, we can also pass custom recorder functions. Each of these functions takes one argument, the JuMP subproblem corresponding to each node. This function gets called after we have solved each node as we traverse the policy graph in the simulation.

      For example, the dual of the demand constraint (which we named demand_constraint) corresponds to the price we should charge for electricity, since it represents the cost of each additional unit of demand. To calculate this, we can go:

      simulations = SDDP.simulate(
           model,
           1,  ## Perform a single simulation
           custom_recorders = Dict{Symbol,Function}(
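        # Hedged sketch of how this truncated call might continue: recording the
        # dual of the demand_constraint named in the prose above. The :price key
        # is an illustrative choice, not the verbatim tutorial code.
        :price => (sp::JuMP.Model) -> JuMP.dual(sp[:demand_constraint]),
    ),
)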
      @@ -287,4 +287,4 @@
       end
      3-element Vector{Float64}:
         50.0
         50.0
 150.0

      Extracting the marginal water values

      Finally, we can use SDDP.ValueFunction and SDDP.evaluate to obtain and evaluate the value function at different points in the state-space.

      Note

      By "value function" we mean $\mathbb{E}_{j \in i^+, \varphi \in \Omega_j}[V_j(x^\prime, \varphi)]$, note the function $V_i(x, \omega)$.

      First, we construct a value function from the first subproblem:

      V = SDDP.ValueFunction(model; node = 1)
      A value function for node 1

      Then we can evaluate V at a point:

      cost, price = SDDP.evaluate(V, Dict("volume" => 10))
      (21499.999999999993, Dict(:volume => -99.99999999999999))

      This returns the cost-to-go (cost), and the gradient of the cost-to-go function with respect to each state variable. Note that since we are minimizing, the price has a negative sign: each additional unit of water leads to a decrease in the expected long-run cost.
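
To see how the marginal water value changes with the volume in storage, here is a minimal hedged sketch that sweeps SDDP.evaluate over a grid of volumes; the grid itself is illustrative only:

for volume in 0:50:200
    # Evaluate the cost-to-go and its gradient at this storage level.
    cost, price = SDDP.evaluate(V, Dict("volume" => volume))
    println("volume = $volume: cost-to-go = $cost, marginal value = $(price[:volume])")
end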


      This page was generated using Literate.jl.

      diff --git a/dev/tutorial/markov_uncertainty/index.html b/dev/tutorial/markov_uncertainty/index.html index a329a0004..28a9e1d06 100644 --- a/dev/tutorial/markov_uncertainty/index.html +++ b/dev/tutorial/markov_uncertainty/index.html @@ -81,14 +81,14 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 3.750000e+04 1.991887e+03 9.750128e-03 18 1 - 40 2.750000e+04 8.072917e+03 3.397300e-01 1320 1 + 1 1.562500e+04 1.991887e+03 4.980087e-03 18 1 + 40 1.875000e+03 8.072917e+03 1.440580e-01 1320 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 3.397300e-01 +total time (s) : 1.440580e-01 total solves : 1320 best bound : 8.072917e+03 -simulation ci : 9.772342e+03 ± 2.698713e+03 +simulation ci : 7.827991e+03 ± 2.451185e+03 numeric issues : 0 -------------------------------------------------------------------

      Instead of performing a Monte Carlo simulation like the previous tutorials, we may want to simulate one particular sequence of noise realizations. This historical simulation can also be conducted by passing a SDDP.Historical sampling scheme to the sampling_scheme keyword of the SDDP.simulate function.

      We can confirm that the historical sequence of nodes was visited by querying the :node_index key of the simulation results.

      simulations = SDDP.simulate(
           model,
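     # Hedged sketch of how this truncated call might continue; the node/noise
     # pairs are placeholders (assuming Ω is the vector of noise terms defined
     # earlier in this tutorial), not the verbatim tutorial data.
     sampling_scheme = SDDP.Historical([
         ((1, 1), Ω[1]),
         ((2, 2), Ω[3]),
         ((3, 1), Ω[1]),
     ]),
)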
      @@ -102,4 +102,4 @@
       [stage[:node_index] for stage in simulations[1]]
      3-element Vector{Tuple{Int64, Int64}}:
        (1, 1)
        (2, 2)
 (3, 1)

      This page was generated using Literate.jl.

      diff --git a/dev/tutorial/mdps/index.html b/dev/tutorial/mdps/index.html index 822f7a7d6..d97db0d0a 100644 --- a/dev/tutorial/mdps/index.html +++ b/dev/tutorial/mdps/index.html @@ -57,22 +57,22 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 2.499895e+01 1.562631e+00 3.831697e-02 6 1 - 24 8.333333e+00 8.333333e+00 1.041407e+00 150 1 - 40 8.333333e+00 8.333333e+00 1.723777e+00 246 1 + 1 2.499895e+01 1.562631e+00 2.783608e-02 6 1 + 29 8.333333e+00 8.333333e+00 1.030540e+00 180 1 + 40 8.333333e+00 8.333333e+00 1.413587e+00 246 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 1.723777e+00 +total time (s) : 1.413587e+00 total solves : 246 best bound : 8.333333e+00 simulation ci : 8.810723e+00 ± 8.167195e-01 numeric issues : 0 --------------------------------------------------------------------

      Check that we got the theoretical optimum:

      SDDP.calculate_bound(model), M^2 / N
      (8.333333297473812, 8.333333333333334)

      And check that we found the theoretical value for each $x_i$:

      simulations = SDDP.simulate(model, 1, [:x])
       for data in simulations[1]
           println("x_$(data[:node_index]) = $(data[:x])")
end
-x_1 = 1.666665732482549
-x_2 = 1.6666672479151778
-x_3 = 1.666667013035603
      x_1 = 1.6666655984419778
      +x_2 = 1.6666670256548375
      +x_3 = 1.6666673693365108

      Close enough! We don't get exactly 5/3 because of numerical tolerances within our choice of optimization solver (in this case, Ipopt).

      A more complicated policy

      SDDP.jl is also capable of finding policies for other types of Markov Decision Processes. A classic example of a Markov Decision Process is the problem of finding a path through a maze.

      Here's one example of a maze. Try changing the parameters to explore different mazes:

      M, N = 3, 4
       initial_square = (1, 1)
       reward, illegal_squares, penalties = (3, 4), [(2, 2)], [(3, 1), (2, 4)]
       path = fill("⋅", M, N)
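
Before training, it can help to mark the special squares in path so the printout later in this tutorial is easier to read. This is a hedged helper, not the tutorial's own code; the symbols follow the output shown further below:

path[reward...] = "*"            # goal square
for (i, j) in illegal_squares
    path[i, j] = "▩"             # impassable square
end
for (i, j) in penalties
    path[i, j] = "†"             # penalty square
end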
      @@ -151,15 +151,14 @@
       -------------------------------------------------------------------
        iteration    simulation      bound        time (s)     solves  pid
       -------------------------------------------------------------------
      -         1   0.000000e+00  1.000000e+01  1.858091e-02        41   1
      -        21   0.000000e+00  6.561000e+00  1.231539e+00      2577   1
      -        60   2.500000e+01  6.561000e+00  2.008292e+00      4294   1
      +         1   0.000000e+00  9.958140e+00  4.631996e-03        11   1
      +        40   0.000000e+00  6.561000e+00  7.881789e-01      2736   1
       -------------------------------------------------------------------
       status         : simulation_stopping
      -total time (s) : 2.008292e+00
      -total solves   : 4294
      +total time (s) : 7.881789e-01
      +total solves   : 2736
       best bound     :  6.561000e+00
      -simulation ci  :  4.833333e+00 ± 1.799006e+00
      +simulation ci  :  6.500000e+00 ± 3.106175e+00
       numeric issues : 0
       -------------------------------------------------------------------

      Simulating a cyclic policy graph requires an explicit sampling_scheme that does not terminate early based on the cycle probability:

      simulations = SDDP.simulate(
           model,
      @@ -170,7 +169,7 @@
               terminate_on_dummy_leaf = false,
           ),
       );
      1-element Vector{Vector{Dict{Symbol, Any}}}:
      - [Dict(:bellman_term => 6.561000000000009, :noise_term => nothing, :node_index => 1, :stage_objective => 0.0, :objective_state => nothing, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}[SDDP.State{Float64}(1.0, 0.0) SDDP.State{Float64}(0.0, 1.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0); SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0); SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0)]), Dict(:bellman_term => 7.29000000000001, :noise_term => nothing, :node_index => 1, :stage_objective => 0.0, :objective_state => nothing, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}[SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(1.0, 0.0) SDDP.State{Float64}(0.0, 1.0) SDDP.State{Float64}(0.0, 0.0); SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0); SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0)]), Dict(:bellman_term => 8.10000000000001, :noise_term => nothing, :node_index => 1, :stage_objective => 0.0, :objective_state => nothing, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}[SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(1.0, 0.0) SDDP.State{Float64}(0.0, 0.0); SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 1.0) SDDP.State{Float64}(0.0, 0.0); SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0)]), Dict(:bellman_term => 9.00000000000001, :noise_term => nothing, :node_index => 1, :stage_objective => 0.0, :objective_state => nothing, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}[SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0); SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(1.0, 0.0) SDDP.State{Float64}(0.0, 0.0); SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 1.0) SDDP.State{Float64}(0.0, 0.0)]), Dict(:bellman_term => 9.00000000000001, :noise_term => nothing, :node_index => 1, :stage_objective => 1.0, :objective_state => nothing, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}[SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0); SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0); SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(1.0, 0.0) SDDP.State{Float64}(0.0, 1.0)])]

      + [Dict(:bellman_term => 6.5610000000000115, :noise_term => nothing, :node_index => 1, :stage_objective => 0.0, :objective_state => nothing, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}[SDDP.State{Float64}(1.0, 0.0) SDDP.State{Float64}(0.0, 1.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0); SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0); SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0)]), Dict(:bellman_term => 7.2900000000000125, :noise_term => nothing, :node_index => 1, :stage_objective => 0.0, :objective_state => nothing, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}[SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(1.0, 0.0) SDDP.State{Float64}(0.0, 1.0) SDDP.State{Float64}(0.0, 0.0); SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0); SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0)]), Dict(:bellman_term => 8.100000000000014, :noise_term => nothing, :node_index => 1, :stage_objective => 0.0, :objective_state => nothing, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}[SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(1.0, 0.0) SDDP.State{Float64}(0.0, 0.0); SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 1.0) SDDP.State{Float64}(0.0, 0.0); SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0)]), Dict(:bellman_term => 9.000000000000014, :noise_term => nothing, :node_index => 1, :stage_objective => 0.0, :objective_state => nothing, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}[SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0); SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(1.0, 0.0) SDDP.State{Float64}(0.0, 0.0); SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 1.0) SDDP.State{Float64}(0.0, 0.0)]), Dict(:bellman_term => 9.000000000000014, :noise_term => nothing, :node_index => 1, :stage_objective => 1.0, :objective_state => nothing, :belief => Dict(1 => 1.0), :x => SDDP.State{Float64}[SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0); SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0); SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(0.0, 0.0) SDDP.State{Float64}(1.0, 0.0) SDDP.State{Float64}(0.0, 1.0)])]

      Fill in the path with the time-step in which we visit the square:

      for (t, data) in enumerate(simulations[1]), i in 1:M, j in 1:N
           if data[:x][i, j].in > 0.5
               path[i, j] = "$t"
           end
      @@ -178,4 +177,4 @@
       
       print(join([join(path[i, :], ' ') for i in 1:size(path, 1)], '\n'))
      1 2 3 ⋅
       ⋅ ▩ 4 †
† ⋅ 5 *
      Tip

      This formulation will likely struggle as the number of cells in the maze increases. Can you think of an equivalent formulation that uses fewer state variables?


      This page was generated using Literate.jl.

      diff --git a/dev/tutorial/objective_states/index.html b/dev/tutorial/objective_states/index.html index 3d5cd1eba..ed342d8a9 100644 --- a/dev/tutorial/objective_states/index.html +++ b/dev/tutorial/objective_states/index.html @@ -75,24 +75,24 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 9.112500e+03 3.261069e+03 7.368302e-02 39 1 - 96 9.281250e+03 5.092593e+03 9.007711e-01 4944 1 + 1 7.087500e+03 3.261069e+03 2.431607e-02 39 1 + 60 0.000000e+00 5.092593e+03 3.536880e-01 3240 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 9.007711e-01 -total solves : 4944 +total time (s) : 3.536880e-01 +total solves : 3240 best bound : 5.092593e+03 -simulation ci : 4.991782e+03 ± 7.052548e+02 +simulation ci : 5.274733e+03 ± 9.757726e+02 numeric issues : 0 ------------------------------------------------------------------- Finished training and simulating.

      To demonstrate how the objective states are updated, consider the sequence of noise observations:

      [stage[:noise_term] for stage in simulations[1]]
      3-element Vector{NamedTuple{(:fuel, :inflow), Tuple{Float64, Float64}}}:
      - (fuel = 1.1, inflow = 0.0)
      - (fuel = 0.75, inflow = 0.0)
- (fuel = 1.1, inflow = 0.0)
      + (fuel = 1.25, inflow = 0.0)
      + (fuel = 1.25, inflow = 50.0)
      + (fuel = 0.75, inflow = 100.0)

Thus, the fuel cost in the first stage should be 0.75 * 50 = 37.5. The fuel cost in the second stage should be 1.1 * 37.5 = 41.25. The fuel cost in the third stage should be 0.75 * 41.25 = 30.9375.
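
As a quick check of this arithmetic, here is a minimal hedged sketch of the multiplicative price update described above; the initial price of 50 and the multipliers come from the sentence itself, not from the simulated noise:

price = 50.0
for multiplier in (0.75, 1.1, 0.75)
    global price = multiplier * price   # price_t = multiplier_t * price_{t-1}
    println(price)                      # 37.5, 41.25, 30.9375
end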

      To confirm this, the values of the objective state in a simulation can be queried using the :objective_state key.

      [stage[:objective_state] for stage in simulations[1]]
      3-element Vector{Float64}:
- 55.00000000000001
- 41.25000000000001
- 45.375000000000014
+ 62.5
      + 78.125
      + 58.59375

      Multi-dimensional objective states

      You can construct multi-dimensional price processes using NTuples. Just replace every scalar value associated with the objective state by a tuple. For example, initial_value = 1.0 becomes initial_value = (1.0, 2.0).

      Here is an example:

      model = SDDP.LinearPolicyGraph(
           stages = 3,
           sense = :Min,
           lower_bound = 0.0,
      @@ -166,18 +166,18 @@
       -------------------------------------------------------------------
        iteration    simulation      bound        time (s)     solves  pid
       -------------------------------------------------------------------
      -         1   9.125000e+03  3.477859e+03  8.873916e-02        39   1
      -        80   7.125000e+03  5.135984e+03  9.182131e-01      4320   1
      +         1   1.512500e+04  4.523098e+03  2.660584e-02        39   1
      +        61   5.500000e+03  5.135984e+03  3.949859e-01      3279   1
       -------------------------------------------------------------------
       status         : simulation_stopping
      -total time (s) : 9.182131e-01
      -total solves   : 4320
      +total time (s) : 3.949859e-01
      +total solves   : 3279
       best bound     :  5.135984e+03
      -simulation ci  :  5.529798e+03 ± 7.601528e+02
      +simulation ci  :  5.834490e+03 ± 1.028638e+03
       numeric issues : 0
       -------------------------------------------------------------------
       
       Finished training and simulating.

      This time, since our objective state is two-dimensional, the objective states are tuples with two elements:

      [stage[:objective_state] for stage in simulations[1]]
      3-element Vector{Tuple{Float64, Float64}}:
      - (60.0, 50.0)
      - (70.0, 60.0)
- (65.0, 70.0)
+ (45.0, 50.0)
+ (47.5, 45.0)
+ (53.75, 47.5)

      Warnings

There are a number of things to be aware of when using objective states.

      • The key assumption is that price is independent of the states and actions in the model.

        That means that the price cannot appear in any @constraints. Nor can you use any @variables in the update function.

      • Choosing an appropriate Lipschitz constant is difficult.

  The points discussed in Choosing an initial bound are relevant. The Lipschitz constant should not be chosen as large as possible (keeping it smaller helps with convergence and the numerical issues discussed above), but if it is chosen too small, it may cut off the feasible region and lead to a sub-optimal solution.

      • You need to ensure that the cost-to-go function is concave with respect to the objective state before the update.

  If the update function is linear, this is always the case. In some situations, the update function can be nonlinear (e.g., multiplicative, as we have above). In general, placing constraints on the price (e.g., clamp(price, 0, 1)) will destroy concavity. Caveat emptor. It's up to you if this is a problem. If it isn't, you'll get a good heuristic with no guarantee of global optimality.


      This page was generated using Literate.jl.

      diff --git a/dev/tutorial/objective_uncertainty/index.html b/dev/tutorial/objective_uncertainty/index.html index ef1ccc143..c12aa87ea 100644 --- a/dev/tutorial/objective_uncertainty/index.html +++ b/dev/tutorial/objective_uncertainty/index.html @@ -78,16 +78,16 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 2.812500e+04 3.958333e+03 4.574060e-03 12 1 - 40 9.375000e+03 1.062500e+04 1.368001e-01 642 1 + 1 3.375000e+04 5.735677e+03 3.451824e-03 12 1 + 40 1.125000e+04 1.062500e+04 7.753396e-02 642 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 1.368001e-01 +total time (s) : 7.753396e-02 total solves : 642 best bound : 1.062500e+04 -simulation ci : 1.146154e+04 ± 2.573397e+03 +simulation ci : 1.373820e+04 ± 3.341683e+03 numeric issues : 0 ------------------------------------------------------------------- -Confidence interval: 10957.5 ± 752.63 -Lower bound: 10625.0

+Confidence interval: 11507.5 ± 787.86
+Lower bound: 10625.0

      This page was generated using Literate.jl.

      diff --git a/dev/tutorial/pglib_opf/index.html b/dev/tutorial/pglib_opf/index.html index af80c8541..6af44fb09 100644 --- a/dev/tutorial/pglib_opf/index.html +++ b/dev/tutorial/pglib_opf/index.html @@ -58,25 +58,24 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 1.371244e+06 8.466849e+04 9.043729e-01 67 1 - 2 2.473966e+06 1.120339e+05 2.813140e+01 1833 1 - 5 2.100208e+06 3.550252e+05 3.582787e+01 2214 1 - 7 2.777179e+06 3.603473e+05 4.138662e+01 2452 1 - 10 3.298793e+05 4.052698e+05 4.605443e+01 2701 1 + 1 1.234104e+06 5.017239e+04 2.624431e-01 35 1 + 2 2.507229e+06 1.429995e+05 2.269262e+01 2224 1 + 9 5.105361e+05 4.087944e+05 2.801921e+01 2649 1 + 10 4.827186e+05 4.254646e+05 2.911346e+01 2740 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 4.605443e+01 -total solves : 2701 -best bound : 4.052698e+05 -simulation ci : 1.253494e+06 ± 6.226140e+05 +total time (s) : 2.911346e+01 +total solves : 2740 +best bound : 4.254646e+05 +simulation ci : 7.571370e+05 ± 4.554385e+05 numeric issues : 0 -------------------------------------------------------------------

      To more accurately simulate the dynamics of the problem, a common approach is to write the cuts representing the policy to a file, and then read them into a non-convex model:

      SDDP.write_cuts_to_file(convex, "convex.cuts.json")
       non_convex = build_model(PowerModels.ACPPowerModel)
       SDDP.read_cuts_from_file(non_convex, "convex.cuts.json")

      Now we can simulate non_convex to evaluate the policy.

      result = SDDP.simulate(non_convex, 1)
      1-element Vector{Vector{Dict{Symbol, Any}}}:
      - [Dict(:bellman_term => 380847.0545552986, :noise_term => 5, :node_index => 1, :stage_objective => 18704.285904090026, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 382498.25324698834, :noise_term => 2, :node_index => 1, :stage_objective => 21433.37544787066, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 384149.4519495484, :noise_term => 2, :node_index => 1, :stage_objective => 21433.375447870676, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 380847.05455956043, :noise_term => 5, :node_index => 1, :stage_objective => 19791.150630740263, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 380456.2116459432, :noise_term => 5, :node_index => 1, :stage_objective => 17571.98239776068, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 386344.43732608476, :noise_term => 0, :node_index => 1, :stage_objective => 21433.375447870683, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 387995.63602864475, :noise_term => 2, :node_index => 1, :stage_objective => 21433.375447870683, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 382799.03830131324, :noise_term => 5, :node_index => 1, :stage_objective => 21433.3754478696, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 384450.2370038733, :noise_term => 2, :node_index => 1, :stage_objective => 21433.375447870676, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 390666.6333263593, :noise_term => 0, :node_index => 1, :stage_objective => 21433.375447870683, :objective_state => nothing, :belief => Dict(1 => 1.0))  …  Dict(:bellman_term => 397670.30506684235, :noise_term => 2, :node_index => 1, :stage_objective => 23580.69745433028, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 402952.0204716376, :noise_term => 0, :node_index => 1, :stage_objective => 27420.553482406518, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 397670.30506772565, :noise_term => 2, :node_index => 1, :stage_objective => 25694.765215841144, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 402952.02047445835, :noise_term => 0, :node_index => 1, :stage_objective => 27420.553482406533, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 394095.63813114475, :noise_term => 5, :node_index => 1, :stage_objective => 21433.37544787072, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 397670.3050673835, :noise_term => 0, :node_index => 1, :stage_objective => 24871.214960380785, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 392473.70733529027, :noise_term => 5, :node_index => 1, :stage_objective => 21433.375447870705, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 387277.1096079612, :noise_term => 5, :node_index => 1, :stage_objective => 21433.375447870683, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 393493.50593044725, :noise_term => 0, :node_index => 1, :stage_objective => 21433.37544787071, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 397670.3050670551, :noise_term => 0, :node_index => 1, :stage_objective => 24086.51241007108, :objective_state => nothing, :belief => Dict(1 => 1.0))]

      A problem with reading and writing the cuts to file is that the cuts have been generated from trial points of the convex model. Therefore, the policy may be arbitrarily bad at points visited by the non-convex model.

      Training a non-convex model

      We can also build and train a non-convex formulation of the optimal power flow problem.

      The problem with the non-convex model is that, because it is non-convex, SDDP.jl may find a sub-optimal policy. Therefore, the resulting policy may over-estimate the true cost of operation.

      non_convex = build_model(PowerModels.ACPPowerModel)
      + [Dict(:bellman_term => 403600.1147331171, :noise_term => 5, :node_index => 1, :stage_objective => 17579.722738829965, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 411357.3504947405, :noise_term => 0, :node_index => 1, :stage_objective => 17583.96068071937, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 410806.69805605325, :noise_term => 5, :node_index => 1, :stage_objective => 17583.96067274214, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 415378.0522259531, :noise_term => 0, :node_index => 1, :stage_objective => 21884.082253087283, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 419439.8105708234, :noise_term => 0, :node_index => 1, :stage_objective => 27420.553466408484, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 426747.9459497207, :noise_term => 0, :node_index => 1, :stage_objective => 27420.55349350752, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 416130.82374893036, :noise_term => 2, :node_index => 1, :stage_objective => 27420.55346640832, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 420192.58215253433, :noise_term => 0, :node_index => 1, :stage_objective => 27420.553466408353, :objective_state => nothing, :belief => Dict(1 => 1.0))]

      A problem with reading and writing the cuts to file is that the cuts have been generated from trial points of the convex model. Therefore, the policy may be arbitrarily bad at points visited by the non-convex model.
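
      As a rough, illustrative check of that mismatch (not part of the original tutorial), one could compare the bound of the convex model with the cost of the simulation above:

      # Total cost of the single simulated trajectory of the non-convex model.
      simulated_cost = sum(stage[:stage_objective] for stage in result[1])
      # Bound of the convex model whose cuts were copied across; a large gap
      # suggests the cuts were built at points the non-convex model never visits.
      println("convex bound          = ", SDDP.calculate_bound(convex))
      println("non-convex simulation = ", simulated_cost)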

      Training a non-convex model

      We can also build and train a non-convex formulation of the optimal power flow problem.

      The problem with the non-convex model is that, because it is non-convex, SDDP.jl may find a sub-optimal policy. Therefore, the resulting policy may over-estimate the true cost of operation.

      non_convex = build_model(PowerModels.ACPPowerModel)
       SDDP.train(non_convex; iteration_limit = 10)
       result = SDDP.simulate(non_convex, 1)
      1-element Vector{Vector{Dict{Symbol, Any}}}:
      - [Dict(:bellman_term => 360721.96637683467, :noise_term => 0, :node_index => 1, :stage_objective => 17578.21701015957, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 364958.91382800444, :noise_term => 0, :node_index => 1, :stage_objective => 20947.017605802746, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 364958.91387012013, :noise_term => 2, :node_index => 1, :stage_objective => 23580.697368546713, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 364958.91387012013, :noise_term => 2, :node_index => 1, :stage_objective => 23580.69745486107, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 364958.91387012013, :noise_term => 2, :node_index => 1, :stage_objective => 23580.69745486107, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 394017.38168559724, :noise_term => 0, :node_index => 1, :stage_objective => 27420.55350204033, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 364958.9138260979, :noise_term => 5, :node_index => 1, :stage_objective => 18341.120804959097, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 394017.38044350874, :noise_term => 0, :node_index => 1, :stage_objective => 27420.553502040322, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 448037.41057775903, :noise_term => 0, :node_index => 1, :stage_objective => 27420.553506222695, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 370344.35749852186, :noise_term => 2, :node_index => 1, :stage_objective => 27420.5535020606, :objective_state => nothing, :belief => Dict(1 => 1.0))  …  Dict(:bellman_term => 364958.91387012013, :noise_term => 2, :node_index => 1, :stage_objective => 23580.697364598265, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 364958.91387012013, :noise_term => 2, :node_index => 1, :stage_objective => 23580.69745486107, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 364958.91387012013, :noise_term => 2, :node_index => 1, :stage_objective => 23580.697454861092, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 394017.3816855972, :noise_term => 0, :node_index => 1, :stage_objective => 27420.553502040322, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 364958.9138260979, :noise_term => 5, :node_index => 1, :stage_objective => 18341.12080495909, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 364502.58423180896, :noise_term => 5, :node_index => 1, :stage_objective => 17578.21701017796, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 380941.69929536217, :noise_term => 0, :node_index => 1, :stage_objective => 27420.553502056904, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 364958.9138701779, :noise_term => 2, :node_index => 1, :stage_objective => 24742.890619545782, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 364958.91387012013, :noise_term => 2, :node_index => 1, :stage_objective => 23580.69745497611, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 364958.91387012013, :noise_term => 2, :node_index => 1, :stage_objective => 23580.69745486107, :objective_state => nothing, :belief => Dict(1 => 1.0))]

      Combining convex and non-convex models

      To summarize, training with the convex model constructs cuts at points that may never be visited by the non-convex model, and training with the non-convex model may construct arbitrarily poor cuts because a key assumption of SDDP is convexity.

      As a compromise, we can train a policy using a combination of the convex and non-convex models; we'll use the non-convex model to generate trial points on the forward pass, and we'll use the convex model to build cuts on the backward pass.

      convex = build_model(PowerModels.DCPPowerModel)
      A policy graph with 1 nodes.
      + [Dict(:bellman_term => 392538.5992568735, :noise_term => 0, :node_index => 1, :stage_objective => 17584.731700730572, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 391982.29671039723, :noise_term => 5, :node_index => 1, :stage_objective => 17584.731700609642, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 391425.9941640281, :noise_term => 5, :node_index => 1, :stage_objective => 17584.731700502733, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 398574.49077475653, :noise_term => 0, :node_index => 1, :stage_objective => 18596.95926580127, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 442866.2331994867, :noise_term => 0, :node_index => 1, :stage_objective => 27420.553505745633, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 398574.4907736592, :noise_term => 5, :node_index => 1, :stage_objective => 18341.12074159791, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 442866.23312795337, :noise_term => 0, :node_index => 1, :stage_objective => 27420.553505745633, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 398574.4907736593, :noise_term => 5, :node_index => 1, :stage_objective => 18341.12074034454, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 398574.4909147812, :noise_term => 2, :node_index => 1, :stage_objective => 23580.697212127907, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 442866.24228877056, :noise_term => 0, :node_index => 1, :stage_objective => 27420.553505745625, :objective_state => nothing, :belief => Dict(1 => 1.0))  …  Dict(:bellman_term => 397461.88580934564, :noise_term => 5, :node_index => 1, :stage_objective => 17584.731704761278, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 400918.1964070562, :noise_term => 0, :node_index => 1, :stage_objective => 27420.553501669878, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 398126.8115056009, :noise_term => 5, :node_index => 1, :stage_objective => 17584.731709251, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 415264.9586299844, :noise_term => 0, :node_index => 1, :stage_objective => 27420.553501683953, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 493346.922987794, :noise_term => 0, :node_index => 1, :stage_objective => 27420.55350574612, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 704639.51991749, :noise_term => 0, :node_index => 1, :stage_objective => 86892.06860633056, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 421751.1949954971, :noise_term => 2, :node_index => 1, :stage_objective => 27420.553505747823, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 398574.4909149199, :noise_term => 2, :node_index => 1, :stage_objective => 25134.186766164083, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 442866.2422975713, :noise_term => 0, :node_index => 1, :stage_objective => 27420.553505745625, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 398574.4907736592, :noise_term => 5, :node_index => 1, :stage_objective => 18341.120901010854, :objective_state => nothing, :belief => Dict(1 => 1.0))]

      Combining convex and non-convex models

      To summarize, training with the convex model constructs cuts at points that may never be visited by the non-convex model, and training with the non-convex model may construct arbitrarily poor cuts because a key assumption of SDDP is convexity.

      As a compromise, we can train a policy using a combination of the convex and non-convex models; we'll use the non-convex model to generate trial points on the forward pass, and we'll use the convex model to build cuts on the backward pass.

      convex = build_model(PowerModels.DCPPowerModel)
      A policy graph with 1 nodes.
        Node indices: 1
       
      non_convex = build_model(PowerModels.ACPPowerModel)
      A policy graph with 1 nodes.
        Node indices: 1
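
      A sketch of the training call that combines the two models, assuming the SDDP.AlternativeForwardPass and SDDP.AlternativePostIterationCallback utilities: trial points come from non_convex on the forward pass, while cuts are computed with convex on the backward pass and shared after each iteration.

      # Sketch only: train `convex`, but generate forward-pass trial points
      # with `non_convex` and copy the new cuts across after every iteration.
      SDDP.train(
          convex;
          forward_pass = SDDP.AlternativeForwardPass(non_convex),
          post_iteration_callback = SDDP.AlternativePostIterationCallback(non_convex),
          iteration_limit = 10,
      )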
      @@ -112,16 +111,15 @@
       -------------------------------------------------------------------
        iteration    simulation      bound        time (s)     solves  pid
       -------------------------------------------------------------------
      -         1   5.740190e+05  4.874882e+04  5.277851e-01        27   1
      -         2   5.943082e+04  6.694876e+04  3.211227e+01      2095   1
      -         7   1.635773e+06  3.624599e+05  3.897763e+01      2314   1
      -         9   8.573350e+05  4.046607e+05  4.439623e+01      2473   1
      -        10   3.731937e+06  4.049763e+05  4.670413e+01      2536   1
      +         1   1.885912e+06  6.803197e+04  9.583139e-01        72   1
      +         2   1.251866e+05  1.645924e+05  2.313234e+01      2082   1
      +         6   6.644346e+05  3.605045e+05  2.831640e+01      2310   1
      +        10   3.856251e+05  4.126893e+05  3.301754e+01      2547   1
       -------------------------------------------------------------------
       status         : iteration_limit
      -total time (s) : 4.670413e+01
      -total solves   : 2536
      -best bound     :  4.049763e+05
      -simulation ci  :  7.959406e+05 ± 7.054922e+05
      +total time (s) : 3.301754e+01
      +total solves   : 2547
      +best bound     :  4.126893e+05
      +simulation ci  :  9.463587e+05 ± 5.532752e+05
       numeric issues : 0
      --------------------------------------------------------------------

      In practice, if we were to simulate non_convex now, we should obtain a better policy than either of the two previous approaches.


      This page was generated using Literate.jl.

      +-------------------------------------------------------------------

      In practice, if we were to simulate non_convex now, we should obtain a better policy than either of the two previous approaches.


      This page was generated using Literate.jl.

      diff --git a/dev/tutorial/plotting/index.html b/dev/tutorial/plotting/index.html index 507c473ff..c0becb90b 100644 --- a/dev/tutorial/plotting/index.html +++ b/dev/tutorial/plotting/index.html @@ -72,14 +72,14 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 4.875000e+04 3.753370e+03 1.785588e-02 18 1 - 20 5.000000e+03 8.072917e+03 1.256318e-01 660 1 + 1 2.812500e+04 1.991887e+03 1.509690e-02 18 1 + 20 1.625000e+04 8.072917e+03 1.686609e-01 660 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 1.256318e-01 +total time (s) : 1.686609e-01 total solves : 660 best bound : 8.072917e+03 -simulation ci : 9.419861e+03 ± 4.758022e+03 +simulation ci : 1.003599e+04 ± 4.615369e+03 numeric issues : 0 ------------------------------------------------------------------- @@ -105,85 +105,85 @@ ) - + - + - + - + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + -

      You can save this plot as a PDF using the Plots.jl function savefig:

      Plots.savefig("my_picture.pdf")

      Plotting the value function

      You can obtain an object representing the value function of a node using SDDP.ValueFunction.

      V = SDDP.ValueFunction(model[(1, 1)])
      A value function for node (1, 1)

      The value function can be evaluated using SDDP.evaluate.

      SDDP.evaluate(V; volume = 1)
      (22885.860196351266, Dict(:volume => -124.47916666666667))

      evaluate returns the height of the value function, and a subgradient with respect to the convex state variables.

      You can also plot the value function using SDDP.plot:

      SDDP.plot(V, volume = 0:200, filename = "value_function.html")

      This should open a webpage that looks like this one.

      Convergence dashboard

      If the text-based logging isn't to your liking, you can open a visualization of the training by passing dashboard = true to SDDP.train.

      SDDP.train(model; dashboard = true)

      By default, dashboard = false because there is an initial overhead associated with opening and preparing the plot.

      Warning

      The dashboard is experimental. There are known bugs associated with it, e.g., SDDP.jl#226.


      This page was generated using Literate.jl.

      +

      You can save this plot as a PDF using the Plots.jl function savefig:

      Plots.savefig("my_picture.pdf")

      Plotting the value function

      You can obtain an object representing the value function of a node using SDDP.ValueFunction.

      V = SDDP.ValueFunction(model[(1, 1)])
      A value function for node (1, 1)

      The value function can be evaluated using SDDP.evaluate.

      SDDP.evaluate(V; volume = 1)
      (23019.270833333332, Dict(:volume => -157.8125))

      evaluate returns the height of the value function, and a subgradient with respect to the convex state variables.
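
      For example, a minimal sketch of working with the returned tuple (the variable names are illustrative):

      # The first element is the height of the value function at volume = 1;
      # the second maps each state variable to its subgradient coefficient.
      height, subgradient = SDDP.evaluate(V; volume = 1)
      # A first-order estimate of the value function at a nearby volume:
      height + subgradient[:volume] * (10 - 1)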

      You can also plot the value function using SDDP.plot:

      SDDP.plot(V, volume = 0:200, filename = "value_function.html")

      This should open a webpage that looks like this one.

      Convergence dashboard

      If the text-based logging isn't to your liking, you can open a visualization of the training by passing dashboard = true to SDDP.train.

      SDDP.train(model; dashboard = true)

      By default, dashboard = false because there is an initial overhead associated with opening and preparing the plot.

      Warning

      The dashboard is experimental. There are known bugs associated with it, e.g., SDDP.jl#226.


      This page was generated using Literate.jl.

      diff --git a/dev/tutorial/spaghetti_plot.html b/dev/tutorial/spaghetti_plot.html index b50768a87..cd3c8af92 100644 --- a/dev/tutorial/spaghetti_plot.html +++ b/dev/tutorial/spaghetti_plot.html @@ -230,7 +230,7 @@
      diff --git a/dev/tutorial/warnings/index.html b/dev/tutorial/warnings/index.html index 85639d5a7..226503aa7 100644 --- a/dev/tutorial/warnings/index.html +++ b/dev/tutorial/warnings/index.html @@ -84,11 +84,11 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 6.500000e+00 3.000000e+00 3.839016e-03 6 1 - 5 3.500000e+00 3.500000e+00 9.364128e-03 33 1 + 1 6.500000e+00 3.000000e+00 2.931118e-03 6 1 + 5 3.500000e+00 3.500000e+00 6.483078e-03 33 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 9.364128e-03 +total time (s) : 6.483078e-03 total solves : 33 best bound : 3.500000e+00 simulation ci : 4.100000e+00 ± 1.176000e+00 @@ -129,13 +129,13 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 6.500000e+00 1.100000e+01 3.684044e-03 6 1 - 5 5.500000e+00 1.100000e+01 8.239031e-03 33 1 + 1 6.500000e+00 1.100000e+01 2.869129e-03 6 1 + 5 5.500000e+00 1.100000e+01 5.830050e-03 33 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 8.239031e-03 +total time (s) : 5.830050e-03 total solves : 33 best bound : 1.100000e+01 simulation ci : 5.700000e+00 ± 3.920000e-01 numeric issues : 0 --------------------------------------------------------------------

      How do we tell which is more appropriate? There are a few clues that you should look out for.

      • The bound converges to a value above (if minimizing) the simulated cost of the policy. In this case, the problem is deterministic, so it is easy to tell. But you can also check by performing a Monte Carlo simulation like we did in An introduction to SDDP.jl.

      • The bound converges to different values when we change the bound. This is another clear give-away. The bound provided by the user is only used in the initial iterations. It should not change the value of the converged policy. Thus, if you don't know an appropriate value for the bound, choose an initial value, and then increase (or decrease) the value of the bound to confirm that the value of the policy doesn't change.

      • The bound converges to a value close to the bound provided by the user. This varies between models, but notice that 11.0 is quite close to 10.0 compared with 3.5 and 0.0.


      This page was generated using Literate.jl.

      +-------------------------------------------------------------------

      How do we tell which is more appropriate? There are a few clues that you should look out for.

      • The bound converges to a value above (if minimizing) the simulated cost of the policy. In this case, the problem is deterministic, so it is easy to tell. But you can also check by performing a Monte Carlo simulation like we did in An introduction to SDDP.jl.

      • The bound converges to different values when we change the bound. This is another clear give-away. The bound provided by the user is only used in the initial iterations. It should not change the value of the converged policy. Thus, if you don't know an appropriate value for the bound, choose an initial value, and then increase (or decrease) the value of the bound to confirm that the value of the policy doesn't change (see the sketch after this list).

      • The bound converges to a value close to the bound provided by the user. This varies between models, but notice that 11.0 is quite close to 10.0 compared with 3.5 and 0.0.
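
      A minimal sketch of that second check, assuming a hypothetical build_model(bound) helper that constructs the model with the given lower bound:

      # Re-train the same model with two different initial bounds and confirm
      # that the converged bound (and hence the policy) does not change.
      for bound in (0.0, 10.0)
          model = build_model(bound)  # hypothetical constructor taking the bound
          SDDP.train(model; iteration_limit = 5, print_level = 0)
          println("initial bound = $bound => converged bound = ", SDDP.calculate_bound(model))
      end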


      This page was generated using Literate.jl.